Vals using langchain/schema

import { getModelBuilder } from "https://esm.town/v/bluemsn/getModelBuilder";
export const modelSampleChatCall = (async () => {
const builder = await getModelBuilder({
type: "chat",
provider: "openai",
});
const model = await builder();
const { SystemMessage, HumanMessage } = await import("npm:langchain/schema");
const data = await model.call([
new SystemMessage(
"You are a helpful assistant that translates English to Chinese.",
),
new HumanMessage("Translate: I love programming."),
]);
return data?.content;
})();
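getModelBuilder is an external val, so its internals are not shown here. For reference, a minimal sketch of an equivalent direct call with the classic LangChain chat-model API, assuming an OPENAI_API_KEY secret:

// Hypothetical stand-in for builder(): construct the chat model directly.
import process from "node:process";
const { ChatOpenAI } = await import("npm:langchain/chat_models/openai");
const { SystemMessage, HumanMessage } = await import("npm:langchain/schema");
const chat = new ChatOpenAI({ openAIApiKey: process.env.OPENAI_API_KEY });
const reply = await chat.call([
  new SystemMessage("You are a helpful assistant that translates English to Chinese."),
  new HumanMessage("Translate: I love programming."),
]);
console.log(reply.content);
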
import { getLangSmithBuilder } from "https://esm.town/v/webup/getLangSmithBuilder";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const pipeSampleLLM = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { RunnableSequence } = await import("npm:langchain/schema/runnable");
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const builder = await getModelBuilder();
  const model = await builder();
  const promptTemplate = PromptTemplate.fromTemplate(
    "Tell me a joke about {topic}",
  );
  const outputParser = new StringOutputParser();
  const tb = await getLangSmithBuilder();
  const tracer = await tb();
  const chain = RunnableSequence.from([promptTemplate, model, outputParser]);
  const result = await chain.invoke({ topic: "bears" }, {
    callbacks: [tracer],
  });
  return result;
})();

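Chains built from runnables also expose batch, which fans several inputs through the same pipeline. A minimal sketch reusing the chain and tracer above:

// Batch call: one joke per topic, traced the same way as the single invoke.
const jokes = await chain.batch(
  [{ topic: "bears" }, { topic: "cats" }],
  { callbacks: [tracer] },
);
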
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";
export const modelSampleChatCall = (async () => {
const builder = await getModelBuilder({
type: "chat",
provider: "openai",
});
const model = await builder();
const { SystemMessage, HumanMessage } = await import("npm:langchain/schema");
const data = await model.call([
new SystemMessage(
"You are a helpful assistant that translates English to Chinese.",
),
new HumanMessage("Translate: I love programming."),
]);
return data?.content;
})();
import { getVectorStoreBuilder } from "https://esm.town/v/webup/getVectorStoreBuilder";
import { getSampleDocuments } from "https://esm.town/v/webup/getSampleDocuments";
import { getLangSmithBuilder } from "https://esm.town/v/webup/getLangSmithBuilder";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const pipeSampleLLMRetriever = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { RunnableSequence, RunnablePassthrough } = await import(
    "npm:langchain/schema/runnable"
  );
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const { Document } = await import("npm:langchain/document");
  const modelBuilder = await getModelBuilder();
  const model = await modelBuilder();
  const tracerBuilder = await getLangSmithBuilder();
  const tracer = await tracerBuilder();
  const docs = await getSampleDocuments();
  const vectorBuilder = await getVectorStoreBuilder(docs);
  const vector = await vectorBuilder();
  const retriever = vector.asRetriever();
  const prompt = PromptTemplate.fromTemplate(
    `Answer the question based only on the following context:
{context}
Question: {question}`,
  );
  const serializeDocs = (docs) => docs.map((doc) => doc.pageContent).join("\n");
  const chain = RunnableSequence.from([
    {
      context: retriever.pipe(serializeDocs),
      question: new RunnablePassthrough(),
    },
    prompt,
    model,
    new StringOutputParser(),
  ]);
  return await chain.invoke("What is pinecone?", { callbacks: [tracer] });
})();

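In the map step above, the single string input fans out to both keys: the retriever treats it as the search query, while RunnablePassthrough forwards it unchanged into {question}. A minimal sketch of the passthrough behavior:

// RunnablePassthrough echoes its input, so the raw question lands in the prompt.
const passthrough = new RunnablePassthrough();
await passthrough.invoke("What is pinecone?"); // => "What is pinecone?"
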
import { getLangSmithBuilder } from "https://esm.town/v/webup/getLangSmithBuilder";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";
import process from "node:process";

export const pipeSampleLLMTool = (async () => {
  const { SerpAPI } = await import("npm:langchain/tools");
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const search = new SerpAPI(process.env.SERP);
  const prompt = PromptTemplate.fromTemplate(
    `Turn the following user input into a search query for a search engine:
{input}`,
  );
  const builder = await getModelBuilder();
  const model = await builder();
  const tb = await getLangSmithBuilder();
  const tracer = await tb();
  const chain = prompt.pipe(model).pipe(new StringOutputParser()).pipe(search);
  return await chain.invoke({
    input: "Who is the current prime minister of Malaysia?",
  }, { callbacks: [tracer] });
})();

import process from "node:process";

export const untitled_silverPinniped = (async () => {
  const { ChatPromptTemplate } = await import("npm:langchain/prompts");
  const { ChatOpenAI } = await import("npm:langchain/chat_models/openai");
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const SYSTEM_TEMPLATE = `You are an AI programming assistant.`;
  const redesignTemplate = ChatPromptTemplate.fromMessages<{
    input: string;
    initial_code: string;
  }>([
    ["system", SYSTEM_TEMPLATE],
    ["human", "{input}"],
  ]);
  const model = new ChatOpenAI({
    modelName: "gpt-4",
    temperature: 0.2,
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  // Output parser converts the chat message into a raw string. Also works with streaming.
  const chain = redesignTemplate.pipe(model).pipe(new StringOutputParser());
  const output = await chain.invoke({
    input: "A word guessing game.",
    initial_code: `<TextInput id="nameInput" placeholder="Starting test input" />`,
  });
  return output;
})();

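As the inline comment notes, the string parser also works with streaming. A minimal sketch using the same chain, assuming the standard runnable stream API:

// Streaming variant: chunks arrive as plain strings thanks to the parser.
const stream = await chain.stream({
  input: "A word guessing game.",
  initial_code: `<TextInput id="nameInput" placeholder="Starting test input" />`,
});
for await (const chunk of stream) {
  console.log(chunk);
}
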
import { getVectorStoreBuilder } from "https://esm.town/v/webup/getVectorStoreBuilder";
import { getSampleDocuments } from "https://esm.town/v/webup/getSampleDocuments";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const pipeSampleLLMRetrieverConversation = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { RunnableSequence, RunnablePassthrough } = await import(
    "npm:langchain/schema/runnable"
  );
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const { Document } = await import("npm:langchain/document");
  const modelBuilder = await getModelBuilder();
  const model = await modelBuilder();
  const docs = await getSampleDocuments();
  const vectorBuilder = await getVectorStoreBuilder(docs);
  const vector = await vectorBuilder();
  const retriever = vector.asRetriever();
  const condenseQuestionTemplate =
    `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:`;
  const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(
    condenseQuestionTemplate,
  );
  const answerTemplate =
    `Answer the question based only on the following context:
{context}
Question: {question}
`;
  const ANSWER_PROMPT = PromptTemplate.fromTemplate(answerTemplate);
  const combineDocumentsFn = (docs, separator = "\n\n") => {
    const serializedDocs = docs.map((doc) => doc.pageContent);
    return serializedDocs.join(separator);
  };
  const formatChatHistory = (chatHistory: [string, string][]) => {
    const formattedDialogueTurns = chatHistory.map((dialogueTurn) =>
      `Human: ${dialogueTurn[0]}\nAssistant: ${dialogueTurn[1]}`
    );
    return formattedDialogueTurns.join("\n");
  };
  type ConversationalRetrievalQAChainInput = {
    question: string;
    chat_history: [string, string][];
  };
  const standaloneQuestionChain = RunnableSequence.from([
    {
      question: (input: ConversationalRetrievalQAChainInput) => input.question,
      chat_history: (input: ConversationalRetrievalQAChainInput) =>
        formatChatHistory(input.chat_history),
    },
    CONDENSE_QUESTION_PROMPT,
    model,
    new StringOutputParser(),
  ]);
  const answerChain = RunnableSequence.from([
    {
      context: retriever.pipe(combineDocumentsFn),
      question: new RunnablePassthrough(),
    },
    ANSWER_PROMPT,
    model,
  ]);
  const conversationalRetrievalQAChain = standaloneQuestionChain.pipe(
    answerChain,
  );
  const result1 = await conversationalRetrievalQAChain.invoke({
    question: "What is pinecone?",
    chat_history: [],
  });
  console.log(result1);
  return await conversationalRetrievalQAChain.invoke({
    question: "Is pinecone a db?",
    chat_history: [
      [
        "What is pinecone?",
        "Pinecone is the woody fruiting body of a pine tree.",
      ],
    ],
  });
})();

import { getVectorStoreBuilder } from "https://esm.town/v/webup/getVectorStoreBuilder";
import { getSampleDocuments } from "https://esm.town/v/webup/getSampleDocuments";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const pipeSampleLLMRetrieverInputs = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { RunnableSequence, RunnablePassthrough } = await import(
    "npm:langchain/schema/runnable"
  );
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const { Document } = await import("npm:langchain/document");
  const modelBuilder = await getModelBuilder();
  const model = await modelBuilder();
  const docs = await getSampleDocuments();
  const vectorBuilder = await getVectorStoreBuilder(docs);
  const vector = await vectorBuilder();
  const retriever = vector.asRetriever();
  const prompt = PromptTemplate.fromTemplate(
    `Answer the question based only on the following context:
{context}
Question: {question}
Answer in the following language: {language}`,
  );
  type LanguageChainInput = {
    question: string;
    language: string;
  };
  const serializeDocs = (docs) => docs.map((doc) => doc.pageContent).join("\n");
  const chain = RunnableSequence.from([
    {
      question: (input: LanguageChainInput) => input.question,
      language: (input: LanguageChainInput) => input.language,
      context: (input: LanguageChainInput) =>
        retriever.pipe(serializeDocs).invoke(input.question),
    },
    prompt,
    model,
    new StringOutputParser(),
  ]);
  return await chain.invoke({
    question: "What is pinecone?",
    language: "Chinese",
  });
})();

import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder?v=9";
import process from "node:process";
export const pipeSampleLLMTool = (async () => {
const { SerpAPI } = await import("npm:langchain/tools");
const { PromptTemplate } = await import("npm:langchain/prompts");
const { StringOutputParser } = await import(
"npm:langchain/schema/output_parser"
);
const search = new SerpAPI(process.env.SERPAPI_KEY);
const prompt = PromptTemplate.fromTemplate(
`Turn the following user input into a search query for a search engine:
{input}`,
);
const builder = await getModelBuilder();
const model = await builder();
const chain = prompt.pipe(model).pipe(new StringOutputParser()).pipe(search);
return await chain.invoke({
input: "Who is the current prime minister of Malaysia?",
});
})();
import { getVectorStoreBuilder } from "https://esm.town/v/webup/getVectorStoreBuilder";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const retrieverSampleSelfQuery = (async () => {
  await import("npm:peggy");
  const { AttributeInfo } = await import(
    "npm:langchain/schema/query_constructor"
  );
  const { Document } = await import("npm:langchain/document");
  const { SelfQueryRetriever } = await import(
    "npm:langchain/retrievers/self_query"
  );
  const { FunctionalTranslator } = await import(
    "npm:langchain/retrievers/self_query/functional"
  );
  /**
   * First, we create a bunch of documents. You can load your own documents here instead.
   * Each document has a pageContent and a metadata field. Make sure your metadata matches the AttributeInfo below.
   */
  const docs = [
    new Document({
      pageContent:
        "A bunch of scientists bring back dinosaurs and mayhem breaks loose",
      metadata: { year: 1993, rating: 7.7, genre: "science fiction" },
    }),
    new Document({
      pageContent:
        "Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
      metadata: { year: 2010, director: "Christopher Nolan", rating: 8.2 },
    }),
    new Document({
      pageContent:
        "A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
      metadata: { year: 2006, director: "Satoshi Kon", rating: 8.6 },
    }),
    new Document({
      pageContent:
        "A bunch of normal-sized women are supremely wholesome and some men pine after them",
      metadata: { year: 2019, director: "Greta Gerwig", rating: 8.3 },
    }),
    new Document({
      pageContent: "Toys come alive and have a blast doing so",
      metadata: { year: 1995, genre: "animated" },
    }),
    new Document({
      pageContent:
        "Three men walk into the Zone, three men walk out of the Zone",
      metadata: {
        year: 1979,
        director: "Andrei Tarkovsky",
        genre: "science fiction",
        rating: 9.9,
      },
    }),
  ];
  /**
   * Next, we define the attributes we want to be able to query on.
   * In this case, we want to be able to query on the genre, year, director, rating, and length of the movie.
   * We also provide a description of each attribute and the type of the attribute.
   * This is used to generate the query prompts.
   */
  const attributeInfo: InstanceType<typeof AttributeInfo>[] = [
    {
      name: "genre",
      description: "The genre of the movie",
      type: "string or array of strings",
    },
    {
      name: "year",
      description: "The year the movie was released",
      type: "number",
    },
    {
      name: "director",
      description: "The director of the movie",
      type: "string",
    },
    {
      name: "rating",
      description: "The rating of the movie (1-10)",
      type: "number",
    },
    {
      name: "length",
      description: "The length of the movie in minutes",
      type: "number",
    },
  ];
  /**
   * Next, we instantiate a vector store. This is where we store the embeddings of the documents.
   * We also need to provide an embeddings object. This is used to embed the documents.
   */
  const modelBuilder = await getModelBuilder();
  const llm = await modelBuilder();
  const vsBuilder = await getVectorStoreBuilder(docs);
  const vectorStore = await vsBuilder();
  const documentContents = "Brief summary of a movie";
  // The val as published stops after vectorStore; the remaining options follow
  // the standard SelfQueryRetriever.fromLLM signature, and the closing sample
  // query is an illustrative assumption.
  const selfQueryRetriever = await SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    structuredQueryTranslator: new FunctionalTranslator(),
  });
  return await selfQueryRetriever.getRelevantDocuments(
    "Which movies are rated higher than 8.5?",
  );
})();

import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const modelSampleChatGenerate = (async () => {
  const builder = await getModelBuilder({
    type: "chat",
    provider: "openai",
  });
  const model = await builder();
  const { SystemMessage, HumanMessage } = await import("npm:langchain/schema");
  return await model.generate([
    [
      new SystemMessage(
        "You are a helpful assistant that translates English to Chinese.",
      ),
      new HumanMessage(
        "Translate this sentence from English to Chinese. I love programming.",
      ),
    ],
    [
      new SystemMessage(
        "You are a helpful assistant that translates English to Chinese.",
      ),
      new HumanMessage(
        "Translate this sentence from English to Chinese. I love artificial intelligence.",
      ),
    ],
  ]);
})();

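Unlike call, generate returns an LLMResult holding one generations array per input message list. A minimal sketch of pulling the translated strings out, assuming the returned value is bound to result:

// Post-processing of the LLMResult shape: first generation per input.
const texts = result.generations.map((gen) => gen[0].text);
// texts[0] and texts[1] hold the two translations.
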
import { getLangSmithBuilder } from "https://esm.town/v/webup/getLangSmithBuilder";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const pipeSampleMap = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { RunnableSequence } = await import("npm:langchain/schema/runnable");
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const prompt1 = PromptTemplate.fromTemplate(
    `What is the city {person} is from? Only respond with the name of the city.`,
  );
  const prompt2 = PromptTemplate.fromTemplate(
    `What country is the city {city} in? Respond in {language}.`,
  );
  const mb = await getModelBuilder();
  const model = await mb();
  const tb = await getLangSmithBuilder();
  const tracer = await tb();
  const chain = prompt1.pipe(model).pipe(new StringOutputParser());
  const combinedChain = RunnableSequence.from([
    {
      city: chain,
      language: (input) => input.language,
    },
    prompt2,
    model,
    new StringOutputParser(),
  ]);
  return await combinedChain.invoke({
    person: "Chairman Mao",
    language: "Chinese",
  }, { callbacks: [tracer] });
})();

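The object literal at the head of the sequence is shorthand for a runnable map that runs both entries on the same input. A minimal sketch of the explicit form, assuming RunnableMap is exported from the same module:

// Hypothetical explicit equivalent of the object-literal step above.
const { RunnableMap } = await import("npm:langchain/schema/runnable");
const map = RunnableMap.from({
  city: chain,
  language: (input) => input.language,
});
const combined = RunnableSequence.from([
  map,
  prompt2,
  model,
  new StringOutputParser(),
]);
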