Vals using langchain/prompts

import { getModelBuilder } from "https://esm.town/v/bluemsn/getModelBuilder";

export const parserSampleListCustom = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { CustomListOutputParser } = await import(
    "npm:langchain/output_parsers"
  );
  // With a `CustomListOutputParser`, we can parse a list with a specific length and separator.
  const parser = new CustomListOutputParser({ length: 3, separator: "\n" });
  const formatInstructions = parser.getFormatInstructions();
  const prompt = new PromptTemplate({
    template: "Provide a list of {subject}.\n{format_instructions}",
    inputVariables: ["subject"],
    partialVariables: { format_instructions: formatInstructions },
  });
  const builder = await getModelBuilder();
  const model = await builder();
  const input = await prompt.format({
    subject: "great fiction books (book, author)",
  });
  console.log("input---\n", input);
  const response = await model.call(input);
  console.log(response);
  return await parser.parse(response);
})();
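For reference, `parse()` splits the completion on the configured separator and checks it against the expected length, returning a plain string array. A minimal standalone sketch (the completion text is assumed for illustration; real model output will vary):

const demoListParser = (async () => {
  const { CustomListOutputParser } = await import(
    "npm:langchain/output_parsers"
  );
  const parser = new CustomListOutputParser({ length: 3, separator: "\n" });
  // Assumed completion text, standing in for a real model response.
  const completion =
    "Dune, Frank Herbert\n1984, George Orwell\nBeloved, Toni Morrison";
  // → ["Dune, Frank Herbert", "1984, George Orwell", "Beloved, Toni Morrison"]
  return await parser.parse(completion);
})();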
export const promptSampleTemplatesPartial = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const date = () => new Date().toISOString();
  const prompt = new PromptTemplate({
    template: "Tell me a {adjective} joke about the day {date}",
    inputVariables: ["adjective", "date"],
  });
  console.log("prompt:-----\n", prompt);
  const partialPrompt = await prompt.partial({ date });
  console.log("partialPrompt:-----\n", partialPrompt);
  const formattedPrompt = await partialPrompt.format({ adjective: "funny" });
  console.log("formattedPrompt:-----\n", formattedPrompt);
  return formattedPrompt;
})();
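`partial()` accepts plain string values as well as zero-argument functions; a function (like `date` above) is re-invoked each time the prompt is formatted. A small sketch of the string form:

const partialStringSketch = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const prompt = new PromptTemplate({
    template: "{greeting}, {name}!",
    inputVariables: ["greeting", "name"],
  });
  const withGreeting = await prompt.partial({ greeting: "Hello" });
  return await withGreeting.format({ name: "world" }); // "Hello, world!"
})();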
export const promptSampleTemplates = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const prompt = new PromptTemplate({
    inputVariables: ["adjective", "content"],
    template: "Tell me a {adjective} joke about {content}.",
  });
  const formattedPrompt = await prompt.format({
    adjective: "funny",
    content: "chickens",
  });
  return formattedPrompt;
})();
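The same template can be built more tersely with `PromptTemplate.fromTemplate`, which infers the input variables from the braces:

const fromTemplateSketch = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const prompt = PromptTemplate.fromTemplate(
    "Tell me a {adjective} joke about {content}.",
  );
  // → "Tell me a funny joke about chickens."
  return await prompt.format({ adjective: "funny", content: "chickens" });
})();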
import process from "node:process";

export const untitled_silverPinniped = (async () => {
  const { ChatPromptTemplate } = await import("npm:langchain/prompts");
  const { ChatOpenAI } = await import("npm:langchain/chat_models/openai");
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const SYSTEM_TEMPLATE = `You are an AI programming assistant.`;
  const redesignTemplate = ChatPromptTemplate.fromMessages<{
    input: string;
    initial_code: string;
  }>([
    ["system", SYSTEM_TEMPLATE],
    ["human", "{input}"],
  ]);
  const model = new ChatOpenAI({
    modelName: "gpt-4",
    temperature: 0.2,
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  // Output parser converts the chat message into a raw string. Also works with streaming.
  const chain = redesignTemplate.pipe(model).pipe(new StringOutputParser());
  const output = await chain.invoke({
    input: "A word guessing game.",
    // Note: `initial_code` is declared as an input variable but never
    // referenced in the template above, so it has no effect on the messages.
    initial_code:
      `<TextInput id="nameInput" placeholder="Starting test input" />`,
  });
  return output;
})();
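Because the chain ends in a `StringOutputParser`, it can also be consumed incrementally. A sketch reusing the `chain` and inputs defined above (not part of the original val):

const stream = await chain.stream({
  input: "A word guessing game.",
  initial_code: `<TextInput id="nameInput" placeholder="Starting test input" />`,
});
for await (const chunk of stream) {
  process.stdout.write(chunk);
}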
import { getLangSmithBuilder } from "https://esm.town/v/webup/getLangSmithBuilder";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const pipeSampleMap = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { RunnableSequence } = await import("npm:langchain/schema/runnable");
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const prompt1 = PromptTemplate.fromTemplate(
    `What is the city {person} is from? Only respond with the name of the city.`,
  );
  const prompt2 = PromptTemplate.fromTemplate(
    `What country is the city {city} in? Respond in {language}.`,
  );
  const mb = await getModelBuilder();
  const model = await mb();
  const tb = await getLangSmithBuilder();
  const tracer = await tb();
  const chain = prompt1.pipe(model).pipe(new StringOutputParser());
  const combinedChain = RunnableSequence.from([
    {
      city: chain,
      language: (input) => input.language,
    },
    prompt2,
    model,
    new StringOutputParser(),
  ]);
  return await combinedChain.invoke({
    person: "Chairman Mao",
    language: "Chinese",
  }, { callbacks: [tracer] });
})();
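The object literal at the head of `combinedChain` is shorthand for a `RunnableMap`: each key is computed from the same input, so `prompt2` receives `{ city: <output of the first chain>, language: <input.language> }` before the second model call.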
import { getLangSmithBuilder } from "https://esm.town/v/webup/getLangSmithBuilder";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const pipeSampleLLMBind = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const prompt = PromptTemplate.fromTemplate(`Tell me a joke about {subject}`);
  const mb = await getModelBuilder({
    type: "chat",
    provider: "openai",
  });
  const model = await mb();
  const tb = await getLangSmithBuilder();
  const tracer = await tb();
  const functionSchema = [
    {
      name: "joke",
      description: "A joke",
      parameters: {
        type: "object",
        properties: {
          setup: {
            type: "string",
            description: "The setup for the joke",
          },
          punchline: {
            type: "string",
            description: "The punchline for the joke",
          },
        },
        required: ["setup", "punchline"],
      },
    },
  ];
  const chain = prompt.pipe(model.bind({
    functions: functionSchema,
    function_call: { name: "joke" },
  }));
  return await chain.invoke({ subject: "bears" }, { callbacks: [tracer] });
})();
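The invocation above resolves to the raw chat message, with the function arguments serialized as JSON inside its `function_call` field. To get the parsed arguments directly, one option is to append a `JsonOutputFunctionsParser`; a sketch reusing `prompt`, `model`, and `functionSchema` from above:

const { JsonOutputFunctionsParser } = await import(
  "npm:langchain/output_parsers"
);
const jokeChain = prompt
  .pipe(model.bind({
    functions: functionSchema,
    function_call: { name: "joke" },
  }))
  .pipe(new JsonOutputFunctionsParser());
// → e.g. { setup: "...", punchline: "..." } (actual text will vary)
const joke = await jokeChain.invoke({ subject: "bears" });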
import { getVectorStoreBuilder } from "https://esm.town/v/webup/getVectorStoreBuilder";
import { getSampleDocuments } from "https://esm.town/v/webup/getSampleDocuments";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const pipeSampleLLMRetrieverConversation = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { RunnableSequence, RunnablePassthrough } = await import(
    "npm:langchain/schema/runnable"
  );
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const { Document } = await import("npm:langchain/document");
  const modelBuilder = await getModelBuilder();
  const model = await modelBuilder();
  const docs = await getSampleDocuments();
  const vectorBuilder = await getVectorStoreBuilder(docs);
  const vector = await vectorBuilder();
  const retriever = vector.asRetriever();
  const condenseQuestionTemplate =
    `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:`;
  const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(
    condenseQuestionTemplate,
  );
  const answerTemplate =
    `Answer the question based only on the following context:
{context}
Question: {question}
`;
  const ANSWER_PROMPT = PromptTemplate.fromTemplate(answerTemplate);
  const combineDocumentsFn = (docs, separator = "\n\n") => {
    const serializedDocs = docs.map((doc) => doc.pageContent);
    return serializedDocs.join(separator);
  };
  const formatChatHistory = (chatHistory: [string, string][]) => {
    const formattedDialogueTurns = chatHistory.map((dialogueTurn) =>
      `Human: ${dialogueTurn[0]}\nAssistant: ${dialogueTurn[1]}`
    );
    return formattedDialogueTurns.join("\n");
  };
  type ConversationalRetrievalQAChainInput = {
    question: string;
    chat_history: [string, string][];
  };
  const standaloneQuestionChain = RunnableSequence.from([
    {
      question: (input: ConversationalRetrievalQAChainInput) => input.question,
      chat_history: (input: ConversationalRetrievalQAChainInput) =>
        formatChatHistory(input.chat_history),
    },
    CONDENSE_QUESTION_PROMPT,
    model,
    new StringOutputParser(),
  ]);
  const answerChain = RunnableSequence.from([
    {
      context: retriever.pipe(combineDocumentsFn),
      question: new RunnablePassthrough(),
    },
    ANSWER_PROMPT,
    model,
  ]);
  const conversationalRetrievalQAChain = standaloneQuestionChain.pipe(
    answerChain,
  );
  const result1 = await conversationalRetrievalQAChain.invoke({
    question: "What is pinecone?",
    chat_history: [],
  });
  console.log(result1);
  return await conversationalRetrievalQAChain.invoke({
    question: "Is pinecone a db?",
    chat_history: [
      [
        "What is pinecone?",
        "Pinecone is the woody fruiting body of a pine tree.",
      ],
    ],
  });
})();
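Note that `answerChain` ends at the model rather than at a `StringOutputParser`, so both invocations return a chat-message object rather than a plain string; appending `new StringOutputParser()` to `answerChain` would yield strings instead.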
import { getVectorStoreBuilder } from "https://esm.town/v/webup/getVectorStoreBuilder";
import { getSampleDocuments } from "https://esm.town/v/webup/getSampleDocuments";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const pipeSampleLLMRetrieverInputs = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { RunnableSequence, RunnablePassthrough } = await import(
    "npm:langchain/schema/runnable"
  );
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const { Document } = await import("npm:langchain/document");
  const modelBuilder = await getModelBuilder();
  const model = await modelBuilder();
  const docs = await getSampleDocuments();
  const vectorBuilder = await getVectorStoreBuilder(docs);
  const vector = await vectorBuilder();
  const retriever = vector.asRetriever();
  const prompt = PromptTemplate.fromTemplate(
    `Answer the question based only on the following context:
{context}
Question: {question}
Answer in the following language: {language}`,
  );
  type LanguageChainInput = {
    question: string;
    language: string;
  };
  const serializeDocs = (docs) => docs.map((doc) => doc.pageContent).join("\n");
  const chain = RunnableSequence.from([
    {
      question: (input: LanguageChainInput) => input.question,
      language: (input: LanguageChainInput) => input.language,
      context: (input: LanguageChainInput) =>
        retriever.pipe(serializeDocs).invoke(input.question),
    },
    prompt,
    model,
    new StringOutputParser(),
  ]);
  return await chain.invoke({
    question: "What is pinecone?",
    language: "Chinese",
  });
})();
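For context, `serializeDocs` simply concatenates the `pageContent` of each retrieved `Document`. A standalone sketch with assumed contents:

const serializeDocsSketch = (async () => {
  const { Document } = await import("npm:langchain/document");
  const docs = [
    new Document({ pageContent: "Pinecone is a vector database." }),
    new Document({ pageContent: "It supports similarity search." }),
  ];
  // → "Pinecone is a vector database.\nIt supports similarity search."
  return docs.map((doc) => doc.pageContent).join("\n");
})();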
import { getVectorStoreBuilder } from "https://esm.town/v/webup/getVectorStoreBuilder";
import { getSampleDocuments } from "https://esm.town/v/webup/getSampleDocuments";
import { getLangSmithBuilder } from "https://esm.town/v/webup/getLangSmithBuilder";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const pipeSampleLLMRetriever = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { RunnableSequence, RunnablePassthrough } = await import(
    "npm:langchain/schema/runnable"
  );
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const { Document } = await import("npm:langchain/document");
  const modelBuilder = await getModelBuilder();
  const model = await modelBuilder();
  const tracerBuilder = await getLangSmithBuilder();
  const tracer = await tracerBuilder();
  const docs = await getSampleDocuments();
  const vectorBuilder = await getVectorStoreBuilder(docs);
  const vector = await vectorBuilder();
  const retriever = vector.asRetriever();
  const prompt = PromptTemplate.fromTemplate(
    `Answer the question based only on the following context:
{context}
Question: {question}`,
  );
  const serializeDocs = (docs) => docs.map((doc) => doc.pageContent).join("\n");
  const chain = RunnableSequence.from([
    {
      context: retriever.pipe(serializeDocs),
      question: new RunnablePassthrough(),
    },
    prompt,
    model,
    new StringOutputParser(),
  ]);
  return await chain.invoke("What is pinecone?", { callbacks: [tracer] });
})();
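`RunnablePassthrough` forwards the chain's input unchanged, which is why this chain is invoked with a bare string: the map step yields `{ context: <serialized docs>, question: "What is pinecone?" }` for the prompt.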
import { getLangSmithBuilder } from "https://esm.town/v/webup/getLangSmithBuilder";
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const pipeSampleLLM = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { RunnableSequence } = await import("npm:langchain/schema/runnable");
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const builder = await getModelBuilder();
  const model = await builder();
  const promptTemplate = PromptTemplate.fromTemplate(
    "Tell me a joke about {topic}",
  );
  const outputParser = new StringOutputParser();
  const tb = await getLangSmithBuilder();
  const tracer = await tb();
  const chain = RunnableSequence.from([promptTemplate, model, outputParser]);
  const result = await chain.invoke({ topic: "bears" }, {
    callbacks: [tracer],
  });
  return result;
})();
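`RunnableSequence.from([...])` is equivalent to chaining with `.pipe()`; the same chain could be written as follows, reusing the names defined above:

const pipedChain = promptTemplate.pipe(model).pipe(outputParser);
const samePipedResult = await pipedChain.invoke(
  { topic: "bears" },
  { callbacks: [tracer] },
);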
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const parserSampleJSON = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { StructuredOutputParser } = await import(
    "npm:langchain/output_parsers"
  );
  // With a `StructuredOutputParser` we can define a schema for the output.
  const parser = StructuredOutputParser.fromNamesAndDescriptions({
    answer: "answer to the user's question",
    source: "source used to answer the user's question, should be a website.",
  });
  const formatInstructions = parser.getFormatInstructions();
  const prompt = new PromptTemplate({
    template:
      "Answer the user's question as best as possible.\n{format_instructions}\n{question}",
    inputVariables: ["question"],
    partialVariables: { format_instructions: formatInstructions },
  });
  const builder = await getModelBuilder();
  const model = await builder();
  const input = await prompt.format({
    question: "What is the capital of France?",
  });
  const response = await model.call(input);
  console.log(input);
  return await parser.parse(response);
})();
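`parse()` here returns an object matching the declared schema, e.g. (values assumed for illustration) `{ answer: "Paris", source: "https://en.wikipedia.org/wiki/Paris" }`.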
import { getModelBuilder } from "https://esm.town/v/webup/getModelBuilder";

export const parserSampleListCustom = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const { CustomListOutputParser } = await import(
    "npm:langchain/output_parsers"
  );
  // With a `CustomListOutputParser`, we can parse a list with a specific length and separator.
  const parser = new CustomListOutputParser({ length: 3, separator: "\n" });
  const formatInstructions = parser.getFormatInstructions();
  const prompt = new PromptTemplate({
    template: "Provide a list of {subject}.\n{format_instructions}",
    inputVariables: ["subject"],
    partialVariables: { format_instructions: formatInstructions },
  });
  const builder = await getModelBuilder();
  const model = await builder();
  const input = await prompt.format({
    subject: "great fiction books (book, author)",
  });
  const response = await model.call(input);
  console.log(response);
  return await parser.parse(response);
})();
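This val mirrors the `parserSampleListCustom` at the top of this list, differing only in its `getModelBuilder` import (webup rather than bluemsn) and the omitted input log.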
export const promptSampleSelectorLength = (async () => {
  const { LengthBasedExampleSelector, PromptTemplate, FewShotPromptTemplate } =
    await import("npm:langchain/prompts");
  // Create a prompt template that will be used to format the examples.
  const examplePrompt = new PromptTemplate({
    inputVariables: ["input", "output"],
    template: "Input: {input}\nOutput: {output}",
  });
  // Create a LengthBasedExampleSelector that will be used to select the examples.
  const exampleSelector = await LengthBasedExampleSelector.fromExamples([
    { input: "happy", output: "sad" },
    { input: "tall", output: "short" },
    { input: "energetic", output: "lethargic" },
    { input: "sunny", output: "gloomy" },
    { input: "windy", output: "calm" },
  ], {
    examplePrompt,
    maxLength: 25,
  });
  // Create a FewShotPromptTemplate that will use the example selector.
  const dynamicPrompt = new FewShotPromptTemplate({
    // We provide an ExampleSelector instead of examples.
    exampleSelector,
    examplePrompt,
    prefix: "Give the antonym of every input",
    suffix: "Input: {adjective}\nOutput:",
    inputVariables: ["adjective"],
  });
  // An example with small input, so it selects all examples.
  console.log(await dynamicPrompt.format({ adjective: "big" }));
  const longString =
    "big and huge and massive and large and gigantic and tall and much much much much much bigger than everything else";
  return await dynamicPrompt.format({ adjective: longString });
})();
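`LengthBasedExampleSelector` measures length in words by default, so with `maxLength: 25` the short input "big" leaves room for all five examples, while the long adjective crowds most of them out of the formatted prompt.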
export const promptSampleTemplatesPipeline = (async () => {
  const { PromptTemplate, PipelinePromptTemplate } = await import(
    "npm:langchain/prompts"
  );
  const fullPrompt = PromptTemplate.fromTemplate(`{introduction}
{example}
{start}`);
  const introductionPrompt = PromptTemplate.fromTemplate(
    `You are impersonating {person}.`,
  );
  const examplePrompt = PromptTemplate.fromTemplate(
    `Here's an example of an interaction:
Q: {example_q}
A: {example_a}`,
  );
  const startPrompt = PromptTemplate.fromTemplate(`Now, do this for real!
Q: {input}
A:`);
  const composedPrompt = new PipelinePromptTemplate({
    pipelinePrompts: [
      {
        name: "introduction",
        prompt: introductionPrompt,
      },
      {
        name: "example",
        prompt: examplePrompt,
      },
      {
        name: "start",
        prompt: startPrompt,
      },
    ],
    finalPrompt: fullPrompt,
  });
  const formattedPrompt = await composedPrompt.format({
    person: "Elon Musk",
    example_q: `What's your favorite car?`,
    example_a: "Tesla",
    input: `What's your favorite social media site?`,
  });
  return formattedPrompt;
})();
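Each `pipelinePrompts` entry is formatted first and its output is bound to the matching variable in `finalPrompt`, so the returned string here is the three sub-prompts rendered and joined in order (introduction, example, start).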
export const promptSampleTemplatesPartial = (async () => {
  const { PromptTemplate } = await import("npm:langchain/prompts");
  const date = () => new Date().toISOString();
  const prompt = new PromptTemplate({
    template: "Tell me a {adjective} joke about the day {date}",
    inputVariables: ["adjective", "date"],
  });
  const partialPrompt = await prompt.partial({ date });
  const formattedPrompt = await partialPrompt.format({ adjective: "funny" });
  return formattedPrompt;
})();