Avatar

@jacoblee93

11 likes · 11 public vals
Joined May 25, 2023
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
// Demo: ConversationalRetrievalQAChain over an in-memory vector store,
// using ConversationSummaryMemory (chat history is summarized between
// turns) and a map_reduce QA chain. Resolves to both answers.
const conversationalRetrievalQAChainSummaryMemory = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { OpenAIEmbeddings } = await import(
    "https://esm.sh/langchain/embeddings/openai"
  );
  const { ConversationSummaryMemory } = await import(
    "https://esm.sh/langchain/memory"
  );
  const { MemoryVectorStore } = await import(
    "https://esm.sh/langchain/vectorstores/memory"
  );
  const { ConversationalRetrievalQAChain } = await import(
    "https://esm.sh/langchain/chains"
  );

  // A single chat model serves both answering and history summarization.
  const model = new ChatOpenAI({
    openAIApiKey: @me.secrets.OPENAI_API_KEY,
  });

  /* Create the vectorstore */
  const sampleTexts = [
    "Mitochondria are the powerhouse of the cell",
    "Bye bye",
    "Mitochondria are made of lipids",
  ];
  const sampleMetadatas = [{ id: 2 }, { id: 1 }, { id: 3 }];
  const store = await MemoryVectorStore.fromTexts(
    sampleTexts,
    sampleMetadatas,
    new OpenAIEmbeddings({
      openAIApiKey: @me.secrets.OPENAI_API_KEY,
    }),
  );

  /* Create the chain */
  const qaChain = ConversationalRetrievalQAChain.fromLLM(
    model,
    store.asRetriever(),
    {
      returnSourceDocuments: true,
      memory: new ConversationSummaryMemory({
        memoryKey: "chat_history",
        llm: model,
        outputKey: "text",
        returnMessages: true,
      }),
      qaChainOptions: {
        type: "map_reduce",
      },
    },
  );

  /* Ask it a question, then a follow-up that relies on stored history */
  const question = "What is the powerhouse of the cell?";
  const res = await qaChain.call({ question });
  console.log(res);
  const followUpRes = await qaChain.call({
    question: "What are they made out of?",
  });
  console.log(followUpRes);
  return { res, followUpRes };
})();
{"text":"The powerhouse of the cell is the mitochondria.","sourceDocuments":[{"pageContent":"Mitochondria are the powerhouse of the cell","metadata":{"id":2}},{"pageContent":"Mitochondria are made of lipids","metadata":{"id":3}},{"pageContent":"Bye bye","metadata":{"id":1}}]} {"text":"Mitochondria are made of lipids.","sourceDocuments":[{"pageContent":"Mitochondria are made of lipids","metadata":{"id":3}},{"pageContent":"Mitochondria are the powerhouse of the cell","metadata":{"id":2}},{"pageContent":"Bye bye","metadata":{"id":1}}]}
0
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
// Demo: streaming a ConversationalRetrievalQAChain answer token-by-token
// into a string buffer via a callback, while question rephrasing uses a
// separate non-streaming model. Resolves to the accumulated streamed text.
const conversationalRetrievalQAChainStreamingExample = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { OpenAIEmbeddings } = await import(
    "https://esm.sh/langchain/embeddings/openai"
  );
  const { BufferMemory } = await import("https://esm.sh/langchain/memory");
  const { MemoryVectorStore } = await import(
    "https://esm.sh/langchain/vectorstores/memory"
  );
  const { ConversationalRetrievalQAChain } = await import(
    "https://esm.sh/langchain/chains"
  );

  // Tokens from the answering model are appended here as they arrive.
  let streamedResponse = "";
  const streamingModel = new ChatOpenAI({
    openAIApiKey: @me.secrets.OPENAI_API_KEY,
    streaming: true,
    callbacks: [
      {
        handleLLMNewToken(token) {
          streamedResponse += token;
        },
      },
    ],
  });
  // The question-generator step doesn't need streaming.
  const nonStreamingModel = new ChatOpenAI({
    openAIApiKey: @me.secrets.OPENAI_API_KEY,
  });

  /* Create the vectorstore */
  const store = await MemoryVectorStore.fromTexts(
    [
      "Mitochondria are the powerhouse of the cell",
      "Bye bye",
      "Mitochondria are made of lipids",
    ],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings({
      openAIApiKey: @me.secrets.OPENAI_API_KEY,
    }),
  );

  /* Create the chain */
  const qaChain = ConversationalRetrievalQAChain.fromLLM(
    streamingModel,
    store.asRetriever(),
    {
      memory: new BufferMemory({
        memoryKey: "chat_history",
        outputKey: "text",
        returnMessages: true,
      }),
      questionGeneratorChainOptions: {
        llm: nonStreamingModel,
      },
    },
  );

  /* Ask it a question */
  const question = "What is the powerhouse of the cell?";
  const res = await qaChain.call({ question });
  // console.log(res);
  console.log("streamed response", streamedResponse);
  return streamedResponse;
})();
"streamed response" "The powerhouse of the cell is the mitochondria."
0
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
// Demo: a ConversationChain whose prompt takes several input keys
// (vegetables/fruit/meat/menu) in addition to the memory-tracked
// "question" key. BufferMemory's inputKey tells it which input to store.
// Fix: the system prompt misspelled "recipe" as "receipte".
const multipleKeysAndMemoryConversationChainExample = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { BufferMemory } = await import("https://esm.sh/langchain/memory");
  const {
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
  } = await import("https://esm.sh/langchain/prompts");
  const { ConversationChain } = await import("https://esm.sh/langchain/chains");
  const llm = new ChatOpenAI({
    modelName: "gpt-3.5-turbo",
    openAIApiKey: @me.secrets.OPENAI_API_KEY,
    temperature: 0,
  });
  // inputKey is required here: with multiple prompt variables, memory must
  // know which one holds the human turn to record.
  const memory = new BufferMemory({
    memoryKey: "chat_history",
    inputKey: "question",
    returnMessages: true,
  });
  const prompt = ChatPromptTemplate.fromPromptMessages([
    SystemMessagePromptTemplate.fromTemplate(
      `There are {vegetables}, {fruit} and {meat} sorts available for cooking, create a recipe given human input using some of these ingredients as a basis.
This is the existing menu {menu}, dishes must not include any ingredient already in the existing menu.`,
    ),
    new MessagesPlaceholder("chat_history"),
    HumanMessagePromptTemplate.fromTemplate("{question}"),
  ]);
  const chain = new ConversationChain({ llm, memory, prompt });
  const result = await chain.call({
    vegetables: ["carrot", "potato", "tomato"].join(", "),
    fruit: ["apple", "banana", "orange"].join(", "),
    meat: ["chicken", "beef", "pork"].join(", "),
    menu: ["chicken soup", "beef steak", "pork chop"].join(", "),
    question: "What is a good recipe with the above ingredients?",
  });
  console.log({ result });
  return result;
})();
{"result":{"response":"One recipe that can be made using the given ingredients is a vegetable and fruit stir-fry with chicken. Here's how to make it:\n\nIngredients:\n- 2 chicken breasts, sliced\n- 1 large carrot, sliced\n- 1 large potato, diced\n- 1 large tomato, diced\n- 1 apple, diced\n- 1 banana, sliced\n- 1 orange, peeled and segmented\n- 2 cloves garlic, minced\n- 1 tbsp soy sauce\n- 1 tbsp honey\n- 1 tbsp olive oil\n- Salt and pepper to taste\n\nInstructions:\n\n1. Heat the olive oil in a large skillet over medium-high heat. Add the chicken and cook until browned on all sides, about 5-7 minutes. Remove from the skillet and set aside.\n\n2. In the same skillet, add the garlic and cook for 1-2 minutes until fragrant. Add the carrot and potato and cook for 5-7 minutes until slightly softened.\n\n3. Add the tomato, apple, banana, and orange to the skillet and cook for another 2-3 minutes until heated through.\n\n4. In a small bowl, whisk together the soy sauce and honey. Pour the mixture over the vegetables and fruit and stir to combine.\n\n5. Add the chicken back to the skillet and stir to combine. Cook for another 2-3 minutes until everything is heated through.\n\n6. Season with salt and pepper to taste. Serve hot.\n\nThis dish is a healthy and flavorful way to incorporate a variety of fruits and vegetables into your diet, while also getting a good source of protein from the chicken. Enjoy!"}}
0
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
// Demo: ConversationalRetrievalQAChain over an HNSWLib vector store where
// GPT-4 answers and the cheaper GPT-3.5 rephrases follow-up questions.
// Fixes two bugs in the original: the chain object was awaited without ever
// being called (`await qaChain` instead of `await qaChain.call(...)`), and
// the literal string "response" was logged instead of the actual result.
// (Note: HNSWLib requires the native hnswlib-node dependency at runtime.)
const conversationalQAChainEx = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { HNSWLib } = await import(
    "https://esm.sh/langchain/vectorstores/hnswlib"
  );
  const { OpenAIEmbeddings } = await import(
    "https://esm.sh/langchain/embeddings/openai"
  );
  const { ConversationalRetrievalQAChain } = await import(
    "https://esm.sh/langchain/chains"
  );
  const gpt35 = new ChatOpenAI({
    openAIApiKey: @me.secrets.OPENAI_API_KEY,
    modelName: "gpt-3.5-turbo",
    temperature: 0,
  });
  const gpt4 = new ChatOpenAI({
    openAIApiKey: @me.secrets.OPENAI_API_KEY,
    modelName: "gpt-4",
    temperature: 0,
  });
  const vectorStore = await HNSWLib.fromTexts(
    ["Hello world", "Bye bye", "hello nice world", "bye", "hi"],
    [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
    new OpenAIEmbeddings({
      openAIApiKey: @me.secrets.OPENAI_API_KEY,
    }),
  );
  const qaChain = ConversationalRetrievalQAChain.fromLLM(
    gpt4,
    vectorStore.asRetriever(),
    {
      questionGeneratorChainOptions: {
        llm: gpt35, // Need for speed!
      },
    },
  );
  const chatHistory = [];
  const query = `What did the president say about Ketanji Brown Jackson?`;
  // BUG FIX: actually invoke the chain with the question and history; the
  // original awaited the chain object itself, which resolves to the chain.
  const response = await qaChain.call({
    question: query,
    chat_history: chatHistory,
  });
  // BUG FIX: log the result, not the string literal "response".
  console.log(response);
  return response;
})();
Error: Please install hnswlib-node as a dependency with, e.g. `npm install -S hnswlib-node`
0
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
// Demo: a chat-zero-shot-react agent with a Calculator tool and a custom
// human message template that forces final answers into French. Resolves
// to the executor's result object.
const chatAgentWithCustomPrompt = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { initializeAgentExecutorWithOptions } = await import(
    "https://esm.sh/langchain/agents"
  );
  const { Calculator } = await import(
    "https://esm.sh/langchain/tools/calculator"
  );

  const chatModel = new ChatOpenAI({
    temperature: 0,
    openAIApiKey: @me.secrets.OPENAI_API_KEY,
  });

  // The custom template appends a standing instruction after the scratchpad.
  const executor = await initializeAgentExecutorWithOptions(
    [new Calculator()],
    chatModel,
    {
      agentType: "chat-zero-shot-react-description",
      verbose: true,
      agentArgs: {
        humanMessageTemplate:
          "{input}\n\n{agent_scratchpad}\nYou must also always give your final answer in French, as the human you are talking to only speaks French.",
      },
    },
  );

  // You could also just translate the English output of the agent into French with another LLM call
  return await executor.call({
    input: `How is your day going?`,
  });
})();
0
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
// Agent demo: generates a Twitter thread announcing a LangChain release.
// A chat-conversational agent (GPT-4) is given two tools:
//   1. a DynamicTool that fetches a GitHub release-notes page, scrapes it
//      with cheerio, and extracts structured PR info via an Anthropic-backed
//      LLMChain whose output is validated/repaired by an OutputFixingParser;
//   2. SerpAPI web search for looking up unfamiliar terms.
// NOTE(review): per the captured output below, this val exceeded the
// 60-second execution timeout when run.
const runAgent = (async () => {
const { z } = await import("npm:zod");
const { ChatOpenAI } = await import("npm:langchain/chat_models/openai");
const { ChatAnthropic } = await import("npm:langchain/chat_models/anthropic");
const { DynamicTool, Tool, SerpAPI } = await import("npm:langchain/tools");
const { initializeAgentExecutorWithOptions } = await import(
"npm:langchain/agents"
);
const cheerio = await import("npm:cheerio");
const { LLMChain } = await import("npm:langchain/chains");
const { ChatPromptTemplate, HumanMessagePromptTemplate } = await import(
"npm:langchain/prompts"
);
const { StructuredOutputParser, OutputFixingParser } = await import(
"npm:langchain/output_parsers"
);
// Primary agent model (also used by the output-fixing parser below).
const model = new ChatOpenAI({
openAIApiKey: @me.secrets.OPENAI_API_KEY,
modelName: "gpt-4",
maxTokens: 2048,
});
// Claude handles the long release-notes extraction inside the tool.
const anthropicModel = new ChatAnthropic({
modelName: "claude-v1",
anthropicApiKey: @me.secrets.ANTHROPIC_API_KEY,
temperature: 0,
});
// I had an idea where the agent could scrape individual PR pages, didn't implement
// Zod schema describing one PR entry; the .describe() strings become
// instructions in the format prompt the parser generates.
const outputParser = StructuredOutputParser.fromZodSchema(z.array(
z.object({
contributor: z.string().describe(
"The name of the main contributor of the PR",
),
description: z.string().describe(
"A description of what the pull request is for",
),
isFirstContribution: z.boolean().describe(
"Whether it is the contributor's first contribution",
),
pullRequestNumber: z.number().describe("The number of the pull request"),
}).describe("An objects representing a pull request"),
));
// Wraps outputParser so malformed LLM output gets one repair attempt via GPT-4.
const outputFixingParser = OutputFixingParser.fromLLM(model, outputParser);
const tools = [
new DynamicTool({
name: "langchain-release-summarizer",
description:
"Extracts information about the pull requests merged as part of a LangChain release. Takes a GitHub URL as input.",
// input: a GitHub release URL; runManager links the inner chain's
// callbacks to the agent run.
func: async (input, runManager) => {
const response = await fetch(input.trim());
const pageContent = await response.text();
// Scrape just the release-notes region of the GitHub page.
const $ = cheerio.load(pageContent);
const releaseNotes = $("#repo-content-pjax-container").text();
const prExtractionChain = new LLMChain({
llm: anthropicModel,
prompt: ChatPromptTemplate.fromPromptMessages([
HumanMessagePromptTemplate.fromTemplate(`{query}\n\n{pageContent}`),
]),
outputParser: outputFixingParser,
outputKey: "pullRequests",
});
const summarizationResult = await prExtractionChain.call({
query:
`The following webpage contains the release notes for LangChain, an open source framework for building apps with LLMs.
List all of the pull requests mentioned in the release notes.
Extract the name of the main contributor, a description of the pull request, whether it is their first contribution, and the number of the pull request.
Be extremely verbose!`,
pageContent: releaseNotes,
}, runManager?.getChild());
// Tools must return strings, so serialize the parsed PR array.
return JSON.stringify(summarizationResult.pullRequests);
},
}),
new SerpAPI(@me.secrets.SERPAPI_API_KEY, {
location: "Austin,Texas,United States",
hl: "en",
gl: "us",
}),
];
const agent = await initializeAgentExecutorWithOptions(tools, model, {
agentType: "chat-conversational-react-description",
});
// The long prompt below specifies the thread format, including an example;
// tweets are delimited by "-----" sequences.
const result = await agent.call({
input: `Generate a Twitter thread announcing a new LangChain release.
The release notes are available at this URL: https://github.com/hwchase17/langchainjs/releases/tag/0.0.84.
The release notes include a short description of each merged pull request and the contributor who built the feature.
The thread must start with a header tweet summarizing all the changes in the release.
The thread must contain a tweet for each pull request merged as part of the release that adds a significant feature, and must go into deep detail about what the contribution adds.
If you don't know what something mentioned in the release notes is, look it up with the provided tool so that you can get full context.
Each tweet should also thank the contributor by name, and congratulate them if it is their first contribution and put a medal emoji 🥇 next to their name.
Try to avoid repetitive language in your tweets.
Be extremely verbose and descriptive in your final response.
Below is an example of the format that tweets in the final output Twitter thread should follow. Individual tweets should be separated by a "-----" sequence:
-----Header tweet-----
@LangChainAI 🦜🔗 JS/TS 0.0.83 out with loads of @GoogleAI and PaLM!
💬 Google Vertex AI chat model + embeddings
🔎 Google Custom Search tool
⛓️ API chain: make API calls from docs
Read on 🧵
-----Example individual tweet-----
Thank you 🥇 USER for adding an @upstash Redis persistent message store, and congrats on your first contribution!
Upstash Redis differs from the existing Redis store in that it uses stateless HTTP connections, making it perfect for serverless environments!
-----Example wrapup bugfix tweet-----
Thank you 🥇 USER 🥇 USER2, and 🥇 USER3 for your important bugfixes 👏! Congrats to all of you on your first contributions!
`,
});
console.log({ result });
})();
Timeout: val execution took longer than 60 seconds
0
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
// Two-stage LCEL pipeline: generate 5 questions about a paragraph, then
// rewrite them to meet the supplied guidelines. Resolves to the styled
// questions as a string.
// Fixes: the original piped the StringOutputParser onto the model inside a
// misnested parenthesis (`prompt.pipe(model.pipe(parser))`) — equivalent at
// runtime but inconsistent with the sibling `styleChain`; normalized to the
// same `.pipe(model).pipe(parser)` shape. Also removed the unused LLMChain
// import.
const questionsWithGuidelinesChain = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain@0.0.150/chat_models/openai"
  );
  const { ChatPromptTemplate } = await import(
    "https://esm.sh/langchain@0.0.150/prompts"
  );
  const { StringOutputParser } = await import(
    "https://esm.sh/langchain@0.0.150/schema/output_parser"
  );
  const { RunnableSequence } = await import(
    "https://esm.sh/langchain@0.0.150/schema/runnable"
  );
  const questionPrompt = ChatPromptTemplate.fromPromptMessages([
    ["user", "Generate 5 questions about the following paragraph: {paragraph}"],
  ]);
  const questionChain = questionPrompt
    .pipe(
      new ChatOpenAI({
        openAIApiKey: @me.secrets.OPENAI_API_KEY,
      }),
    )
    .pipe(new StringOutputParser());
  const stylePrompt = ChatPromptTemplate.fromPromptMessages([
    [
      "user",
      "Transform the following questions to meet the guidelines:\n\nQuestions:\n\n{questions}\n\nGuidelines:{guidelines}",
    ],
  ]);
  const styleChain = stylePrompt
    .pipe(
      new ChatOpenAI({
        openAIApiKey: @me.secrets.OPENAI_API_KEY,
      }),
    )
    .pipe(new StringOutputParser());
  // RunnableSequence.from() is equivalent to `.pipe().pipe()`
  // but will coerce objects (and functions) into runnables
  const questionStyleChain = RunnableSequence.from([
    {
      questions: questionChain,
      // Each property in the object gets the same input,
      // and we want to just pass through the "guidelines"
      // property from the original input as a parameter
      // to "styleChain".
      guidelines: (input) => input.guidelines,
    },
    styleChain,
  ]);
  const result = await questionStyleChain.invoke({
    paragraph:
      "Harrison went to harvard and worked at kensho. Kensho is in boston. Harrison likes boston. harvard is in boston.",
    guidelines: "Questions are in Spanish",
  });
  return result;
})();
0
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
// LLMChain translation demo: a system message sets the language pair and a
// human message carries the text to translate. Resolves to the chain result.
const untitled_chocolateSquid = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain@0.0.146/chat_models/openai"
  );
  const { LLMChain } = await import("https://esm.sh/langchain@0.0.146/chains");
  const { ChatPromptTemplate } = await import(
    "https://esm.sh/langchain@0.0.146/prompts"
  );

  // Two-message prompt: system instruction plus the human input slot.
  const prompt = ChatPromptTemplate.fromPromptMessages([
    [
      "system",
      "You are a helpful assistant that translates {input_language} to {output_language}.",
    ],
    ["human", "{text}"],
  ]);

  const model = new ChatOpenAI({
    temperature: 0,
    openAIApiKey: @me.secrets.OPENAI_API_KEY,
  });

  const translationChain = new LLMChain({ llm: model, prompt });
  return await translationChain.call({
    input_language: "English",
    output_language: "French",
    text: "I love programming!",
  });
})();
0
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
// Streaming LLM demo: tokens arrive one at a time through the
// handleLLMNewToken callback and are logged individually, then the full
// completed response is logged.
const streamingTest = (async () => {
  const { OpenAI } = await import("https://esm.sh/langchain/llms/openai");
  // To enable streaming, we pass in `streaming: true` to the LLM constructor.
  const model = new OpenAI({
    maxTokens: 25,
    streaming: true,
    openAIApiKey: @me.secrets.OPENAI_API_KEY,
  });
  // Per-call callbacks go in the third argument of `call`.
  const callbacks = [
    {
      handleLLMNewToken(token: string) {
        console.log({ token });
      },
    },
  ];
  const response = await model.call("Tell me a joke.", undefined, callbacks);
  console.log(response);
})();
ReferenceError: window is not defined
0
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
// Shows how to use the Brave Search tool in a LangChain agent
const braveAgent = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { BraveSearch } = await import("https://esm.sh/langchain/tools");
  const { Calculator } = await import(
    "https://esm.sh/langchain/tools/calculator"
  );
  const { initializeAgentExecutorWithOptions } = await import(
    "https://esm.sh/langchain/agents"
  );

  const chatModel = new ChatOpenAI({
    temperature: 0,
    openAIApiKey: @me.secrets.OPENAI_API_KEY,
  });

  // Web search for facts, calculator for the arithmetic sub-question.
  const agentTools = [
    new BraveSearch({
      apiKey: @me.secrets.BRAVE_SEARCH_API_KEY,
    }),
    new Calculator(),
  ];

  const executor = await initializeAgentExecutorWithOptions(
    agentTools,
    chatModel,
    {
      agentType: "chat-zero-shot-react-description",
      verbose: true,
    },
  );

  const input =
    `Who is Dua Lipa's boyfriend? What is his current age raised to the 0.23 power?`;
  console.log(`Executing with input "${input}"...`);
  return await executor.call({ input });
})();
"Executing with input \"Who is Dua Lipa's boyfriend? What is his current age raised to the 0.23 power?\"..."
0
0