@jacoblee93
11 likes · 13 public vals · Joined May 25, 2023
import process from "node:process";

export const conversationalRetrievalQAChainSummaryMemory = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { OpenAIEmbeddings } = await import(
    "https://esm.sh/langchain/embeddings/openai"
  );
  const { ConversationSummaryMemory } = await import(
    "https://esm.sh/langchain/memory"
  );
  const { MemoryVectorStore } = await import(
    "https://esm.sh/langchain/vectorstores/memory"
  );
  const { ConversationalRetrievalQAChain } = await import(
    "https://esm.sh/langchain/chains"
  );
  const chatModel = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  /* Create the vectorstore */
  const vectorStore = await MemoryVectorStore.fromTexts(
    [
      "Mitochondria are the powerhouse of the cell",
      "Bye bye",
      "Mitochondria are made of lipids",
    ],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings({
      openAIApiKey: process.env.OPENAI_API_KEY,
    }),
  );
  /* Create the chain */
  const chain = ConversationalRetrievalQAChain.fromLLM(
    chatModel,
    vectorStore.asRetriever(),
    {
      returnSourceDocuments: true,
      memory: new ConversationSummaryMemory({
        memoryKey: "chat_history",
        llm: chatModel,
        outputKey: "text",
        returnMessages: true,
      }),
      qaChainOptions: {
        type: "map_reduce",
      },
    },
  );
  /* Ask it a question */
  const question = "What is the powerhouse of the cell?";
  const res = await chain.call({ question });
  console.log(res);
  /* Ask it a follow up question */
  const followUpRes = await chain.call({
    question: "What are they made out of?",
  });
  console.log(followUpRes);
  return { res, followUpRes };
})();
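As a usage note, the running summary that ConversationSummaryMemory maintains can be inspected between calls. A minimal sketch, inside the same async scope as the chain above, using the standard `loadMemoryVariables` accessor that LangChain memory classes expose:

  // Hedged sketch: peek at the condensed chat history the memory is keeping.
  // `loadMemoryVariables` returns an object keyed by `memoryKey` ("chat_history").
  const memoryState = await chain.memory.loadMemoryVariables({});
  console.log(memoryState.chat_history);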
import process from "node:process";

export const conversationalRetrievalQAChainStreamingExample = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { OpenAIEmbeddings } = await import(
    "https://esm.sh/langchain/embeddings/openai"
  );
  const { BufferMemory } = await import("https://esm.sh/langchain/memory");
  const { MemoryVectorStore } = await import(
    "https://esm.sh/langchain/vectorstores/memory"
  );
  const { ConversationalRetrievalQAChain } = await import(
    "https://esm.sh/langchain/chains"
  );
  let streamedResponse = "";
  /* The callback appends each generated token to `streamedResponse` as it arrives */
  const streamingModel = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
    streaming: true,
    callbacks: [{
      handleLLMNewToken(token) {
        streamedResponse += token;
      },
    }],
  });
  const nonStreamingModel = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  /* Create the vectorstore */
  const vectorStore = await MemoryVectorStore.fromTexts(
    [
      "Mitochondria are the powerhouse of the cell",
      "Bye bye",
      "Mitochondria are made of lipids",
    ],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings({
      openAIApiKey: process.env.OPENAI_API_KEY,
    }),
  );
  /* Create the chain */
  const chain = ConversationalRetrievalQAChain.fromLLM(
    streamingModel,
    vectorStore.asRetriever(),
    {
      memory: new BufferMemory({
        memoryKey: "chat_history",
        outputKey: "text",
        returnMessages: true,
      }),
      /* Rephrase follow-up questions with the non-streaming model so those
         tokens don't get mixed into the streamed answer */
      questionGeneratorChainOptions: {
        llm: nonStreamingModel,
      },
    },
  );
  /* Ask it a question */
  const question = "What is the powerhouse of the cell?";
  const res = await chain.call({ question });
  // console.log(res);
  console.log("streamed response", streamedResponse);
  return streamedResponse;
})();
import process from "node:process";

export const multipleKeysAndMemoryConversationChainExample = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { BufferMemory } = await import("https://esm.sh/langchain/memory");
  const {
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
  } = await import("https://esm.sh/langchain/prompts");
  const { ConversationChain } = await import("https://esm.sh/langchain/chains");
  const llm = new ChatOpenAI({
    modelName: "gpt-3.5-turbo",
    openAIApiKey: process.env.OPENAI_API_KEY,
    temperature: 0,
  });
  const memory = new BufferMemory({
    memoryKey: "chat_history",
    inputKey: "question",
    returnMessages: true,
  });
  const prompt = ChatPromptTemplate.fromPromptMessages([
    SystemMessagePromptTemplate.fromTemplate(
      `There are {vegetables}, {fruit}, and {meat} sorts available for cooking. Create a recipe from the human input, using some of these ingredients as a basis.
This is the existing menu: {menu}. Dishes must not include any ingredient already in the existing menu.`,
    ),
    new MessagesPlaceholder("chat_history"),
    HumanMessagePromptTemplate.fromTemplate("{question}"),
  ]);
  const chain = new ConversationChain({ llm, memory, prompt });
  const result = await chain.call({
    vegetables: ["carrot", "potato", "tomato"].join(", "),
    fruit: ["apple", "banana", "orange"].join(", "),
    meat: ["chicken", "beef", "pork"].join(", "),
    menu: ["chicken soup", "beef steak", "pork chop"].join(", "),
    question: "What is a good recipe with the above ingredients?",
  });
  console.log({ result });
  return result;
})();
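Because the memory's `inputKey` only tracks `question`, a follow-up turn recalls the prior exchange but still needs the other template variables supplied again. A minimal sketch continuing inside the chain's scope (the follow-up question is illustrative):

  // Hedged sketch: second turn; `chat_history` is filled in from memory,
  // but the non-memory prompt variables must be re-supplied.
  const followUp = await chain.call({
    vegetables: ["carrot", "potato", "tomato"].join(", "),
    fruit: ["apple", "banana", "orange"].join(", "),
    meat: ["chicken", "beef", "pork"].join(", "),
    menu: ["chicken soup", "beef steak", "pork chop"].join(", "),
    question: "Can you make a vegetarian version of that dish?",
  });
  console.log({ followUp });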
import process from "node:process";

export const conversationalQAChainEx = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { HNSWLib } = await import(
    "https://esm.sh/langchain/vectorstores/hnswlib"
  );
  const { OpenAIEmbeddings } = await import(
    "https://esm.sh/langchain/embeddings/openai"
  );
  const { ConversationalRetrievalQAChain } = await import(
    "https://esm.sh/langchain/chains"
  );
  const gpt35 = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
    modelName: "gpt-3.5-turbo",
    temperature: 0,
  });
  const gpt4 = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
    modelName: "gpt-4",
    temperature: 0,
  });
  const vectorStore = await HNSWLib.fromTexts(
    ["Hello world", "Bye bye", "hello nice world", "bye", "hi"],
    [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
    new OpenAIEmbeddings({
      openAIApiKey: process.env.OPENAI_API_KEY,
    }),
  );
  const qaChain = ConversationalRetrievalQAChain.fromLLM(
    gpt4,
    vectorStore.asRetriever(),
    {
      questionGeneratorChainOptions: {
        llm: gpt35, // Need for speed!
      },
    },
  );
  const chatHistory = [];
  const query = `What did the president say about Ketanji Brown Jackson?`;
  const response = await qaChain.call({
    question: query,
    chat_history: chatHistory,
  });
  console.log(response);
  return response;
})();
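Since no `memory` option is passed here, the chain is stateless, and follow-up turns need the history handed back explicitly. A minimal sketch inside the same scope, assuming `response.text` holds the first answer (string-form `chat_history`, which this style of chain accepts):

  // Hedged sketch: a second, stateless turn that supplies the history manually.
  const followUpResponse = await qaChain.call({
    question: "Could you repeat that more briefly?",
    chat_history: `Human: ${query}\nAssistant: ${response.text}`,
  });
  console.log(followUpResponse);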
import process from "node:process";

export const chatAgentWithCustomPrompt = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { initializeAgentExecutorWithOptions } = await import(
    "https://esm.sh/langchain/agents"
  );
  const { Calculator } = await import(
    "https://esm.sh/langchain/tools/calculator"
  );
  const model = new ChatOpenAI({
    temperature: 0,
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  const tools = [
    new Calculator(),
  ];
  const executor = await initializeAgentExecutorWithOptions(tools, model, {
    agentType: "chat-zero-shot-react-description",
    verbose: true,
    agentArgs: {
      humanMessageTemplate:
        "{input}\n\n{agent_scratchpad}\nYou must also always give your final answer in French, as the human you are talking to only speaks French.",
    },
  });
  const result = await executor.call({
    input: `How is your day going?`,
  });
  // You could also just translate the English output of the agent into French with another LLM call
  return result;
})();
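The alternative mentioned in the comment above, translating the agent's English answer with a second LLM call, could look roughly like this; a minimal sketch reusing `model` and `result` from the same scope (the prompt wording is illustrative):

  // Hedged sketch: post-translate the agent's output instead of prompting
  // the agent itself to answer in French. `result.output` is the agent's answer.
  const translation = await model.invoke(
    `Translate the following into French:\n\n${result.output}`,
  );
  console.log(translation.content);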
import { fetch } from "https://esm.town/v/std/fetch";
import process from "node:process";

export const runAgent = (async () => {
  const { z } = await import("npm:zod");
  const { ChatOpenAI } = await import("npm:langchain/chat_models/openai");
  const { ChatAnthropic } = await import("npm:langchain/chat_models/anthropic");
  const { DynamicTool, SerpAPI } = await import("npm:langchain/tools");
  const { initializeAgentExecutorWithOptions } = await import(
    "npm:langchain/agents"
  );
  const cheerio = await import("npm:cheerio");
  const { LLMChain } = await import("npm:langchain/chains");
  const { ChatPromptTemplate, HumanMessagePromptTemplate } = await import(
    "npm:langchain/prompts"
  );
  const { StructuredOutputParser, OutputFixingParser } = await import(
    "npm:langchain/output_parsers"
  );
  const model = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
    modelName: "gpt-4",
    maxTokens: 2048,
  });
  const anthropicModel = new ChatAnthropic({
    modelName: "claude-v1",
    anthropicApiKey: process.env.ANTHROPIC_API_KEY,
    temperature: 0,
  });
  // I had an idea where the agent could scrape individual PR pages, didn't implement
  const outputParser = StructuredOutputParser.fromZodSchema(z.array(
    z.object({
      contributor: z.string().describe(
        "The name of the main contributor of the PR",
      ),
      description: z.string().describe(
        "A description of what the pull request is for",
      ),
      isFirstContribution: z.boolean().describe(
        "Whether it is the contributor's first contribution",
      ),
      pullRequestNumber: z.number().describe("The number of the pull request"),
    }).describe("An object representing a pull request"),
  ));
  const outputFixingParser = OutputFixingParser.fromLLM(model, outputParser);
  const tools = [
    new DynamicTool({
      name: "langchain-release-summarizer",
      description:
        "Extracts information about the pull requests merged as part of a LangChain release. Takes a GitHub URL as input.",
      func: async (input, runManager) => {
        const response = await fetch(input.trim());
        const pageContent = await response.text();
        const $ = cheerio.load(pageContent);
        const releaseNotes = $("#repo-content-pjax-container").text();
        const prExtractionChain = new LLMChain({
          llm: anthropicModel,
          prompt: ChatPromptTemplate.fromPromptMessages([
            HumanMessagePromptTemplate.fromTemplate(`{query}\n\n{pageContent}`),
          ]),
          outputParser: outputFixingParser,
          outputKey: "pullRequests",
        });
        const summarizationResult = await prExtractionChain.call({
          query:
            `The following webpage contains the release notes for LangChain, an open source framework for building apps with LLMs.
List all of the pull requests mentioned in the release notes.
Extract the name of the main contributor, a description of the pull request, whether it is their first contribution, and the number of the pull request.
Be extremely verbose!`,
          pageContent: releaseNotes,
        }, runManager?.getChild());
        return JSON.stringify(summarizationResult.pullRequests);
      },
    }),
    new SerpAPI(process.env.SERPAPI_API_KEY, {
      location: "Austin,Texas,United States",
      hl: "en",
      gl: "us",
    }),
  ];
  const agent = await initializeAgentExecutorWithOptions(tools, model, {
    agentType: "chat-conversational-react-description",
  });
  const result = await agent.call({
    input: `Generate a Twitter thread announcing a new LangChain release.
The release notes are available at this URL: https://github.com/hwchase17/langchainjs/releases/tag/0.0.84.
The release notes include a short description of each merged pull request and the contributor who built the feature.
The thread must start with a header tweet summarizing all the changes in the release.
The thread must contain a tweet for each pull request merged as part of the release that adds a significant feature, and must go into deep detail about what the contribution adds.
If you don't know what something mentioned in the release notes is, look it up with the provided tool so that you can get full context.
Each tweet should also thank the contributor by name, and congratulate them if it is their first contribution and put a medal emoji 🥇 next to their name.
Try to avoid repetitive language in your tweets.
Be extremely verbose and descriptive in your final response.
Below is an example of the format that tweets in the final output Twitter thread should follow. Individual tweets should be separated by a "-----" sequence:
-----Header tweet-----
@LangChainAI 🦜🔗 JS/TS 0.0.83 out with loads of @GoogleAI and PaLM!
💬 Google Vertex AI chat model + embeddings`,
  });
  return result;
})();
import process from "node:process";
import { ChatOpenAI } from "langchain/chat_models/openai";

const model = new ChatOpenAI({
  temperature: 0.9,
  openAIApiKey: process.env.OPENAI_API_KEY,
});

// A bare `return` is invalid at module top level; export the result instead.
export const response = await model.invoke("What is your name?");
export { model };
import process from "node:process";

export const untitled_silverPinniped = (async () => {
  const { ChatPromptTemplate } = await import("npm:langchain/prompts");
  const { ChatOpenAI } = await import("npm:langchain/chat_models/openai");
  const { StringOutputParser } = await import(
    "npm:langchain/schema/output_parser"
  );
  const SYSTEM_TEMPLATE = `You are an AI programming assistant.`;
  const redesignTemplate = ChatPromptTemplate.fromMessages<{
    input: string;
    initial_code: string;
  }>([
    ["system", SYSTEM_TEMPLATE],
    ["human", "{input}"],
  ]);
  const model = new ChatOpenAI({
    modelName: "gpt-4",
    temperature: 0.2,
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  // Output parser converts the chat message into a raw string. Also works with streaming.
  const chain = redesignTemplate.pipe(model).pipe(new StringOutputParser());
  const output = await chain.invoke({
    input: "A word guessing game.",
    initial_code:
      `<TextInput id="nameInput" placeholder="Starting test input" />`,
  });
  return output;
})();
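As the comment notes, the string output parser also works with streaming; a minimal sketch consuming the same chain incrementally via the standard `.stream()` runnable method, inside the same scope:

  // Hedged sketch: stream the chain; each chunk is a plain string because
  // StringOutputParser is the final step.
  const stream = await chain.stream({
    input: "A word guessing game.",
    initial_code: `<TextInput id="nameInput" placeholder="Starting test input" />`,
  });
  for await (const chunk of stream) {
    console.log(chunk);
  }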
import process from "node:process";

export const questionsWithGuidelinesChain = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain@0.0.150/chat_models/openai"
  );
  const { ChatPromptTemplate } = await import(
    "https://esm.sh/langchain@0.0.150/prompts"
  );
  const { StringOutputParser } = await import(
    "https://esm.sh/langchain@0.0.150/schema/output_parser"
  );
  const { RunnableSequence } = await import(
    "https://esm.sh/langchain@0.0.150/schema/runnable"
  );
  const questionPrompt = ChatPromptTemplate.fromPromptMessages([
    ["user", "Generate 5 questions about the following paragraph: {paragraph}"],
  ]);
  const questionChain = questionPrompt
    .pipe(new ChatOpenAI({
      openAIApiKey: process.env.OPENAI_API_KEY,
    }))
    .pipe(new StringOutputParser());
  const stylePrompt = ChatPromptTemplate.fromPromptMessages([
    [
      "user",
      "Transform the following questions to meet the guidelines:\n\nQuestions:\n\n{questions}\n\nGuidelines:{guidelines}",
    ],
  ]);
  const styleChain = stylePrompt
    .pipe(
      new ChatOpenAI({
        openAIApiKey: process.env.OPENAI_API_KEY,
      }),
    )
    .pipe(new StringOutputParser());
  // RunnableSequence.from() is equivalent to `.pipe().pipe()`
  // but will coerce objects (and functions) into runnables
  const questionStyleChain = RunnableSequence.from([
    {
      questions: questionChain,
      // Each property in the object gets the same input,
      // and we want to just pass through the "guidelines"
      // property from the original input as a parameter
      // to "styleChain".
      guidelines: (input) => input.guidelines,
    },
    styleChain,
  ]);
  const result = await questionStyleChain.invoke({
    paragraph:
      "Harrison went to harvard and worked at kensho. Kensho is in boston. Harrison likes boston. harvard is in boston.",
    guidelines: "Questions are in Spanish",
  });
  return result;
})();
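The coercion mentioned in the comment also applies to plain functions, so an extra post-processing step can be dropped straight into the sequence. A minimal sketch extending the chain above within the same scope (the footer formatting is illustrative):

  // Hedged sketch: RunnableSequence.from() coerces the trailing function
  // into a runnable final step that reshapes the styled questions.
  const questionStyleChainWithFooter = RunnableSequence.from([
    {
      questions: questionChain,
      guidelines: (input) => input.guidelines,
    },
    styleChain,
    (styledQuestions) => `Transformed questions:\n${styledQuestions}`,
  ]);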
import process from "node:process";

export const untitled_chocolateSquid = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain@0.0.146/chat_models/openai"
  );
  const { LLMChain } = await import("https://esm.sh/langchain@0.0.146/chains");
  const { ChatPromptTemplate } = await import(
    "https://esm.sh/langchain@0.0.146/prompts"
  );
  const template =
    "You are a helpful assistant that translates {input_language} to {output_language}.";
  const humanTemplate = "{text}";
  const chatPrompt = ChatPromptTemplate.fromPromptMessages([
    ["system", template],
    ["human", humanTemplate],
  ]);
  const chat = new ChatOpenAI({
    temperature: 0,
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  const chain = new LLMChain({
    llm: chat,
    prompt: chatPrompt,
  });
  const result = await chain.call({
    input_language: "English",
    output_language: "French",
    text: "I love programming!",
  });
  return result;
})();
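For comparison, the same translation could be written in the expression-language style used elsewhere on this page (prompt piped into model piped into a string parser). A minimal sketch that would live inside the same async function, assuming this LangChain version exposes `StringOutputParser` at the path below:

  // Hedged sketch: LCEL equivalent of the LLMChain above.
  const { StringOutputParser } = await import(
    "https://esm.sh/langchain@0.0.146/schema/output_parser"
  );
  const lcelChain = chatPrompt.pipe(chat).pipe(new StringOutputParser());
  const translated = await lcelChain.invoke({
    input_language: "English",
    output_language: "French",
    text: "I love programming!",
  });
  console.log(translated);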