
Vals using langchain/chat_models/openai

import process from "node:process";
import { ChatOpenAI } from "npm:langchain/chat_models/openai";

const model = new ChatOpenAI({
  temperature: 0.9,
  openAIApiKey: process.env.openai,
});

export const modelInvoke = model.invoke("What is your name?");
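
model.invoke returns a promise for the chat model's reply message, so the exported value resolves to an AI message rather than a plain string. A minimal sketch of consuming it from another val (illustrative usage, not part of the original val; assumes top-level await):

// Await the exported promise and read the reply text.
const reply = await modelInvoke;
console.log(reply.content);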

import process from "node:process";

export const chatAgentWithCustomPrompt = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { initializeAgentExecutorWithOptions } = await import(
    "https://esm.sh/langchain/agents"
  );
  const { Calculator } = await import(
    "https://esm.sh/langchain/tools/calculator"
  );
  const model = new ChatOpenAI({
    temperature: 0,
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  const tools = [
    new Calculator(),
  ];
  const executor = await initializeAgentExecutorWithOptions(tools, model, {
    agentType: "chat-zero-shot-react-description",
    verbose: true,
    agentArgs: {
      humanMessageTemplate:
        "{input}\n\n{agent_scratchpad}\nYou must also always give your final answer in French, as the human you are talking to only speaks French.",
    },
  });
  const result = await executor.call({
    input: `How is your day going?`,
  });
  // You could also just translate the English output of the agent into French with another LLM call
  return result;
})();
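
The closing comment suggests translating the agent's English output with a second LLM call instead of steering it through the prompt. A minimal sketch of that alternative, to be placed inside the same async body (the translator model and prompt string are illustrative, and it assumes the executor exposes the final answer as result.output, as LangChain agent executors typically do):

// Translate the finished answer with a separate, deterministic model.
const translator = new ChatOpenAI({
  temperature: 0,
  openAIApiKey: process.env.OPENAI_API_KEY,
});
const translated = await translator.invoke(
  `Translate the following into French:\n\n${result.output}`,
);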

import process from "node:process";

export const conversationalRetrievalQAChainSummaryMemory = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { OpenAIEmbeddings } = await import(
    "https://esm.sh/langchain/embeddings/openai"
  );
  const { ConversationSummaryMemory } = await import(
    "https://esm.sh/langchain/memory"
  );
  const { MemoryVectorStore } = await import(
    "https://esm.sh/langchain/vectorstores/memory"
  );
  const { ConversationalRetrievalQAChain } = await import(
    "https://esm.sh/langchain/chains"
  );
  const chatModel = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  /* Create the vectorstore */
  const vectorStore = await MemoryVectorStore.fromTexts(
    [
      "Mitochondria are the powerhouse of the cell",
      "Bye bye",
      "Mitochondria are made of lipids",
    ],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings({
      openAIApiKey: process.env.OPENAI_API_KEY,
    }),
  );
  /* Create the chain */
  const chain = ConversationalRetrievalQAChain.fromLLM(
    chatModel,
    vectorStore.asRetriever(),
    {
      returnSourceDocuments: true,
      memory: new ConversationSummaryMemory({
        memoryKey: "chat_history",
        llm: chatModel,
        outputKey: "text",
        returnMessages: true,
      }),
      qaChainOptions: {
        type: "map_reduce",
      },
    },
  );
  /* Ask it a question */
  const question = "What is the powerhouse of the cell?";
  const res = await chain.call({ question });
  console.log(res);
  /* Ask it a follow up question */
  const followUpRes = await chain.call({
    question: "What are they made out of?",
  });
  console.log(followUpRes);
  return { res, followUpRes };
})();

import process from "node:process";

export const conversationalRetrievalQAChainStreamingExample = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { OpenAIEmbeddings } = await import(
    "https://esm.sh/langchain/embeddings/openai"
  );
  const { BufferMemory } = await import("https://esm.sh/langchain/memory");
  const { MemoryVectorStore } = await import(
    "https://esm.sh/langchain/vectorstores/memory"
  );
  const { ConversationalRetrievalQAChain } = await import(
    "https://esm.sh/langchain/chains"
  );
  let streamedResponse = "";
  const streamingModel = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
    streaming: true,
    callbacks: [{
      handleLLMNewToken(token) {
        streamedResponse += token;
      },
    }],
  });
  const nonStreamingModel = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  /* Create the vectorstore */
  const vectorStore = await MemoryVectorStore.fromTexts(
    [
      "Mitochondria are the powerhouse of the cell",
      "Bye bye",
      "Mitochondria are made of lipids",
    ],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings({
      openAIApiKey: process.env.OPENAI_API_KEY,
    }),
  );
  /* Create the chain */
  const chain = ConversationalRetrievalQAChain.fromLLM(
    streamingModel,
    vectorStore.asRetriever(),
    {
      memory: new BufferMemory({
        memoryKey: "chat_history",
        outputKey: "text",
        returnMessages: true,
      }),
      questionGeneratorChainOptions: {
        llm: nonStreamingModel,
      },
    },
  );
  /* Ask it a question */
  const question = "What is the powerhouse of the cell?";
  const res = await chain.call({ question });
  // console.log(res);
  console.log("streamed response", streamedResponse);
  return streamedResponse;
})();

import process from "node:process";

// Shows how to use the Brave Search tool in a LangChain agent
export const braveAgent = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { BraveSearch } = await import("https://esm.sh/langchain/tools");
  const { Calculator } = await import(
    "https://esm.sh/langchain/tools/calculator"
  );
  const { initializeAgentExecutorWithOptions } = await import(
    "https://esm.sh/langchain/agents"
  );
  const model = new ChatOpenAI({
    temperature: 0,
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  const tools = [
    new BraveSearch({
      apiKey: process.env.BRAVE_SEARCH_API_KEY,
    }),
    new Calculator(),
  ];
  const executor = await initializeAgentExecutorWithOptions(tools, model, {
    agentType: "chat-zero-shot-react-description",
    verbose: true,
  });
  const input =
    `Who is Dua Lipa's boyfriend? What is his current age raised to the 0.23 power?`;
  console.log(`Executing with input "${input}"...`);
  const result = await executor.call({ input });
  return result;
})();

import process from "node:process";

export const multipleKeysAndMemoryConversationChainExample = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { BufferMemory } = await import("https://esm.sh/langchain/memory");
  const {
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
  } = await import("https://esm.sh/langchain/prompts");
  const { ConversationChain } = await import("https://esm.sh/langchain/chains");
  const llm = new ChatOpenAI({
    modelName: "gpt-3.5-turbo",
    openAIApiKey: process.env.openai,
    temperature: 0,
  });
  const memory = new BufferMemory({
    memoryKey: "chat_history",
    inputKey: "question",
    returnMessages: true,
  });
  const prompt = ChatPromptTemplate.fromPromptMessages([
    SystemMessagePromptTemplate.fromTemplate(
      `There are {vegetables}, {fruit} and {meat} sorts available for cooking. Create a recipe from the human input, using some of these ingredients as a basis.
This is the existing menu: {menu}. Dishes must not include any ingredient already in the existing menu.`,
    ),
    new MessagesPlaceholder("chat_history"),
    HumanMessagePromptTemplate.fromTemplate("{question}"),
  ]);
  const chain = new ConversationChain({ llm, memory, prompt });
  const result = await chain.call({
    vegetables: ["carrot", "potato", "tomato"].join(", "),
    fruit: ["apple", "banana", "orange"].join(", "),
    meat: ["chicken", "beef", "pork"].join(", "),
    menu: ["chicken soup", "beef steak", "pork chop"].join(", "),
    question: "What is a good recipe with the above ingredients?",
  });
  return result.response;
})();
// Forked from @jacoblee93.multipleKeysAndMemoryConversationChainExample

import process from "node:process";

export const multipleKeysAndMemoryConversationChainExample = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { BufferMemory } = await import("https://esm.sh/langchain/memory");
  const {
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
  } = await import("https://esm.sh/langchain/prompts");
  const { ConversationChain } = await import("https://esm.sh/langchain/chains");
  const llm = new ChatOpenAI({
    modelName: "gpt-3.5-turbo",
    openAIApiKey: process.env.OPENAI_API_KEY,
    temperature: 0,
  });
  const memory = new BufferMemory({
    memoryKey: "chat_history",
    inputKey: "question",
    returnMessages: true,
  });
  const prompt = ChatPromptTemplate.fromPromptMessages([
    SystemMessagePromptTemplate.fromTemplate(
      `There are {vegetables}, {fruit} and {meat} sorts available for cooking. Create a recipe from the human input, using some of these ingredients as a basis.
This is the existing menu: {menu}. Dishes must not include any ingredient already in the existing menu.`,
    ),
    new MessagesPlaceholder("chat_history"),
    HumanMessagePromptTemplate.fromTemplate("{question}"),
  ]);
  const chain = new ConversationChain({ llm, memory, prompt });
  const result = await chain.call({
    vegetables: ["carrot", "potato", "tomato"].join(", "),
    fruit: ["apple", "banana", "orange"].join(", "),
    meat: ["chicken", "beef", "pork"].join(", "),
    menu: ["chicken soup", "beef steak", "pork chop"].join(", "),
    question: "What is a good recipe with the above ingredients?",
  });
  console.log({ result });
  return result;
})();

import process from "node:process";

export const conversationalQAChainEx = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { HNSWLib } = await import(
    "https://esm.sh/langchain/vectorstores/hnswlib"
  );
  const { OpenAIEmbeddings } = await import(
    "https://esm.sh/langchain/embeddings/openai"
  );
  const { ConversationalRetrievalQAChain } = await import(
    "https://esm.sh/langchain/chains"
  );
  const gpt35 = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
    modelName: "gpt-3.5-turbo",
    temperature: 0,
  });
  const gpt4 = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
    modelName: "gpt-4",
    temperature: 0,
  });
  const vectorStore = await HNSWLib.fromTexts(
    ["Hello world", "Bye bye", "hello nice world", "bye", "hi"],
    [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
    new OpenAIEmbeddings({
      openAIApiKey: process.env.OPENAI_API_KEY,
    }),
  );
  const qaChain = ConversationalRetrievalQAChain.fromLLM(
    gpt4,
    vectorStore.asRetriever(),
    {
      questionGeneratorChainOptions: {
        llm: gpt35, // Need for speed!
      },
    },
  );
  const chatHistory = [];
  const query = `What did the president say about Ketanji Brown Jackson?`;
  const response = await qaChain.call({
    question: query,
    chat_history: chatHistory,
  });
  console.log(response);
  return response;
})();

import process from "node:process";

export const langchainEx = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { PromptTemplate } = await import("https://esm.sh/langchain/prompts");
  const { LLMChain } = await import("https://esm.sh/langchain/chains");
  const model = new ChatOpenAI({
    temperature: 0.9,
    openAIApiKey: process.env.OPENAI_API_KEY,
    verbose: true,
  });
  const template = "What is a good name for a company that makes {product}?";
  const prompt = new PromptTemplate({
    template: template,
    inputVariables: ["product"],
  });
  const chain = new LLMChain({ llm: model, prompt: prompt, verbose: true });
  const res = await chain.call({ product: "colorful socks" });
  return res;
})();
// Forked from @stevekrouse.langchainEx