import process from "node:process";

export const conversationalRetrievalQAChainSummaryMemory = (async () => {
  const { ChatOpenAI } = await import(
    "https://esm.sh/langchain/chat_models/openai"
  );
  const { OpenAIEmbeddings } = await import(
    "https://esm.sh/langchain/embeddings/openai"
  );
  const { ConversationSummaryMemory } = await import(
    "https://esm.sh/langchain/memory"
  );
  const { MemoryVectorStore } = await import(
    "https://esm.sh/langchain/vectorstores/memory"
  );
  const { ConversationalRetrievalQAChain } = await import(
    "https://esm.sh/langchain/chains"
  );
  const chatModel = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
  });
  /* Create the vector store from a few example texts */
  const vectorStore = await MemoryVectorStore.fromTexts(
    [
      "Mitochondria are the powerhouse of the cell",
      "Bye bye",
      "Mitochondria are made of lipids",
    ],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings({
      openAIApiKey: process.env.OPENAI_API_KEY,
    }),
  );
  /* Create the chain. Because the chain returns source documents alongside
     the answer, the memory needs an explicit outputKey ("text") so it knows
     which output field to store. */
  const chain = ConversationalRetrievalQAChain.fromLLM(
    chatModel,
    vectorStore.asRetriever(),
    {
      returnSourceDocuments: true,
      memory: new ConversationSummaryMemory({
        memoryKey: "chat_history",
        llm: chatModel,
        outputKey: "text",
        returnMessages: true,
      }),
      qaChainOptions: {
        type: "map_reduce",
      },
    },
  );
  /* Ask it a question */
  const question = "What is the powerhouse of the cell?";
  const res = await chain.call({ question });
  console.log(res);
  /* Ask a follow-up question; the summary memory carries the earlier context
     forward, so "they" resolves to mitochondria */
  const followUpRes = await chain.call({
    question: "What are they made out of?",
  });
  console.log(followUpRes);
  return { res, followUpRes };
})();
👆 This is a val. Vals are TypeScript snippets of code, written in the browser and run on our servers. Create scheduled functions, email yourself, and persist small pieces of data — all from the browser.
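ConversationSummaryMemory compresses the conversation by making an extra LLM call after each turn to update a running summary, which keeps the prompt short but adds latency and cost per exchange. A minimal variation, sketched below under the assumption that you reuse the chatModel and vectorStore from the val above, swaps in BufferMemory from the same langchain/memory module to store the raw transcript instead:

// Sketch, not part of the val above: the same chain wired with BufferMemory,
// which keeps the full message history verbatim and so avoids the per-turn
// summarization call that ConversationSummaryMemory makes.
const { BufferMemory } = await import("https://esm.sh/langchain/memory");

const bufferChain = ConversationalRetrievalQAChain.fromLLM(
  chatModel, // ChatOpenAI instance from the val above
  vectorStore.asRetriever(),
  {
    returnSourceDocuments: true,
    memory: new BufferMemory({
      memoryKey: "chat_history", // key the chain's prompt reads the history from
      outputKey: "text",         // still required: the chain also returns sourceDocuments
      returnMessages: true,
    }),
  },
);

const bufferRes = await bufferChain.call({
  question: "What is the powerhouse of the cell?",
});

The trade-off is the usual one: BufferMemory grows the prompt with every turn, while the summary memory keeps it bounded at the price of one additional model call per exchange.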