import { secrets } from "https://esm.town/v/me/secrets";

export const conversationalRetrievalQAChainStreamingExample = (async () => {
  // Dynamically import the LangChain modules used below
  const { OpenAI } = await import("https://esm.sh/langchain/llms/openai");
  const { OpenAIEmbeddings } = await import(
    "https://esm.sh/langchain/embeddings/openai"
  );
  const { BufferMemory } = await import("https://esm.sh/langchain/memory");
  const { MemoryVectorStore } = await import(
    "https://esm.sh/langchain/vectorstores/memory"
  );
  const { ConversationalRetrievalQAChain } = await import(
    "https://esm.sh/langchain/chains"
  );

  /* Streaming model: accumulate tokens into a string as they arrive */
  let streamedResponse = "";
  const streamingModel = new OpenAI({
    openAIApiKey: secrets.OPENAI_API_KEY,
    streaming: true,
    callbacks: [{
      handleLLMNewToken(token) {
        streamedResponse += token;
      },
    }],
  });
  /* Non-streaming model, used only to rephrase follow-up questions */
  const nonStreamingModel = new OpenAI({
    openAIApiKey: secrets.OPENAI_API_KEY,
  });

  /* Create the vectorstore */
  const vectorStore = await MemoryVectorStore.fromTexts(
    ["Hello world", "Bye bye", "hello nice world"],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings({
      openAIApiKey: secrets.OPENAI_API_KEY,
    }),
  );

  /* Create the chain */
  const chain = ConversationalRetrievalQAChain.fromLLM(
    streamingModel,
    vectorStore.asRetriever(),
    {
      memory: new BufferMemory({
        memoryKey: "chat_history",
      }),
      questionGeneratorChainOptions: {
        llm: nonStreamingModel,
      },
    },
  );

  /* Ask it a question */
  const question = "What did the president say about Justice Breyer?";
  const res = await chain.call({ question });
  // console.log(res);

  /* Ask it a follow up question */
  const followUpRes = await chain.call({
    question: "Was that nice?",
  });
  // console.log(followUpRes);

  console.log("streamed response", streamedResponse);
  return streamedResponse;
})();
// Forked from @jacoblee93.conversationalRetrievalQAChainStreamingExample