import process from "node:process";
/**
 * Demonstrates a ConversationalRetrievalQAChain where the answer-generating
 * LLM streams its tokens (collected into `streamedResponse`) while a second,
 * non-streaming LLM handles the internal question-rephrasing step — this
 * keeps rephrased-question tokens out of the streamed output.
 *
 * Resolves to the full streamed answer string. Requires the
 * OPENAI_API_KEY environment variable and network access.
 */
export const conversationalRetrievalQAChainStreamingExample = (async () => {
  // The five langchain modules are independent — load them in parallel
  // instead of awaiting each import sequentially.
  const [
    { ChatOpenAI },
    { OpenAIEmbeddings },
    { BufferMemory },
    { MemoryVectorStore },
    { ConversationalRetrievalQAChain },
  ] = await Promise.all([
    import("https://esm.sh/langchain/chat_models/openai"),
    import("https://esm.sh/langchain/embeddings/openai"),
    import("https://esm.sh/langchain/memory"),
    import("https://esm.sh/langchain/vectorstores/memory"),
    import("https://esm.sh/langchain/chains"),
  ]);

  let streamedResponse = "";

  // Streaming model: every generated token is appended to streamedResponse
  // via the handleLLMNewToken callback.
  const streamingModel = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
    streaming: true,
    callbacks: [
      {
        handleLLMNewToken(token) {
          streamedResponse += token;
        },
      },
    ],
  });

  // Non-streaming model, used only for the question-generation
  // (follow-up rephrasing) step of the chain.
  const nonStreamingModel = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
  });

  // Small in-memory vector store seeded with three sample texts and
  // their metadata, embedded with OpenAI embeddings.
  const vectorStore = await MemoryVectorStore.fromTexts(
    [
      "Mitochondria are the powerhouse of the cell",
      "Bye bye",
      "Mitochondria are made of lipids",
    ],
    [{ id: 2 }, { id: 1 }, { id: 3 }],
    new OpenAIEmbeddings({
      openAIApiKey: process.env.OPENAI_API_KEY,
    }),
  );

  const chain = ConversationalRetrievalQAChain.fromLLM(
    streamingModel,
    vectorStore.asRetriever(),
    {
      memory: new BufferMemory({
        memoryKey: "chat_history",
        outputKey: "text", // persist only the answer text into memory
        returnMessages: true,
      }),
      // Route the rephrasing step to the non-streaming model so only the
      // final answer's tokens reach the streaming callback above.
      questionGeneratorChainOptions: {
        llm: nonStreamingModel,
      },
    },
  );

  const question = "What is the powerhouse of the cell?";
  // The answer is surfaced through the streaming callback; the call's
  // direct return value is not needed, so don't bind it to a variable.
  await chain.call({ question });
  console.log("streamed response", streamedResponse);
  return streamedResponse;
})();