Back to packages list

Vals using @types/openai

OpenAI ChatGPT helper function

This val uses your OpenAI token if you have one, and falls back to the @std/openai standard library if not, so it provides limited OpenAI usage for free.

Create val: import { chat } from "https://esm.town/v/stevekrouse/openai"; const { content } = await chat("Hello, GPT!"); console.log(content);
Create val: import { chat } from "https://esm.town/v/stevekrouse/openai"; const { content } = await chat( [ { role: "system", content: "You are Alan Kay" }, { role: "user", content: "What is the real computer revolution?"} ], { max_tokens: 50, model: "gpt-4" } ); console.log(content);
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import type { ChatCompletion, ChatCompletionCreateParamsNonStreaming, Message } from "npm:@types/openai";
async function getOpenAI() {
  // Prefer the user's own API key; without one, fall back to the free
  // std-library client provided by val.town.
  if (Deno.env.get("OPENAI_API_KEY") !== undefined) {
    const { OpenAI } = await import("npm:openai");
    return new OpenAI();
  }
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  return new OpenAI();
}
/**
 * Initiates a chat conversation with OpenAI's GPT model and retrieves the
 * content of the first response.
 *
 * Accepts either a single string (wrapped as one user message) or an array of
 * message objects. Caller-supplied options are merged over the defaults
 * (max_tokens: 30, model: "gpt-3.5-turbo").
 *
 * @param input - The message(s) to send to GPT.
 * @param options - Completion options other than `messages`.
 * @returns The full completion response, with the first choice's text exposed
 *   as `content` (empty string when the API returns no text).
 */
export async function chat(
  input: string | Message[],
  options?: Omit<ChatCompletionCreateParamsNonStreaming, "messages">,
): Promise<ChatCompletion & { content: string }> {
  const openai = await getOpenAI();
  // A bare string becomes a single user message.
  const messages = Array.isArray(input) ? input : [{ role: "user", content: input }];
  const createParams: ChatCompletionCreateParamsNonStreaming = {
    // Defaults first so caller-supplied options win.
    max_tokens: 30,
    model: "gpt-3.5-turbo",
    ...options,
    messages,
  };
  const completion = await openai.chat.completions.create(createParams);
  // Coalesce null content (the API may return none) so the declared
  // `content: string` type holds at runtime.
  return { ...completion, content: completion.choices[0].message.content ?? "" };
}

OpenAI ChatGPT helper function

This val uses your OpenAI token if you have one, and falls back to the @std/openai standard library if not, so it provides limited OpenAI usage for free.

Create val: import { chat } from "https://esm.town/v/stevekrouse/openai"; const { content } = await chat("Hello, GPT!"); console.log(content);
Create val: import { chat } from "https://esm.town/v/stevekrouse/openai"; const { content } = await chat( [ { role: "system", content: "You are Alan Kay" }, { role: "user", content: "What is the real computer revolution?"} ], { max_tokens: 50, model: "gpt-4" } ); console.log(content);
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import type { ChatCompletion, ChatCompletionCreateParamsNonStreaming, Message } from "npm:@types/openai";
async function getOpenAI() {
  // Prefer the user's own API key; without one, fall back to the free
  // std-library client provided by val.town.
  if (Deno.env.get("OPENAI_API_KEY") !== undefined) {
    const { OpenAI } = await import("npm:openai");
    return new OpenAI();
  }
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  return new OpenAI();
}
/**
 * Initiates a chat conversation with OpenAI's GPT model and retrieves the
 * content of the first response.
 *
 * Accepts either a single string (wrapped as one user message) or an array of
 * message objects. Caller-supplied options are merged over the defaults
 * (max_tokens: 30, model: "gpt-3.5-turbo").
 *
 * @param input - The message(s) to send to GPT.
 * @param options - Completion options other than `messages`.
 * @returns The full completion response, with the first choice's text exposed
 *   as `content` (empty string when the API returns no text).
 */
export async function chat(
  input: string | Message[],
  options?: Omit<ChatCompletionCreateParamsNonStreaming, "messages">,
): Promise<ChatCompletion & { content: string }> {
  const openai = await getOpenAI();
  // A bare string becomes a single user message.
  const messages = Array.isArray(input) ? input : [{ role: "user", content: input }];
  const createParams: ChatCompletionCreateParamsNonStreaming = {
    // Defaults first so caller-supplied options win.
    max_tokens: 30,
    model: "gpt-3.5-turbo",
    ...options,
    messages,
  };
  const completion = await openai.chat.completions.create(createParams);
  // Coalesce null content (the API may return none) so the declared
  // `content: string` type holds at runtime.
  return { ...completion, content: completion.choices[0].message.content ?? "" };
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import type { ChatCompletion, ChatCompletionCreateParamsNonStreaming, Message } from "npm:@types/openai";
async function getOpenAI() {
  // Prefer the user's own API key; without one, fall back to the free
  // std-library client provided by val.town.
  if (Deno.env.get("OPENAI_API_KEY") !== undefined) {
    const { OpenAI } = await import("npm:openai");
    return new OpenAI();
  }
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  return new OpenAI();
}
/**
 * Creates a tagged-template chat helper bound to a system prompt and
 * completion options.
 *
 * @param chatOptions - Completion options plus the `system` prompt.
 * @returns An async tagged-template function that sends the interpolated
 *   template text as the user message and resolves to the completion with
 *   the first choice's text exposed as `content`.
 */
function startChat(chatOptions: Omit<ChatCompletionCreateParamsNonStreaming, "messages"> & { system: string } = {
  max_tokens: 30,
  model: "gpt-3.5-turbo",
  system: "You are a helpful assistant.",
}) {
  const { system, ...options } = chatOptions;
  return async function gpt(strings: TemplateStringsArray, ...values: unknown[]) {
    const openai = await getOpenAI();
    // Interleave literal chunks with interpolated values. `?? ""` (rather than
    // the previous `|| ""`) keeps falsy-but-valid values such as 0 and false.
    const input = strings.reduce((result, str, i) => result + str + String(values[i] ?? ""), "");
    const messages = [{ role: "system", content: system }, { role: "user", content: input }];
    const createParams: ChatCompletionCreateParamsNonStreaming = {
      ...options,
      messages,
    };
    const completion = await openai.chat.completions.create(createParams);
    return { ...completion, content: completion.choices[0].message.content };
  };
}
// Initialize the gpt function with the system message
// (startChat returns a tagged-template function bound to these options).
const gpt = startChat({ system: "You are New Yorker cartoonist", max_tokens: 200, model: "gpt-4" });
// Tagged-template call: the literal text becomes the user message.
const answer = await gpt`Draw me cartoon depicting https://www.val.town user in their fighting TypeScript a-ha moment`;
console.log(answer.content);
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import type { ChatCompletion, ChatCompletionCreateParamsNonStreaming, Message } from "npm:@types/openai";
// a test comment for vt-backup
// another test comment for vt-backup
async function getOpenAI() {
  // Prefer the user's own API key; without one, fall back to the free
  // std-library client provided by val.town.
  if (Deno.env.get("OPENAI_API_KEY") !== undefined) {
    const { OpenAI } = await import("npm:openai");
    return new OpenAI();
  }
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  return new OpenAI();
}
/**
 * Creates a tagged-template chat helper bound to a system prompt and
 * completion options, e.g.:
 *
 *   const gpt = startChat({ system: "You are X", model: "gpt-4", max_tokens: 100 });
 *   const { content } = await gpt`Hello!`;
 *
 * @param chatOptions - Completion options plus the `system` prompt.
 * @returns An async tagged-template function that sends the interpolated
 *   template text as the user message and resolves to the completion with
 *   the first choice's text exposed as `content`.
 */
export function startChat(chatOptions: Omit<ChatCompletionCreateParamsNonStreaming, "messages"> & { system: string } = {
  max_tokens: 30,
  model: "gpt-3.5-turbo",
  system: "You are a helpful assistant.",
}) {
  const { system, ...options } = chatOptions;
  return async function gpt(strings: TemplateStringsArray, ...values: unknown[]) {
    const openai = await getOpenAI();
    // Re-assemble the template text; String.raw interpolates every value,
    // including falsy ones like 0 or "".
    const input = String.raw({ raw: strings }, ...values);
    const messages = [{ role: "system", content: system }, { role: "user", content: input }];
    const createParams: ChatCompletionCreateParamsNonStreaming = {
      ...options,
      messages,
    };
    const completion = await openai.chat.completions.create(createParams);
    return { ...completion, content: completion.choices[0].message.content };
  };
}

OpenAI ChatGPT helper function

This val uses your OpenAI token if you have one, and falls back to the @std/openai standard library if not, so it provides limited OpenAI usage for free.

Create val: import { chat } from "https://esm.town/v/stevekrouse/openai"; const { content } = await chat("Hello, GPT!"); console.log(content);
Create val: import { chat } from "https://esm.town/v/stevekrouse/openai"; const { content } = await chat( [ { role: "system", content: "You are Alan Kay" }, { role: "user", content: "What is the real computer revolution?"} ], { max_tokens: 50, model: "gpt-4" } ); console.log(content);
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import type { ChatCompletion, ChatCompletionCreateParamsNonStreaming, Message } from "npm:@types/openai";
async function getOpenAI() {
  // Prefer the user's own API key; without one, fall back to the free
  // std-library client provided by val.town.
  if (Deno.env.get("OPENAI_API_KEY") !== undefined) {
    const { OpenAI } = await import("npm:openai");
    return new OpenAI();
  }
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  return new OpenAI();
}
/**
 * Initiates a chat conversation with OpenAI's GPT model and retrieves the
 * content of the first response.
 *
 * Accepts either a single string (wrapped as one user message) or an array of
 * message objects. Defaults (max_tokens: 30, model: "gpt-3.5-turbo") are
 * merged under caller-supplied options, so passing a partial options object
 * (e.g. only `model`) no longer discards the remaining defaults — matching
 * the behavior of the other `chat` implementations.
 *
 * @param input - The message(s) to send to GPT.
 * @param options - Completion options other than `messages`.
 * @returns The full completion response, with the first choice's text exposed
 *   as `content` (empty string when the API returns no text).
 */
export async function chat(
  input: string | Message[],
  options: Omit<ChatCompletionCreateParamsNonStreaming, "messages"> = {},
): Promise<ChatCompletion & { content: string }> {
  const openai = await getOpenAI();
  // A bare string becomes a single user message.
  const messages = Array.isArray(input) ? input : [{ role: "user", content: input }];
  const createParams: ChatCompletionCreateParamsNonStreaming = {
    // Defaults first so caller-supplied options win.
    max_tokens: 30,
    model: "gpt-3.5-turbo",
    ...options,
    messages,
  };
  const completion = await openai.chat.completions.create(createParams);
  // Coalesce null content (the API may return none) so the declared
  // `content: string` type holds at runtime.
  return { ...completion, content: completion.choices[0].message.content ?? "" };
}

OpenAI ChatGPT helper function

This val uses your OpenAI token if you have one, and falls back to the @std/openai standard library if not, so it provides limited OpenAI usage for free.

Create val: import { chat } from "https://esm.town/v/stevekrouse/openai"; const { content } = await chat("Hello, GPT!"); console.log(content);
Create val: import { chat } from "https://esm.town/v/stevekrouse/openai"; const { content } = await chat( [ { role: "system", content: "You are Alan Kay" }, { role: "user", content: "What is the real computer revolution?"} ], { max_tokens: 50, model: "gpt-4" } ); console.log(content);
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import type { ChatCompletion, ChatCompletionCreateParamsNonStreaming, Message } from "npm:@types/openai";
async function getOpenAI() {
  // Prefer the user's own API key; without one, fall back to the free
  // std-library client provided by val.town.
  if (Deno.env.get("OPENAI_API_KEY") !== undefined) {
    const { OpenAI } = await import("npm:openai");
    return new OpenAI();
  }
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  return new OpenAI();
}
/**
 * Initiates a chat conversation with OpenAI's GPT model and retrieves the
 * content of the first response.
 *
 * Accepts either a single string (wrapped as one user message) or an array of
 * message objects. Caller-supplied options are merged over the defaults
 * (max_tokens: 30, model: "gpt-3.5-turbo").
 *
 * @param input - The message(s) to send to GPT.
 * @param options - Completion options other than `messages`.
 * @returns The full completion response, with the first choice's text exposed
 *   as `content` (empty string when the API returns no text).
 */
export async function chat(
  input: string | Message[],
  options?: Omit<ChatCompletionCreateParamsNonStreaming, "messages">,
): Promise<ChatCompletion & { content: string }> {
  const openai = await getOpenAI();
  // A bare string becomes a single user message.
  const messages = Array.isArray(input) ? input : [{ role: "user", content: input }];
  const createParams: ChatCompletionCreateParamsNonStreaming = {
    // Defaults first so caller-supplied options win.
    max_tokens: 30,
    model: "gpt-3.5-turbo",
    ...options,
    messages,
  };
  const completion = await openai.chat.completions.create(createParams);
  // Coalesce null content (the API may return none) so the declared
  // `content: string` type holds at runtime.
  return { ...completion, content: completion.choices[0].message.content ?? "" };
}
1
Next