Search results

Algo
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import { OpenAI } from "npm:openai";
const openai = new OpenAI();

// Function schema, declared once so both requests are guaranteed to stay in sync
// (the original duplicated this literal in both calls).
const functions = [
  {
    name: "get_current_weather",
    description: "Get the current weather in a given location",
    parameters: {
      type: "object",
      properties: {
        location: {
          type: "string",
          description: "The city and state, e.g. San Francisco, CA",
        },
        unit: {
          type: "string",
          enum: ["celsius", "fahrenheit"],
        },
      },
      required: ["location"],
    },
  },
];

const userMessage = { role: "user" as const, content: "What is the weather like in Boston?" };

// Step 1: ask the model; it may respond with a function_call instead of content.
const functionExpression = await openai.chat.completions.create({
  messages: [userMessage],
  functions,
  model: "gpt-4-1106-preview",
  max_tokens: 30,
});
console.log(functionExpression);

// Pull the function call and assistant message out of the first response and
// reuse them, instead of hard-coding the assistant turn (this was the TODO).
const assistantMessage = functionExpression.choices[0].message;
// The arguments field is a JSON string; it may be absent if the model answered directly.
const args = JSON.parse(assistantMessage.function_call?.arguments ?? "{}");
console.log("parsed arguments:", args);

// Stand-in for actually calling get_current_weather(args) against a real backend.
const functionCallResult = { temperature: "22", unit: "celsius", description: "Sunny" };

// Step 2: feed the function result back so the model can produce a final answer.
const result = await openai.chat.completions.create({
  messages: [
    userMessage,
    assistantMessage,
    {
      role: "function",
      name: "get_current_weather",
      content: JSON.stringify(functionCallResult),
    },
  ],
  functions,
  model: "gpt-4-1106-preview",
  max_tokens: 30,
});
export default result;
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
export let openaiOpenAPI = `
openapi: 3.0.0
info:
title: Val Town API
description: |
The Val Town API provides services to evaluate JavaScript and TypeScript expressions, run vals as APIs, either as functions or Express handlers.
Learn more at [https://docs.val.town](https://docs.val.town)
version: 1.0.0
servers:
- url: https://stevekrouse-chatgptplugin.express.val.run
description: Val Town API v1
components:
securitySchemes:
bearerAuth:
type: http
scheme: bearer
bearerFormat: API Key
schemas:
JSON:
oneOf:
- type: string
- type: number
- type: object
- type: array
items: {}
- type: boolean
description: "Can be anything: string, number, array, object, etc., including \`null\`"
parameters:
expression:
in: path
name: expression
required: true
description: |
The JavaScript or TypeScript expression to be evaluated.
This should be a single expression, like a single function
call, assignment operation, or calculation. If you need
to execute multiple expressions, wrap them in a function.
schema:
type: string
examples:
simpleAddition:
value: "1+1"
summary: Simple addition
functionCall:
value: "@stevekrouse.addOne(@stevekrouse.example1)"
summary: Calling a user's val function
handle:
name: handle
in: path
required: true
description: |
The handle of the val owner, *not* including the \`@\` symbol.
schema:
type: string
examples:
stevekrouse:
value: stevekrouse
summary: Steve Krouse's handle
runVal:
name: val
in: path
required: true
description: The val to run.
schema:
type: string
examples:
id:
value: id
summary: "id"
description: |
This val is a function that returns its arguments. It is useful for testing how the API handles the arguments passed to it.
View the val at [https://val.town/v/stevekrouse.id](https://val.town/v/stevekrouse.id)
responses:
ExpressionResult:
description: The returned result of executing the passed expression successfully. The result can be of any JSON type. It will not include any logs that were generated during execution.
content:
application/json:
schema:
oneOf:
- type: string
- type: number
- type: object
- type: array
items: {}
- type: boolean
examples:
simpleAddition:
value: 2
summary: Simple addition result
functionCall:
value: 42
summary: Calling a function result
security:
- bearerAuth: []
- {}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
import { fetch } from "https://esm.town/v/std/fetch";
/**
 * Uploads a file to the OpenAI Files API (POST https://api.openai.com/v1/files).
 *
 * Accepted `data` shapes:
 *  - File/Blob: uploaded as-is
 *  - string: wrapped in a Blob
 *  - array: serialized as JSONL (one JSON document per line, as fine-tuning expects)
 *  - anything else: JSON.stringify'd into a single Blob
 *
 * @param key - OpenAI API key (sent as a Bearer token).
 * @param data - File contents in any of the shapes above.
 * @param filename - Name reported to the API; defaults to "data.json".
 * @param purpose - OpenAI file purpose; defaults to "assistants".
 * @returns The parsed JSON file object returned by OpenAI.
 * @throws Error when OpenAI returns an error payload.
 */
export async function openaiUploadFile({ key, data, filename = "data.json", purpose = "assistants" }: {
  key: string;
  data: any;
  // FIX: these were typed as required despite having defaults, which made
  // callers that omit them (e.g. openaiFineTuneData, which passes only
  // `key` and `data`) fail type-checking.
  filename?: string;
  purpose?: string;
}) {
  // Normalize the input into something FormData can carry.
  let file = data instanceof File || data instanceof Blob
    ? data
    : typeof data === "string"
    ? new Blob([data])
    : Array.isArray(data)
    ? new Blob([data.map((d) => JSON.stringify(d)).join("\n")])
    : new Blob([JSON.stringify(data)]);
  let formData = new FormData();
  formData.append("purpose", purpose);
  formData.append("file", file, filename);
  let result = await fetch("https://api.openai.com/v1/files", {
    method: "POST",
    headers: {
      "authorization": `Bearer ${key}`,
    },
    body: formData,
  }).then((r) => r.json());
  // OpenAI signals failure in the body rather than (only) the status code.
  if (result.error)
    throw new Error("OpenAI Upload Error: " + result.error.message);
  else
    return result;
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
import { delay } from "https://esm.town/v/stevekrouse/delay";
import { openaiFineTune } from "https://esm.town/v/stevekrouse/openaiFineTune";
import { openaiUploadFile } from "https://esm.town/v/stevekrouse/openaiUploadFile?v=15";
/**
 * Uploads `data` as a training file, then starts an OpenAI fine-tuning job on it.
 * OpenAI may report the freshly uploaded file as "not ready"; in that case the
 * job creation is retried up to 10 times, one second apart.
 *
 * @param key - OpenAI API key.
 * @param data - Training data, passed straight through to openaiUploadFile.
 * @param model - Optional base model; openaiFineTune supplies the default.
 * @returns The fine-tune job response (possibly still carrying an error after retries).
 */
export async function openaiFineTuneData({ key, data, model }: {
  key: string;
  data: any;
  model?: string;
}) {
  const upload = await openaiUploadFile({
    key,
    data,
  });
  const startJob = () =>
    openaiFineTune({
      key,
      model,
      trainingFile: upload.id,
    });
  let fineTune = await startJob();
  // Retry while the API says the file "is not ready", at most 10 times.
  for (let attempt = 0; attempt < 10; attempt++) {
    if (!fineTune.error?.message?.includes("is not ready")) break;
    console.log("File not ready, will try again in a second");
    await delay(1000);
    fineTune = await startJob();
  }
  return fineTune;
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
import { fetchJSON } from "https://esm.town/v/stevekrouse/fetchJSON";
/**
 * Creates an OpenAI fine-tuning job for an already-uploaded training file.
 *
 * @param key - OpenAI API key (sent as a Bearer token by fetchJSON).
 * @param model - Base model to fine-tune; defaults to "gpt-3.5-turbo-0613".
 * @param trainingFile - ID of a file previously uploaded to the Files API.
 * @returns The parsed JSON fine-tuning job object.
 */
export function openaiFineTune({ key, model, trainingFile }: {
  key: string;
  model?: string;
  trainingFile: string;
}) {
  const payload = {
    training_file: trainingFile,
    model: model ?? "gpt-3.5-turbo-0613",
  };
  return fetchJSON("https://api.openai.com/v1/fine_tuning/jobs", {
    method: "POST",
    body: JSON.stringify(payload),
    bearer: key,
  });
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
import { fetch } from "https://esm.town/v/std/fetch";
/**
 * Uploads a file to the OpenAI Files API (POST https://api.openai.com/v1/files).
 * The file is always named "data.json" on OpenAI's side in this version.
 *
 * Accepted `data` shapes:
 *  - File/Blob: uploaded as-is
 *  - string: wrapped in a Blob
 *  - array: serialized as JSONL (one JSON document per line)
 *  - anything else: JSON.stringify'd into a single Blob
 *
 * @param key - OpenAI API key (sent as a Bearer token).
 * @param data - File contents in any of the shapes above.
 * @param purpose - OpenAI file purpose; defaults to "assistants".
 * @returns The parsed JSON file object returned by OpenAI.
 * @throws Error when OpenAI returns an error payload.
 */
export async function openaiUploadFile({ key, data, purpose = "assistants" }: {
  key: string;
  data: any;
  // FIX: `purpose` was typed as required despite having a default, forcing
  // every caller to pass it; optional matches the actual contract.
  purpose?: string;
}) {
  let file = data instanceof File || data instanceof Blob
    ? data
    : typeof data === "string"
    ? new Blob([data])
    : Array.isArray(data)
    ? new Blob([data.map((d) => JSON.stringify(d)).join("\n")])
    : new Blob([JSON.stringify(data)]);
  let formData = new FormData();
  formData.append("purpose", purpose);
  formData.append("file", file, "data.json");
  let result = await fetch("https://api.openai.com/v1/files", {
    method: "POST",
    headers: {
      "authorization": `Bearer ${key}`,
    },
    body: formData,
  }).then((r) => r.json());
  // OpenAI signals failure in the body rather than (only) the status code.
  if (result.error)
    throw new Error("OpenAI Upload Error: " + result.error.message);
  else
    return result;
}

OpenAI - Docs ↗

Use OpenAI's chat completion API with std/openai. This integration enables access to OpenAI's language models without needing to acquire API keys.

For free Val Town users, all calls are sent to gpt-3.5-turbo.

Streaming is not yet supported. Upvote the HTTP response streaming feature request if you need it!

Usage

Create val: `import { OpenAI } from "https://esm.town/v/std/openai"; const openai = new OpenAI(); const completion = await openai.chat.completions.create({ messages: [ { role: "user", content: "Say hello in a creative way" }, ], model: "gpt-4", max_tokens: 30, }); console.log(completion.choices[0].message.content);`

Limits

While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:

  • Usage Quota: We limit each user to 10 requests per minute.
  • Features: Chat completions is the only endpoint available.

If these limits are too low, let us know! You can also get around the limitation by using your own keys:

  1. Create your own API key on OpenAI's website
  2. Create an environment variable named OPENAI_API_KEY
  3. Use the OpenAI client from npm:openai:
Create val: `import { OpenAI } from "npm:openai"; const openai = new OpenAI();`

📝 Edit docs

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";
/**
 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
 */
export class OpenAI {
  private rawOpenAIClient: RawOpenAI;
  /**
   * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
   *
   * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
   * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections.
   * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation.
   * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request.
   * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API.
   * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API.
   * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers.
   */
  constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
    // All traffic is routed through Val Town's proxy, authenticated with the
    // user's Val Town token rather than an OpenAI key.
    this.rawOpenAIClient = new RawOpenAI({
      ...options,
      baseURL: "https://std-openaiproxy.web.val.run/v1",
      apiKey: Deno.env.get("valtown"),
      organization: null,
    });
  }
  get chat() {
    return this.rawOpenAIClient.chat;
  }
  /**
   * BUG FIX: the original exposed `beta` as an object-literal field whose
   * getter read `this.rawOpenAIClient` — but inside an object-literal getter,
   * `this` is the literal itself, so `instance.beta.chat` dereferenced
   * `undefined` and threw. A class-level getter keeps `this` bound to the
   * OpenAI instance (the same fix the later revision of this val uses).
   */
  get beta(): { chat: RawOpenAI["beta"]["chat"] } {
    return { chat: this.rawOpenAIClient.beta.chat };
  }
}

OpenAI

Get started using OpenAI's chat completion without the need to set your own API keys.

Usage

Here's a quick example to get you started with the Val Town OpenAI wrapper:

Create val: `import { OpenAI } from "https://esm.town/v/std/openai"; const openai = new OpenAI(); const functionExpression = await openai.chat.completions.create({ "messages": [ { "role": "user", "content": "Say hello in a creative way" }, ], model: "gpt-4", max_tokens: 30, }); console.log(functionExpression.choices[0].message.content);`
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";
/**
 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
 */
export class OpenAI {
  private client: RawOpenAI;
  /**
   * Builds a client that proxies all requests through Val Town, so no OpenAI
   * API key is needed — authentication uses the `valtown` environment token.
   *
   * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
   * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections.
   * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation.
   * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request.
   * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API.
   * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API.
   * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers.
   */
  constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
    const config = {
      ...options,
      baseURL: "https://std-openaiproxy.web.val.run/v1",
      apiKey: Deno.env.get("valtown"),
      organization: null,
    };
    this.client = new RawOpenAI(config);
  }
  get chat() {
    return this.client.chat;
  }
  get beta() {
    return this.client.beta;
  }
}

OpenAI ChatGPT helper function

This val uses your OpenAI token if you have one, and the @std/openai if not, so it provides limited OpenAI usage for free.

Create val: `import { chat } from "https://esm.town/v/stevekrouse/openai"; const { content } = await chat("Hello, GPT!"); console.log(content);`
Create val: `import { chat } from "https://esm.town/v/stevekrouse/openai"; const { content } = await chat( [ { role: "system", content: "You are Alan Kay" }, { role: "user", content: "What is the real computer revolution?"} ], { max_tokens: 50, model: "gpt-4o" } ); console.log(content);`
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import type { ChatCompletion, ChatCompletionCreateParamsNonStreaming, Message } from "npm:@types/openai";
// Resolve an OpenAI client: the user's own key via npm:openai when
// OPENAI_API_KEY is set, otherwise Val Town's free std wrapper.
async function getOpenAI() {
  const hasOwnKey = Deno.env.get("OPENAI_API_KEY") !== undefined;
  const { OpenAI } = hasOwnKey
    ? await import("npm:openai")
    : await import("https://esm.town/v/std/openai");
  return new OpenAI();
}
/**
 * Initiates a chat conversation with OpenAI's GPT model and retrieves the content of the first response.
 * This function can handle both single string inputs and arrays of message objects.
 * It supports various GPT models, allowing for flexibility in choosing the model based on the application's needs.
 *
 * @param input - The input message(s) to send to GPT. Can be a single string or an array of message objects.
 * @param options - Additional completion options (model, max_tokens, …); they override the defaults of `gpt-3.5-turbo` / 30 tokens.
 * @returns The full ChatCompletion, augmented with a `content` shortcut for the
 *   first choice's message content. (FIX: the original doc claimed
 *   `Promise<string>`, which did not match the declared return type.)
 */
export async function chat(
  input: string | Message[],
  options?: Omit<ChatCompletionCreateParamsNonStreaming, "messages">,
): Promise<ChatCompletion & { content: string }> {
  const openai = await getOpenAI();
  const messages = Array.isArray(input) ? input : [{ role: "user", content: input }];
  const createParams: ChatCompletionCreateParamsNonStreaming = {
    max_tokens: 30,
    model: "gpt-3.5-turbo",
    ...(options ?? {}),
    messages,
  };
  const completion = await openai.chat.completions.create(createParams);
  // FIX: the API may return null content (e.g. for function calls); coalesce
  // so the declared `content: string` contract actually holds.
  return { ...completion, content: completion.choices[0].message.content ?? "" };
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
import { OpenAI } from "npm:openai";
// Create a secret named OPENAI_API_KEY at https://www.val.town/settings/environment-variables
const openai = new OpenAI();
// Minimal chat-completion demo: one user message, short reply, print the text.
const completion = await openai.chat.completions.create({
  model: "gpt-4",
  max_tokens: 30,
  messages: [{ role: "user", content: "Say hello in a creative way" }],
});
console.log(completion.choices[0].message.content);
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import { chat } from "https://esm.town/v/webup/chat";
import { schemasWeather } from "https://esm.town/v/webup/schemasWeather";
export const chatSampleFunctionSingle = (async () => {
  // Example dummy function hard coded to return the same weather
  // In production, this could be your backend API or an external API
  const getCurrentWeather = (location, unit = "fahrenheit") => ({
    unit,
    location,
    temperature: "72",
    forecast: ["sunny", "windy"],
  });
  // Step 1: send the conversation and available functions to GPT
  const messages = [{
    "role": "user",
    "content":
      "What's the weather like in Boston, and what's the weather in Huston?",
  }];
  const functions = [schemasWeather[0]];
  const response = await chat(messages, {
    functions,
    function_call: "auto", // auto is default, but we'll be explicit
  });
  console.log(response);
  // Step 2: Check if GPT wanted to call a function
  if (typeof response !== "object")
    return;
  // Step 3: Call the function
  // Note: The JSON response may not always be valid; be sure to handle errors
  const { name } = response;
  const args = JSON.parse(response.arguments);
  const funcResponse = { getCurrentWeather }[name]?.(
    args?.location,
    args?.unit,
  );
  // BUG FIX: the original tested `!functions` here, which is always falsy-proof
  // (the array is assigned above and never empty). The intent is to bail out
  // when the model requested a function we don't implement, i.e. when the
  // dispatch above produced no result.
  if (!funcResponse)
    return;
  // Step 4: Send the info on the function call and function response to GPT
  // Extend conversation with assistant's reply
  messages.push({
    role: "assistant",
    function_call: response,
    content: "",
  });
  // Extend conversation with function response
  messages.push({
    role: "function",
    name,
    content: JSON.stringify(funcResponse),
  });
  // Get a new response from GPT where it can see the function response
  return await chat(messages);
})();
1
2
3
4
5
6
7
8
9
10
11
import { fetch } from "https://esm.town/v/std/fetch";
import { prettifyTS } from "https://esm.town/v/pomdtr/prettifyTS";
// Fetches an OpenAPI spec in YAML from `url` and renders it as a prettified
// TypeScript module exporting the spec `as const`.
export async function openapi2TS(url: string) {
  // yaml is imported lazily so the dependency is only loaded when needed.
  const { parse } = await import("npm:yaml");
  const resp = await fetch(url);
  const body = await resp.text();
  const spec = parse(body);
  const source = `export default ${JSON.stringify(spec)} as const;`;
  return prettifyTS(source);
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
import { nowcastPMAqi } from "https://esm.town/v/stevekrouse/nowcastPMAqi";
import { msHour } from "https://esm.town/v/stevekrouse/msHour";
import { fetchJSON } from "https://esm.town/v/stevekrouse/fetchJSON";
// Computes a NowCast PM2.5 AQI for an OpenAQ location from its last 12 hours
// of measurements.
export async function openAqNowcastAQI({ location_id }) {
  const query = new URLSearchParams({
    limit: "100",
    page: "1",
    location_id,
    date_from: new Date(Date.now() - 12 * msHour).toISOString(),
    date_to: new Date().toISOString(),
    parameter: "pm25",
    order_by: "datetime",
  });
  const { results } = await fetchJSON(`https://api.openaq.org/v2/measurements?${query}`);
  // Feed the raw readings to the NowCast AQI helper.
  const readings = results.map((o) => o.value);
  return nowcastPMAqi(readings);
}
1
2
3
4
5
6
7
8
9
10
11
const { default: OpenAI } = await import("npm:openai");
export async function chat(apiKey, messages) {
const openai = new OpenAI({ apiKey });
return openai.chat.completions.create({
messages,
model: "gpt-3.5-turbo",
stream: false,
});
}
1
2
3
4
5
6
7
8
9
10
11
12
import process from "node:process";
// Legacy text-completion helper (text-davinci-003 via the deno.land client).
// Returns the text of the first completion choice for `prompt`.
export const openaiCompletion = async (prompt) => {
  const { OpenAI } = await import("https://deno.land/x/openai/mod.ts");
  const openAI = new OpenAI(process.env.OPENAI_API_KEY);
  const completion = await openAI.createCompletion({
    model: "text-davinci-003",
    prompt,
    maxTokens: 100,
  });
  return completion.choices[0].text;
};