
Vals using openai

Description from the NPM package:
The official TypeScript library for the OpenAI API
export const chat = async (
  prompt: string | object = "Hello world",
  options = {},
) => {
  // Initialize the legacy 3.x OpenAI client from esm.sh
  const { Configuration, OpenAIApi } = await import(
    "https://esm.sh/openai@3.3.0"
  );
  const configuration = new Configuration({
    apiKey: Deno.env.get("OPENAI_API_KEY"),
  });
  const openai = new OpenAIApi(configuration);
  // Request a chat completion; a string prompt becomes a single user message
  const messages = typeof prompt === "string"
    ? [{ role: "user", content: prompt }]
    : prompt;
  const { data } = await openai.createChatCompletion({
    model: "gpt-3.5-turbo-0613",
    messages,
    ...options,
  });
  const message = data.choices[0].message;
  return message.function_call ? message.function_call : message.content;
};

OpenAI - Docs ↗

Use OpenAI's chat completion API with std/openai. This integration enables access to OpenAI's language models without needing to acquire API keys.

Streaming is not yet supported. Upvote the HTTP response streaming feature request if you need it!

Usage

import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4",
  max_tokens: 30,
});
console.log(completion.choices[0].message.content);

Limits

While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:

  • Usage Quota: We limit each user to 10 requests per minute.
  • Features: Chat completions is the only endpoint available.

If these limits are too low, let us know! You can also get around the limitation by using your own keys:

  1. Create your own API key on OpenAI's website
  2. Create an environment variable named OPENAI_API_KEY
  3. Use the OpenAI client from npm:openai:
import { OpenAI } from "npm:openai";

const openai = new OpenAI();
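For instance, a minimal end-to-end call with your own key (a sketch; the prompt and model here are placeholders — the npm:openai client reads the OPENAI_API_KEY environment variable by default):

import { OpenAI } from "npm:openai";

// npm:openai picks up the OPENAI_API_KEY environment variable automatically.
const openai = new OpenAI();

const completion = await openai.chat.completions.create({
  messages: [{ role: "user", content: "Say hello in a creative way" }],
  model: "gpt-3.5-turbo",
  max_tokens: 30,
});

console.log(completion.choices[0].message.content);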


import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";

/**
 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
 */
export class OpenAI {
  private rawOpenAIClient: RawOpenAI;

  /**
   * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
   *
   * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
   * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections.
   * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation.
   * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request.
   * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API.
   * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API.
   * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers.
   */
  constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
    this.rawOpenAIClient = new RawOpenAI({
      ...options,
      baseURL: "https://std-openaiproxy.web.val.run/v1",
      apiKey: Deno.env.get("valtown"),
      organization: null,
    });
  }

  get chat() {
    return this.rawOpenAIClient.chat;
  }

  readonly beta = {
    get chat(): RawOpenAI["beta"]["chat"] {
      return this.rawOpenAIClient.beta.chat;
    },
  };
}
import process from "node:process";

export const chat = async (
  prompt: string | object = "Hello world",
  options = {},
) => {
  // Initialize the legacy 3.x OpenAI client from esm.sh
  const { Configuration, OpenAIApi } = await import(
    "https://esm.sh/openai@3.3.0"
  );
  console.log(process.env);
  // const configuration = new Configuration({
  //   apiKey: process.env.OPENAI,
  // });
  // const openai = new OpenAIApi(configuration);
  // // Request chat completion
  // const messages = typeof prompt === "string"
  //   ? [{ role: "user", content: prompt }]
  //   : prompt;
  // const { data } = await openai.createChatCompletion({
  //   model: "gpt-3.5-turbo-0613",
  //   messages,
  //   ...options,
  // });
  // const message = data.choices[0].message;
  // return message.function_call ? message.function_call : message.content;
};

chat();
import { OpenAI } from "npm:openai";

// Create a secret named OPENAI_API_KEY at https://www.val.town/settings/environment-variables
const openai = new OpenAI();

const functionExpression = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4",
  max_tokens: 30,
});

console.log(functionExpression.choices[0].message.content);

OpenAI

Get started using OpenAI's chat completion without the need to set your own API keys.

Usage

Here's a quick example to get you started with the Val Town OpenAI wrapper:

import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const functionExpression = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4",
  max_tokens: 30,
});
console.log(functionExpression.choices[0].message.content);
import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";

/**
 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
 */
export class OpenAI {
  private rawOpenAIClient: RawOpenAI;

  /**
   * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
   *
   * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
   * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections.
   * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation.
   * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request.
   * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API.
   * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API.
   * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers.
   */
  constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
    this.rawOpenAIClient = new RawOpenAI({
      ...options,
      baseURL: "https://std-openaiproxy.web.val.run/v1",
      apiKey: Deno.env.get("valtown"),
      organization: null,
    });
  }

  get chat() {
    return this.rawOpenAIClient.chat;
  }

  get beta() {
    return this.rawOpenAIClient.beta;
  }
}

OpenAI - Docs ↗

Use OpenAI's chat completion API with std/openai. This integration enables access to OpenAI's language models without needing to acquire API keys.

For free Val Town users, all calls are sent to gpt-3.5-turbo.

Streaming is not yet supported. Upvote the HTTP response streaming feature request if you need it!

Usage

import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4",
  max_tokens: 30,
});
console.log(completion.choices[0].message.content);

Limits

While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:

  • Usage Quota: We limit each user to 10 requests per minute.
  • Features: Chat completions is the only endpoint available.

If these limits are too low, let us know! You can also get around the limitation by using your own keys:

  1. Create your own API key on OpenAI's website
  2. Create an environment variable named OPENAI_API_KEY
  3. Use the OpenAI client from npm:openai:
import { OpenAI } from "npm:openai";

const openai = new OpenAI();


import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";

/**
 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
 */
export class OpenAI {
  private rawOpenAIClient: RawOpenAI;

  /**
   * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
   *
   * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
   * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections.
   * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation.
   * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request.
   * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API.
   * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API.
   * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers.
   */
  constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
    this.rawOpenAIClient = new RawOpenAI({
      ...options,
      baseURL: "https://std-openaiproxy.web.val.run/v1",
      apiKey: Deno.env.get("valtown"),
      organization: null,
    });
  }

  get chat() {
    return this.rawOpenAIClient.chat;
  }

  readonly beta = {
    get chat(): RawOpenAI["beta"]["chat"] {
      return this.rawOpenAIClient.beta.chat;
    },
  };
}

If you fork this, you'll need to set OPENAI_API_KEY in your Val Town Secrets.

import { email } from "https://esm.town/v/std/email?v=11";
import { OpenAI } from "npm:openai";

let location = "brooklyn ny";
let lang = "en";

const weather = await fetch(
  `https://wttr.in/${location}?lang=${lang}&format=j1`,
).then(r => r.json());

const openai = new OpenAI();
let chatCompletion = await openai.chat.completions.create({
  messages: [{
    role: "user",
    // Newlines are replaced with spaces so the prompt reads as one paragraph
    content: `Based on the weather data below,
give me suggestions on how warmly to dress,
ie pants or shorts, a light jacket or a warm jacket,
a scarf and gloves or not, if I should carry an umbrella, etc.
In your response, use temperature data from the weather data below
throughout the day to explain your recommendation.
Be as concise as possible. Assume I'll wear the same thing the whole day.
Do not use a bulleted list. Use 2-3 sentences. Only use Fahrenheit`.replaceAll("\n", " "),
  }, {
    role: "user",
    content: JSON.stringify(weather),
  }],
  model: "gpt-4-1106-preview",
  max_tokens: 150,
});

const text = chatCompletion.choices[0].message.content;
console.log(text);

export async function weatherGPT() {
  await email({ subject: "Weather Today", text });
}

If you fork this, you'll need to set OPENAI_API_KEY in your Val Town Secrets.

import { email } from "https://esm.town/v/std/email?v=11";
import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "npm:openai";

let location = "shenzhen";
let lang = "zh";

const weather = await fetch(
  `https://wttr.in/${location}?lang=${lang}&format=j1`,
).then(r => r.json());

const openai = new OpenAI();
let chatCompletion = await openai.chat.completions.create({
  messages: [{
    role: "user",
    // Chinese prompt; roughly: "Based on the weather forecast below, report
    // today's weather in Shenzhen and advise me on what to wear, and whether
    // I need an umbrella if it rains. Give sound reasons along with the
    // temperature and forecast. Be as accurate as possible; assume I won't
    // change clothes all day. Summarize in 2-3 sentences."
    content: `基于下面的天气预报信息,播报当天深圳的天气情况,给我相关的建议告诉我如何穿衣,如果下雨我是否需要携带雨伞。
给出合理的理由同时带上温度和天气播报。
尽可能的准确,假设我一天都不会更换衣服的。
使用2-3个句子概述一下。`.replaceAll("\n", ""),
  }, {
    role: "user",
    content: JSON.stringify(weather),
  }],
  model: "gpt-3.5-turbo",
  max_tokens: 150,
});

const text = chatCompletion.choices[0].message.content;
console.log(text);

export async function weatherGPT() {
  const result = await fetch(`https://hello.liaolile.com/test/weather`, {
    method: "POST",
    body: JSON.stringify({
      "info": text,
    }),
  }).then(r => r.json());
  console.log(result);
  await email({ subject: "Weather Today", text });
}

GPT4 Example

This uses the brand new gpt-4-1106-preview.

To use this, set OPENAI_API_KEY in your Val Town Secrets.

import { OpenAI } from "npm:openai";

// Pass the key explicitly from the OPENAI_API_KEY secret
const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

let chatCompletion = await openai.chat.completions.create({
  messages: [{
    role: "user",
    content: "Teach me about a super rare word",
  }],
  model: "gpt-4-1106-preview",
  max_tokens: 30,
});

export let gpt4Example = chatCompletion.choices[0].message.content;
const { default: OpenAI } = await import("npm:openai");

export async function chat(apiKey, messages) {
  const openai = new OpenAI({ apiKey });
  return openai.chat.completions.create({
    messages,
    model: "gpt-3.5-turbo",
    stream: false,
  });
}

Code Documentation Assistant

The Code Documentation Assistant is an AI-powered tool that helps generate documentation for code. It uses the OpenAI GPT-3.5 Turbo model to generate readme files in GitHub-flavored markdown based on the provided code.

Usage

Importing the Code Documentation Assistant

import { draftReadme, writeReadme } from "code-doc-assistant";

Function: draftReadme

async function draftReadme(options: WriterOptions): Promise<string>

The draftReadme function generates a readme file based on the provided options.

Parameters

  • options (required): An object containing the following properties:
    • username (string): The username of the code owner.
    • valName (string): The name of the Val containing the code.
    • model (optional, default: "gpt-3.5-turbo"): The OpenAI model to use for generating the readme.
    • userPrompt (optional): Additional prompt to include in the documentation.

Return Value

A promise that resolves to a string representing the generated readme file.

Function: writeReadme

async function writeReadme(options: WriterOptions): Promise<string>

The writeReadme function generates a readme file and updates the readme of the corresponding Val with the generated content.

Parameters

  • options (required): An object containing the following properties:
    • username (string): The username of the code owner.
    • valName (string): The name of the Val containing the code.
    • model (optional, default: "gpt-3.5-turbo"): The OpenAI model to use for generating the readme.
    • userPrompt (optional): Additional prompt to include in the documentation.

Return Value

A promise that resolves to a string indicating the success of the readme update.

Example

import { draftReadme, writeReadme } from "code-doc-assistant";

const options = {
  username: "your-username",
  valName: "your-val-name",
};

const generatedReadme = await draftReadme(options);
console.log(generatedReadme);

const successMessage = await writeReadme(options);
console.log(successMessage);

License

This project is licensed under the MIT License.

import { fetch } from "https://esm.town/v/std/fetch?v=4";
import OpenAI, { type ClientOptions } from "npm:openai";

export interface WriterOptions extends ClientOptions {
  username: string;
  valName: string;
  model?: string;
  userPrompt?: string;
}

function createPrompt(code: string, userPrompt?: string) {
  return `
You are an AI assistant that writes documentation for code. You output readmes
in GitHub flavored markdown. Usage sections should include a single code snippet
that a user can copy and paste. Never return anything other than documentation for
the code you are provided.
${userPrompt}
Take the below code and return a markdown readme:
${code}
`;
}

async function getVal(username: string, valName: string) {
  try {
    const res = await fetch(`https://api.val.town/v1/alias/${username}/${valName}`, {
      method: "GET",
      headers: {
        "accept": "*/*",
        "Content-Type": "application/json",
        "Authorization": `Bearer ${Deno.env.get("valtown")}`,
      },
    });
    const { id, code } = await res.json();
    return { id, code };
  } catch (error) {
    throw new Error("Error getting val code: " + error.message);
  }
}

async function performOpenAICall(prompt: string, model: string, openaiOptions: ClientOptions) {
  const openai = new OpenAI(openaiOptions);
  try {
    const response = await openai.chat.completions.create({
      messages: [{ role: "system", content: prompt }],
      model: model,
    });
    if (!response.choices || response.choices.length === 0) {
      throw new Error("No response from OpenAI");
    }
    const readme = response.choices[0].message?.content;
    if (!readme) {
      throw new Error("No readme returned by OpenAI. Try again.");
    }
    return readme;
  } catch (error) {
    throw new Error("Error generating readme: " + error.message);
  }
}

async function updateReadme(id: string, readme: string) {
  try {
    const res = await fetch(`https://api.val.town/v1/vals/${id}`, {
      method: "PUT",
      headers: {
        "accept": "*/*",
        "Content-Type": "application/json",
        "Authorization": `Bearer ${Deno.env.get("valtown")}`,
      },
      body: JSON.stringify({ "readme": readme }),
    });
    return res.status;
  } catch (error) {
    throw new Error("Error updating readme: " + error.message);
  }
}

export async function draftReadme(options: WriterOptions) {
  const { username, valName, model = "gpt-3.5-turbo", userPrompt, ...openaiOptions } = options;
  const { id, code } = await getVal(username, valName);
  const prompt = createPrompt(code, userPrompt);
  const readme = await performOpenAICall(prompt, model, openaiOptions);
  return readme;
}

export async function writeReadme(options: WriterOptions) {
  const { username, valName, model = "gpt-3.5-turbo", userPrompt, ...openaiOptions } = options;
  const { id, code } = await getVal(username, valName);
  const prompt = createPrompt(code, userPrompt);
  const readme = await performOpenAICall(prompt, model, openaiOptions);
  try {
    const update = await updateReadme(id, readme);
    return "Readme updated successfully!";
  } catch (error) {
    throw new Error("Error updating readme: " + error.message);
  }
}
import { type ClientOptions } from "npm:openai";

export interface WriterOptions extends ClientOptions {
  model?: string;
}

Val Town AI Readme Writer

This val provides a class ReadmeWriter for generating readmes for vals with OpenAI. It can both draft readmes and update them directly.

PRs welcome! See Todos below for some ideas I have.

Usage

To draft a readme for a given val, use the draftReadme method:

import { ReadmeWriter } from "https://esm.town/v/nbbaier/readmeGPT";

const readmeWriter = new ReadmeWriter({});
const val = "https://www.val.town/v/:username/:valname";

const generatedReadme = await readmeWriter.draftReadme(val);

To generate and update a readme for a given val, use the writeReadme method:

import { ReadmeWriter } from "https://esm.town/v/nbbaier/readmeGPT";

const readmeWriter = new ReadmeWriter({});
const val = "https://www.val.town/v/:username/:valname";

const successMessage = await readmeWriter.writeReadme(val);
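Both methods also accept an optional second userPrompt argument (see draftReadme(val, userPrompt?) in the source below), which is folded into the documentation prompt. A minimal sketch; the instruction text here is only illustrative:

import { ReadmeWriter } from "https://esm.town/v/nbbaier/readmeGPT";

const readmeWriter = new ReadmeWriter({});
const val = "https://www.val.town/v/:username/:valname";

// The extra instruction is interpolated into the system prompt
const generatedReadme = await readmeWriter.draftReadme(
  val,
  "Keep the readme under 200 words.",
);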

API Reference

Class: ReadmeWriter

The ReadmeWriter class represents a utility for generating and updating README files.

Constructor

Creates an instance of the ReadmeWriter class.

Parameters:
  • model (optional): The model to be used for generating the readme. Defaults to "gpt-3.5-turbo".
  • apiKey (optional): An OpenAI API key. Defaults to Deno.env.get("OPENAI_API_KEY").

Methods

  • draftReadme(val: string): Promise<string>: Generates a readme for the given val.

    • Parameters:

      • val: The URL of the val on Val Town.
    • Returns:

      • A promise that resolves to the generated readme.
  • writeReadme(val: string): Promise<string>: Generates and updates a readme for the given val.

    • Parameters:

      • val: The URL of the val on Val Town.
    • Returns:

      • A promise that resolves to a success message if the update is successful.

Todos

  • Additional options to pass to the OpenAI model
  • Ability to pass more instructions to the prompt to modify how the readme is constructed
import { type WriterOptions } from "https://esm.town/v/nbbaier/WriterOptions";
import { fetch } from "https://esm.town/v/std/fetch?v=4";
import OpenAI, { type ClientOptions } from "npm:openai";

export class ReadmeWriter {
  model: string;
  openai: OpenAI;
  apiKey: string;
  valtownKey: string;

  constructor(options: WriterOptions) {
    const { model, ...openaiOptions } = options;
    this.model = model ? model : "gpt-3.5-turbo";
    this.openai = new OpenAI(openaiOptions);
    this.valtownKey = Deno.env.get("valtown");
  }

  private createPrompt(code: string, userPrompt?: string) {
    return `
You are an AI assistant that writes documentation for code. You output readmes
in GitHub flavored markdown. Usage sections should include a single code snippet
that a user can copy and paste. Never return anything other than documentation for
the code you are provided.
${userPrompt}
Take the below code and return a markdown readme:
${code}
`;
  }

  private async getVal(username: string, valName: string) {
    try {
      const res = await fetch(`https://api.val.town/v1/alias/${username}/${valName}`, {
        method: "GET",
        headers: {
          "accept": "*/*",
          "Content-Type": "application/json",
          "Authorization": `Bearer ${this.valtownKey}`,
        },
      });
      const { id, code } = await res.json();
      return { id, code };
    } catch (error) {
      throw new Error("Error getting val code: " + error.message);
    }
  }

  private async performOpenAICall(prompt: string) {
    try {
      const response = await this.openai.chat.completions.create({
        messages: [{ role: "system", content: prompt }],
        model: this.model,
      });
      if (!response.choices || response.choices.length === 0) {
        throw new Error("No response from OpenAI");
      }
      const readme = response.choices[0].message?.content;
      if (!readme) {
        throw new Error("No readme returned by OpenAI. Try again.");
      }
      return readme;
    } catch (error) {
      throw new Error("Error generating readme: " + error.message);
    }
  }

  private async updateReadme(id: string, readme: string) {
    try {
      const res = await fetch(`https://api.val.town/v1/vals/${id}`, {
        method: "PUT",
        headers: {
          "accept": "*/*",
          "Content-Type": "application/json",
          "Authorization": `Bearer ${this.valtownKey}`,
        },
        body: JSON.stringify({ "readme": readme }),
      });
      return res.status;
    } catch (error) {
      throw new Error("Error updating readme: " + error.message);
    }
  }

  private async processRequest(val: string, userPrompt?: string) {
    // A val URL's pathname looks like /v/:username/:valname
    const url = new URL(val);
    const [, _, username, valName] = url.pathname.split("/");
    const { id, code } = await this.getVal(username, valName);
    const prompt = this.createPrompt(code, userPrompt);
    const readme = await this.performOpenAICall(prompt);
    return { id, readme };
  }

  async draftReadme(val: string, userPrompt?: string) {
    const { readme } = await this.processRequest(val, userPrompt);
    return readme;
  }

  async writeReadme(val: string, userPrompt?: string) {
    const { id, readme } = await this.processRequest(val, userPrompt);
    try {
      await this.updateReadme(id, readme);
      return "Readme updated successfully!";
    } catch (error) {
      throw new Error("Error updating readme: " + error.message);
    }
  }
}

SQLite QueryWriter

The QueryWriter class is a utility for generating and executing SQL queries using natural language and OpenAI. It provides a simplified interface for interacting with your Val Town SQLite database and generating SQL queries based on user inputs.

This val is inspired by prisma-gpt. PRs welcome! See Todos below for some ideas I have.

Usage

  1. Import the QueryWriter class into your script:

     import { QueryWriter } from "https://esm.town/v/nbbaier/sqliteWriter";

  2. Create an instance of QueryWriter, providing the desired table and an optional model:

     const writer = new QueryWriter({ table: "my_table", model: "gpt-4-1106-preview" });

  3. Call the writeQuery() method to generate an SQL query based on a user input string:

     const userInput = "Show me all the customers with more than $1000 in purchases.";
     const query = await writer.writeQuery(userInput);

  4. Alternatively, use the gptQuery() method to both generate and execute the SQL query:

     const userInput = "Show me all the customers with more than $1000 in purchases.";
     const result = await writer.gptQuery(userInput);

  5. Handle the generated query or query result according to your application's needs (see the sketch below).
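A minimal sketch of step 5, assuming the std/sqlite result shape this val itself relies on (an object with a rows array):

import { QueryWriter } from "https://esm.town/v/nbbaier/sqliteWriter";

const writer = new QueryWriter({ table: "my_table", model: "gpt-4-1106-preview" });

// gptQuery returns the raw sqlite.execute result; getSchema() in this val
// reads `.rows` from the same API, so we assume that shape here
const result = await writer.gptQuery(
  "Show me all the customers with more than $1000 in purchases.",
);

for (const row of result.rows) {
  console.log(row); // each row is an array of column values
}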

API

new QueryWriter(args: { table: string; model?: string }): QueryWriter

Creates a new instance of the QueryWriter class.

  • table: The name of the database table to operate on.
  • model (optional): The model to use for generating SQL queries. Defaults to "gpt-3.5-turbo".
  • apiKey (optional): An OpenAI API key. Defaults to Deno.env.get("OPENAI_API_KEY").

writeQuery(str: string): Promise<string>

Generates an SQL query based on the provided user input string.

  • str: The user input string describing the desired query.

Returns a Promise that resolves to the generated SQL query.

gptQuery(str: string): Promise<any>

Generates and executes an SQL query based on the provided user input string.

  • str: The user input string describing the desired query.

Returns a Promise that resolves to the result of executing the generated SQL query.

Todos

  • Handle multiple tables for more complex use cases
  • Edit prompt to allow for more than just SELECT queries
  • Allow a user to add to the system prompt maybe?
  • Expand usage beyond just Turso SQLite to integrate with other databases
import { type WriterOptions } from "https://esm.town/v/nbbaier/WriterOptions";
import { sqlite } from "https://esm.town/v/std/sqlite";
import OpenAI from "npm:openai";

interface QueryWriterOptions extends WriterOptions {
  table: string;
}

export class QueryWriter {
  table: string;
  model: string;
  apiKey: string;
  openai: OpenAI;

  constructor(options: QueryWriterOptions) {
    const { table, model, ...openaiOptions } = options;
    this.table = table;
    this.model = model ? model : "gpt-3.5-turbo";
    // this.apiKey = openaiOptions.apiKey ? openaiOptions.apiKey : Deno.env.get("OPENAI_API_KEY");
    this.openai = new OpenAI(openaiOptions);
  }

  private async getSchema() {
    // PRAGMA table_info returns one row per column: [cid, name, type, ...]
    const tableCols = (await sqlite.execute(`PRAGMA table_info(${this.table})`)).rows.map(column => {
      return `${column[1]} ${column[2]}`;
    }).join(", ");
    return `${this.table}(${tableCols})`;
  }

  private async executeQuery(query: string) {
    try {
      return await sqlite.execute(query);
    } catch (error) {
      // Handle the error appropriately
      throw new Error("Error executing query: " + error.message);
    }
  }

  private createPrompt(schema: string, str: string) {
    return `
You are an AI assistant that returns raw SQL queries using natural language.
You only output raw SQLite queries. Never return anything other than raw SQLite.
Always begin the query with SELECT. You will be given the following schema:
${schema}
Take the below query and return raw SQLite:
${str}
`;
  }

  async writeQuery(str: string) {
    const schema = await this.getSchema();
    const prompt = this.createPrompt(schema, str);
    try {
      const response = await this.openai.chat.completions.create({
        messages: [{ role: "system", content: prompt }],
        model: this.model,
      });
      if (!response.choices || response.choices.length === 0) {
        throw new Error("No response from OpenAI");
      }
      const query = response.choices[0].message?.content;
      if (!query) {
        throw new Error("No SQL returned from OpenAI. Try again.");
      }
      return query;
    } catch (error) {
      throw new Error("Error generating query: " + error.message);
    }
  }

  async gptQuery(str: string) {
    const schema = await this.getSchema();
    const prompt = this.createPrompt(schema, str);
    try {
      const response = await this.openai.chat.completions.create({
        messages: [{ role: "system", content: prompt }],
        model: this.model,
      });
      if (!response.choices || response.choices.length === 0) {
        throw new Error("No response from OpenAI");
      }
      const query = response.choices[0].message?.content;
      if (!query) {
        throw new Error("No SQL returned from OpenAI. Try again.");
      }
      return this.executeQuery(query);
    } catch (error) {
      throw new Error("Error generating and executing query: " + error.message);
    }
  }
}
import { OpenAI } from "npm:openai";

const openai = new OpenAI();

export const gpt4 = async (content: string, max_tokens: number = 50) => {
  let chatCompletion = await openai.chat.completions.create({
    messages: [{
      role: "user",
      content,
    }],
    model: "gpt-4-1106-preview",
    max_tokens: max_tokens,
  });
  return chatCompletion.choices[0].message.content;
};