Avatar

@patrickjm

2 likes12 public vals
Joined January 12, 2023
Resident
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import { trackOpenAiFreeUsage } from "https://esm.town/v/patrickjm/trackOpenAiFreeUsage";
import { openAiTextCompletion } from "https://esm.town/v/patrickjm/openAiTextCompletion";
import { openAiModeration } from "https://esm.town/v/patrickjm/openAiModeration";
import { openAiFreeQuotaExceeded } from "https://esm.town/v/patrickjm/openAiFreeQuotaExceeded";
import { openAiFreeUsageConfig } from "https://esm.town/v/patrickjm/openAiFreeUsageConfig";
/**
* OpenAI text completion. https://platform.openai.com/docs/api-reference/completions
*
* val.town has generously provided a free daily quota. Until the quota is met, no need to provide an API key.
* To see if the quota has been met, you can run @patrickjm.openAiFreeQuotaExceeded()
*
* For full REST API access, see @patrickjm.openAiTextCompletion
*/
/**
 * OpenAI text completion. https://platform.openai.com/docs/api-reference/completions
 *
 * val.town has generously provided a free daily quota. Until the quota is met, no need to provide an API key.
 * To see if the quota has been met, you can run @patrickjm.openAiFreeQuotaExceeded()
 *
 * For full REST API access, see @patrickjm.openAiTextCompletion
 *
 * @param params.openAiKey optional caller-supplied key; when omitted the shared free-usage key is used
 * @param params.prompt completion prompt
 * @param params.maxTokens max tokens to generate (defaults to 1000)
 * @returns the trimmed text of the first completion choice (undefined if the API returned no choices)
 * @throws when the free quota is exhausted, or (free key only) the prompt is flagged by moderation
 */
export let gpt3 = async (params: {
  openAiKey?: string,
  prompt: string,
  maxTokens?: number,
}) => {
  const MODEL = "text-davinci-003";
  const usingFreeKey = !params.openAiKey;
  // Determine whether to use provided apiKey or free usage apiKey based on daily quota.
  const apiKey = params.openAiKey ?? openAiFreeUsageConfig.key;
  // Only consult the quota tracker when relying on the shared free key —
  // the original awaited this remote check even for callers with their own key.
  if (usingFreeKey && (await openAiFreeQuotaExceeded())) {
    throw new Error(openAiFreeUsageConfig.quota_error);
  }
  // If using free token, first check inputs against moderation api
  if (usingFreeKey) {
    const moderation = await openAiModeration({
      apiKey,
      input: params.prompt,
    });
    if (moderation.results.some((r) => r.flagged)) {
      throw new Error(
        "Sorry, this prompt was flagged by OpenAI moderation. If you provide your own API key, moderation will be turned off."
      );
    }
  }
  // Call completion API
  const response = await openAiTextCompletion({
    apiKey: apiKey,
    prompt: params.prompt,
    model: MODEL,
    max_tokens: params.maxTokens ?? 1000,
  });
  // If using free token, track usage against the quota.
  try {
    if (usingFreeKey) {
      await trackOpenAiFreeUsage(MODEL, response.usage.total_tokens);
    }
  } catch (e) {
    // Tracking is best-effort: never fail the caller's completion over it,
    // but don't swallow the failure silently either.
    console.warn("gpt3: failed to record free-quota usage", e);
  }
  return response.choices?.[0]?.text?.trim();
};
1
2
3
4
5
6
7
8
9
/**
 * Truncates `str` to at most `maxLen` characters, replacing the removed
 * tail with "..." so the result never exceeds `maxLen`.
 * Falsy inputs (undefined/null/"") are returned unchanged.
 */
export let truncateString = (str: string, maxLen: number) => {
  if (!str) {
    return str;
  }
  if (str.length > maxLen) {
    // Guard small limits: with maxLen <= 3 the old `slice(0, maxLen - 3)`
    // used a negative end index and kept the END of the string
    // (e.g. truncateString("hello", 2) returned "hell..." — 7 chars).
    if (maxLen <= 3) {
      return str.slice(0, Math.max(maxLen, 0));
    }
    return str.slice(0, maxLen - 3) + "...";
  }
  return str;
};
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import { fetchJSON } from "https://esm.town/v/stevekrouse/fetchJSON?v=41";
/**
Get the status or result of a prediction on replicate.com
https://replicate.com/docs/reference/http#get-prediction
*/
/**
 Get the status or result of a prediction on replicate.com
 https://replicate.com/docs/reference/http#get-prediction
 */
export async function getReplicatePrediction({
  /** https://replicate.com/account */
  apiKey,
  predictionId,
}: {
  apiKey: string;
  predictionId: string;
}) {
  // Both inputs are required; fail fast with a pointer to where keys live.
  if (!apiKey) throw new Error("missing apiKey; visit https://replicate.com/account");
  if (!predictionId) throw new Error("missing predictionId");
  const url = `https://api.replicate.com/v1/predictions/${encodeURIComponent(predictionId)}`;
  const response = await fetchJSON(url, {
    headers: { Authorization: `Token ${apiKey}` },
  });
  return response as Prediction;
}
/** Shape of a prediction object returned by the Replicate HTTP API. */
interface Prediction {
  id: string;
  version: string;
  // Follow-up endpoints for this prediction.
  urls: {
    get: string;
    cancel: string;
  };
  // Timestamps — presumably ISO-8601 strings from the API; confirm against Replicate docs.
  created_at: string;
  started_at: string;
  completed_at: string;
  source: string;
  status: "starting" | "processing" | "succeeded" | "failed" | "canceled";
  input: {
    prompt: string;
  };
  // Output URLs/values; only populated once status is "succeeded".
  // NOTE(review): typed as string[] here — some models return other shapes; verify per model.
  output: Array<string>;
  error: any;
  logs: string;
  metrics: {
    predict_time?: number;
  };
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
import { fetchJSON } from "https://esm.town/v/stevekrouse/fetchJSON?v=41";
/**
* Calls the OpenAI moderation model. Useful for determining if OpenAI will flag something you did.
* https://platform.openai.com/docs/api-reference/moderations
*/
/**
 * Calls the OpenAI moderation model. Useful for determining if OpenAI will flag something you did.
 * https://platform.openai.com/docs/api-reference/moderations
 */
export let openAiModeration = async ({
  apiKey,
  input,
  model,
}: {
  apiKey: string,
  input: string|string[],
  model?: "text-moderation-latest" | "text-moderation-stable",
}) => {
  if (!apiKey) {
    throw new Error("You must provide an OpenAI API Key");
  }
  // Only include "model" in the payload when explicitly requested,
  // letting the API pick its own default otherwise.
  const body: { model?: string, input: string|string[] } = model
    ? { input, model }
    : { input };
  const response = await fetchJSON("https://api.openai.com/v1/moderations", {
    method: "POST",
    headers: { Authorization: `Bearer ${apiKey}` },
    body: JSON.stringify(body),
  });
  return response as Result;
};
/** Response shape of the OpenAI moderations endpoint (one entry per input). */
interface Result {
  id: string;
  model: string;
  results: {
    // True if any category below was flagged for this input.
    flagged: boolean;
    // Per-category boolean verdicts.
    categories: {
      sexual: boolean;
      hate: boolean;
      violence: boolean;
      "self-harm": boolean;
      "sexual/minors": boolean;
      "hate/threatening": boolean;
      "violence/graphic": boolean;
    },
    // Per-category model confidence scores.
    category_scores: {
      sexual: number;
      hate: number;
      violence: number;
      "self-harm": number;
      "sexual/minors": number;
      "hate/threatening": number;
      "violence/graphic": number;
    }
  }[];
}
1
2
3
4
import { openAiFreeUsage } from "https://esm.town/v/patrickjm/openAiFreeUsage";
/** Reports whether the shared free OpenAI quota has been spent (see @patrickjm.openAiFreeUsage). */
export let openAiFreeQuotaExceeded = () => {
  return openAiFreeUsage.exceeded;
};
1
2
// set at Sat Dec 09 2023 01:45:57 GMT+0000 (Coordinated Universal Time)
// Snapshot of cumulative free-tier usage; `exceeded` is what the quota check reads.
export let openAiFreeUsage = {
  used_quota: 12709400,
  used_quota_usd: 1.27094,
  exceeded: false,
};
1
2
3
4
5
6
7
8
9
10
import process from "node:process";
import { gpt3 } from "https://esm.town/v/patrickjm/gpt3";
// Kicks off a gpt3 completion at import time; the export is the pending promise.
export let aiSarcasticMotivationalMessage = gpt3({
  openAiKey: process.env.openai_key,
  prompt: "Write a sarcastic, personal motivational message to someone to start the day.\n"
    + "Emphasize a morbid sense of humor.",
});
1
2
3
4
5
6
7
import process from "node:process";
import { weatherTomorrowGpt3 } from "https://esm.town/v/patrickjm/weatherTomorrowGpt3";
// Example invocation; the export resolves to a plain-English summary for NYC.
export let weatherTomorrowGpt3Example = weatherTomorrowGpt3({
  openAiKey: process.env.openai_key,
  city: "New York City",
});
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
import { gpt3 } from "https://esm.town/v/patrickjm/gpt3";
import { simpleWeather } from "https://esm.town/v/patrickjm/simpleWeather";
/**
 * Fetches tomorrow's forecast for `params.city` via simpleWeather and asks
 * gpt3 to summarize it in plain English. Resolves to the completion text.
 */
export let weatherTomorrowGpt3 = async (params: { openAiKey: string, city: string }) => {
  const weather = await simpleWeather(params.city);
  // forecast[1] is tomorrow (index 0 is today's entry from simpleWeather).
  return gpt3({
    openAiKey: params.openAiKey,
    prompt: `
Given a JSON sequence, give a short, plain-English summary about the weather tomorrow.
The hourly forecast uses military time, where i.e. 600 = 6 A.M. and 1500 = 3 P.M.
Here is the JSON string:
${JSON.stringify(weather.forecast[1])}
`,
  });
};
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
import { weather as weather2 } from "https://esm.town/v/marcel/weather?v=4";
/**
 * Condensed weather report for `city`, built on @marcel.weather.
 * Returns current conditions plus a per-day forecast with hourly detail;
 * numeric fields are stringified, temperatures use the *_F fields.
 */
export let simpleWeather = async (city: string) => {
  const raw = await weather2(city);
  const current = raw.current_condition[0];
  const days = raw.weather;
  return {
    temp: `${current.temp_F}`,
    feelsLike: `${current.FeelsLikeF}`,
    currentHumidity: `${current.humidity}%`,
    description: current.weatherDesc?.[0]?.value,
    windSpeed: `${current.windspeedMiles}mph`,
    forecast: days.map((day) => ({
      date: new Date(day.date),
      avgTemp: `${day.avgtempF}`,
      hourly: day.hourly.map((hour) => ({
        rainChance: `${hour.chanceofrain}`,
        time: hour.time,
        description: hour.weatherDesc?.[0]?.value,
        windSpeed: `${hour.windspeedMiles}mph`,
        temp: `${hour.tempF}`,
        feelsLike: `${hour.FeelsLikeF}`,
      })),
    })),
  };
};