Search

3,263 results found for openai (1737ms)

Code
3,168

"description": "A sample blah manifest demonstrating various tool types and configurations."
"env": {
"OPENAI_API_KEY": Deno.env.get("OPENAI_API_KEY"),
},
"tools": [
import { openai } from "npm:@ai-sdk/openai";
import { generateText } from "npm:ai";
try {
const { text: fact } = await generateText({
model: openai("gpt-4o-mini"),
system: "You are an expert in conspiracy.",
prompt: `Provide an interesting conspiracy for fun`,
# askSMHI
Using OpenAI chat completion with function calls to [SMHI](https://en.wikipedia.org/wiki/Swedish_Meteorological_and_Hydrological_Institute)
The API is instructed to use the current time in Europe/Stockholm timezone.
## Relevant API documentation
* [SMHI, forecast documentation](https://opendata.smhi.se/apidocs/metfcst/get-forecast.html)
* [OpenAI, GPT function calling documentation](https://platform.openai.com/docs/guides/function-calling)
## How to use this endpoint
## Environment variables
* OPENAI_CHAT: Needs to be authorized to write chat completions and to the moderation API.
## Packages used
* openai: For typesafe API request and responses
* valibot: for describing the SMHI API response and function API input
* valibot/to-json-schema: Transform the schema to json schema (readable by the GPT API)
import { offset, removeOffset } from "npm:@formkit/tempo";
import { isWithinTokenLimit } from "npm:gpt-tokenizer/model/gpt-4o";
import { OpenAI } from "npm:openai";
import * as v from "npm:valibot";
import { openAIModeration } from "./gpt/moderation";
import { getWeatherAtCoordinate } from "./smhi/forecast/service";
import { getSmhiForecastResponseZodSchema } from "./smhi/schema";
return { error: "Too many tokens in question" };
}
const { flagged } = await openAIModeration([question]);
if (flagged) {
return { error: "Be nice in your question" };
}[],
};
const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });
const completion = await openai.chat.completions.create({
model: completionOptions.model,
store: completionOptions.store,
}]
: [];
const formattedFunctionResponseData = await openai.chat.completions.create({
model: completionOptions.model,
store: completionOptions.store,
import { fetch } from "https://esm.town/v/std/fetch";
import process from "node:process";
import { OpenAI } from "npm:openai";
/**
 * Screens a batch of public messages with the OpenAI moderation API.
 *
 * @param publicMessages - User-visible strings to check.
 * @returns `{ flagged: true }` when ANY message in the batch is flagged.
 */
export async function openAIModeration(publicMessages: string[]) {
  // NOTE(review): removed dead `headers`/`openAIKey` locals — they were leftovers
  // from a raw-fetch implementation; the SDK client below handles auth itself.
  // The key comes from OPENAI_CHAT (not OPENAI_API_KEY) — confirm deployment config.
  const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });
  const responseData = await openai.moderations.create({ input: publicMessages });
  // The whole batch is flagged if any single input is flagged.
  return { flagged: responseData.results.some((r) => r.flagged) };
}
import process from "node:process";
import { marked } from "npm:marked";
import { OpenAI } from "npm:openai";
function pm(...lines: string[]): string {
);
const client = new OpenAI({ apiKey: process.env.PERPLEXITY_API_KEY, baseURL: "https://api.perp
const response = await client.chat.completions.create({
model: "sonar",
if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
try {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const { messages } = await request.json();
const completion = await openai.chat.completions.create({
messages: messages,
model: "gpt-4o-mini",
"description": "A sample blah manifest demonstrating various tool types and configurations."
"env": {
"OPENAI_API_KEY": Deno.env.get("OPENAI_API_KEY"),
},
"tools": [
* This is a Townie clone for Val Town.
* It creates a chat interface that helps users generate Val Town applications.
* Uses OpenAI's API to generate code responses in the proper format.
*/
/** @jsxImportSource https://esm.sh/react@18.2.0 */
If you are changing a SQLite table's schema, you should also change the table's name so it creat
OPENAI
Val Town includes a free, proxied OpenAI:
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
{ role: "user", content: "Say hello in a creative way" },
): Promise<string> {
try {
// Try using the proxied OpenAI first, if that fails, fall back to direct API call
try {
return await processWithValTownOpenAI(messages, systemPrompt, stream);
} catch (error) {
console.error("Error with Val Town OpenAI:", error);
console.log("Falling back to direct OpenAI API call");
return await processWithDirectOpenAI(messages, systemPrompt, stream);
}
} catch (error) {
/**
* Process chat using Val Town's proxied OpenAI
*/
async function processWithValTownOpenAI(
messages: Message[],
systemPrompt: string,
): Promise<string> {
try {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
// Prepare the messages array with system prompt
const openaiMessages = [
{ role: "system", content: systemPrompt },
...messages.map(m => ({ role: m.role, content: m.content }))
];
console.log("Sending to Val Town OpenAI:", {
messageCount: openaiMessages.length,
firstUserMessage: messages[0]?.content?.substring(0, 20) + "..."
});
// If streaming is not required, get a complete response
if (!stream) {
const completion = await openai.chat.completions.create({
messages: openaiMessages,
model: "gpt-4o-mini", // Using Val Town's available model
temperature: 0.7,
return completion.choices[0]?.message?.content || "Sorry, I couldn't generate a response."
} else {
// Streaming is not directly supported by Val Town OpenAI wrapper
// Falling back to direct API
throw new Error("Streaming not supported by Val Town OpenAI wrapper");
}
} catch (error) {
console.error("Error in processWithValTownOpenAI:", error);
throw error;
}
/**
* Process chat using direct OpenAI API
*/
async function processWithDirectOpenAI(
messages: Message[],
systemPrompt: string,
): Promise<string> {
// Get API key from environment
const apiKey = Deno.env.get("OPENAI_API_KEY");
if (!apiKey) {
throw new Error("OpenAI API Key not found. Please set the OPENAI_API_KEY environment variabl
}
// Format messages for OpenAI API
const openaiMessages = [
{ role: "system", content: systemPrompt },
...messages.map(m => ({ role: m.role, content: m.content }))
];
console.log("Sending to Direct OpenAI:", {
messageCount: openaiMessages.length,
firstUserMessage: messages[0]?.content?.substring(0, 20) + "..."
});
if (stream) {
// Stream the response if a stream is provided
return await streamChatResponse(openaiMessages, apiKey, stream);
} else {
// Otherwise, return the complete response
return await fetchChatResponse(openaiMessages, apiKey);
}
}
/**
* Fetch a complete chat response from OpenAI
*/
async function fetchChatResponse(messages: any[], apiKey: string): Promise<string> {
try {
const response = await fetch("https://api.openai.com/v1/chat/completions", {
method: "POST",
headers: {
if (!response.ok) {
const errorText = await response.text();
console.error("OpenAI API error response:", errorText);
try {
const errorData = JSON.parse(errorText);
throw new Error(`OpenAI API error: ${response.status} ${errorData.error?.message || erro
} catch (e) {
throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
}
}
/**
* Stream a chat response from OpenAI
*/
async function streamChatResponse(messages: any[], apiKey: string, stream: any): Promise<string>
try {
const response = await fetch("https://api.openai.com/v1/chat/completions", {
method: "POST",
headers: {
if (!response.ok) {
const errorText = await response.text();
console.error("OpenAI API streaming error:", errorText);
throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
}
status: "ok",
message: "Backend is working",
hasOpenAiKey: Boolean(Deno.env.get("OPENAI_API_KEY")),
hasValTownOpenAI: true
};
});
web/games/main.tsx
7 matches
export default async function server(request: Request): Promise<Response> {
// Import necessary dependencies
const { OpenAI } = await import("https://esm.town/v/std/openai");
const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
};
// Initialize OpenAI client
const openai = new OpenAI();
// Set constants
`;
// Prepare messages for OpenAI API
const aiMessages = [
{ role: "system", content: systemPrompt },
try {
// Call OpenAI API
const completion = await openai.chat.completions.create({
model: "gpt-4-turbo",
messages: aiMessages,
} catch (error) {
serverLog("ERROR", "Error calling OpenAI API", { error });
return new Response(JSON.stringify({
error: "Failed to process request",