Search

3,280 results found for openai (2294ms)

Code
3,185

Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a new table with the updated schema.
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
{ role: "user", content: "Say hello in a creative way" },
join/sil/main.tsx
3 matches
// @ts-nocheck
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
// --- AI BEHAVIORAL GUIDELINES ---
if (url.pathname === "/generatePrompts" && req.method === "POST") {
try {
const openai = new OpenAI();
const { subject } = await req.json();
}
const completion = await openai.chat.completions.create({
model: "gpt-4o",
messages: [
import { streamText } from "npm:hono@4.4.12/streaming";
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
// --- TYPE DEFINITIONS ---
// --- API HELPER ---
async function callOpenAI(
systemPrompt: string,
userContent: string | object,
isJson = true,
) {
const openai = new OpenAI();
const content = typeof userContent === "string"
? userContent
: JSON.stringify(userContent);
const completion = await openai.chat.completions.create({
model: "gpt-4o",
messages: [
}
const { context } = await c.req.json();
const result = await callOpenAI(LIST_GENERATOR_PROMPT(type, context), "");
return c.json(result);
});
app.post("/api/prompt/dynamic", async (c) => {
const body = await c.req.json();
const result = await callOpenAI(PROMPT_REFINER_PROMPT, body);
return c.json(result);
});
app.post("/api/inputs", async (c) => {
const { refined_prompt } = await c.req.json();
const result = await callOpenAI(FORM_GENERATOR_PROMPT, { refined_prompt });
return c.json(result);
});
app.post("/api/clarify", async (c) => {
const { refined_prompt } = await c.req.json();
const result = await callOpenAI(CLARIFICATION_PROMPT, { refined_prompt });
return c.json(result);
});
inputs: { ...user_inputs, ...clarifications },
};
const raw_output_v1 = await callOpenAI(
"Execute the provided prompt template using the given inputs. Produce only the raw output.",
v1UserContent,
output: raw_output_v1,
};
const { criteria } = await callOpenAI(
CRITERIA_GENERATOR_PROMPT,
criteriaContext,
criteria: criteria,
};
const evaluation_v1: Evaluation = await callOpenAI(
EVALUATOR_PROMPT,
v1EvalContext,
evaluation: evaluation_v1,
};
const raw_output_v2 = await callOpenAI(
REFINER_PROMPT,
v2RefineContext,
criteria: criteria,
};
const evaluation_v2: Evaluation = await callOpenAI(
EVALUATOR_PROMPT,
v2EvalContext,
import { Hono } from "npm:hono@4.4.12";
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai";
import type { Context } from "npm:hono@4.4.12";
import { streamText } from "npm:hono/streaming";
const services = {
/**
* A centralized function for making calls to the OpenAI API.
* @param systemPrompt - The system prompt to guide the AI.
* @param userContent - The user's input.
* @param options - Additional options like model, streaming, and response format.
* @returns A promise that resolves to the parsed JSON response or an OpenAI stream.
*/
async callOpenAI(
systemPrompt: string,
userContent: string | object,
} = options;
const openai = new OpenAI();
const messages: any[] = [
{ role: "system", content: systemPrompt },
try {
const completion = await openai.chat.completions.create(requestPayload);
if (stream && c) {
return completion;
} catch (e) {
console.error(`Error calling OpenAI: ${e.message}`);
throw new Error("AI service failed.");
}
...body,
};
return services.callOpenAI(
config.prompts.DYNAMIC_LIST_GENERATOR,
userContent,
JSON.stringify(company_context)
}\n\nOccupation: ${occupation_title}\n\nTask: ${task}`;
return services.callOpenAI(config.prompts.PROMPT_REFINER, userContent, {
c,
isJson: true,
app.post("/api/inputs", async (c: Context) => {
const { refined_prompt } = await c.req.json<InputsBody>();
return services.callOpenAI(config.prompts.INPUT_EXTRACTOR, refined_prompt, {
c,
isJson: true,
app.post("/api/clarify", async (c: Context) => {
const { refined_prompt } = await c.req.json<ClarifyBody>();
return services.callOpenAI(
config.prompts.CLARIFICATION_AGENT,
refined_prompt,
return streamText(c, async (stream) => {
try {
const openai = new OpenAI();
// 2. First call to the AI to see if it wants to use a tool
const initialResponse = await openai.chat.completions.create({
model: config.models.major,
messages,
// 5. Make the final call with tool results included, and stream the response
const finalStream = await openai.chat.completions.create({
model: config.models.major,
messages,
JSON.stringify(company_context)
}\n\nTask Briefing:\n${refined_prompt}`;
return services.callOpenAI(
config.prompts.EVALUATION_CRITERIA_GENERATOR,
userContent,
const systemPrompt =
`${config.prompts.EVALUATOR_AGENT}\n\nOutput Language: ${language}`;
return services.callOpenAI(systemPrompt, userContent, { c, stream: true });
});
const systemPrompt =
`${config.prompts.EVALUATOR_AGENT}\n\nOutput Language: ${language}`;
return services.callOpenAI(systemPrompt, userContent, { c, stream: true });
});
const systemPrompt =
`${config.prompts.REFINER_AGENT}\n\nOutput Language: ${language}`;
return services.callOpenAI(systemPrompt, userContent, { c, stream: true });
});
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
// --- TYPE DEFINITIONS ---
const url = new URL(req.url);
const action = url.searchParams.get("action");
const openai = new OpenAI();
if (req.method === "GET") {
? "occupations"
: "tasks";
const completion = await openai.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "system", content: DYNAMIC_LIST_GENERATOR }, {
JSON.stringify(company_context)
}\n\nOccupation: ${occupation_title}\n\nTask: ${task}`;
const completion = await openai.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "system", content: PROMPT_REFINER }, {
case "getInputs": {
const completion = await openai.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "system", content: INPUT_EXTRACTOR }, {
];
const initialResponse = await openai.chat.completions.create({
model: "gpt-4o",
messages: messages,
if (functionName === "createSpecialistAgent") {
const refineResponse = await openai.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "system", content: PROMPT_REFINER }, {
);
const inputsResponse = await openai.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "system", content: INPUT_EXTRACTOR }, {
};
const finalResponse = await openai.chat.completions.create({
model: "gpt-4o",
messages: [...messages, responseMessage, {
}
const executionResponse = await openai.chat.completions.create({
model: "gpt-4o",
messages: [
const triggerCodeAnalysis = async (codeToAnalyze) => {
try {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();
const analysis = await openai.chat.completions.create({
messages: [
{
import { Hono } from "npm:hono@4.4.12";
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai";
import type { Context } from "npm:hono@4.4.12";
import { streamText } from "npm:hono/streaming";
app.post("/api/blueprint/simulate-node", async (c: Context) => {
const { node } = await c.req.json();
const openai = new OpenAI();
try {
const completion = await openai.chat.completions.create({
model: "gpt-4o-mini",
messages: [
app.post("/api/blueprint/execute", async (c: Context) => {
const { blueprint } = await c.req.json();
const openai = new OpenAI();
try {
const stream = await openai.chat.completions.create({
model: "gpt-4o",
messages: [
The system automatically selects the most appropriate AI model based on the notification characteristics:
- **Fast Model** (`openai/gpt-oss-20b`): Simple notifications with low severity and minimal descriptions
- **Balanced Model** (`anthropic/claude-3.5-sonnet`): Standard triage operations (default)
- **Advanced Model** (`openai/gpt-4o`): Critical issues requiring sophisticated analysis
- **Reasoning Model** (`moonshotai/kimi-k2`): Complex scenarios with multiple factors, excellent
| Model | Provider | Use Case | Cost (per 1M tokens) | Max Tokens | Context |
|-------|----------|----------|---------------------|------------|---------|
| GPT-OSS 20B | OpenAI | Fast | $0.10/$0.50 | 131K | Open-weight MoE |
| Claude 3.5 Sonnet | Anthropic | Balanced | $3/$15 | 8K | Best overall |
| GPT-4o Mini | OpenAI | Fast | $0.15/$0.60 | 16K | Fallback fast |
| GPT-4o | OpenAI | Advanced | $5/$15 | 4K | Complex analysis |
| Kimi K2 Instruct | MoonshotAI | Reasoning | $1/$3 | 131K | Tool use expert |
| Claude 3 Opus | Anthropic | Reasoning | $15/$75 | 4K | Most capable |
modelSelection: {
default: 'anthropic/claude-3.5-sonnet', // Change default model
fast: 'openai/gpt-oss-20b', // Ultra-cheap for simple notifications
advanced: 'openai/gpt-4o', // For critical analysis
reasoning: 'moonshotai/kimi-k2' // For complex scenarios with tool use
}
join/hill/main.tsx
8 matches
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
// @ts-ignore
import { APIError } from "npm:openai/error";
// --- AI BEHAVIORAL GUIDELINES ---
const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
async function createCompletionWithRetry(openai, options, maxRetries = 3) {
let lastError;
for (let i = 0; i < maxRetries; i++) {
try {
return await openai.chat.completions.create(options);
} catch (error) {
lastError = error;
if (error instanceof APIError && error.status && error.status >= 500) {
console.log(`OpenAI API request failed. Retrying in ${i + 1}s...`);
await sleep((i + 1) * 1000);
continue;
if (req.method === "POST") {
try {
const openai = new OpenAI();
const body = await req.json();
const mood = body.mood;
const completionOptions:
OpenAI.Chat.Completions.ChatCompletionCreateParams = {
model: "gpt-4o",
messages: [{ role: "system", content: MOOD_ANALYSIS_PROMPT }, {
const completion = await createCompletionWithRetry(
openai,
completionOptions,
);
modelId: string,
prompt: string = "Hello! Please respond with a simple greeting.",
provider?: 'groq' | 'openai' | 'anthropic' | 'auto'
): Promise<{ success: boolean; response?: string; error?: string; duration?: number; provider?:
try {