Search

3,377 results found for openai (7598ms)

Code
3,282

Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to cre
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
{ role: "user", content: "Say hello in a creative way" },
import { load } from "https://deno.land/std@0.220.0/dotenv/mod.ts";
import { Hono } from "https://deno.land/x/hono@v3.12.11/mod.ts";
import OpenAI from "npm:openai@latest";
// Load environment variables from .env file
const app = new Hono();
// Initialize OpenAI client
const openai = new OpenAI({
apiKey: env.OPENAI_API_KEY || Deno.env.get("OPENAI_API_KEY"),
});
// OpenAI chat endpoint using Responses API
app.get("/chat/:message?", async (c) => {
const message = c.req.query("message") ?? c.req.param("message");
try {
// Create a response with OpenAI Responses API in background mode
const response = await openai.responses.create({
model: "gpt-4o-mini",
input: [{
});
// Polling endpoint for OpenAI Responses API
app.get("/chat/poll/:pollId", async (c) => {
const pollId = c.req.param("pollId");
try {
// Check if response is completed (no delay first)
let response = await openai.responses.retrieve(responseId);
// If not completed, wait 4 seconds and check again
if (response.status !== "completed") {
await new Promise((resolve) => setTimeout(resolve, 4000));
response = await openai.responses.retrieve(responseId);
}
const OPENAI_API_HOST = "api.openai.com";
export default async function (request: Request): Promise<Response> {
const url = new URL(request.url);
url.host = OPENAI_API_HOST;
const newRequest = new Request(url.toString(), {
---
description: You can use openai-client when integrating vals with an LLM
globs:
alwaysApply: false
---
TypeScript interface for interacting with OpenAI's chat models, with optional global rate limiti
Key Components
Message Type: Defines the structure for chat messages (role and content).
ChatOpenAI(model: string): Factory function returning an object with an invoke(messages) method.
GlobalRateLimitedChatOpenAI(model: string, requestsPerSecond: number): Decorator for ChatOpenAI
GlobalRateLimiter: Class that implements the rate limiting logic. It checks the number of reques
ensureGlobalRateLimitTableExists: Ensures the rate limit tracking table exists in the database a
Usage
Use ChatOpenAI(model) for direct, unlimited access to OpenAI chat completions.
Use GlobalRateLimitedChatOpenAI(model, requestsPerSecond) to enforce a global rate limit on chat
Val Town/Platform Notes
Uses Val Town’s standard SQLite API for persistent storage.
Designed for server-side use (no browser-specific code).
No secrets are hardcoded; OpenAI API keys are managed by the OpenAI SDK/environment.
```
### OpenAI
library, import from https://esm.town/v/cricks_unmixed4u/openai-client/main.tsx
TypeScript interface for interacting with OpenAI's chat models, with optional global rate limiti
Key Components
Message Type: Defines the structure for chat messages (role and content).
ChatOpenAI(model: string): Factory function returning an object with an invoke(messages) method.
GlobalRateLimitedChatOpenAI(model: string, requestsPerSecond: number): Decorator for ChatOpenAI
GlobalRateLimiter: Class that implements the rate limiting logic. It checks the number of reques
ensureGlobalRateLimitTableExists: Ensures the rate limit tracking table exists in the database a
Usage
Use ChatOpenAI(model) for direct, unlimited access to OpenAI chat completions.
Use GlobalRateLimitedChatOpenAI(model, requestsPerSecond) to enforce a global rate limit on chat
Val Town/Platform Notes
Uses Val Town’s standard SQLite API for persistent storage.
Designed for server-side use (no browser-specific code).
No secrets are hardcoded; OpenAI API keys are managed by the OpenAI SDK/environment.
### Email
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to cre
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
{ role: "user", content: "Say hello in a creative way" },
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to cre
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
{ role: "user", content: "Say hello in a creative way" },
<div class="flex justify-between items-center">
<div>
<h5 class="font-medium">OpenAI Whisper API</h5>
<p class="text-sm text-gray-600">Direct audio transcription using Whisper</p
</div>
svc/med/main.tsx
5 matches
import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "https://esm.town/v/std/openai";
import { z } from "npm:zod";
// --- CORE BACKEND LOGIC ---
const llm = async (sysPrompt, userPrompt, log, tid, model = "gpt-4o") => {
log("DEBUG", "LLM", `Calling OpenAI for TID ${tid}`);
try {
const oa = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
const completion = await oa.chat.completions.create({
model,
});
const content = completion.choices[0]?.message?.content;
if (!content) throw new Error("OpenAI returned no content.");
return JSON.parse(content);
} catch (err) {
log("ERROR", "LLM", `OpenAI API call failed for TID ${tid}`, { error: err.message });
throw new Error(`AI model error: ${err.message}`);
}
<div class="flex justify-between items-center">
<div>
<h5 class="font-medium">OpenAI Whisper API</h5>
<p class="text-sm text-gray-600">Direct audio transcription using Whisper</p
</div>