import { email } from "https://esm.town/v/std/email";
import { extractValInfo } from "https://esm.town/v/stevekrouse/extractValInfo";
import { OpenAI } from "npm:openai";

// Body reconstructed from a truncated snippet: strips markdown code fences
// that models sometimes wrap around HTML output.
function stripHtmlBackticks(html: string): string {
  return html.replace(/^```(?:html)?\s*/, "").replace(/\s*```$/, "");
}

export default async function(e: Email) {
  const openai = new OpenAI();
  console.log(`from: ${e.from} to: ${e.to} subject: ${e.subject}, cc: ${e.cc}, bcc: ${e.bcc}`);
  // Prompt reconstructed; the original snippet only shows the call being opened.
  const summary = await openai.chat.completions.create({
    messages: [
      { role: "user", content: `Summarize this email:\n\n${e.text}` },
    ],
    model: "gpt-4o-mini",
  });
}
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table rather than altering the existing one.
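Under that convention, a schema change is a new `CREATE TABLE` under an incremented name rather than an `ALTER TABLE`. A small illustrative sketch using Val Town's std/sqlite (the table and columns here are made up):

```ts
import { sqlite } from "https://esm.town/v/std/sqlite";

// v1 was: CREATE TABLE todos_1 (id INTEGER PRIMARY KEY, text TEXT)
// The schema changed, so create todos_2 instead of altering todos_1.
await sqlite.execute(`CREATE TABLE IF NOT EXISTS todos_2 (
  id INTEGER PRIMARY KEY,
  text TEXT,
  done INTEGER DEFAULT 0
)`);
```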
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
{ role: "user", content: "Say hello in a creative way" },
import { OpenAI } from "https://esm.sh/openai@4.85.1";
import { GlobalRateLimiter } from "./GlobalRateLimiter.tsx";
};
interface ChatOpenAI {
invoke(messages: Message[]): Promise<string>;
}
export function ChatOpenAI(model: string): ChatOpenAI {
const openai = new OpenAI();
return {
invoke: async (messages: Message[]): Promise<string> => {
const completion = await openai.chat.completions.create({
messages: messages.map(message => ({
role: message.role as "user" | "assistant" | "system",
}
export function GlobalRateLimitedChatOpenAI(model: string, requestsPerSecond: number): ChatOpenA
const openAi = ChatOpenAI(model);
const rateLimiter = new GlobalRateLimiter(requestsPerSecond);
await rateLimiter.check();
return openAi.invoke(messages);
},
};
---
description: You can use openai-client when integrating vals with an LLM
globs:
alwaysApply: false
---
TypeScript interface for interacting with OpenAI's chat models, with optional global rate limiting.
Key Components
- Message Type: Defines the structure for chat messages (role and content).
- ChatOpenAI(model: string): Factory function returning an object with an invoke(messages) method.
- GlobalRateLimitedChatOpenAI(model: string, requestsPerSecond: number): Decorator for ChatOpenAI that enforces a global requests-per-second limit.
- GlobalRateLimiter: Class that implements the rate-limiting logic. It checks the number of requests recorded in the current one-second window before allowing another; a sketch follows this list.
- ensureGlobalRateLimitTableExists: Ensures the rate-limit tracking table exists in the database at startup.
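The components above imply a SQLite-backed counter. Below is a minimal sketch of how `GlobalRateLimiter` and `ensureGlobalRateLimitTableExists` could be implemented against Val Town's std/sqlite, matching the `check()` call shown in the code; the table name and windowing details are assumptions, not the library's actual source.

```ts
import { sqlite } from "https://esm.town/v/std/sqlite";

// Table name is an assumption; the real library may use a different one.
const TABLE = "global_rate_limit_requests";

export async function ensureGlobalRateLimitTableExists() {
  await sqlite.execute(
    `CREATE TABLE IF NOT EXISTS ${TABLE} (ts INTEGER NOT NULL)`,
  );
}

export class GlobalRateLimiter {
  constructor(private requestsPerSecond: number) {}

  // Rejects when the current one-second window is already full;
  // otherwise records this request and lets it through.
  async check(): Promise<void> {
    await ensureGlobalRateLimitTableExists();
    const now = Date.now();
    const result = await sqlite.execute({
      sql: `SELECT COUNT(*) FROM ${TABLE} WHERE ts > ?`,
      args: [now - 1000],
    });
    const count = Number(result.rows[0][0]);
    if (count >= this.requestsPerSecond) {
      throw new Error("Global rate limit exceeded; try again shortly");
    }
    await sqlite.execute({
      sql: `INSERT INTO ${TABLE} (ts) VALUES (?)`,
      args: [now],
    });
  }
}
```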
Usage
Use ChatOpenAI(model) for direct, unlimited access to OpenAI chat completions.
Use GlobalRateLimitedChatOpenAI(model, requestsPerSecond) to enforce a global rate limit on chat completions.
Val Town/Platform Notes
Uses Val Town’s standard SQLite API for persistent storage.
Designed for server-side use (no browser-specific code).
No secrets are hardcoded; OpenAI API keys are managed by the OpenAI SDK/environment.
To use this library, import from https://esm.town/v/cricks_unmixed4u/openai-client/main.tsx.
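A minimal usage sketch, assuming the exports shown above (the model name is arbitrary):

```ts
import {
  ChatOpenAI,
  GlobalRateLimitedChatOpenAI,
} from "https://esm.town/v/cricks_unmixed4u/openai-client/main.tsx";

// Direct, unlimited client.
const chat = ChatOpenAI("gpt-4o-mini");
console.log(await chat.invoke([{ role: "user", content: "Hello!" }]));

// Same interface, capped at two requests per second across all callers.
const limited = GlobalRateLimitedChatOpenAI("gpt-4o-mini", 2);
console.log(await limited.invoke([{ role: "user", content: "Hello again!" }]));
```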
const keywords = [
  "GPT",
  "OpenAI",
  "Transformer",
  "multimodal",
  // …
];

module.exports = [
  "GPT",
  "OpenAI",
  "Transformer",
  "multimodal",
  // …
];
export interface OpenAIServiceConfig {
  API_KEY?: string;
  BASE_URL: string;
}

// The aggregate interface's name is not in the snippet; "ServiceConfig" is a
// placeholder. The Tavily/Firecrawl/Resend config types are defined elsewhere.
export interface ServiceConfig {
  TAVILY: TavilyServiceConfig;
  FIRECRAWL: FirecrawlServiceConfig;
  OPENAI: OpenAIServiceConfig;
  RESEND: ResendServiceConfig;
}
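A minimal sketch of filling the OpenAI slice of that config from the environment (the loader name, the `OPENAI_BASE_URL` variable, and the default URL are assumptions):

```ts
function loadOpenAIConfig(): OpenAIServiceConfig {
  return {
    // OPENAI_API_KEY is listed in the environment variables below.
    API_KEY: Deno.env.get("OPENAI_API_KEY"),
    BASE_URL: Deno.env.get("OPENAI_BASE_URL") ?? "https://api.openai.com/v1",
  };
}
```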
Configure the following variables in your environment:
- `AGENT_API_KEY` (a secret token of your choosing, used to secure the agent.tsx POST endpoint)
- `OPENAI_API_KEY` (An OpenAI API Key)
- `EXA_API_KEY` (Optional, though needed if you use the web search tool)
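For illustration, the `AGENT_API_KEY` check on the POST endpoint might look like the sketch below (the bearer-token header and response shapes are assumptions, not taken from agent.tsx):

```ts
export default async function(req: Request): Promise<Response> {
  if (req.method !== "POST") {
    return new Response("Method not allowed", { status: 405 });
  }
  // Compare the caller's bearer token against the configured secret.
  const token = req.headers.get("authorization")?.replace(/^Bearer\s+/i, "");
  if (!token || token !== Deno.env.get("AGENT_API_KEY")) {
    return new Response("Unauthorized", { status: 401 });
  }
  // ...handle the agent request...
  return new Response("OK");
}
```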