Search

3,380 results found for openai (2655ms)

Code: 3,285

import { anthropic } from "npm:@ai-sdk/anthropic";
import { openai } from "npm:@ai-sdk/openai";
import { generateText, streamText } from "npm:ai";
import { getSystemPrompt } from "./prompt.tsx";
const maxSteps = 10;
ROPIC_API_KEY") ? anthropic("claude-3-7-sonnet-latest") : openai("gpt-4.1");
const options = {
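The result above is clipped at `const options = {`; here is a minimal sketch of how such a setup is commonly wired into `generateText`, assuming the system prompt would come from the imported `getSystemPrompt` and that the step limit is passed along (the prompt text and system string are illustrative, not the original code):

```ts
// Sketch only: how the imports above are typically combined with generateText.
// The system string and prompt are stand-ins, not the project's actual code.
import { anthropic } from "npm:@ai-sdk/anthropic";
import { openai } from "npm:@ai-sdk/openai";
import { generateText } from "npm:ai";

const maxSteps = 10;

// Prefer Anthropic when its key is configured, otherwise fall back to OpenAI.
const model = Deno.env.get("ANTHROPIC_API_KEY")
  ? anthropic("claude-3-7-sonnet-latest")
  : openai("gpt-4.1");

const { text } = await generateText({
  model,
  maxSteps, // cap multi-step tool use
  system: "You are a helpful coding assistant.", // stand-in for getSystemPrompt()
  prompt: "Explain what this val does.",
});
console.log(text);
```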
// Future implementation of OpenAI client for API calls
import { OpenAI } from "openai";
const openai = new OpenAI({
apiKey: Deno.env.get("OPENAI_API_KEY"),
});
export default openai;
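A possible way the exported client above would be consumed; the import path and model name are assumptions for illustration:

```ts
// Illustrative usage of the shared client exported above.
// The path "./openai_client.ts" is an assumed file name.
import openai from "./openai_client.ts";

const completion = await openai.chat.completions.create({
  model: "gpt-4o-mini", // illustrative model choice
  messages: [{ role: "user", content: "Ping" }],
});
console.log(completion.choices[0].message.content);
```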
ENABLED: !!Deno.env.get("FIRECRAWL_API_KEY"),
},
OPENAI: {
API_KEY: Deno.env.get("OPENAI_API_KEY"),
BASE_URL: "https://api.openai.com/v1",
TIMEOUT: 30000,
ENABLED: !!Deno.env.get("OPENAI_API_KEY"),
},
RESEND: {
"path": "https://deno.land/std@0.210.0/path/mod.ts",
"xss": "https://esm.sh/xss",
"openai": "https://esm.sh/openai@^4.0.1",
"fs": "https://deno.land/std@0.210.0/fs/mod.ts",
"hono-rate-limiter": "npm:hono-rate-limiter@^0.4.2",
import {
AIPerformanceAnalysis,
createOpenAIClient,
LouOpenAIClient,
validatePerformanceData,
validateWaiverData,
WaiverRecommendation,
} from "./ai_performance_openai_client.ts";
import {
createPromptConfig,
maxWaiverCandidates: number; // Maximum waiver candidates to analyze (default: 15)
// OpenAI configuration
openai: {
model: "gpt-4-turbo-preview" | "gpt-4o" | "gpt-3.5-turbo";
maxTokens: number;
export class AITeamPerformanceReviewer {
private performanceAnalyzer: LouPerformanceAnalyzer;
private openaiClient: LouOpenAIClient;
private tokenStorage: LouTokenStorage;
private config: AITeamReviewConfig;
minConfidenceThreshold: 70,
maxWaiverCandidates: 15,
openai: {
model: "gpt-4-turbo-preview",
maxTokens: 4000,
// Initialize components
this.performanceAnalyzer = new LouPerformanceAnalyzer();
this.openaiClient = createOpenAIClient({
model: this.config.openai.model,
maxTokens: this.config.openai.maxTokens,
temperature: this.config.openai.temperature,
});
this.tokenStorage = new LouTokenStorage();
console.log(`AI Team Performance Reviewer initialized with model: ${this.config.openai.model}`);
}
authentication: boolean;
yahooAPI: boolean;
openAI: boolean;
performanceAnalyzer: boolean;
};
authentication: false,
yahooAPI: false,
openAI: false,
performanceAnalyzer: false,
};
}
// Test 2: OpenAI API
try {
const openAITest = await this.openaiClient.testConnection();
results.openAI = openAITest.success;
if (!openAITest.success) {
errors.push(`OpenAI test failed: ${openAITest.error}`);
}
console.log(openAITest.success ? "✅ OpenAI test passed" : "❌ OpenAI test failed");
} catch (error) {
errors.push(`OpenAI test failed: ${error}`);
console.log("❌ OpenAI test failed");
}
// Execute AI analysis
const analysis = await this.openaiClient.analyzeTeamPerformance({
teamStats: data.teamStats,
playerStats: data.playerStats,
// Generate AI recommendations
const recommendations = await this.openaiClient.getPickupRecommendations(
underperformers,
data.waiverPlayers,
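The fragments above only show call sites; below is a hypothetical sketch of what a thin client like the one returned by `createOpenAIClient` might do with the configured model, token limit, and temperature. The internals, prompt shape, and return types are assumptions, not the project's actual code:

```ts
// Hypothetical sketch of a minimal client resembling the call sites above.
// Only the chat.completions.create calls use the real OpenAI SDK API.
import { OpenAI } from "npm:openai";

interface ClientConfig {
  model: string;
  maxTokens: number;
  temperature: number;
}

export function createOpenAIClient(config: ClientConfig) {
  const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

  return {
    // Cheap round-trip used by the health check above.
    async testConnection(): Promise<{ success: boolean; error?: string }> {
      try {
        await openai.chat.completions.create({
          model: config.model,
          max_tokens: 5,
          messages: [{ role: "user", content: "ping" }],
        });
        return { success: true };
      } catch (error) {
        return { success: false, error: String(error) };
      }
    },

    // Assumed shape: serialize the stats and ask the model for an analysis.
    async analyzeTeamPerformance(data: { teamStats: unknown; playerStats: unknown }) {
      const completion = await openai.chat.completions.create({
        model: config.model,
        max_tokens: config.maxTokens,
        temperature: config.temperature,
        messages: [
          { role: "system", content: "Analyze fantasy team performance and respond in JSON." },
          { role: "user", content: JSON.stringify(data) },
        ],
      });
      return completion.choices[0].message.content;
    },
  };
}
```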
---
description: You can use openai-client when integrating vals with an LLM
globs:
alwaysApply: false
---
TypeScript interface for interacting with OpenAI's chat models, with optional global rate limiting.
Key Components
Message Type: Defines the structure for chat messages (role and content).
ChatOpenAI(model: string): Factory function returning an object with an invoke(messages) method.
GlobalRateLimitedChatOpenAI(model: string, requestsPerSecond: number): Decorator for ChatOpenAI that enforces a global rate limit.
GlobalRateLimiter: Class that implements the rate limiting logic. It checks the number of requests in the current time window and only allows the call if the configured limit has not been reached.
ensureGlobalRateLimitTableExists: Ensures the rate limit tracking table exists in the database, creating it if necessary.
Usage
Use ChatOpenAI(model) for direct, unlimited access to OpenAI chat completions.
Use GlobalRateLimitedChatOpenAI(model, requestsPerSecond) to enforce a global rate limit on chat completion requests (see the usage sketch after this list).
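A minimal usage sketch based on the description above; the import path is an assumption, the message shape follows the Message type (role and content), and the model name is illustrative:

```ts
// Sketch only: import path and model name are assumptions based on the rule text.
import { ChatOpenAI, GlobalRateLimitedChatOpenAI } from "./openai-client.ts";

const messages = [
  { role: "system", content: "You are a concise assistant." },
  { role: "user", content: "Summarize this val's purpose in one line." },
];

// Direct, unlimited access to chat completions.
const chat = ChatOpenAI("gpt-4o-mini");
const reply = await chat.invoke(messages);

// Same interface, but capped at 2 requests per second across all callers.
const limitedChat = GlobalRateLimitedChatOpenAI("gpt-4o-mini", 2);
const limitedReply = await limitedChat.invoke(messages);

console.log(reply, limitedReply);
```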
Val Town/Platform Notes
Uses Val Town’s standard SQLite API for persistent storage.
Designed for server-side use (no browser-specific code).
No secrets are hardcoded; OpenAI API keys are managed by the OpenAI SDK/environment.
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
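A small illustration of that schema-versioning practice, assuming Val Town's standard SQLite API; the table and column names are placeholders:

```ts
import { sqlite } from "https://esm.town/v/std/sqlite";

// Suppose the old table was messages_1 (id, content). After adding a column,
// create a renamed table instead of altering the existing one.
await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS messages_2 (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    content TEXT NOT NULL,
    role TEXT NOT NULL
  )
`);
```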
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
console.log(completion.choices[0].message.content);
```