Search
Code3,285
import { anthropic } from "npm:@ai-sdk/anthropic";import { openai } from "npm:@ai-sdk/openai";import { generateText, streamText } from "npm:ai";import { getSystemPrompt } from "./prompt.tsx"; const maxSteps = 10; const model = Deno.env.get("ANTHROPIC_API_KEY") ? anthropic("claude-3-7-sonnet-latest") : openai("gpt-4.1"); const options = {
// Future implementation of OpenAI client for API callsimport { OpenAI } from "openai";const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY"),});export default openai;
ENABLED: !!Deno.env.get("FIRECRAWL_API_KEY"), }, OPENAI: { API_KEY: Deno.env.get("OPENAI_API_KEY"), BASE_URL: "https://api.openai.com/v1", TIMEOUT: 30000, ENABLED: !!Deno.env.get("OPENAI_API_KEY"), }, RESEND: {
"path": "https://deno.land/std@0.210.0/path/mod.ts", "xss": "https://esm.sh/xss", "openai": "https://esm.sh/openai@^4.0.1", "fs": "https://deno.land/std@0.210.0/fs/mod.ts", "hono-rate-limiter": "npm:hono-rate-limiter@^0.4.2",
import { AIPerformanceAnalysis, createOpenAIClient, LouOpenAIClient, validatePerformanceData, validateWaiverData, WaiverRecommendation,} from "./ai_performance_openai_client.ts";import { createPromptConfig, maxWaiverCandidates: number; // Maximum waiver candidates to analyze (default: 15) // OpenAI configuration openai: { model: "gpt-4-turbo-preview" | "gpt-4o" | "gpt-3.5-turbo"; maxTokens: number;export class AITeamPerformanceReviewer { private performanceAnalyzer: LouPerformanceAnalyzer; private openaiClient: LouOpenAIClient; private tokenStorage: LouTokenStorage; private config: AITeamReviewConfig; minConfidenceThreshold: 70, maxWaiverCandidates: 15, openai: { model: "gpt-4-turbo-preview", maxTokens: 4000, // Initialize components this.performanceAnalyzer = new LouPerformanceAnalyzer(); this.openaiClient = createOpenAIClient({ model: this.config.openai.model, maxTokens: this.config.openai.maxTokens, temperature: this.config.openai.temperature, }); this.tokenStorage = new LouTokenStorage(); console.log(`🤖 AI Team Performance Reviewer initialized with model: ${this.config.openai.model}`); } authentication: boolean; yahooAPI: boolean; openAI: boolean; performanceAnalyzer: boolean; }; authentication: false, yahooAPI: false, openAI: false, performanceAnalyzer: false, }; } // Test 2: OpenAI API try { const openAITest = await this.openaiClient.testConnection(); results.openAI = openAITest.success; if (!openAITest.success) { errors.push(`OpenAI test failed: ${openAITest.error}`); } console.log(openAITest.success ? "✅ OpenAI test passed" : "❌ OpenAI test failed"); } catch (error) { errors.push(`OpenAI test failed: ${error}`); console.log("❌ OpenAI test failed"); } // Execute AI analysis const analysis = await this.openaiClient.analyzeTeamPerformance({ teamStats: data.teamStats, playerStats: data.playerStats, // Generate AI recommendations const recommendations = await this.openaiClient.getPickupRecommendations( underperformers, data.waiverPlayers,
---
description: You can use openai-client when integrating vals to an LLM
globs:
alwaysApply: false
---

TypeScript interface for interacting with OpenAI's chat models, with optional global rate limiting; uses Val Town's SQLite for persistent rate-limit tracking.

Key Components

- Message Type: Defines the structure for chat messages (role and content).
- ChatOpenAI(model: string): Factory function returning an object with an invoke(messages) method. This method sends an array of messages to the specified OpenAI chat model and returns the assistant's response.
- GlobalRateLimitedChatOpenAI(model: string, requestsPerSecond: number): Decorator for ChatOpenAI that enforces a global rate limit (requests per second) using a persistent SQLite table.
- GlobalRateLimiter: Class that implements the rate-limiting logic. It checks the number of requests in the current time window and throws an error if the limit is exceeded. It uses a table (global_rate_limit_1) in Val Town's SQLite.
- ensureGlobalRateLimitTableExists: Ensures the rate-limit tracking table exists in the database at startup.

Usage

- Use ChatOpenAI(model) for direct, unlimited access to OpenAI chat completions.
- Use GlobalRateLimitedChatOpenAI(model, requestsPerSecond) to enforce a global rate limit on chat completions, suitable for shared or public-facing endpoints.

Val Town / Platform Notes

- Uses Val Town's standard SQLite API for persistent storage.
- Designed for server-side use (no browser-specific code).
- No secrets are hardcoded; OpenAI API keys are managed by the OpenAI SDK/environment.
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.### OpenAI```tsimport { OpenAI } from "https://esm.town/v/std/openai";const openai = new OpenAI();const completion = await openai.chat.completions.create({ messages: [ { role: "user", content: "Say hello in a creative way" },
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.### OpenAI```tsimport { OpenAI } from "https://esm.town/v/std/openai";const openai = new OpenAI();const completion = await openai.chat.completions.create({ messages: [ { role: "user", content: "Say hello in a creative way" },
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.### OpenAI```tsimport { OpenAI } from "https://esm.town/v/std/openai";const openai = new OpenAI();const completion = await openai.chat.completions.create({ messages: [ { role: "user", content: "Say hello in a creative way" },
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.### OpenAI```tsimport { OpenAI } from "https://esm.town/v/std/openai";const openai = new OpenAI();const completion = await openai.chat.completions.create({ messages: [ { role: "user", content: "Say hello in a creative way" },
reconsumeralization
import { OpenAI } from "https://esm.town/v/std/openai";
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
/**
* Practical Implementation of Collective Content Intelligence
* Bridging advanced AI with collaborative content creation
*/
exp
kwhinnery_openai
lost1991
import { OpenAI } from "https://esm.town/v/std/openai";
export default async function(req: Request): Promise<Response> {
if (req.method === "OPTIONS") {
return new Response(null, {
headers: {
"Access-Control-Allow-Origin": "*",
No docs found