Search

3,361 results found for "openai"

// API endpoint for OpenAI proxy
app.post("/api/openai/chat", async c => {
  try {
    const body = await c.req.json();
    const { proxyChatCompletion, logApiUsage } = await import("./backend/openaiProxy.ts");
    const result = await proxyChatCompletion(body);
    return c.json(result);
  } catch (error) {
    console.error("OpenAI proxy error:", error);
    return c.json(
      { error: error instanceof Error ? error.message : "Unknown error occurred" },
      500,
    );
  }
});

/**
 * OpenAI API Proxy
 *
 * Proxies requests to the OpenAI API to avoid exposing API keys to the client.
 */
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

/**
 * Proxy a chat completion request to OpenAI.
 */
export async function proxyChatCompletion(params: any): Promise<any> {
  try {
    // Create completion
    const completion = await openai.chat.completions.create({
      model: params.model,
      messages: params.messages,
    });
    return completion;
  } catch (error) {
    console.error("OpenAI API error:", error);
    throw error;
  }
}

/**
 * DebugConsole
 *
 * A collapsible terminal-like console for debugging.
 */
import { ApiEvents } from "../../shared/OpenAIConnector.ts";

export class DebugConsole {
  /**
   * Subscribe to the `openai:*` CustomEvents dispatched by OpenAIConnector.
   */
  private setupEventListeners(): void {
    window.addEventListener(`openai:${ApiEvents.REQUEST_STARTED}`, (e: any) => {
      const detail = e.detail;
      this.log('request', `Request started: ${detail.model}`, detail);
    });
    window.addEventListener(`openai:${ApiEvents.REQUEST_COMPLETED}`, (e: any) => {
      const detail = e.detail;
      this.log('success', `Request completed: ${detail.requestId}`, detail);
    });
    window.addEventListener(`openai:${ApiEvents.REQUEST_ERROR}`, (e: any) => {
      const detail = e.detail;
      this.log('error', `Error: ${detail.error}`, detail);
    });
    window.addEventListener(`openai:${ApiEvents.TOKEN_USAGE}`, (e: any) => {
      const detail = e.detail;
      // completion-token wording reconstructed; the original line is truncated here
      this.log('info', `Token usage: ${detail.totalTokens} total (${detail.promptTokens} prompt, ${detail.completionTokens} completion)`, detail);
    });
    window.addEventListener(`openai:${ApiEvents.LOG}`, (e: any) => {
      const detail = e.detail;
      this.log('log', detail.message);
    });
  }

  // Minimal stub; rendering into the collapsible panel is elided in this snippet.
  private log(level: string, message: string, details?: unknown): void {
    console.log(`[${level}] ${message}`, details ?? '');
  }
}

/**
 * OpenAI API Connector
 *
 * Handles direct connections to the OpenAI API.
 */
export class OpenAIConnector {
  private apiKey: string | null = null;
  private baseUrl = 'https://api.openai.com/v1';
  private useServerProxy: boolean = true;
  private dispatchEvent: (eventName: string, data: unknown) => void;

  constructor() {
    if (typeof window !== 'undefined') {
      this.dispatchEvent = (eventName, data) => {
        const event = new CustomEvent(`openai:${eventName}`, { detail: data });
        window.dispatchEvent(event);
      };
    } else {
      // Fallback for non-browser environments
      this.dispatchEvent = (eventName, data) => {
        console.log(`[OpenAI Event] ${eventName}:`, data);
      };
    }
  }

  /**
   * Send a chat completion request through the server-side proxy.
   */
  private async createCompletionViaProxy(params: any): Promise<any> {
    const response = await fetch('/api/openai/chat', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' }, // headers reconstructed; truncated in the snippet
      body: JSON.stringify(params),
    });
    if (!response.ok) {
      const errorData = await response.json().catch(() => null);
      throw new Error(errorData?.error?.message || `OpenAI API error: ${response.status}`);
    }
    return response.json();
  }
}

import { Hono } from "https://esm.sh/hono@3.11.12"; // added so the excerpt stands alone
import { cors } from "https://esm.sh/hono@3.11.12/middleware";
import { readFile, serveFile } from "https://esm.town/v/std/utils@85-main/index.ts";
import { OpenAI } from "https://esm.town/v/std/openai";
import { getAuthUrl } from "./auth.ts"; // import source assumed; truncated in the snippet

const app = new Hono();

app.get("/shared/*", c => serveFile(c.req.path, import.meta.url));

// Initialize OpenAI client
const openai = new OpenAI();

// Helper function to get session from cookies (body elided)

try {
  // Use OpenAI to parse the natural language command
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [
      // prompt construction elided in the snippet
    ],
  });
} catch (error) {
  // error handling elided
}

import { OpenAI } from "https://esm.town/v/std/openai";

// Initialize OpenAI client (moved to server scope)
const openai = new OpenAI();

// Main entry point for the Val
if (typeof userCommand === "string") {
  try {
    const aiResponse = await getOpenAIResponse(userCommand);
    return new Response(generateHtml(aiResponse), { headers: { "Content-Type": "text/html" } });
  } catch (error) {
    // original catch body was empty; a fallback response is added here
    return new Response("The mists swirl strangely. (AI connection error.)", { status: 500 });
  }
}

async function getOpenAIResponse(command: string): Promise<string> {
  try {
    const completion = await openai.chat.completions.create({
      model: "gpt-4o-mini", // model assumed; elided in the snippet
      messages: [
        { role: "user", content: command },
      ],
    });
    return completion.choices[0].message.content || "The air shifts, but nothing changes.";
  } catch (error) {
    console.error("Error calling OpenAI:", error);
    return "The mists swirl strangely. (AI connection error.)";
  }
}

Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
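For example, a minimal sketch using Val Town's std/sqlite (the `todos` table and its columns are hypothetical):

```ts
import { sqlite } from "https://esm.town/v/std/sqlite";

// Schema changed? Create a freshly named table instead of ALTERing the old one.
await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS todos_2 (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    title TEXT NOT NULL,
    done INTEGER NOT NULL DEFAULT 0
  )
`);
```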
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
<footer class="bg-yellow-500 text-black p-3 text-center text-sm">
  <p>BeeGPT - Powered by OpenAI | <a href="https://val.town" target="_top" class="underline">Val Town</a></p>
</footer>
# BeeGPT - Bee-Themed AI Assistant

A fun, bee-themed wrapper for OpenAI's GPT models that adds bee personality, puns, and facts to every response.

## Features

## How It Works

BeeGPT uses OpenAI's API to generate responses and images, but adds a bee-themed personality layer (see the sketch after this list):

1. A backend API that communicates with OpenAI
2. A bee-themed prompt that instructs the AI to respond with bee-related content
3. A bee-themed image generator that enhances prompts with bee elements
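A minimal sketch of what such a personality layer could look like; the `BEE_SYSTEM_PROMPT` text and `beeChat` helper are illustrative, not BeeGPT's actual code:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

// Hypothetical bee-personality system prompt (illustrative only)
const BEE_SYSTEM_PROMPT =
  "You are BeeGPT. Answer helpfully, but weave in bee puns and end with a bee fact.";

export async function beeChat(userMessage: string): Promise<string> {
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [
      { role: "system", content: BEE_SYSTEM_PROMPT },
      { role: "user", content: userMessage },
    ],
  });
  return completion.choices[0].message.content ?? "";
}
```

The image side would work the same way: prepend bee elements to the user's prompt before calling the DALL-E 3 image API.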
## Tech Stack

- Built on Val Town
- Uses OpenAI's GPT models (gpt-4o-mini for chat)
- Uses OpenAI's DALL-E 3 for image generation
- Frontend built with HTML, CSS, and vanilla JavaScript
- Styled with Tailwind CSS via CDN
## Environment Variables
This project requires an OpenAI API key to be set in your Val Town environment variables.
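On Val Town (a Deno runtime) the key would be read from the environment; a sketch assuming the conventional `OPENAI_API_KEY` name:

```ts
// Val Town runs on Deno, so secrets come from the environment
const apiKey = Deno.env.get("OPENAI_API_KEY");
if (!apiKey) throw new Error("OPENAI_API_KEY is not set");
```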
## License
salon/vNext/main.ts
// Interface name assumed; the snippet begins mid-declaration.
interface LogEntry {
  timestamp: string;
  level: LogLevel;
  component: string; // e.g., "Tool:OpenAI", "CustomFunc:ValidateData"
  message: string;
  details?: string; // Stringified JSON for complex objects
}
// For this example, they are functions called by a central router.
// Using Val Town's std/fetch and std/openai
import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "https://esm.town/v/std/openai";

// 1. HTTP Fetch Tool Endpoint (handleHttpFetch body elided in this snippet)

// 2. OpenAI Call Tool Endpoint
async function handleOpenAICall(
  reqPayload: {
    messages: Array<{ role: "system" | "user" | "assistant"; content: string }>;
    model?: string; // optional fields assumed; the snippet elides part of this type
    max_tokens?: number;
  },
  logger: Logger,
) {
  const { messages, model = "gpt-4o-mini", max_tokens } = reqPayload; // default model assumed
  logger.log("INFO", `Making OpenAI call to ${model}`, {
    messageCount: messages.length,
  });
  try {
    const openai = new OpenAI(); // Assumes OPENAI_API_KEY is in environment
    const completion = await openai.chat.completions.create({
      model,
      messages,
      ...(max_tokens !== undefined && { max_tokens }),
    });
    logger.log("SUCCESS", "OpenAI call successful.", {
      modelUsed: completion.model,
    });
    return completion;
  } catch (e: any) {
    logger.log("ERROR", "OpenAI API call failed.", e);
    throw e;
  }
}
switch (toolName) { // central router (reconstructed; snippet starts mid-switch)
  case "http_fetch":
    responsePayload = await handleHttpFetch(payload, logger);
    break;
  case "openai_call":
    responsePayload = await handleOpenAICall(payload, logger);
    break;
  case "string_template":
    // handler elided
}
{
  "id": "clean_petal_width_llm",
  "endpoint": "/api/tools/openai_call",
  "description": "LLM cleaning for 'petal.width'",
  "inputs": { ... }
},
{
  "id": "insights_llm",
  "endpoint": "/api/tools/openai_call",
  "description": "Get LLM insights on summary",
  "inputs": { ... }
}