Search

3,298 results found for "openai" (1647ms)

Code: 3,203

import { email } from "https://esm.town/v/std/email";
import { OpenAI } from "https://esm.town/v/std/OpenAI";
// ------------------------------ Email Address ------------------------------
console.log(e);
// Use OpenAI provided by Val Town to reply to the email
const openai = new OpenAI();
let chatCompletion = await openai.chat.completions.create({
  messages: [{
    role: "user",
// ---------------- Val Town Standard Library ----------------
// Val Town provides limited free hosted services, including
// functions for sending emails and using OpenAI
import { email } from "https://esm.town/v/std/email";
import { OpenAI } from "https://esm.town/v/std/OpenAI";
// --------------------- Get weather data --------------------
export default async function() {
  // Use OpenAI provided by Val Town to get a weather recommendation
  // Experiment with changing the prompt
  const openai = new OpenAI();
  let chatCompletion = await openai.chat.completions.create({
    messages: [{
      role: "user",
* Allows users to specify hat sequence (e.g., "W,R,B,Y,G,B") or use default.
* Simulates workflow visualization via structured text logs.
* Uses OpenAI via @std/openai for agent responses.
* Based on previous multi-agent simulation structures.
*
// --- Main Request Handler (Server Code for ThinkingFlow MVP) ---
export default async function(req: Request) {
  // Dynamic Import of OpenAI Library
  const { OpenAI } = await import("https://esm.town/v/std/openai");

  // --- OpenAI API Call Helper (Reused) ---
  async function callOpenAI(
    systemPrompt: string,
    userMessage: string, // Can be query or intermediate context
  ): Promise<{ role: "assistant" | "system"; content: string }> {
    try {
      const openai = new OpenAI();
      const response = await openai.chat.completions.create({
        model: model,
        messages: [

      if (!response.choices?.[0]?.message?.content) {
        console.error("OpenAI API returned unexpected structure:", JSON.stringify(response));
        throw new Error("Received invalid or empty response from AI model.");
      }
      return { role: "assistant", content: response.choices[0].message.content };
    } catch (error) {
      console.error(`OpenAI API call failed for model ${model}. Error:`, error.message, error.response);
      let errorMessage = `Error with AI model (${model}).`;
      let statusCode = error.status || error.response?.status;
      if (statusCode === 401) errorMessage = "OpenAI Auth Error (401). Check Val Town 'openai' secret.";
      else if (statusCode === 429) errorMessage = "OpenAI Rate Limit/Quota Error (429). Check OpenAI quota.";
      else if (statusCode === 400) errorMessage = `OpenAI Bad Request (400). Details: ${error.message}`;
      else if (statusCode >= 500) errorMessage = `OpenAI Server Error (${statusCode}). Try again later.`;
      else if (error.code === "ENOTFOUND" || error.code === "ECONNREFUSED")
        errorMessage = `Network Error (${error.code}). Cannot connect to OpenAI.`;
      else if (error.message.includes("secret")) errorMessage = error.message;
      else errorMessage += ` Details: ${error.message}`;

  currentPrompt = currentPrompt.replace("{{CONTEXT}}", ""); // No inter-hat context for MVP
  const hatResponse = await callOpenAI(currentPrompt, trimmedQuery); // Pass original query
  // Log response or error

  .replace("{{COLLECTED_OUTPUTS}}", outputsText);
  const summaryResponse = await callOpenAI(finalPrompt, "Synthesize the collected outputs.");
  conversationLog.push({ agent: "Blue Hat", message: summaryResponse.content }); // Log summary
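Pulling the visible fragments of this result together, a self-contained version of its callOpenAI helper might look roughly like this. The messages array contents, the default model, and the catch-branch return value are assumptions filled in around the lines shown above; everything else mirrors the excerpt.

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

// Wraps one chat completion call and maps failures to readable messages.
// Errors come back as "system" messages so callers can log them in the conversation.
async function callOpenAI(
  systemPrompt: string,
  userMessage: string, // Can be query or intermediate context
  model = "gpt-4o-mini", // assumed default; the original's `model` binding is not visible
): Promise<{ role: "assistant" | "system"; content: string }> {
  try {
    const openai = new OpenAI();
    const response = await openai.chat.completions.create({
      model,
      messages: [
        { role: "system", content: systemPrompt },
        { role: "user", content: userMessage },
      ],
    });
    if (!response.choices?.[0]?.message?.content) {
      console.error("OpenAI API returned unexpected structure:", JSON.stringify(response));
      throw new Error("Received invalid or empty response from AI model.");
    }
    return { role: "assistant", content: response.choices[0].message.content };
  } catch (error: any) {
    console.error(`OpenAI API call failed for model ${model}. Error:`, error.message, error.response);
    let errorMessage = `Error with AI model (${model}).`;
    const statusCode = error.status || error.response?.status;
    if (statusCode === 401) errorMessage = "OpenAI Auth Error (401). Check Val Town 'openai' secret.";
    else if (statusCode === 429) errorMessage = "OpenAI Rate Limit/Quota Error (429). Check OpenAI quota.";
    else if (statusCode === 400) errorMessage = `OpenAI Bad Request (400). Details: ${error.message}`;
    else if (statusCode >= 500) errorMessage = `OpenAI Server Error (${statusCode}). Try again later.`;
    else if (error.code === "ENOTFOUND" || error.code === "ECONNREFUSED") {
      errorMessage = `Network Error (${error.code}). Cannot connect to OpenAI.`;
    } else if (error.message?.includes("secret")) errorMessage = error.message;
    else errorMessage += ` Details: ${error.message}`;
    return { role: "system", content: errorMessage };
  }
}
```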
* User queries are routed, analyzed collaboratively, and synthesized into holistic advice.
* Based on the Multi-Agent AI Support Simulation structure.
* Uses OpenAI via /v/std/openai.
*
* Last Updated: 2025-04-18
<p class="description">
Your Personal Life Operating System. Integrate goals, wellness, career, finances, rela
can LifeSync help you synchronize your life today? (Using OpenAI via <code>/v/std/openai</code>)
<br>Current Date: ${currentDate}
</p>
// Includes LifeSync agent flow
export default async function(req: Request) {
  const { OpenAI } = await import("https://esm.town/v/std/openai");

  // --- Helper Function: Call OpenAI API ---
  // (Mostly unchanged, but updated the JSON mode check)
  async function callOpenAI(
    systemPrompt: string,
    userMessage: string,
  ): Promise<{ role: "assistant" | "system"; content: string }> {
    try {
      // Ensure OPENAI_API_KEY is set in Val Town secrets (environment variable)
      const openai = new OpenAI();
      const response = await openai.chat.completions.create({
        model: model,
        messages: [

      if (!response.choices?.[0]?.message?.content) {
        console.error("OpenAI API returned an unexpected or empty response structure:", JSON.stringify(response));
        throw new Error("Received invalid or empty response from AI model.");
      }
    } catch (error) {
      console.error(
        `OpenAI API call failed for model ${model}. System Prompt: ${systemPrompt.substring(0, 8
        error,
      );
      let statusCode = error.status || (error.response ? error.response.status : null);
      if (error.response && error.response.data && error.response.data.error) {
        errorMessage = `OpenAI Error (${statusCode || "unknown status"}): ${
          error.response.data.error.message || JSON.stringify(error.response.data.error)
        }`;
      }
      if (statusCode === 401)
        errorMessage = "OpenAI API Error (401): Authentication failed. Verify API key secret ('openai').";
      else if (statusCode === 429) errorMessage = "OpenAI API Error (429): Rate limit or quota exceeded.";
      else if (statusCode === 400) errorMessage = `OpenAI API Error (400): Bad Request. ${error.message}`;
      else if (statusCode >= 500) errorMessage = `OpenAI Server Error (${statusCode}): Issue on OpenAI's side.`;
      else if (error.code === "ENOTFOUND" || error.code === "ECONNREFUSED")
        errorMessage = `Network Error (${error.code}): Cannot connect to OpenAI API.`;
      // Return error: JSON for Routing agent, plain text for others
// --- 1. Routing Step ---
conversationLog.push({ agent: "⚙️ System", message: "Contacting Query Routing Agent..." });
const routingResponse = await callOpenAI(routingAgentSystemPrompt, trimmedQuery);
let relevantDomains: string[] = ["GENERAL"]; // Default
let routingLogMessage = "";
if (routingResponse.role === "system") { // Error from callOpenAI
routingLogMessage = routingResponse.content; // Log the system error
routingFailed = true;
// Pass the summary from the routing agent to the specialist agent
const specialistResponse = await callOpenAI(agentDetails.prompt, summary);
if (specialistResponse.role === "system") {
// Use a potentially stronger model for synthesis if needed
const synthesisResponse = await callOpenAI(
synthesisInput,
"Synthesize the above into a cohesive response for the user.",
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.

### OpenAI

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
console.log(completion.choices[0].message.content);
```
// ———————— Val.town/Backend Imports ———————— //
import { OpenAI } from "https://esm.town/v/std/openai"; // For backend API call
import { html } from "https://esm.town/v/stevekrouse/html"; // For serving HTML
async function runImprovementPipeline(
  originalText: string,
  openai: OpenAI,
): Promise<{ improvedText: string; coreIntentSummary: string }> {
  console.log("Pipeline Step 1: Starting Intent Extraction for text length:", originalText.length);
  try {
    const intentPrompt = createIntentExtractionPrompt(originalText);
    const intentCompletion = await openai.chat.completions.create({
      model: AI_MODEL,
      messages: [{ role: "user", content: intentPrompt }],

  let improvedText = originalText;
  try {
    const mainCompletion = await openai.chat.completions.create({
      model: AI_MODEL,
      messages: [{ role: "user", content: mainImprovementPrompt }],
export default async function server(req: Request): Promise<Response> {
  const url = new URL(req.url);
  const apiKey = Deno.env.get("OPENAI_API_KEY");
  if (!apiKey) {
    // ... (API key error handling remains the same) ...
    const errorMsg = "Server configuration error: OPENAI_API_KEY secret not set.";
    console.error(errorMsg);
    if (req.method === "POST") return Response.json({ error: errorMsg }, { status: 500 });
    return html(`<html><body><h1>Configuration Error</h1><p>${errorMsg}</p></body></html>`, { status: 500 });
  }
  const openai = new OpenAI({ apiKey: apiKey });

  // --- API Endpoint (POST) ---
  // Run the pipeline
  const { improvedText, coreIntentSummary } = await runImprovementPipeline(text, openai);
  // Return results
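A joined-up sketch of this two-step improvement pipeline (intent extraction, then rewrite) is below. The prompt builders, the AI_MODEL value, and the request parsing are assumptions, and it uses the key-less std/openai wrapper rather than the original's explicit OPENAI_API_KEY handling.

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

// Assumed constant and prompt builders; the originals are not visible in the search excerpt.
const AI_MODEL = "gpt-4o-mini";

function createIntentExtractionPrompt(text: string): string {
  return `Summarize the core intent of the following text in one or two sentences:\n\n${text}`;
}

function createImprovementPrompt(text: string, intent: string): string {
  return `Rewrite the text below so it expresses this intent more clearly: "${intent}"\n\n${text}`;
}

async function runImprovementPipeline(
  originalText: string,
  openai: OpenAI,
): Promise<{ improvedText: string; coreIntentSummary: string }> {
  // Step 1: extract the core intent of the original text.
  const intentCompletion = await openai.chat.completions.create({
    model: AI_MODEL,
    messages: [{ role: "user", content: createIntentExtractionPrompt(originalText) }],
  });
  const coreIntentSummary = intentCompletion.choices[0]?.message?.content ?? "";

  // Step 2: rewrite the text, steering the model with the extracted intent.
  const mainCompletion = await openai.chat.completions.create({
    model: AI_MODEL,
    messages: [{ role: "user", content: createImprovementPrompt(originalText, coreIntentSummary) }],
  });
  const improvedText = mainCompletion.choices[0]?.message?.content ?? originalText;

  return { improvedText, coreIntentSummary };
}

export default async function server(req: Request): Promise<Response> {
  if (req.method !== "POST") {
    return new Response("Send a POST request with JSON: { \"text\": \"...\" }", { status: 405 });
  }
  const { text } = await req.json();
  const openai = new OpenAI();
  const { improvedText, coreIntentSummary } = await runImprovementPipeline(text, openai);
  return Response.json({ improvedText, coreIntentSummary });
}
```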