Note: When changing a SQLite table's schema, change the table's name (e.g., add `_2` or `_3`) to create a fresh table (a SQLite sketch follows the OpenAI example below).

### OpenAI

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
});

console.log(completion.choices[0].message.content);
```
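The SQLite note above describes a rename-on-schema-change pattern. A minimal sketch of it, assuming the stevekrouse/sqlite client; the table name and columns are illustrative only:

```ts
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";

// Schema changed: create a freshly named table (messages_2) instead of
// altering the original messages table, then point queries at the new name.
await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS messages_2 (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    content TEXT NOT NULL,
    created_at TEXT DEFAULT CURRENT_TIMESTAMP
  )
`);
```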
```ts
import { type Context, Hono } from "https://esm.sh/hono@3.11.7";
import { blob } from "https://esm.town/v/std/blob";
import { OpenAI } from "https://esm.town/v/std/openai";
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
import Groq from "npm:groq-sdk";

const app = new Hono();
const openai = new OpenAI();

// Get all voice notes (for admin/dashboard)

async function transcribeAudio(voiceNoteId: string, audioBuffer: ArrayBuffer) {
  try {
    // Convert ArrayBuffer to File for OpenAI
    const audioFile = new File([audioBuffer], "audio.webm", { type: "audio/webm" });
```
- 🎙️ Record voice notes directly in the browser
- 🤖 AI-powered transcription using OpenAI Whisper
- 🔗 Share voice notes via unique URLs
- ⏰ Set expiration by max listens or date
- **Database**: SQLite for voice note metadata
- **Storage**: Val Town Blob storage for audio files
- **AI**: OpenAI Whisper for transcription (see the sketch below)
- **Frontend**: React with TypeScript
- **Styling**: TailwindCSS
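The `transcribeAudio` snippet above is cut off right after wrapping the buffer in a `File`, and the original imports both the std/openai client and groq-sdk, so it does not show which client performs the transcription. A minimal sketch of one plausible completion, assuming the npm `openai` SDK's Whisper endpoint and an `OPENAI_API_KEY` environment variable:

```ts
import { OpenAI } from "npm:openai";

const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

// Sketch (assumption): transcribe a recorded voice note with Whisper.
async function transcribeAudio(audioBuffer: ArrayBuffer): Promise<string> {
  // Whisper expects a file upload, not a raw ArrayBuffer.
  const audioFile = new File([audioBuffer], "audio.webm", { type: "audio/webm" });
  const transcription = await openai.audio.transcriptions.create({
    file: audioFile,
    model: "whisper-1",
  });
  return transcription.text;
}
```

The returned text could then be stored alongside the voice note's SQLite metadata row.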
```ts
import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "https://esm.town/v/std/openai";
import { PDFExtract, PDFExtractOptions } from "npm:pdf.js-extract";
}

async function callOpenAI(
  openaiInstance: OpenAI,
  systemPrompt: string,
  userMessage: string,
  agentName: string,
): Promise<object | string> {
  log.push({ agent: agentName, type: "step", message: `Calling OpenAI model ${model}...` });
  try {
    const response = await openaiInstance.chat.completions.create({
      model: model,
      messages: [

  }
} catch (error) {
  console.error(agentName, "OpenAI API call error:", error);
  let errMsg = "AI communication error.";
  if (error.message) errMsg += ` Message: ${error.message}`;

      <li>**No client-side persistence of documents and analyses**</li>
    </ul>
    <p>This application uses OpenAI's GPT models for its AI capabilities. Data submitted will be processed by OpenAI.</p>
  </div>
`;

export default async function(req: Request) {
  const openai = new OpenAI();
  const url = new URL(req.url);
  const format = url.searchParams.get("format");

  const suggPrompt = legalTaskSuggestionSystemPromptTemplate.replace("%%DOCUMENT_TEXT%%", suggText);
  const suggAgent = "Task Suggestion AI (LLM1)";
  const suggRes = await callOpenAI(
    openai,
    suggPrompt,
    "Generate task suggestions based on the provided document text.",

  const analysisAgent = "Legal Analysis AI (LLM2)";
  const aiRes = await callOpenAI(openai, finalPrompt, docToAnalyze, "gpt-4o", true, log, analysisAgent);
  if (typeof aiRes === "object" && (aiRes as any).error) {
```
```ts
// Environment Variables to set in Val.Town:
// - OPENAI_KEY: Your OpenAI API Key
// - NOTION_KEY: Your Notion API Key
// - NOTION_DATABASE_ID: The Database ID for your "Reflections" database (submissions)

  console.log("Main Submission Handler: Starting AI Analysis for:", userNumberFromForm);
  const OPENAI_KEY = getEnv("OPENAI_KEY") as string;

  let aiAnalysisResult = {
    summary: "AI summary not generated.",

  const userMessage = `Please analyze the following user reflection:\n\nUser Reflection Text:\n\"\"\"\n${combinedResponsesText}\n\"\"\"\n\nProvide your analysis as a single JSON object.`;
  const openAIPayload = {
    model: "gpt-3.5-turbo",
    messages: [{ role: "system", content: systemMessage }, { role: "user", content: userMessage }],
  };
  try {
    const openAIResponse = await fetch("https://api.openai.com/v1/chat/completions", {
      method: "POST",
      headers: { "Content-Type": "application/json", "Authorization": `Bearer ${OPENAI_KEY}` },
      body: JSON.stringify(openAIPayload),
    });
    if (!openAIResponse.ok) {
      const errorBody = await openAIResponse.text();
      console.error("Main Submission Handler: OpenAI API Error:", openAIResponse.status, errorBody);
    } else {
      const openAIData = await openAIResponse.json();
      const aiContent = openAIData.choices[0]?.message?.content;
      if (aiContent) {
        try {

        } catch (parseError) {
          console.error(
            "Main Submission Handler: Failed to parse OpenAI JSON response:",
            parseError,
            "Raw AI content:",

        }
      } else {
        console.warn("Main Submission Handler: OpenAI response content was empty.");
      }
    }
  } catch (aiError) {
    console.error("Main Submission Handler: Error calling OpenAI API:", aiError);
  }
```
```ts
  const NOTION_USERS_DB_ID = getEnv("NOTION_USERS_DATABASE_ID") as string;
  const NOTION_REFLECTIONS_DB_ID = getEnv("NOTION_DATABASE_ID") as string;
  const OPENAI_KEY = getEnv("OPENAI_KEY") as string;

  let isFirstTimeUser = true;

Output ONLY the welcome message text.`;
  const openAIPayload = {
    model: "gpt-3.5-turbo",
    messages: [{ role: "system", content: systemPrompt }],
  };
  console.log("getUserReflectionContext: Calling OpenAI for personalized welcome...");
  try {
    const openAIResponse = await fetch("https://api.openai.com/v1/chat/completions", {
      method: "POST",
      headers: { "Content-Type": "application/json", "Authorization": `Bearer ${OPENAI_KEY}` },
      body: JSON.stringify(openAIPayload),
    });
    if (!openAIResponse.ok) {
      const errorBody = await openAIResponse.text();
      console.error("getUserReflectionContext: OpenAI API Error:", openAIResponse.status, errorBody);
      welcomeMessage = DEFAULT_RETURNING_USER_MESSAGE;
    } else {
      const openAIData = await openAIResponse.json();
      if (openAIData.choices && openAIData.choices[0] && openAIData.choices[0].message) {
        welcomeMessage = openAIData.choices[0].message.content.trim();
        console.log("getUserReflectionContext: OpenAI generated welcome:", welcomeMessage);
      } else {
        welcomeMessage = DEFAULT_RETURNING_USER_MESSAGE;
        console.warn(
          "getUserReflectionContext: OpenAI response structure unexpected, using default welcome.",
        );
      }
    }
  } catch (aiError) {
    console.error("getUserReflectionContext: Error calling OpenAI:", aiError);
    welcomeMessage = DEFAULT_RETURNING_USER_MESSAGE;
  }
```
```ts
const aiTools: AITool[] = [
  // AI Assistants & Chatbots
  { name: "ChatGPT", url: "https://chatgpt.com/", category: "Assistant", description: "OpenAI's conversational AI assistant" },
  { name: "Claude", url: "https://claude.ai/", category: "Assistant", description: "Anthropic's AI assistant for various tasks and conversations" },
  { name: "Gemini", url: "https://gemini.google.com/", category: "Assistant", description: "Google's advanced AI assistant" },

  // Image Generation & Editing
  { name: "Midjourney", url: "https://www.midjourney.com/", category: "Images", description: "AI art and image generation platform" },
  { name: "DALL·E 3", url: "https://openai.com/dall-e-3", category: "Images", description: "OpenAI's advanced image generation model" },
  { name: "Stable Diffusion", url: "https://stability.ai/", category: "Images", description: "Open-source AI image generation" },
  { name: "Lexica", url: "https://lexica.art/", category: "Images", description: "AI art search engine and generator" },

  { name: "Runway", url: "https://runwayml.com/", category: "Video", description: "AI video editing and generation tools" },
  { name: "Synthesia", url: "https://www.synthesia.io/", category: "Video", description: "AI video generation with virtual avatars" },
  { name: "Sora", url: "https://openai.com/sora", category: "Video", description: "OpenAI's text-to-video generation model" },
  { name: "Kling", url: "https://klingai.com/", category: "Video", description: "AI video generation platform" },
  { name: "Hailuo", url: "https://hailuo.ai/", category: "Video", description: "AI video creation tool" },

  // AI Research & Platforms
  { name: "OpenAI", url: "https://openai.com/", category: "Research", description: "AI research and deployment company" },
  { name: "Anthropic", url: "https://www.anthropic.com/", category: "Research", description: "AI safety research company" },
  { name: "xAI", url: "https://x.ai/", category: "Research", description: "Elon Musk's AI company" },
```
```ts
import process from "node:process";
import { marked } from "npm:marked";
import { OpenAI } from "npm:openai";

function pm(...lines: string[]): string {

  );
  const client = new OpenAI({ apiKey: process.env.PERPLEXITY_API_KEY, baseURL: "https://api.perplexity.ai" });
  const response = await client.chat.completions.create({
    model: "sonar",
```
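The fragment above points the npm `openai` client at Perplexity's OpenAI-compatible endpoint but is cut off mid-request. A minimal self-contained sketch of that pattern, assuming a `PERPLEXITY_API_KEY` environment variable; the prompt text is illustrative, not from the original val:

```ts
import process from "node:process";
import { OpenAI } from "npm:openai";

// Perplexity's API is OpenAI-compatible, so the OpenAI SDK can be used
// as the client once pointed at Perplexity's base URL.
const client = new OpenAI({
  apiKey: process.env.PERPLEXITY_API_KEY,
  baseURL: "https://api.perplexity.ai",
});

const response = await client.chat.completions.create({
  model: "sonar",
  messages: [
    { role: "user", content: "Summarize today's top AI news in two sentences." }, // illustrative prompt
  ],
});

console.log(response.choices[0]?.message?.content);
```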
```ts
import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "https://esm.town/v/std/openai";
import { z } from "npm:zod";
}

async function callOpenAI(
  sysPrompt: string,
  userPrompt: string,

  lg(
    "DEBUG",
    "callOpenAI",
    `Initiating OpenAI call tid=${tid}`,
    { spLen: sysPrompt.length, upLen: userPrompt.length },
    mid,
  );
  try {
    const openai = new OpenAI();
    const completion = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: [{ role: "system", content: sysPrompt }, { role: "user", content: userPrompt }],

    lg(
      "WARN",
      "callOpenAI",
      `OpenAI call returned no text tid=${tid}.`,
      { usage: usage, finishReason: completion.choices[0]?.finish_reason },
      mid,

    lg(
      "INFO",
      "callOpenAI",
      `OpenAI call OK tid=${tid}`,
      { resLen: resText.length, usage: usage, finishReason: completion.choices[0]?.finish_reason },
      mid,

  } catch (err: any) {
    const errDtls = { msg: err.message, name: err.name, status: err.status, type: err.type, code: err.code };
    lg("ERROR", "callOpenAI", `OpenAI API call failed tid=${tid}`, { error: errDtls }, mid, tid);
    throw new Error(`OpenAI API failed: ${err.message}` + (err.code ? ` (Code: ${err.code})` : ""));
  }
}

  `Generate the ${p.cType} for the specified platform based on the context provided in the system prompt.`;
  try {
    const genContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!genContent) {
      lg("WARN", "CntAgent", `LLM returned no content tid=${tid}.`, undefined, mid, tid);

  const userP = `Develop the marketing strategy based on the system prompt context & framework.`;
  try {
    const stratContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!stratContent) {
      lg("WARN", "StratAgent", `LLM no content for strat tid=${tid}.`, undefined, mid, tid);

  `Generate 2-3 distinct logo concepts for "${p.bName}" based on the system prompt. Provide descriptions and AI prompts.`;
  try {
    const conceptContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!conceptContent) {
      lg("WARN", "LogoAgent", `LLM no content for logo tid=${tid}.`, undefined, mid, tid);

  `Develop foundational brand ID guide for "${p.bName}" based on system prompt context/instructions. Ensure cohesive & practical.`;
  try {
    const idContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!idContent) {
      lg("WARN", "BrandAgent", `LLM no content for brand ID tid=${tid}.`, undefined, mid, tid);

  const userP = `Run simulation type '${p.simType}' based on system prompt context/params. Follow format.`;
  try {
    const simContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!simContent) {
      lg("WARN", "SimAgent", `LLM no content for sim tid=${tid}.`, undefined, mid, tid);

  `Based on my request in system context, generate/refine the system prompt per guidelines. Output only resulting prompt text.`;
  try {
    const genSysP = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!genSysP) {
      lg("WARN", "MetaAgent", `LLM no content for meta-prompt tid=${tid}.`, undefined, mid, tid);
```
### Core Cognitive Tools

- **AI-Enhanced Thought Forking**: Automatically generate parallel explorations using OpenAI (see the sketch after this list)
- **Goal Tracking**: Create, update, and monitor goals with hierarchical structure
- **Task Management**: Break down goals into actionable tasks with state tracking
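A minimal sketch of how the AI-enhanced thought forking could call the std/openai client to generate parallel explorations; the helper name, prompt wording, and branch count are assumptions for illustration, not the val's actual implementation:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

// Hypothetical helper: fork a thought into several parallel explorations.
async function forkThought(thought: string, branches = 3): Promise<string[]> {
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [
      {
        role: "system",
        content: `Generate ${branches} distinct, parallel explorations of the user's thought. Return one per line.`,
      },
      { role: "user", content: thought },
    ],
  });
  const text = completion.choices[0]?.message?.content ?? "";
  return text.split("\n").filter((line) => line.trim().length > 0);
}
```

Each returned line could then be persisted (for example as a child thought) to feed the goal- and task-tracking features listed above.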
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";

/**
 * Practical Implementation of Collective Content Intelligence
 * Bridging advanced AI with collaborative content creation
 */
exp
```
```ts
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  if (req.method === "OPTIONS") {
    return new Response(null, {
      headers: {
        "Access-Control-Allow-Origin": "*",
```