Search
Code (3,275 results)
import {generateObject, LanguageModel, tool} from "npm:ai"
import {openai} from "npm:@ai-sdk/openai"
import {google} from "npm:@ai-sdk/google"
import {

  constructor(provider?: AIProvider) {
    const envProvider = Deno.env.get("AI_PROVIDER") as AIProvider | undefined
    this.provider = provider || envProvider || "openai"
    if (this.provider === "openai") {
      if (!Deno.env.get("OPENAI_API_KEY")) {
        console.warn("OPENAI_API_KEY environment variable not set. OpenAI calls may fail.")
      }
      this.model = openai("gpt-4o") as LanguageModel
    } else if (this.provider === "gemini") {
      if (!Deno.env.get("GOOGLE_GENERATIVE_AI_API_KEY") && !Deno.env.get("GOOGLE_API_KEY")) {
"npm:@ai-sdk/google@*": "0.0.23_zod@3.23.8", "npm:@ai-sdk/google@0.0.23": "0.0.23_zod@3.23.8", "npm:@ai-sdk/openai@*": "0.0.36_zod@3.23.8", "npm:@ai-sdk/openai@0.0.36": "0.0.36_zod@3.23.8", "npm:@libsql/client@*": "0.15.6", "npm:@types/node@*": "22.12.0", ] }, "@ai-sdk/openai@0.0.36_zod@3.23.8": { "integrity": "sha512-6IcvR35UMuuQEQPkVjzUtqDAuz6vy+PMCEL0PAS2ufHXdPPm81OTKVetqjgOPjebsikhVP0soK1pKPEe2cztAQ==", "dependencies": [
import { Hono } from "https://esm.sh/hono@3.11.7";
import { cors } from "https://esm.sh/hono@3.11.7/middleware";
import { OpenAI } from "https://esm.town/v/std/openai";
import { readFile, serveFile } from "https://esm.town/v/std/utils@85-main/index.ts";

  }
  const openai = new OpenAI();
  try {
    const completion = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: [

      enhancedPrompt
    });
  } catch (openaiError) {
    console.error("OpenAI API error:", openaiError);
    return c.json({
      error: "OpenAI service error",
      message: "There was an issue with the AI service. Please try again later."
    }, 503);
- Simple, clean interface for entering prompts
- AI-powered enhancement using OpenAI
- Before/after comparison
- Copy-to-clipboard functionality

1. Enter your basic prompt in the input area
2. Click "Enhance Prompt"
3. The application uses OpenAI to analyze and improve your prompt
4. View the enhanced version and copy it for use with your preferred AI system

- Frontend: HTML, JavaScript with Tailwind CSS for styling
- Backend: TypeScript API using Hono framework and OpenAI integration
- Deployed on Val Town
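A minimal sketch of the enhancement flow described above, assuming a Hono route plus the Val Town std/openai client as in the adjacent snippet; the route path, system prompt wording, and response shape here are illustrative assumptions, not the val's actual code:

import { Hono } from "https://esm.sh/hono@3.11.7";
import { OpenAI } from "https://esm.town/v/std/openai";

const app = new Hono();
const openai = new OpenAI();

// Hypothetical enhancement endpoint: takes a raw prompt, returns an improved version
app.post("/api/enhance", async (c) => {
  const { prompt } = await c.req.json();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [
      { role: "system", content: "Rewrite the user's prompt to be clearer and more specific." },
      { role: "user", content: prompt },
    ],
  });
  // Return both versions so the UI can show a before/after comparison
  return c.json({
    original: prompt,
    enhancedPrompt: completion.choices[0]?.message?.content ?? "",
  });
});

export default app.fetch;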
import { Hono } from "https://esm.sh/hono@3.11.7";
import { OpenAI } from "https://esm.town/v/std/openai";
import { readFile, serveFile } from "https://esm.town/v/std/utils@85-main/index.ts";
import { parseProject } from "https://esm.town/v/std/utils@85-main/index.ts";

  }
  const openai = new OpenAI();
  // Create different prompts based on the content type

  }
  const completion = await openai.chat.completions.create({
    messages: [
      {
import { OpenAI } from "https://esm.town/v/std/openai";
import { readFile } from "https://esm.town/v/std/utils@85-main/index.ts";
import { LicenseInfo, ScanResponse } from "../shared/types.ts";

  const dataURI = `data:${imageFile.type};base64,${base64Image}`;

  // Process with OpenAI
  const licenseInfo = await extractLicenseInfo(dataURI);

/**
 * Extracts license information from an image using OpenAI's Vision capabilities
 */
async function extractLicenseInfo(imageDataUri: string): Promise<LicenseInfo> {
  const openai = new OpenAI();
  const response = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [

    return JSON.parse(jsonString) as LicenseInfo;
  } catch (error) {
    console.error("Error parsing OpenAI response:", error);
    return { rawText: content };
  }
<footer class="mt-12 text-center text-sm text-gray-500">
  <p>This application processes images using OpenAI's API. Images are not stored permanently.</p>
  <p class="mt-1">
    <a
1. Upload an image of a driver's license
2. The application uses OpenAI's Vision capabilities to analyze the image
3. The extracted information is returned in a structured JSON format

## Privacy Notice

This application processes images using OpenAI's API. Images are not stored permanently but are transmitted to OpenAI for processing. Please ensure you have appropriate consent before uploading any personal identification documents.
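The Vision call behind step 2 likely resembles the following sketch, which sends the image as a data URI in a multimodal chat message and falls back to raw text when the reply is not valid JSON; the prompt wording and the loose return type are assumptions, not the val's exact implementation:

import { OpenAI } from "https://esm.town/v/std/openai";

// Sketch: ask a vision-capable model to read a license image and reply with JSON
async function extractLicenseInfoSketch(imageDataUri: string): Promise<unknown> {
  const openai = new OpenAI();
  const response = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [
      {
        role: "user",
        content: [
          { type: "text", text: "Extract the fields on this driver's license and reply with JSON only." },
          { type: "image_url", image_url: { url: imageDataUri } },
        ],
      },
    ],
  });
  const content = response.choices[0]?.message?.content ?? "{}";
  try {
    return JSON.parse(content);
  } catch {
    // Keep the raw text if the model did not return parseable JSON
    return { rawText: content };
  }
}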
import { OpenAI } from "https://esm.town/v/std/openai";
import { readFile } from "https://esm.town/v/std/utils@85-main/index.ts";

// Initialize OpenAI client
const openai = new OpenAI();

// Medical analysis prompt to guide the AI

  const dataURI = `data:${imageFile.type};base64,${base64Image}`;

  // Send the image to OpenAI for analysis
  const response = await openai.chat.completions.create({
    model: "gpt-4-vision-preview",
    messages: [

  });

  // Extract the analysis from OpenAI's response
  const analysis = response.choices[0]?.message?.content || "Analysis failed";
1. Users upload a medical image through the web interface
2. The image is sent to OpenAI's Vision API with specific medical analysis prompts
3. The AI analyzes the image and provides potential diagnoses or observations
4. Results are displayed to the user

- Frontend: HTML, JavaScript, and TailwindCSS for styling
- Backend: TypeScript with Val Town's HTTP trigger
- AI: OpenAI's Vision API for image analysis

## Limitations
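For steps 1 and 2, the upload handler has to turn the submitted file into the base64 data URI seen in the snippet above before the Vision call. A self-contained sketch of that step is below; the form field name and helper signature are assumptions for illustration:

// Hypothetical helper: read an uploaded file from a multipart form and build a data URI
export async function imageToDataURI(req: Request): Promise<string> {
  const form = await req.formData();
  const imageFile = form.get("image") as File; // "image" field name is an assumption
  const bytes = new Uint8Array(await imageFile.arrayBuffer());
  let binary = "";
  for (const b of bytes) binary += String.fromCharCode(b);
  const base64Image = btoa(binary);
  return `data:${imageFile.type};base64,${base64Image}`;
}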
reconsumeralization
import { OpenAI } from "https://esm.town/v/std/openai";
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
/**
* Practical Implementation of Collective Content Intelligence
* Bridging advanced AI with collaborative content creation
*/
exp
kwhinnery_openai
lost1991
import { OpenAI } from "https://esm.town/v/std/openai";
export default async function(req: Request): Promise<Response> {
if (req.method === "OPTIONS") {
return new Response(null, {
headers: {
"Access-Control-Allow-Origin": "*",