Search
Code3,267
import { OpenAI } from "https://esm.town/v/std/openai";import { readFile } from "https://esm.town/v/std/utils@85-main/index.ts";// Initialize OpenAI clientconst openai = new OpenAI();// Bee-themed personality prompt } // Call OpenAI with bee persona const completion = await openai.chat.completions.create({ model: "gpt-4o-mini", messages: [ try { // Generate image using DALL-E const response = await openai.images.generate({ model: "dall-e-3", prompt: enhancedPrompt, }); console.log("OpenAI response:", JSON.stringify(response)); const imageUrl = response.data[0]?.url; if (!imageUrl) { throw new Error("No image URL returned from OpenAI"); } }, ); } catch (openaiError) { console.error("OpenAI API Error:", openaiError); // Check if it's a content policy violation if (openaiError.message && openaiError.message.includes("content policy")) { return new Response( JSON.stringify({ error: "Your image request was rejected due to content policy. Please try a different prompt.", details: openaiError.message, }), { // Check if it's a rate limit error if (openaiError.message && openaiError.message.includes("rate limit")) { return new Response( JSON.stringify({ error: "Rate limit exceeded. Please try again later.", details: openaiError.message, }), { } throw openaiError; // Re-throw for general error handling } } catch (error) {
export default async function server(request: Request): Promise<Response> { if (request.method === "POST") { const { OpenAI } = await import("https://esm.town/v/std/openai"); const openai = new OpenAI(); const { question } = await request.json(); const completion = await openai.chat.completions.create({ messages: [ {
- `index.ts` - Main API entry point with Hono framework (HTTP trigger)
- `database.ts` - SQLite database operations for storing resumes and job requirements
- `parser.ts` - Resume parsing logic using OpenAI's GPT models
- `scorer.ts` - Candidate scoring algorithms and feedback generation
import { OpenAI } from "https://esm.town/v/std/openai";import type { Resume, JobRequirement, ScoringResult, ParsedResumeData } from "../shared/types";import { calculateSimilarity } from "../shared/utils";const openai = new OpenAI();/** `; const completion = await openai.chat.completions.create({ messages: [{ role: "user", content: prompt }], model: "gpt-4o-mini",
import { OpenAI } from "https://esm.town/v/std/openai";import type { ParsedResumeData } from "../shared/types";const openai = new OpenAI();/** * Parses resume text using OpenAI to extract structured information */export async function parseResume(resumeText: string): Promise<ParsedResumeData> { `; const completion = await openai.chat.completions.create({ messages: [{ role: "user", content: prompt }], model: "gpt-4o-mini", const content = completion.choices[0]?.message?.content; if (!content) { throw new Error("Failed to get a response from OpenAI"); } `; const completion = await openai.chat.completions.create({ messages: [{ role: "user", content: prompt }], model: "gpt-4o-mini", const content = completion.choices[0]?.message?.content; if (!content) { throw new Error("Failed to get a response from OpenAI"); }
## Features

- Resume text analysis using OpenAI's GPT models
- Keyword extraction and skills matching
- Candidate scoring and ranking

## Technologies Used

- OpenAI API for natural language processing
- SQLite for data storage
- Hono for backend API
import { SyntaxHighlighter } from "./components/SyntaxHighlighter.ts";import { DebugConsole } from "./components/DebugConsole.ts";import { OpenAIConnector } from "../shared/OpenAIConnector.ts";import { ThemeManager } from "./components/ThemeManager.ts";import { ConfettiManager } from "./components/ConfettiManager.ts"; const syntaxHighlighter = new SyntaxHighlighter(); const debugConsole = new DebugConsole(); const openAIConnector = new OpenAIConnector(); const themeManager = new ThemeManager(); const confettiManager = new ConfettiManager(); // Set up all event handlers setupFormHandling(tokenizer, scriptEditor, syntaxHighlighter, openAIConnector, confettiManager, textFormatter); setupTokenCounter(tokenizer); setupTemplateSelector(templateManager); setupAdvancedOptions(openAIConnector, debugConsole); setupResultActions(scriptEditor, textFormatter); setupHistoryModal(historyManager, scriptEditor); scriptEditor: ScriptEditor, syntaxHighlighter: SyntaxHighlighter, openAIConnector: OpenAIConnector, confettiManager: ConfettiManager, textFormatter: TextFormatter const apiKeyInput = document.getElementById("apiKey") as HTMLInputElement; if (apiKeyInput && apiKeyInput.value && localStorage.getItem("useDirectApi") === "true") { // Process directly with OpenAI API const prompt = createPromptForScriptType( text, ); const response = await openAIConnector.createChatCompletion({ model, messages: [{ role: "user", content: prompt }],// Set up advanced optionsfunction setupAdvancedOptions(openAIConnector: OpenAIConnector, debugConsole: DebugConsole) { const advancedOptionsBtn = document.getElementById("advancedOptionsBtn") as HTMLButtonElement; const advancedOptions = document.getElementById("advancedOptions") as HTMLDivElement; if (!apiKey.startsWith("sk-")) { alert("Invalid API key format. OpenAI API keys start with 'sk-'"); return; } try { // Set the API key in the connector openAIConnector.setApiKey(apiKey); // Store the preference (but not the key itself)
<div class="md:col-span-3"> <div class="flex items-center justify-between"> <label for="apiKey" class="block text-sm font-medium text-gray-700 dark:text-gray-300">OpenAI API Key (Optional)</label> <span class="text-xs text-gray-500 dark:text-gray-400">Direct API connection</span> </div> <footer class="mt-8 text-center text-sm text-gray-500 dark:text-gray-400"> <p>Powered by OpenAI GPT-4 • <a href="#" id="viewSourceLink" target="_top" class="text-indigo-600 dark:text-indigo-400 hover:underline">View Source</a></p> </footer> </div>
// Call AI service with your private API key // Replace with your actual AI service URL const aiResponse = await fetch("https://api.openai.com/v1/chat/completions", { method: "POST", headers: {
# Script Improver Pro

A Val Town application that processes large scripts through OpenAI's GPT-4 model to make them clearer, more concise, and better written.

## Features

- Combines processed outputs seamlessly
- Simple, responsive UI with token counting and progress tracking
- **Direct OpenAI API Connection** - Use your own API key for direct processing
- **Debug Console** - View API requests, responses, and token usage
- **Script Type Detection** - Automatically identifies screenplay, technical, marketing, academic, or creative content

1. The user pastes their script into the text area and provides optional instructions
2. The application splits the text into chunks of approximately 3330 tokens each
3. Each chunk is processed sequentially through OpenAI's GPT-4 model
4. The processed chunks are combined, handling overlaps to avoid duplication
5. The improved script is displayed to the user

- `/index.ts` - Main HTTP endpoint and route handler
- `/backend/processor.ts` - Text processing logic and OpenAI integration
- `/backend/openaiProxy.ts` - Server-side proxy for OpenAI API calls
- `/backend/scriptTypeDetector.ts` - Automatic script type detection
- `/shared/tokenizer.ts` - Advanced token counting and text chunking
- `/shared/OpenAIConnector.ts` - Direct OpenAI API connection handling
- `/frontend/index.html` - Main HTML template
- `/frontend/index.ts` - Frontend JavaScript logic

- **Backend**: Hono.js for HTTP routing
- **Frontend**: Vanilla TypeScript with Tailwind CSS
- **AI**: OpenAI GPT-4 for text processing
- **Styling**: Tailwind CSS for responsive design
- **Syntax Highlighting**: highlight.js for code highlighting

2. Select script type or use auto-detection
3. Choose an instruction template or write custom instructions
4. (Optional) Set your OpenAI API key for direct processing
5. Click "Improve Script" to process
6. View, compare, and download the improved script

### Direct API Connection

You can use your own OpenAI API key for direct processing, bypassing the server proxy.
This can be useful for:

- Processing very large scripts
- Using custom model parameters

## Limitations

- Token counting is approximate and may not exactly match OpenAI's tokenizer
- Very large scripts may take longer to process
- The quality of improvements depends on the clarity of instructions and the quality of the input script
reconsumeralization
import { OpenAI } from "https://esm.town/v/std/openai";
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
/**
* Practical Implementation of Collective Content Intelligence
* Bridging advanced AI with collaborative content creation
*/
exp
kwhinnery_openai
lost1991
import { OpenAI } from "https://esm.town/v/std/openai";
export default async function(req: Request): Promise<Response> {
if (req.method === "OPTIONS") {
return new Response(null, {
headers: {
"Access-Control-Allow-Origin": "*",
No docs found