Code (3,168 results)
const REALTIME_BASE_URL = "https://api.openai.com/v1/realtime";
const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
if (!OPENAI_API_KEY) {
  throw new Error("🔴 OpenAI API key not configured");
}

export function makeHeaders(contentType?: string) {
  const obj: Record<string, string> = {
    Authorization: `Bearer ${OPENAI_API_KEY}`,
  };
  if (contentType) obj["Content-Type"] = contentType;
  return obj;
}
sip.post("/", async (c) => {
  // Verify the webhook.
  const OPENAI_SIGNING_SECRET = Deno.env.get("OPENAI_SIGNING_SECRET");
  if (!OPENAI_SIGNING_SECRET) {
    console.error("🔴 webhook secret not configured");
    return c.text("Internal error", 500);
  }
  const webhook = new Webhook(OPENAI_SIGNING_SECRET);
  const bodyStr = await c.req.text();
  let callId: string | undefined;
# hello-realtime

**Hello Realtime** is an OpenAI Realtime app that supports both WebRTC and SIP (telephone) users. You can access the app via WebRTC at [hello-realtime.val.run](https://hello-realtime.val.run), or via SIP by calling […] server-side websocket interface.

If you remix the app, you'll just need to pop in your own `OPENAI_API_KEY` (from [platform.openai.com](https://platform.openai.com)), and if you want SIP, the `OPENAI_SIGNING_SECRET`.

## Architecture

1. **WebRTC Flow**:
   - Browser connects to frontend
   - creates WebRTC offer
   - `/rtc` endpoint handles SDP negotiation with OpenAI (sketched below)
   - observer established to monitor session
2. **SIP Flow**:
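A minimal sketch of what the `/rtc` SDP negotiation step could look like, assuming a Hono route like the other snippets on this page; the OpenAI endpoint path and the `model` query parameter are assumptions, not confirmed by this README:

```ts
// Hypothetical sketch of the /rtc endpoint: forward the browser's SDP offer
// to OpenAI and relay the SDP answer back. The endpoint path and model query
// parameter are assumptions.
import { Hono } from "npm:hono@4.4.12";

const REALTIME_BASE_URL = "https://api.openai.com/v1/realtime";
const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");

const rtc = new Hono();

rtc.post("/", async (c) => {
  const offerSdp = await c.req.text(); // SDP offer posted by the browser
  const resp = await fetch(`${REALTIME_BASE_URL}/calls?model=gpt-realtime`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${OPENAI_API_KEY}`,
      "Content-Type": "application/sdp",
    },
    body: offerSdp,
  });
  if (!resp.ok) return c.text("SDP negotiation failed", 502);
  // The SDP answer completes the WebRTC handshake in the browser.
  return c.body(await resp.text(), 200, { "Content-Type": "application/sdp" });
});

export default rtc.fetch;
```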
const MODEL = "gpt-realtime";
const INSTRUCTIONS = `
Greet the user in English, and thank them for trying the new OpenAI Realtime API.
Give them a brief summary based on the list below, and then ask if they have any questions.
Answer questions using the information below. For questions outside this scope, […]
- higher audio quality
- improved handling of alphanumerics (eg, properly understanding credit card and phone numbers)
- support for the OpenAI Prompts API
- support for MCP-based tools
- auto-truncation to reduce context size
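In the SIP flow, the webhook handler shown earlier would accept the incoming call using this model and these instructions. A hedged sketch, assuming an accept endpoint of the form `/v1/realtime/calls/{call_id}/accept` (the exact path and payload shape are assumptions):

```ts
// Hypothetical sketch: accept an incoming SIP call with the MODEL and
// INSTRUCTIONS constants above. The accept URL and payload shape are
// assumptions, not confirmed by these snippets.
async function acceptCall(callId: string) {
  const resp = await fetch(
    `https://api.openai.com/v1/realtime/calls/${callId}/accept`,
    {
      method: "POST",
      headers: {
        Authorization: `Bearer ${Deno.env.get("OPENAI_API_KEY")}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        type: "realtime",
        model: MODEL,
        instructions: INSTRUCTIONS,
      }),
    },
  );
  if (!resp.ok) throw new Error(`accept failed: ${resp.status}`);
}
```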
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai?v=4";

// --- AI BEHAVIORAL GUIDELINES ---
// …
if (req.method === "POST" && action === "getProblem") {
  try {
    const openai = new OpenAI();
    const body = await req.json();
    const level = typeof body.level === "number" ? body.level : 1;
    // …
    ];
    const completion = await openai.chat.completions.create({
      model: "gpt-4o",
      messages: messages,
      // …
    const content = completion.choices[0].message.content;
    if (!content) throw new Error("OpenAI returned an empty response.");
    const parsedContent = extractJson(content);
import { fetchText } from "https://esm.town/v/stevekrouse/fetchText?v=6";
import { OpenAI } from "https://esm.town/v/std/openai";
import type { EventData } from "./event.ts";
 *
 * - Use r.jina.ai to convert the webpage to clean markdown
 * - Supply the markdown to OpenAI using Val Town's client
 * - Prompt LLM for structured output (see EVENT_SCHEMA below)
 */
// …
}

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  model: "gpt-5-nano",
  response_format: {
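Putting the comment's pipeline together, a sketch of the full flow might look like this; `EVENT_SCHEMA` here is a hypothetical stand-in for the schema the comment references, and the prompt wording is invented:

```ts
import { fetchText } from "https://esm.town/v/stevekrouse/fetchText?v=6";
import { OpenAI } from "https://esm.town/v/std/openai";

// Hypothetical stand-in for the EVENT_SCHEMA the comment refers to.
const EVENT_SCHEMA = {
  name: "event",
  schema: {
    type: "object",
    properties: { title: { type: "string" }, date: { type: "string" } },
    required: ["title", "date"],
    additionalProperties: false,
  },
};

export async function extractEvent(url: string) {
  // r.jina.ai returns a clean markdown rendering of the target page.
  const markdown = await fetchText(`https://r.jina.ai/${url}`);
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-5-nano",
    response_format: { type: "json_schema", json_schema: EVENT_SCHEMA },
    messages: [
      { role: "system", content: "Extract the event described on this page as JSON." },
      { role: "user", content: markdown },
    ],
  });
  return JSON.parse(completion.choices[0].message.content ?? "{}");
}
```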
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
import { Hono } from "npm:hono@4.4.12";
// …
  lanes: number,
): Promise<ProblemSpec[]> {
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [
**[Load Llama-3.2 WebGPU in your browser from a local folder](https://static.simonwillison.net/static/2025/llama-3.2-webgpu/)** ([via](https://news.ycombinator.com/item?id=45168953#45173297)) Inspired by [a comment](https://news.ycombinator.com/item?id=45168953#45169054) on Hacker News, I decided to see if it was possible to modify the [transformers.js-examples/tree/main/llama-3.2-webgpu](https://github.com/huggingface/transformers.js-examples/tree/main/llama-3.2-webgpu) Llama 3.2 chat demo ([online here](https://huggingface.co/spaces/webml-community/llama-3.2-webgpu), I [wrote about it last November](https://simonwillison.net/2024/Sep/30/llama-32-webgpu/)) to add an option to open a local model file directly from a folder on disk, rather than waiting for it to download over the network.

I posed the problem to OpenAI's GPT-5-enabled Codex CLI like this:

```
git clone https://github.com/huggingface/transformers.js-examples
cd transformers.js-examples/llama-3.2-webgpu
```
  .trim();

const res = await fetch("https://text.pollinations.ai/openai", {
  method: "POST",
  headers: {
reconsumeralization
import { OpenAI } from "https://esm.town/v/std/openai";
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
/**
* Practical Implementation of Collective Content Intelligence
* Bridging advanced AI with collaborative content creation
*/
exp
kwhinnery_openai
lost1991
import { OpenAI } from "https://esm.town/v/std/openai";
export default async function(req: Request): Promise<Response> {
if (req.method === "OPTIONS") {
return new Response(null, {
headers: {
"Access-Control-Allow-Origin": "*",