Search

3,768 results found for openai (8985ms)

Code (3,665)

// OpenAI + Orama Strategy: Semantic search using OpenAI embeddings and Orama
// Faster than JigsawStack (~100-200ms vs ~550ms for query embeddings)
import { create, insertMultiple, search } from "npm:@orama/orama@latest";
// OpenAI embeddings function
export const generateEmbeddings = async (content: string): Promise<number[] | null> => {
  const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
  const OPENAI_API_URL = "https://api.openai.com/v1/embeddings";
  if (!OPENAI_API_KEY) {
    console.warn("OPENAI_API_KEY not found - embeddings disabled");
    return null;
  }
  try {
    const response = await fetch(OPENAI_API_URL, {
      method: "POST",
      headers: {
        "Authorization": `Bearer ${OPENAI_API_KEY}`,
        "Content-Type": "application/json",
      },
      // The request body was cut off in the snippet; the model name here is an assumed choice
      body: JSON.stringify({ model: "text-embedding-3-small", input: content }),
    });
    if (!response.ok) {
      const errorText = await response.text().catch(() => response.statusText);
      throw new Error(`OpenAI API error: ${response.status} ${response.statusText} - ${errorText}`);
    }
    const data = await response.json();
    return data.data[0].embedding; // embeddings endpoint returns { data: [{ embedding: [...] }] }
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    console.error("OpenAI embeddings failed:", errorMessage);
    return null;
  }
};
export const searchStrategy: SearchStrategy = {
name: "openai-orama",
description: "Semantic search using OpenAI embeddings with Orama vector search (faster than JigsawStack)",
search: async (query: string, pages: Page[], options: SearchOptions = {}): Promise<SearchResult[]> => {
const limit = options.limit || 10;
snippet: page ? generateSnippet(page.content, queryWords, query.toLowerCase()) : hit.doc
metadata: {
strategy: "openai-orama",
similarity: hit.score,
...(enableTiming && { timings }),
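
The Orama wiring itself doesn't surface in these hits; as a minimal sketch (the schema, field names, `docsWithEmbeddings`, and `queryEmbedding` are assumptions), the imported `create`/`insertMultiple`/`search` would combine along these lines:

```ts
// Hypothetical glue for the Orama half of the strategy; only the three
// imported functions come from the snippet, names and schema are assumptions.
const db = await create({
  schema: {
    url: "string",
    content: "string",
    embedding: "vector[1536]", // 1536 dims matches text-embedding-3-small
  },
});
await insertMultiple(db, docsWithEmbeddings); // pages with precomputed embeddings
const hits = await search(db, {
  mode: "vector",
  vector: { value: queryEmbedding, property: "embedding" },
  limit: 10,
});
```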
// OpenAI + Cosine Similarity Strategy: Direct cosine similarity calculation
// Fastest for small datasets (<100 pages) - no DB overhead
import { generateSnippet, cosineSimilarity } from "./utils.ts";
// generateEmbeddings: identical to the Orama strategy's copy above
export const searchStrategy: SearchStrategy = {
name: "openai-cosine",
description: "Semantic search using OpenAI embeddings with direct cosine similarity (fastest for small datasets)",
search: async (query: string, pages: Page[], options: SearchOptions = {}): Promise<SearchResult[]> => {
const limit = options.limit || 10;
snippet: generateSnippet(page.content, queryWords, query.toLowerCase()),
metadata: {
strategy: "openai-cosine",
similarity,
...(enableTiming && { timings }),
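
`cosineSimilarity` comes from `./utils.ts` and isn't shown in the hits; its standard definition, for reference:

```ts
// Textbook cosine similarity: dot(a, b) / (|a||b|), 0 for degenerate vectors.
// Not necessarily the exact ./utils.ts code, just the standard formula.
export function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0, normA = 0, normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  const denom = Math.sqrt(normA) * Math.sqrt(normB);
  return denom === 0 ? 0 : dot / denom;
}
```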
// Groq API Configuration
const GROQ_API_KEY = Deno.env.get("GROQ_API_KEY");
const GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions";
// Rate limiter for Groq API: 2 requests per second to avoid rate limits
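
One way to hold to that budget, as a sketch (the val's actual limiter isn't shown in the hits):

```ts
// Simple serialized limiter: space calls at least 500ms apart (2 req/s).
// Assumes callers await sequentially; a concurrent queue would need more care.
const MIN_INTERVAL_MS = 500;
let lastRequestAt = 0;

async function rateLimited<T>(fn: () => Promise<T>): Promise<T> {
  const wait = lastRequestAt + MIN_INTERVAL_MS - Date.now();
  if (wait > 0) await new Promise((resolve) => setTimeout(resolve, wait));
  lastRequestAt = Date.now();
  return fn();
}
```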
## Token Counting
Token counts are calculated using [tiktoken](https://github.com/openai/tiktoken) with the `gpt-4` encoding, which is shared by:
- GPT-4
- GPT-3.5-turbo
- Many other OpenAI models
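
In code, the counting step could look like this sketch (assuming the js-tiktoken port under Deno; the original only states that tiktoken with the `gpt-4` encoding is used):

```ts
// Token-counting sketch with js-tiktoken; the package and helper are assumptions,
// the gpt-4 encoding (cl100k_base) is what the doc specifies.
import { encodingForModel } from "npm:js-tiktoken";

const enc = encodingForModel("gpt-4");
const countTokens = (text: string): number => enc.encode(text).length;

console.log(countTokens("Say hello in a creative way")); // tokens, not characters
```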
const urls = [
"https://console.groq.com/docs/quickstart",
"https://console.groq.com/docs/openai",
"https://console.groq.com/docs/models",
"https://console.groq.com/docs/rate-limits",
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini", // the snippet's model and closing lines were cut off; this completion is illustrative
  max_tokens: 30,
});
console.log(completion.choices[0].message.content);
```
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
// @ts-ignore
import { blob } from "https://esm.town/v/std/blob?v=11";
*
* Notes:
* - Uses std/openai (Val credentials) and std/blob for persistence
* - UI polls and triggers small "work ticks" to progress jobs
* - Keep prompts deterministic, temperature low, JSON-only outputs
========================= */
const OPENAI_MODEL = "gpt-4o-mini"; // adjust as preferred
const JSON_RESPONSE_FORMAT = { type: "json_object" as const };
const EXTRACTION_CONCURRENCY_PER_TICK = 4; // 24 lenses processed over ~6 ticks
========================= */
const client = new OpenAI();
/** Persist and load helpers */
const resp = await client.chat.completions.create({
model: OPENAI_MODEL,
temperature: 0.1,
response_format: JSON_RESPONSE_FORMAT,
const resp = await client.chat.completions.create({
model: OPENAI_MODEL,
temperature: 0.2,
response_format: JSON_RESPONSE_FORMAT,
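Each of these truncated calls presumably completes along these lines (the `messages` payload and the parsing step are assumptions; the model, temperature, and response format come from the constants above):

```ts
// One call completed as a sketch; only model/temperature/response_format
// are from the snippet, the messages and parsing are assumed.
const resp = await client.chat.completions.create({
  model: OPENAI_MODEL,
  temperature: 0.1,
  response_format: JSON_RESPONSE_FORMAT,
  messages: [
    { role: "system", content: "Return ONLY a JSON object." },
    { role: "user", content: prompt }, // `prompt` is a hypothetical variable
  ],
});
// JSON mode should yield parseable output, but guard anyway
const parsed = JSON.parse(resp.choices[0]?.message?.content ?? "{}");
```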
- **Backend**: Hono + MCP Lite + Drizzle ORM + SQLite + Cloudinary
- **Widget**: React 19 + TanStack Router + OpenAI App SDK
## Quick Start
## Message Scoping
Messages are automatically scoped using the `openai/subject` field that ChatGPT includes in requests.
The scoping happens in tool handlers:
```typescript
const subject = ctx.request.params._meta?.["openai/subject"];
const messages = await getMessages(subject);
```
The exact semantics of `openai/subject` are determined by ChatGPT.
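
Assembled from the fragments in these results, a scoped handler looks roughly like this (the handler shape and `getMessages` are as the snippets imply; everything else is an assumption):

```typescript
// Scoping pattern assembled from the fragments below; ToolContext is a
// stand-in type, the subject check is verbatim from the results.
const listMessagesTool = {
  _meta: widgetMeta("Loading messages...", "Messages loaded"),
  handler: async (_args: unknown, ctx: ToolContext) => {
    const subject = ctx.request.params._meta?.["openai/subject"];
    if (!subject) {
      throw new Error("Missing openai/subject in request metadata");
    }
    const messages = await getMessages(subject); // rows for this conversation only
    return { structuredContent: { messages } };
  },
};
```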
## Where do I go from here?
function widgetMeta(invoking?: string, invoked?: string) {
  return {
    "openai/outputTemplate": WIDGET_URI,
    "openai/toolInvocation/invoking": invoking,
    "openai/toolInvocation/invoked": invoked,
    "openai/widgetAccessible": true,
    "openai/resultCanProduceWidget": true,
  } as const;
}
// (this same _meta block appears on all four widget resources in these results)
_meta: {
  ...widgetMeta(),
  "openai/widgetCSP": {
    connect_domains: [baseUrl, "https://esm.sh", "https://cdn.jsdelivr.net"],
    resource_domains: [baseUrl, "https://esm.sh", "https://cdn.jsdelivr.net"],
  },
},
_meta: widgetMeta("Loading messages...", "Messages loaded"),
handler: async (args, ctx) => {
const subject = ctx.request.params._meta?.["openai/subject"];
if (!subject) {
throw new Error("Missing openai/subject in request metadata");
}
_meta: widgetMeta("Loading message...", "Message loaded"),
handler: async ({ id }, ctx) => {
const subject = ctx.request.params._meta?.["openai/subject"];
if (!subject) {
throw new Error("Missing openai/subject in request metadata");
}
}
_meta: {
...widgetMeta("Loading item...", "Item loaded"),
"openai/outputTemplate": ITEM_CARD_URI,
},
handler: async ({ category }) => {
_meta: {
...widgetMeta(),
"openai/outputTemplate": ITEM_CARD_URI,
},
};
_meta: {
...widgetMeta("Loading items...", "Items loaded"),
"openai/outputTemplate": MULTI_ITEM_CARD_URI,
},
handler: async ({ count }) => {
_meta: {
...widgetMeta(),
"openai/outputTemplate": MULTI_ITEM_CARD_URI,
},
};
_meta: {
...widgetMeta("Creating outfit...", "Outfit ready"),
"openai/outputTemplate": OUTFIT_CARD_URI,
},
handler: async () => {
_meta: {
...widgetMeta(),
"openai/outputTemplate": OUTFIT_CARD_URI,
},
};
outputSchema: CaptureItemsOutput,
_meta: {
"openai/toolInvocation/invoking": "Scanning & uploading…",
"openai/toolInvocation/invoked": "Saved to your closet.",
"openai/widgetAccessible": true,
"openai/resultCanProduceWidget": true,
},
handler: async (args) => {
structuredContent: isMultiple ? { items: added } : { item: added[0] },
_meta: {
"openai/outputTemplate": isMultiple ? MULTI_ITEM_CARD_URI : ITEM_CARD_URI,
"openai/toolInvocation/invoking": "Scanning & uploading…",
"openai/toolInvocation/invoked": "Saved to your closet.",
"openai/widgetAccessible": true,
"openai/resultCanProduceWidget": true,
},
};
outputSchema: SuggestOutfitOutput,
_meta: {
"openai/outputTemplate": OUTFIT_CARD_URI,
"openai/toolInvocation/invoking": "Picking pieces…",
"openai/toolInvocation/invoked": "Outfit ready.",
"openai/widgetAccessible": true,
"openai/resultCanProduceWidget": true,
},
handler: async () => {
structuredContent: { top, bottom },
_meta: {
"openai/outputTemplate": OUTFIT_CARD_URI,
"openai/toolInvocation/invoking": "Picking pieces…",
"openai/toolInvocation/invoked": "Outfit ready.",
"openai/widgetAccessible": true,
"openai/resultCanProduceWidget": true,
},
};
}),
_meta: {
"openai/outputTemplate": MULTI_ITEM_CARD_URI,
"openai/toolInvocation/invoking": "Loading closet…",
"openai/toolInvocation/invoked": "Closet loaded.",
"openai/widgetAccessible": true,
"openai/resultCanProduceWidget": true,
},
handler: async ({ category }) => {
structuredContent: { items },
_meta: {
"openai/outputTemplate": MULTI_ITEM_CARD_URI,
"openai/toolInvocation/invoking": "Loading closet…",
"openai/toolInvocation/invoked": "Closet loaded.",
"openai/widgetAccessible": true,
"openai/resultCanProduceWidget": true,
},
};
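
Taken together, the closet-app hits imply a tool descriptor of roughly this shape (all `openai/*` keys, `SuggestOutfitOutput`, and `OUTFIT_CARD_URI` appear verbatim in the results; the registration structure and `pickOutfit` helper are assumptions):

```ts
// Consolidated sketch of one tool from the fragments above; the openai/* meta
// keys are verbatim, the surrounding object shape is an assumption.
const suggestOutfitTool = {
  outputSchema: SuggestOutfitOutput,
  _meta: {
    "openai/outputTemplate": OUTFIT_CARD_URI,
    "openai/toolInvocation/invoking": "Picking pieces…",
    "openai/toolInvocation/invoked": "Outfit ready.",
    "openai/widgetAccessible": true,
    "openai/resultCanProduceWidget": true,
  },
  handler: async () => {
    const { top, bottom } = await pickOutfit(); // hypothetical helper
    return {
      structuredContent: { top, bottom },
      // per-result _meta mirrors the tool _meta so the widget renders on each call
      _meta: {
        "openai/outputTemplate": OUTFIT_CARD_URI,
        "openai/toolInvocation/invoking": "Picking pieces…",
        "openai/toolInvocation/invoked": "Outfit ready.",
        "openai/widgetAccessible": true,
        "openai/resultCanProduceWidget": true,
      },
    };
  },
};
```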