Search

3,380 results found for openai (5377ms)

Code (3,285)

// MEES Assistant API Endpoint with v2 API
// Remember to add your environment variables in Val settings:
// - OPENAI_API_KEY
// - PINECONE_API_KEY
// - ASSISTANT_ID
// - PARSER_ASSISTANT_ID (optional)
import { OpenAI } from "https://deno.land/x/openai@v4.20.1/mod.ts";
import { Pinecone } from "https://esm.sh/@pinecone-database/pinecone@2.0.0";
// Use OpenAI client only for embeddings
const openai = new OpenAI({
apiKey: Deno.env.get("OPENAI_API_KEY"),
});
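// A minimal sketch of the remaining client setup, assuming the env var names listed in the
// header comment above; the exact wiring in this val is not shown in the excerpt.
const pinecone = new Pinecone({ apiKey: Deno.env.get("PINECONE_API_KEY") ?? "" });
const ASSISTANT_ID = Deno.env.get("ASSISTANT_ID");
const PARSER_ASSISTANT_ID = Deno.env.get("PARSER_ASSISTANT_ID"); // optional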
async function parseQueryIntent(query: string) {
try {
const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
// Create a quick thread for parsing
const threadResponse = await fetch("https://api.openai.com/v1/threads", {
method: "POST",
headers: {
"Authorization": `Bearer ${OPENAI_API_KEY}`,
"Content-Type": "application/json",
"OpenAI-Beta": "assistants=v2",
},
body: JSON.stringify({}),
});
const thread = await threadResponse.json();
// Add the query
await fetch(`https://api.openai.com/v1/threads/${thread.id}/messages`, {
method: "POST",
headers: {
"Authorization": `Bearer ${OPENAI_API_KEY}`,
"Content-Type": "application/json",
"OpenAI-Beta": "assistants=v2",
},
body: JSON.stringify({
role: "user",
content: query,
}),
});
// Run the parser assistant
const runResponse = await fetch(`https://api.openai.com/v1/threads/${thread.id}/runs`, {
method: "POST",
headers: {
"Authorization": `Bearer ${OPENAI_API_KEY}`,
"Content-Type": "application/json",
"OpenAI-Beta": "assistants=v2",
},
body: JSON.stringify({
assistant_id: Deno.env.get("PARSER_ASSISTANT_ID"),
}),
});
const run = await runResponse.json();
// Check the run status
const statusResponse = await fetch(
`https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}`,
{
headers: {
"Authorization": `Bearer ${OPENAI_API_KEY}`,
"OpenAI-Beta": "assistants=v2",
},
},
);
// Get the response
const messagesResponse = await fetch(
`https://api.openai.com/v1/threads/${thread.id}/messages?order=desc&limit=1`,
{
headers: {
"Authorization": `Bearer ${OPENAI_API_KEY}`,
"OpenAI-Beta": "assistants=v2",
},
},
);
// Get embedding for query
const embeddingResponse = await openai.embeddings.create({
model: "text-embedding-3-small",
input: query,
});
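// Hedged sketch of the elided Pinecone lookup between the embedding and searchResults
// (the index name "mees" and the pinecone client above are assumptions, not from this excerpt):
const index = pinecone.index("mees");
const queryResult = await index.query({
vector: embeddingResponse.data[0].embedding,
topK: 10,
includeMetadata: true,
});
const searchResults = queryResult.matches ?? [];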
// Step 4: Format data for OpenAI
debugInfo += `\n\n📦 STEP 4: DATA FORMATTING FOR OPENAI\n${"-".repeat(40)}\n`;
const formattedResults = searchResults.slice(0, 10).map((match) => {
const meta = match.metadata || {};
const mediaInText = extractMediaFromText(meta.text || "");
// Truncate text for OpenAI
let textContent = meta.text || "";
const maxTextLength = 500;
});
debugInfo += `✅ Formatted ${formattedResults.length} results for OpenAI\n`;
debugInfo += `📄 Sample formatted result:\n${
JSON.stringify(formattedResults[0], null, 2).substring(0, 800)
}`;
const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
if (!OPENAI_API_KEY) {
throw new Error("OPENAI_API_KEY not found in environment variables");
}
}
// IMPORTANT: All OpenAI API calls use these headers with assistants=v2
const baseHeaders = {
"Authorization": `Bearer ${OPENAI_API_KEY}`,
"Content-Type": "application/json",
"OpenAI-Beta": "assistants=v2",
};
console.log("Assistant ID:", ASSISTANT_ID);
console.log("Using OpenAI-Beta: assistants=v2");
// Parse the query first (if parser assistant is configured)
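// Hedged sketch of the elided parser call (the guard and the query variable are
// assumptions based on the parseQueryIntent helper above):
// const parsedIntent = PARSER_ASSISTANT_ID ? await parseQueryIntent(query) : null;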
let thread;
if (threadId) {
const threadResponse = await fetch(`https://api.openai.com/v1/threads/${threadId}`, {
headers: baseHeaders,
});
thread = await threadResponse.json();
} else {
const threadResponse = await fetch("https://api.openai.com/v1/threads", {
method: "POST",
headers: baseHeaders,
});
thread = await threadResponse.json();
}
// Add message to thread with v2 headers
const messageResponse = await fetch(`https://api.openai.com/v1/threads/${thread.id}/messages`, {
method: "POST",
headers: baseHeaders,
// Run the assistant with v2 headers
const runResponse = await fetch(`https://api.openai.com/v1/threads/${thread.id}/runs`, {
method: "POST",
headers: baseHeaders,
const mediaInText = extractMediaFromText(meta.text || "");
// OPTIMIZATION: Only send text excerpts to OpenAI
let textContent = meta.text || "";
const maxTextLength = 500;
const statusResponse = await fetch(
`https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}`,
{ headers: baseHeaders },
);
// Submit tool outputs with v2 headers
const toolOutputResponse = await fetch(
`https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}/submit_tool_outputs`,
{
method: "POST",
// Get the assistant's response with v2 headers
const messagesResponse = await fetch(
`https://api.openai.com/v1/threads/${thread.id}/messages?order=desc&limit=20`,
{ headers: baseHeaders },
);
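The excerpt above fetches the run status once and then submits tool outputs; in a complete Assistants v2 flow the run is usually polled until it reaches a terminal state. A minimal sketch, assuming the `baseHeaders`, `thread`, and `run` objects from the excerpt:

```ts
// Poll the run until it finishes or asks for tool outputs (sketch, not this val's exact code).
let runStatus = await (await fetch(
  `https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}`,
  { headers: baseHeaders },
)).json();
while (runStatus.status === "queued" || runStatus.status === "in_progress") {
  await new Promise((resolve) => setTimeout(resolve, 1000));
  const statusResponse = await fetch(
    `https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}`,
    { headers: baseHeaders },
  );
  runStatus = await statusResponse.json();
}
// runStatus.status is now "requires_action", "completed", "failed", etc.
```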
import { OpenAI } from "https://esm.town/v/std/openai";
import React, { useEffect, useState } from "npm:react";
const openai = new OpenAI();
interface Todo {
`;
const response = await openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: prompt }],
});
import type { MCPTool, ThoughtFork, ThoughtExploration } from "../../../shared/types.ts";
import type { AuthContext } from "../auth.ts";
import { OpenAI } from "https://esm.town/v/std/openai";
import {
createThoughtFork,
} from "../../database/queries.ts";
const openai = new OpenAI();
export const forkThoughtTools: MCPTool[] = [
try {
const completion = await openai.chat.completions.create({
messages: [
{
try {
const completion = await openai.chat.completions.create({
messages: [
{
async function extractInsights(content: string): Promise<string[]> {
try {
const completion = await openai.chat.completions.create({
messages: [
{
try {
const completion = await openai.chat.completions.create({
messages: [
{
<footer className="mt-12 text-center text-gray-500 text-sm">
<p>Built with ❤️ on Val Town · Powered by OpenAI</p>
</footer>
</div>
- ✅ Create, read, update, and delete TODO items
- 🤖 AI-powered task prioritization using OpenAI (see the sketch after this list)
- 📱 Responsive web interface
- 💾 SQLite database storage
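A hedged sketch of how the AI-powered prioritization might call the std/openai client (the `Todo` shape, prompt wording, and helper name are illustrative assumptions, not taken from this val):

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

// Hypothetical helper: ask the model to rank open todos by urgency.
interface Todo {
  id: number;
  task: string;
  done: boolean;
}

async function prioritizeTodos(todos: Todo[]): Promise<string> {
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [{
      role: "user",
      content: `Rank these tasks from most to least urgent:\n${
        todos.filter((t) => !t.done).map((t) => `- ${t.task}`).join("\n")
      }`,
    }],
  });
  return completion.choices[0].message.content ?? "";
}
```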
if (request.method === 'POST' && new URL(request.url).pathname === '/ai-analysis') {
try {
const { OpenAI } = await import("https://esm.town/v/std/openai");
const body: AIAnalysisRequest = await request.json();
}
const openai = new OpenAI();
const prompt = `You are an expert crypto trader and market analyst.
Respond creatively and informatively in Hinglish. If the user has mentioned a specific coin
const completion = await openai.chat.completions.create({
messages: [{ role: "user", content: prompt }],
model: "gpt-4o-mini",
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a new table.
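A minimal sketch of that convention with the std/sqlite client (table and column names are illustrative only):

```ts
import { sqlite } from "https://esm.town/v/std/sqlite";

// After a schema change, create a freshly named table instead of altering the old one.
await sqlite.execute(`CREATE TABLE IF NOT EXISTS todos_2 (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  task TEXT NOT NULL,
  priority INTEGER DEFAULT 0
)`);
```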
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
messages: [
{ role: "user", content: "Say hello in a creative way" },
},
{
"title": "An Introduction to OpenAI fine-tuning",
"slug": "an-introduction-to-openai-fine-tuning",
"link": "/blog/an-introduction-to-openai-fine-tuning",
"description": "How to customize OpenAI to your liking",
"pubDate": "Fri, 25 Aug 2023 00:00:00 GMT",
"author": "Steve Krouse",
"slug": "val-town-newsletter-16",
"link": "/blog/val-town-newsletter-16",
"description": "Our seed round, growing team, Codeium completions, @std/openai, and more",
"pubDate": "Mon, 22 Apr 2024 00:00:00 GMT",
"author": "Steve Krouse",