UNPKG

scai

Version:

> **A local-first AI CLI for understanding, querying, and iterating on large codebases.**
> **100% local • No token costs • No cloud • No prompt injection • Private by design**

55 lines (54 loc) 2.11 kB
// File: lib/generate.ts
import { Config, readConfig } from '../config.js';

/**
 * The generate module uses the local model API to produce output
 * based on the `content` of the input. It returns the response
 * on the `data` property of ModuleIO.
 *
 * Assumes the model is already running. Does NOT attempt to start or restart it.
 *
 * @param {object} input - ModuleIO-style input; reads `input.query` (optional
 *   user question) and `input.content` (string or JSON-serializable object).
 * @returns {Promise<{query: *, data: string}>} the original query plus the
 *   model's trimmed response text on `data`.
 */
export async function generate(input) {
  const model = Config.getModel();
  const { contextLength, maxOutputTokens } = readConfig();

  // Safely build prompt: prefix the user query (if any), then append the
  // content — JSON-stringified when it is a non-string value.
  const queryPart = input.query ? `User query:\n${input.query}\n\n` : '';
  const contentPart =
    input.content && typeof input.content !== 'string'
      ? JSON.stringify(input.content, null, 2)
      : input.content || '';
  const prompt = `${queryPart}${contentPart}`.trim();

  const data = await doGenerate(prompt, model, contextLength, maxOutputTokens);
  return { query: input.query, data };
}

/**
 * POST the prompt to the local Ollama `/api/generate` endpoint (non-streaming)
 * and return the trimmed response text.
 *
 * @param {string} prompt - fully assembled prompt text.
 * @param {string} model - Ollama model name.
 * @param {number} contextLength - context window size (`num_ctx`).
 * @param {number} maxOutputTokens - output token cap (`num_predict`).
 * @returns {Promise<string>} the model's response, or '' when absent.
 * @throws {Error} on timeout, non-OK HTTP status, or network failure.
 */
async function doGenerate(prompt, model, contextLength, maxOutputTokens) {
  const REQUEST_TIMEOUT_MS = 270000;
  let res;
  try {
    res = await fetch('http://localhost:11434/api/generate', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      // Abort the request if Ollama does not answer within the timeout.
      signal: AbortSignal.timeout(REQUEST_TIMEOUT_MS),
      body: JSON.stringify({
        model,
        prompt,
        stream: false,
        options: {
          // context window
          num_ctx: contextLength,
          // limit output to avoid exceeding context.
          // FIX: Ollama's option name is `num_predict`; the previous key
          // `max_output_tokens` is not a recognized option and was silently
          // ignored, so the output cap never took effect.
          num_predict: maxOutputTokens,
        },
      }),
    });
  } catch (err) {
    // AbortSignal.timeout rejects with TimeoutError (AbortError in older
    // runtimes) — translate to an actionable message; rethrow anything else.
    if (err?.name === 'TimeoutError' || err?.name === 'AbortError') {
      throw new Error(`Model request timed out after ${REQUEST_TIMEOUT_MS}ms. Verify Ollama is responsive or retry with a smaller prompt.`);
    }
    throw err;
  }
  if (!res.ok) {
    throw new Error(`Model request failed with status ${res.status}`);
  }
  const data = await res.json();
  // Non-streaming responses carry the text on `response`.
  return data.response?.trim() ?? '';
}