llmverify

AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.

/**
 * Custom Adapter
 *
 * Adapter for any custom LLM provider or API.
 *
 * @module adapters/providers/custom
 * @author Haiec
 * @license MIT
 */

import { LlmClient, AdapterConfig } from '../types';

/**
 * Builds a custom adapter.
 *
 * @param config - Adapter configuration
 * @returns LLM client for custom provider
 *
 * @example
 * // Wrap any API
 * const llm = buildCustomAdapter({
 *   provider: 'custom',
 *   providerName: 'My API',
 *   client: {
 *     async generate(request) {
 *       const res = await fetch('https://my-api.com/generate', {
 *         method: 'POST',
 *         body: JSON.stringify({ prompt: request.prompt })
 *       });
 *       const data = await res.json();
 *       return { text: data.output, tokens: data.tokens };
 *     }
 *   }
 * });
 *
 * @example
 * // Wrap existing SDK
 * const llm = buildCustomAdapter({
 *   provider: 'custom',
 *   providerName: 'Together AI',
 *   client: {
 *     async generate(request) {
 *       const response = await togetherClient.chat.completions.create({
 *         model: request.model ?? 'meta-llama/Llama-3-70b-chat-hf',
 *         messages: [{ role: 'user', content: request.prompt }]
 *       });
 *       return {
 *         text: response.choices[0].message.content,
 *         tokens: response.usage?.completion_tokens ?? 0
 *       };
 *     }
 *   }
 * });
 */
export declare function buildCustomAdapter(config: AdapterConfig): LlmClient;
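
A minimal end-to-end usage sketch (not part of the published file above). It assumes the returned LlmClient exposes the same generate(request) method as the client passed in, and that the module is importable from the llmverify subpath named in the @module tag; both are inferences from this declaration file, not documented API.

import { buildCustomAdapter } from 'llmverify/adapters/providers/custom';

// Stub "provider" that echoes the prompt back; stands in for a real API or SDK.
const llm = buildCustomAdapter({
  provider: 'custom',
  providerName: 'Echo (demo)',
  client: {
    async generate(request: { prompt: string }) {
      return { text: `echo: ${request.prompt}`, tokens: 0 };
    }
  }
});

// Assumption: the LlmClient forwards generate() calls to the wrapped client.
const result = await llm.generate({ prompt: 'Hello, adapter!' });
console.log(result.text);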