llmverify
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
/**
* Local Adapter
*
* Adapter for local LLM inference (llama.cpp, vLLM, Ollama, etc.)
*
* @module adapters/providers/local
* @author Haiec
* @license MIT
*/
import { LlmClient, AdapterConfig } from '../types';
/**
* Builds a local adapter.
*
* @param config - Adapter configuration
* @returns LLM client for local inference
*
* @example
* // Simple function
* const llm = buildLocalAdapter({
*   provider: 'local',
*   client: async (prompt) => await myLocalModel(prompt)
* });
*
* @example
* // With Ollama
* import ollama from 'ollama';
* const llm = buildLocalAdapter({
*   provider: 'local',
*   client: async (prompt, opts) => {
*     const response = await ollama.generate({
*       model: opts?.model ?? 'llama3',
*       prompt
*     });
*     return response.response;
*   },
*   defaultModel: 'llama3'
* });
*
* @example
* // With llama.cpp server
* const llm = buildLocalAdapter({
*   provider: 'local',
*   client: async (prompt) => {
*     const res = await fetch('http://localhost:8080/completion', {
*       method: 'POST',
*       headers: { 'Content-Type': 'application/json' },
*       body: JSON.stringify({ prompt })
*     });
*     const data = await res.json();
*     return data.content;
*   }
* });
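*
* @example
* // With a vLLM OpenAI-compatible server (illustrative sketch: the endpoint,
* // port, response shape, and model name assume vLLM's /v1/completions API;
* // 'my-local-model' is a placeholder for whatever model the server exposes)
* const llm = buildLocalAdapter({
*   provider: 'local',
*   client: async (prompt, opts) => {
*     const res = await fetch('http://localhost:8000/v1/completions', {
*       method: 'POST',
*       headers: { 'Content-Type': 'application/json' },
*       body: JSON.stringify({ model: opts?.model ?? 'my-local-model', prompt, max_tokens: 256 })
*     });
*     const data = await res.json();
*     return data.choices[0].text;
*   },
*   defaultModel: 'my-local-model'
* });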
*/
export declare function buildLocalAdapter(config: AdapterConfig): LlmClient;
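The LlmClient and AdapterConfig types are imported from ../types and are not included in this file. As a rough sketch only, the config shape implied by the JSDoc examples above is reconstructed below; the names LocalGenerateOpts and LocalAdapterConfig are placeholders, any field beyond provider, client, and defaultModel is unknown, and the real definitions in ../types may differ.

// Hypothetical reconstruction of the adapter config, inferred from the
// examples above; the actual types live in ../types and take precedence.
interface LocalGenerateOpts {
  model?: string; // assumption: only `model` appears in the examples (opts?.model)
}

interface LocalAdapterConfig {
  provider: 'local'; // every example passes 'local'
  // The client callback receives the prompt (plus optional options) and
  // resolves to the generated text as a plain string.
  client: (prompt: string, opts?: LocalGenerateOpts) => Promise<string>;
  defaultModel?: string; // shown in the Ollama example
}

Any of the client functions shown in the examples fits this shape unchanged; the LlmClient returned by buildLocalAdapter is what the rest of the toolkit consumes wherever a model client is expected.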