llmverify
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
/**
* OpenAI Adapter
*
 * Adapter for the OpenAI API (GPT-4, GPT-3.5, etc.)
*
* @module adapters/providers/openai
* @author Haiec
* @license MIT
*/
import { LlmClient, AdapterConfig } from '../types';
/**
* Builds an OpenAI adapter.
*
* @param config - Adapter configuration
* @returns LLM client for OpenAI
*
* @example
* import OpenAI from 'openai';
* const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
* const llm = buildOpenAIAdapter({ provider: 'openai', client: openai });
*/
export declare function buildOpenAIAdapter(config: AdapterConfig): LlmClient;
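
For context, a minimal end-to-end sketch of building the adapter. The official `openai` npm client is assumed, and the import path for `buildOpenAIAdapter` is an assumption inferred from the `@module` tag above; the package's actual public entry point may differ, and the methods exposed by the returned `LlmClient` are not part of this file.

import OpenAI from 'openai';
// Assumed import path, inferred from the @module tag; adjust to the
// package's real entry point if it differs.
import { buildOpenAIAdapter } from 'llmverify';

// Construct the OpenAI SDK client as shown in the @example above.
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Wrap the SDK instance in llmverify's provider-agnostic LlmClient.
const llm = buildOpenAIAdapter({ provider: 'openai', client: openai });

// `llm` can presumably then be handed to the toolkit's verification
// features (hallucination detection, PII redaction, prompt injection
// checks) wherever an LlmClient is accepted.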