llmverify
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
/**
* DeepSeek Adapter
*
* Adapter for DeepSeek API (OpenAI-compatible)
*
* @module adapters/providers/deepseek
* @author Haiec
* @license MIT
*/
import { LlmClient, AdapterConfig } from '../types';
/**
* Builds a DeepSeek adapter.
*
* @param config - Adapter configuration
* @returns LLM client for DeepSeek
*
* @example
* // DeepSeek uses OpenAI SDK with custom base URL
* import OpenAI from 'openai';
* const deepseek = new OpenAI({
*   apiKey: process.env.DEEPSEEK_API_KEY,
*   baseURL: 'https://api.deepseek.com/v1'
* });
* const llm = buildDeepSeekAdapter({ provider: 'deepseek', client: deepseek });
*/
export declare function buildDeepSeekAdapter(config: AdapterConfig): LlmClient;
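
Because DeepSeek's API is OpenAI-compatible, the adapter simply wraps a stock OpenAI SDK client pointed at DeepSeek's base URL, as the @example above shows. A minimal end-to-end sketch follows; the import path is an assumption based on the @module tag, and the package's actual export layout may differ.

// Sketch only: the 'llmverify/adapters/providers/deepseek' path is assumed
// from the @module tag; adjust to the package's actual exports.
import OpenAI from 'openai';
import { buildDeepSeekAdapter } from 'llmverify/adapters/providers/deepseek';

// DeepSeek is OpenAI-compatible, so the stock SDK works with a custom baseURL.
const deepseek = new OpenAI({
  apiKey: process.env.DEEPSEEK_API_KEY,
  baseURL: 'https://api.deepseek.com/v1',
});

// Wrap the SDK client in llmverify's provider-agnostic LlmClient.
const llm = buildDeepSeekAdapter({ provider: 'deepseek', client: deepseek });

export { llm };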