llmverify
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
/**
* Static Echo Test
*
* Tests if the LLM can accurately echo back a specific phrase.
* This is a basic sanity check for response accuracy.
*
* WHAT THIS TESTS:
* ✅ Basic response capability
* ✅ Instruction following
* ✅ Text reproduction accuracy
*
* LIMITATIONS:
* - A very basic test; it may pass even when the model is degraded
* - Does not test reasoning or complex behavior
* - May fail due to model safety filters
*
* @module sentinel/staticEchoTest
* @author Haiec
* @license MIT
*/
import { SentinelTestResult, SentinelConfig } from '../types/runtime';
/**
* Tests if the LLM can echo back a specific phrase.
*
* @param config - Sentinel configuration with LLM client
* @returns Test result with pass/fail and details
*
* @example
* const result = await staticEchoTest({
*   client: myLLMClient,
*   model: 'gpt-4'
* });
*
* if (!result.passed) {
*   console.error('Echo test failed:', result.message);
* }
*/
export declare function staticEchoTest(config: SentinelConfig): Promise<SentinelTestResult>;
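The declaration above exposes only the signature; the implementation is not part of this file. As a rough, hedged sketch of how such an echo check could be built, the snippet below assumes a client with a `complete(prompt, model)` method and a minimal result shape. The interface names, the fixed phrase, and the comparison logic are illustrative assumptions, not the package's actual SentinelConfig / SentinelTestResult API.

// Sketch only: EchoTestClient / EchoTestResult are assumed shapes,
// not the package's real SentinelConfig / SentinelTestResult types.
interface EchoTestClient {
  complete(prompt: string, model?: string): Promise<string>;
}

interface EchoTestConfig {
  client: EchoTestClient;
  model?: string;
}

interface EchoTestResult {
  name: string;
  passed: boolean;
  message: string;
}

// Hypothetical fixed phrase the model is asked to reproduce verbatim.
const ECHO_PHRASE = 'sentinel-echo-check-7431';

export async function staticEchoTestSketch(config: EchoTestConfig): Promise<EchoTestResult> {
  const prompt = `Repeat the following phrase exactly, with no other text: "${ECHO_PHRASE}"`;
  try {
    const response = await config.client.complete(prompt, config.model);
    // Tolerate surrounding whitespace and quotes, but require the phrase itself verbatim.
    const normalized = response.trim().replace(/^["']|["']$/g, '');
    const passed = normalized === ECHO_PHRASE;
    return {
      name: 'staticEchoTest',
      passed,
      message: passed
        ? 'Model echoed the phrase verbatim.'
        : `Expected "${ECHO_PHRASE}", received "${normalized}".`,
    };
  } catch (err) {
    // Network errors or safety refusals surfaced as exceptions count as failures.
    return {
      name: 'staticEchoTest',
      passed: false,
      message: `Echo test errored: ${err instanceof Error ? err.message : String(err)}`,
    };
  }
}

In this sketch a thrown error is reported as a failed test rather than a crash, which lines up with the limitation noted above: a safety filter that refuses the prompt still produces a result the caller can inspect via result.message.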