llmverify
Version: (not captured — see package registry for the current release)
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
40 lines (39 loc) • 1.16 kB
TypeScript
/**
* Duplicate Query Test
*
* Tests if the LLM provides consistent responses to identical queries.
* Helps detect non-deterministic behavior or model instability.
*
* WHAT THIS TESTS:
* ✅ Response consistency
* ✅ Deterministic behavior
* ✅ Model stability
*
* LIMITATIONS:
* - Some variation is expected and normal
* - Temperature settings affect consistency
* - Does not test correctness, only consistency
*
* @module sentinel/duplicateQueryTest
* @author Haiec
* @license MIT
*/
// Type-only import: both names are used solely in type positions, so `import type`
// guarantees the import is erased at compile time (safe under isolatedModules /
// verbatimModuleSyntax — no runtime require of a types-only module).
import type { SentinelTestResult, SentinelConfig } from '../types/runtime';
/**
 * Tests if the LLM provides consistent responses to the same query.
 *
 * Repeats one query `iterations` times against the configured client and
 * reports whether the responses agree; useful for surfacing non-deterministic
 * behavior or model instability. Note: some variation is normal and the
 * client's temperature setting directly affects consistency — this checks
 * consistency only, not correctness.
 *
 * @param config - Sentinel configuration with LLM client
 * @param iterations - Number of times to repeat the query (default: 3)
 * @returns Test result with consistency analysis
 *
 * @example
 * const result = await duplicateQueryTest({
 *   client: myLLMClient,
 *   model: 'gpt-4'
 * }, 5);
 *
 * if (!result.passed) {
 *   console.warn('Inconsistent responses detected');
 * }
 */
export declare function duplicateQueryTest(config: SentinelConfig, iterations?: number): Promise<SentinelTestResult>;