llmverify
Version:
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
57 lines (56 loc) • 1.53 kB
TypeScript
/**
* Sentinel Test Suite
*
* Runs all sentinel tests and aggregates results.
*
* @module sentinel/suite
* @author Haiec
* @license MIT
*/
import { SentinelTestResult, SentinelConfig } from '../types/runtime';
/**
 * Aggregated sentinel test suite results.
 *
 * Returned by {@link runAllSentinelTests}. Invariants implied by the field
 * set: `passRate` relates `passedCount` to `totalCount` (0-1), and `passed`
 * reflects the overall outcome of the individual `results`.
 */
export interface SentinelSuite {
/** Overall pass/fail status for the whole run (presumably true only when every test passed — confirm in implementation) */
passed: boolean;
/** Number of tests that passed */
passedCount: number;
/** Total number of tests executed (after any skips) */
totalCount: number;
/** Pass rate as a fraction in [0, 1] (passedCount / totalCount) */
passRate: number;
/** Individual per-test results, one entry per sentinel test */
results: SentinelTestResult[];
/** Timestamp of the test run — NOTE(review): presumably Unix epoch milliseconds (Date.now()); confirm against implementation */
timestamp: number;
/** Wall-clock duration of the full run, in milliseconds */
durationMs: number;
/** Human-readable summary message describing the run outcome */
summary: string;
}
/**
 * Runs all sentinel tests and returns aggregated results.
 *
 * Asynchronous: resolves once every (non-skipped) test has completed,
 * with per-test outcomes collected into {@link SentinelSuite.results}.
 *
 * @param config - Sentinel configuration, including the LLM client to test
 * @param options - Optional run configuration (test selection and progress callback)
 * @returns Promise resolving to the aggregated test suite results
 *
 * @example
 * const suite = await runAllSentinelTests({
 * client: myLLMClient,
 * model: 'gpt-4'
 * });
 *
 * console.log(`Passed ${suite.passedCount}/${suite.totalCount} tests`);
 *
 * if (!suite.passed) {
 * suite.results.filter(r => !r.passed).forEach(r => {
 * console.error(`Failed: ${r.test} - ${r.message}`);
 * });
 * }
 */
export declare function runAllSentinelTests(config: SentinelConfig, options?: {
/** Names of tests to skip — presumably matched against SentinelTestResult.test identifiers; confirm in implementation */
skipTests?: string[];
/** Callback invoked as each individual test finishes, before the suite promise resolves */
onTestComplete?: (result: SentinelTestResult) => void;
}): Promise<SentinelSuite>;