llmverify — AI Output Verification Toolkit: local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
(File: TypeScript, 39 lines / 38 loc, 1 kB)
/**
* Short Reasoning Test
*
* Tests if the LLM can perform basic logical reasoning.
* Uses simple, verifiable reasoning tasks.
*
* WHAT THIS TESTS:
* ✅ Basic logical reasoning
* ✅ Step-by-step thinking
* ✅ Correct conclusion derivation
*
* LIMITATIONS:
* - Tests very basic reasoning only
* - May not detect subtle reasoning errors
* - Answer extraction may miss valid formats
*
* @module sentinel/shortReasoningTest
* @author Haiec
* @license MIT
*/
import { SentinelTestResult, SentinelConfig } from '../types/runtime';
/**
 * Runs a basic logical-reasoning probe against the configured LLM.
 *
 * Issues a simple, verifiable reasoning task through the LLM client in
 * `config` and reports whether the model reached the correct conclusion.
 * Per the module header, only very basic reasoning is exercised, subtle
 * reasoning errors may go undetected, and answer extraction may miss
 * otherwise-valid response formats.
 *
 * @param config - Sentinel configuration; per the example below it carries
 *                 the LLM client (`client`) and target model name (`model`)
 *                 used to issue the reasoning prompt.
 * @returns Promise resolving to a {@link SentinelTestResult}; its `passed`
 *          flag indicates whether the reasoning test succeeded.
 *
 * @example
 * const result = await shortReasoningTest({
 *   client: myLLMClient,
 *   model: 'gpt-4'
 * });
 *
 * if (!result.passed) {
 *   console.error('Reasoning test failed');
 * }
 */
export declare function shortReasoningTest(config: SentinelConfig): Promise<SentinelTestResult>;