@dooor-ai/toolkit
Guards, Evals & Observability for AI applications - works seamlessly with LangChain/LangGraph
role-adherence.d.ts (TypeScript declarations)
import { Eval } from "./base";
import { EvalResult, EvalConfig } from "../core/types";
export interface RoleAdherenceConfig extends EvalConfig {
    /** Expected role/persona for the LLM */
    expectedRole?: string;
    /** System prompt (optional, to extract role from) */
    systemPrompt?: string;
}
/**
 * RoleAdherenceEval - Measures whether the LLM stays in its assigned role/persona
 *
 * Evaluates whether the response is consistent with the expected role
 * (e.g., "helpful assistant", "pirate captain", "technical support", etc.)
 *
 * Example:
 * ```typescript
 * // Note: `eval` is a reserved identifier in strict mode, so use another name.
 * const roleEval = new RoleAdherenceEval({
 *   threshold: 0.8,
 *   expectedRole: "professional financial advisor"
 * });
 * const result = await roleEval.evaluate(
 *   "Should I invest in crypto?",
 *   "Yolo bro! Just buy whatever coin is trending on TikTok!"
 * );
 * // result.score = 0.1 (breaks role), result.passed = false
 * ```
 */
export declare class RoleAdherenceEval extends Eval {
    private expectedRole?;
    private systemPrompt?;
    constructor(config?: RoleAdherenceConfig);
    get name(): string;
    /**
     * Set expected role dynamically
     */
    setExpectedRole(role: string): void;
    /**
     * Set system prompt dynamically
     */
    setSystemPrompt(prompt: string): void;
    evaluate(input: string, output: string, metadata?: Record<string, any>): Promise<EvalResult>;
    private buildPrompt;
    private parseScore;
}
//# sourceMappingURL=role-adherence.d.ts.map
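
A minimal usage sketch based only on the declarations above. The import path from the package root is an assumption (the class may be exported from a subpath), the `main()` wrapper is illustrative, and the `score`/`passed` fields on `EvalResult` are taken from the JSDoc example rather than the type definition; evaluating will also require whatever LLM/provider configuration the toolkit expects.

```typescript
// Sketch only: import path and result-field names are assumptions, per the note above.
import { RoleAdherenceEval } from "@dooor-ai/toolkit";

async function main(): Promise<void> {
  // Configure the eval with a pass/fail threshold and the persona the model must keep.
  const roleEval = new RoleAdherenceEval({
    threshold: 0.8,
    expectedRole: "professional financial advisor",
  });

  // The expected role and system prompt can also be swapped at runtime via the declared setters.
  roleEval.setExpectedRole("technical support agent");
  roleEval.setSystemPrompt("You are a calm, precise technical support agent.");

  // evaluate() resolves to an EvalResult; per the JSDoc it carries a score and a passed flag.
  const result = await roleEval.evaluate(
    "My deployment keeps failing. What should I do?",
    "Bro, just restart everything and hope for the best."
  );

  // An off-role answer should score low and fail the 0.8 threshold.
  console.log(result.score, result.passed);
}

main().catch(console.error);
```

The dynamic setters are useful when the same eval instance is reused across conversations whose system prompts differ, rather than constructing a new RoleAdherenceEval per request.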