UNPKG

mcp-evals

Version:

GitHub Action for evaluating MCP server tool calls using LLM-based scoring

9 lines 580 B
// Type declarations for the mcp-evals package entry point.
// mcp-evals: GitHub Action for evaluating MCP server tool calls using LLM-based scoring.
import { type LanguageModel } from "ai";
import { EvalConfig } from './types.js';

/**
 * Runs an evaluation against an MCP server.
 *
 * @param model - Language model used for LLM-based scoring; may be undefined
 *   (presumably falls back to a default model — confirm in the implementation).
 * @param prompt - The prompt to evaluate.
 * @param serverPath - Path to the MCP server under evaluation.
 * @returns A promise resolving to a string (likely the evaluation result/score
 *   text — the exact format is not visible from this declaration).
 */
export declare function runEvals(model: LanguageModel | undefined, prompt: string, serverPath: string): Promise<string>;

/**
 * Grades a prompt, with the same shape as `runEvals` except that
 * `serverPath` is optional here.
 *
 * @param model - Language model used for scoring; may be undefined.
 * @param prompt - The prompt to grade.
 * @param serverPath - Optional path to an MCP server (behavior when omitted
 *   is not visible from this declaration — verify against the implementation).
 * @returns A promise resolving to a string result.
 */
export declare function grade(model: LanguageModel | undefined, prompt: string, serverPath?: string): Promise<string>;

/**
 * Runs every evaluation defined in `config` against the server at `serverPath`.
 *
 * @param config - Evaluation configuration (see `EvalConfig` in ./types.js).
 * @param serverPath - Path to the MCP server under evaluation.
 * @returns A promise resolving to a Map keyed by string (presumably the eval
 *   name) to an untyped result value. NOTE(review): the `any` value type leaks
 *   into the public API; consider `unknown` or a concrete result type in a
 *   future major version.
 */
export declare function runAllEvals(config: EvalConfig, serverPath: string): Promise<Map<string, any>>;

// Re-export the shared types and the metrics helper for consumers.
export * from './types.js';
export { metrics } from './metrics.js';
export type { MetricsConfig } from './metrics.js';
//# sourceMappingURL=index.d.ts.map