// llamaindex: evaluation module type declarations (TypeScript)
import { PromptTemplate, PromptMixin, ModuleRecord } from '@llamaindex/core/prompts';
import { QueryType } from '@llamaindex/core/query-engine';
import { EngineResponse } from '@llamaindex/core/schema';
declare const defaultUserPrompt: PromptTemplate<readonly ["query", "referenceAnswer", "generatedAnswer"], string[], "\n## User Query\n{query}\n\n## Reference Answer\n{referenceAnswer}\n\n## Generated Answer\n{generatedAnswer}\n">;
type UserPrompt = PromptTemplate<[
    "query",
    "referenceAnswer",
    "generatedAnswer"
]>;
declare const defaultCorrectnessSystemPrompt: CorrectnessSystemPrompt;
type CorrectnessSystemPrompt = PromptTemplate<[]>;
declare const defaultFaithfulnessRefinePrompt: PromptTemplate<readonly ["query", "existingAnswer", "context"], string[], "\nWe want to understand if the following information is present\nin the context information: {query}\nWe have provided an existing YES/NO answer: {existingAnswer}\nWe have the opportunity to refine the existing answer\n(only if needed) with some more context below.\n------------\n{context}\n------------\nIf the existing answer was already YES, still answer YES.\nIf the information is present in the new context, answer YES.\nOtherwise answer NO.\n">;
type FaithfulnessRefinePrompt = PromptTemplate<[
    "query",
    "existingAnswer",
    "context"
]>;
declare const defaultFaithfulnessTextQaPrompt: PromptTemplate<readonly ["context", "query"], string[], "\nPlease tell if a given piece of information\nis supported by the context.\nYou need to answer with either YES or NO.\nAnswer YES if any of the context supports the information, even\nif most of the context is unrelated.\nSome examples are provided below.\n\nInformation: Apple pie is generally double-crusted.\nContext: An apple pie is a fruit pie in which the principal filling\ningredient is apples.\nApple pie is often served with whipped cream, ice cream\n('apple pie à la mode'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above\nand below the filling; the upper crust may be solid or\nlatticed (woven of crosswise strips).\nAnswer: YES\nInformation: Apple pies tastes bad.\nContext: An apple pie is a fruit pie in which the principal filling\ningredient is apples.\nApple pie is often served with whipped cream, ice cream\n('apple pie à la mode'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above\nand below the filling; the upper crust may be solid or\nlatticed (woven of crosswise strips).\nAnswer: NO\nInformation: {query}\nContext: {context}\nAnswer:\n">;
type FaithfulnessTextQAPrompt = PromptTemplate<["query", "context"]>;
type RelevancyEvalPrompt = PromptTemplate<["context", "query"]>;
declare const defaultRelevancyEvalPrompt: PromptTemplate<readonly ["context", "query"], string[], "Your task is to evaluate if the response for the query is in line with the context information provided.\nYou have two options to answer. Either YES/ NO.\nAnswer - YES, if the response for the query is in line with context information otherwise NO.\nQuery and Response: {query}\nContext: {context}\nAnswer: ">;
declare const defaultRelevancyRefinePrompt: PromptTemplate<readonly ["query", "existingAnswer", "contextMsg"], string[], "We want to understand if the following query and response is\nin line with the context information: \n{query}\nWe have provided an existing YES/NO answer: \n{existingAnswer}\nWe have the opportunity to refine the existing answer\n(only if needed) with some more context below.\n------------\n{contextMsg}\n------------\nIf the existing answer was already YES, still answer YES.\nIf the information is present in the new context, answer YES.\nOtherwise answer NO.\n">;
type RelevancyRefinePrompt = PromptTemplate<[
    "query",
    "existingAnswer",
    "contextMsg"
]>;
type EvaluationResult = {
    query?: QueryType;
    contexts?: string[];
    response: string | null;
    score: number;
    scoreSecondary?: number;
    scoreSecondaryType?: string;
    meta?: any;
    passing: boolean;
    feedback: string;
};
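// For illustration only: the shape of a typical result. The values below are
// hypothetical, and the score scale depends on the evaluator's prompt.
const exampleResult: EvaluationResult = {
  query: "What is an apple pie?",
  response: "A fruit pie whose principal filling ingredient is apples.",
  score: 4.5,
  passing: true,
  feedback: "The generated answer matches the reference answer.",
};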
type EvaluatorParams = {
    query: QueryType;
    response: string;
    contexts?: string[];
    reference?: string;
    sleepTimeInSeconds?: number;
};
type EvaluatorResponseParams = {
    query: QueryType;
    response: EngineResponse;
};
interface BaseEvaluator {
    evaluate(params: EvaluatorParams): Promise<EvaluationResult>;
    evaluateResponse?(params: EvaluatorResponseParams): Promise<EvaluationResult>;
}
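// A minimal custom evaluator conforming to BaseEvaluator. This is a sketch,
// not part of the library: it passes a response only when the response text
// contains the reference answer verbatim, a trivial LLM-free heuristic.
class ExactContainmentEvaluator implements BaseEvaluator {
  async evaluate({ response, reference }: EvaluatorParams): Promise<EvaluationResult> {
    const passing = reference !== undefined && response.includes(reference);
    return {
      response,
      score: passing ? 1 : 0,
      passing,
      feedback: passing
        ? "Reference answer found verbatim in the response."
        : "Reference answer not found in the response.",
    };
  }
}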
type CorrectnessParams = {
    scoreThreshold?: number;
    parserFunction?: (str: string) => [number, string];
};
/** Correctness Evaluator: scores a generated answer against a reference answer for a given query, reporting pass/fail against a score threshold. */
declare class CorrectnessEvaluator extends PromptMixin implements BaseEvaluator {
    private scoreThreshold;
    private parserFunction;
    private llm;
    private correctnessPrompt;
    constructor(params?: CorrectnessParams);
    protected _getPrompts(): {
        correctnessPrompt: CorrectnessSystemPrompt;
    };
    protected _getPromptModules(): {};
    protected _updatePrompts(prompts: {
        correctnessPrompt: CorrectnessSystemPrompt;
    }): void;
    /**
     * @param query Query to evaluate
     * @param response Response to evaluate
     * @param contexts Array of contexts
     * @param reference Reference response
     */
    evaluate({ query, response, contexts, reference, }: EvaluatorParams): Promise<EvaluationResult>;
    /**
     * @param query Query to evaluate
     * @param response Response to evaluate
     */
    evaluateResponse({ query, response, }: EvaluatorResponseParams): Promise<EvaluationResult>;
}
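// Usage sketch for CorrectnessEvaluator (run inside an async context). It
// assumes the evaluator and Settings are re-exported from the top-level
// `llamaindex` package and that the LLM lives in `@llamaindex/openai`;
// adjust the imports and model to your setup.
import { CorrectnessEvaluator, Settings } from "llamaindex";
import { OpenAI } from "@llamaindex/openai";

Settings.llm = new OpenAI({ model: "gpt-4o-mini" });

const correctness = new CorrectnessEvaluator({ scoreThreshold: 4.0 });
const correctnessResult = await correctness.evaluate({
  query: "What is an apple pie?",
  response: "A fruit pie whose principal filling ingredient is apples.",
  reference:
    "An apple pie is a fruit pie in which the principal filling ingredient is apples.",
});
console.log(correctnessResult.passing, correctnessResult.score, correctnessResult.feedback);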
declare class FaithfulnessEvaluator extends PromptMixin implements BaseEvaluator {
    private raiseError;
    private evalTemplate;
    private refineTemplate;
    constructor(params?: {
        raiseError?: boolean | undefined;
        faithfulnessSystemPrompt?: FaithfulnessTextQAPrompt | undefined;
        faithFulnessRefinePrompt?: FaithfulnessRefinePrompt | undefined;
    });
    protected _getPromptModules(): ModuleRecord;
    protected _getPrompts(): {
        [x: string]: any;
    };
    protected _updatePrompts(promptsDict: {
        faithfulnessSystemPrompt: FaithfulnessTextQAPrompt;
        faithFulnessRefinePrompt: FaithfulnessRefinePrompt;
    }): void;
    /**
     * @param query Query to evaluate
     * @param response Response to evaluate
     * @param contexts Array of contexts
     * @param reference Reference response
     * @param sleepTimeInSeconds Sleep time in seconds
     */
    evaluate({ query, response, contexts, reference, sleepTimeInSeconds, }: EvaluatorParams): Promise<EvaluationResult>;
    /**
     * @param query Query to evaluate
     * @param response Response to evaluate
     */
    evaluateResponse({ query, response, }: EvaluatorResponseParams): Promise<EvaluationResult>;
}
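// Usage sketch for FaithfulnessEvaluator: checks whether the response is
// supported by the retrieved contexts, using the YES/NO text-QA and refine
// prompts declared above. The import is assumed to come from `llamaindex`,
// as in the previous example.
const faithfulness = new FaithfulnessEvaluator({ raiseError: false });
const faithfulnessResult = await faithfulness.evaluate({
  query: "Is apple pie generally double-crusted?",
  response: "Yes, apple pie is generally double-crusted.",
  contexts: [
    "It is generally double-crusted, with pastry both above and below the filling.",
  ],
});
console.log(faithfulnessResult.passing ? "faithful" : "unfaithful");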
type RelevancyParams = {
    raiseError?: boolean | undefined;
    evalTemplate?: RelevancyEvalPrompt | undefined;
    refineTemplate?: RelevancyRefinePrompt | undefined;
};
declare class RelevancyEvaluator extends PromptMixin implements BaseEvaluator {
    private raiseError;
    private evalTemplate;
    private refineTemplate;
    constructor(params?: RelevancyParams);
    protected _getPromptModules(): ModuleRecord;
    _getPrompts(): {
        evalTemplate: RelevancyEvalPrompt;
        refineTemplate: RelevancyRefinePrompt;
    };
    _updatePrompts(prompts: {
        evalTemplate: RelevancyEvalPrompt;
        refineTemplate: RelevancyRefinePrompt;
    }): void;
    evaluate({ query, response, contexts, sleepTimeInSeconds, }: EvaluatorParams): Promise<EvaluationResult>;
    /**
     * @param query Query to evaluate
     * @param response Response to evaluate
     */
    evaluateResponse({ query, response, }: EvaluatorResponseParams): Promise<EvaluationResult>;
}
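// Usage sketch for RelevancyEvaluator via evaluateResponse, which takes the
// EngineResponse produced by a query engine and pulls the contexts from it.
// `index` is assumed to be an existing VectorStoreIndex; imports as above.
const queryEngine = index.asQueryEngine();
const question = "What is usually served with apple pie?";
const engineResponse = await queryEngine.query({ query: question });
const relevancy = new RelevancyEvaluator();
const relevancyResult = await relevancy.evaluateResponse({
  query: question,
  response: engineResponse,
});
console.log(`relevant: ${relevancyResult.passing}`);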
declare const defaultEvaluationParser: (evalResponse: string) => [number, string];
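// A custom parser matching the (str: string) => [number, string] contract of
// CorrectnessParams.parserFunction. A sketch under one assumption: the LLM
// emits the numeric score on the first line and feedback on the remaining
// lines. Whether defaultEvaluationParser expects the same format is not
// shown in these declarations.
const firstLineScoreParser = (evalResponse: string): [number, string] => {
  const [scoreLine = "", ...rest] = evalResponse.trim().split("\n");
  const score = Number.parseFloat(scoreLine);
  return [Number.isNaN(score) ? 0 : score, rest.join("\n").trim()];
};

const strictCorrectness = new CorrectnessEvaluator({
  scoreThreshold: 4.5,
  parserFunction: firstLineScoreParser,
});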
export { CorrectnessEvaluator, FaithfulnessEvaluator, RelevancyEvaluator, defaultCorrectnessSystemPrompt, defaultEvaluationParser, defaultFaithfulnessRefinePrompt, defaultFaithfulnessTextQaPrompt, defaultRelevancyEvalPrompt, defaultRelevancyRefinePrompt, defaultUserPrompt };
export type { CorrectnessSystemPrompt, FaithfulnessRefinePrompt, FaithfulnessTextQAPrompt, RelevancyEvalPrompt, RelevancyRefinePrompt, UserPrompt };