@promptbook/azure-openai

Promptbook: Run AI apps in plain human language across multiple models and platforms

import type { FormatCommand } from '../../commands/FORMAT/FormatCommand';
import { ExpectError } from '../../errors/ExpectError';
import type { Expectations } from '../../pipeline/PipelineJson/Expectations';
import type { string_postprocessing_function_name } from '../../types/typeAliases';

/**
 * Options for validating a prompt result
 */
export interface ValidatePromptResultOptions {
    /**
     * The result string to validate
     */
    resultString: string;

    /**
     * Expectations for the result (word count, sentence count, etc.)
     */
    expectations?: Expectations;

    /**
     * Expected format of the result (e.g., 'JSON')
     */
    format?: FormatCommand['format'];

    /**
     * List of postprocessing function names that should be applied
     * Note: This is for validation purposes only - postprocessing should be done before calling this function
     */
    postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>;
}

/**
 * Result of prompt result validation
 */
export interface ValidatePromptResultResult {
    /**
     * Whether the result is valid (passes all expectations and format checks)
     */
    isValid: boolean;

    /**
     * The processed result string (may be modified if format extraction was needed)
     */
    processedResultString: string;

    /**
     * Error that occurred during validation, if any
     */
    error?: ExpectError;
}

/**
 * Validates a prompt result against expectations and format requirements.
 * This function provides a common abstraction for result validation that can be used
 * by both execution logic and caching logic to ensure consistency.
 *
 * @param options - The validation options including result string, expectations, and format
 * @returns Validation result with processed string and validity status
 * @private internal function of `createPipelineExecutor` and `cacheLlmTools`
 */
export declare function validatePromptResult(options: ValidatePromptResultOptions): ValidatePromptResultResult;
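
For orientation, a minimal usage sketch of validatePromptResult based only on the declarations above. Note that the function is marked @private (internal to createPipelineExecutor and cacheLlmTools), so it is not intended to be imported by application code; the import path and the exact shape of the Expectations object shown here are assumptions made for illustration, not documented API.

// Usage sketch only: import path and Expectations shape are assumed, not documented.
import { validatePromptResult } from './validatePromptResult'; // hypothetical path to this declaration's implementation

const { isValid, processedResultString, error } = validatePromptResult({
    resultString: '{"answer": "Hello world"}',
    expectations: { words: { min: 1, max: 10 } }, // assumed shape: min/max ranges keyed by unit
    format: 'JSON',
});

if (isValid) {
    // May differ from the input if format extraction (e.g. pulling JSON out of surrounding text) was applied
    console.log(processedResultString);
} else {
    // `error` is an ExpectError describing which expectation or format check failed
    console.error(error?.message);
}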