@latitude-data/sdk
Version:
Latitude SDK for TypeScript
1 line • 286 kB
Source Map (JSON)
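The JSON below is a standard source map (version 3): `sources` lists the original file paths and `sourcesContent` embeds each file's TypeScript source verbatim. As an illustration only, here is a minimal sketch of reading those embedded sources back out of such a map; the on-disk path is a placeholder, and the interface covers only the fields visible in the dump.

```ts
// Illustrative sketch: list the embedded sources of a source map file.
// Field names (`version`, `file`, `sources`, `sourcesContent`) match the
// source map v3 format shown in the JSON below; the file path is a placeholder.
import { readFileSync } from 'node:fs'

interface SourceMapV3 {
  version: number
  file: string
  sources: string[]
  sourcesContent?: string[]
}

const map: SourceMapV3 = JSON.parse(
  readFileSync('dist/index.cjs.map', 'utf8'), // placeholder path
)

// Pair each source path with the length of its embedded content, if present.
for (const [i, source] of map.sources.entries()) {
  const content = map.sourcesContent?.[i]
  console.log(`${source}: ${content ? content.length : 0} chars`)
}
```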
{"version":3,"file":"index.cjs","sources":["../../../../../../constants/src/ai.ts","../../../../../../constants/src/config.ts","../../../../../../constants/src/evaluations/shared.ts","../../../../../../constants/src/evaluations/composite.ts","../../../../../../constants/src/evaluations/human.ts","../../../../../../constants/src/evaluations/llm.ts","../../../../../../constants/src/evaluations/rule.ts","../../../../../../constants/src/evaluations/index.ts","../../../../../../constants/src/events/legacy.ts","../../../../../../constants/src/events/events.ts","../../../../../../constants/src/experiments.ts","../../../../../../constants/src/grants.ts","../../../../../../constants/src/history.ts","../../../../../../constants/src/integrations.ts","../../../../../../constants/src/models.ts","../../../../../../constants/src/runs.ts","../../../../../../constants/src/legacyCompiler.ts","../../../../../../constants/src/tracing/span.ts","../../../../../../constants/src/tracing/trace.ts","../../../../../../constants/src/tracing/index.ts","../../../../../../constants/src/index.ts","../../../../src/env/index.ts","../../../../src/utils/types.ts","../../../../src/utils/index.ts","../../../../../../constants/src/errors/constants.ts","../../../../src/utils/errors.ts","../../../../src/utils/version.ts","../../../../src/utils/request.ts","../../../../src/utils/nodeFetchResponseToReadableStream.ts","../../../../src/utils/handleStream.ts","../../../../src/utils/streamRun.ts","../../../../src/utils/backgroundRun.ts","../../../../src/utils/streamAttach.ts","../../../../src/utils/streamChat.ts","../../../../src/utils/syncAttach.ts","../../../../src/utils/syncChat.ts","../../../../src/utils/syncRun.ts","../../../../src/utils/adapters/openai/getFunctionTools.ts","../../../../../../constants/src/latitudePromptSchema/providers/azure/index.ts","../../../../../../constants/src/latitudePromptSchema/providers/openai/fileSearchToolSchema.ts","../../../../../../constants/src/latitudePromptSchema/providers/openai/webSearchSchema.ts","../../../../../../constants/src/latitudePromptSchema/providers/openai/computerCallSchema.ts","../../../../../../constants/src/latitudePromptSchema/providers/openai/index.ts","../../../../../../constants/src/latitudePromptSchema/zodJsonSchema.ts","../../../../../../constants/src/latitudePromptSchema/toolsSchema.ts","../../../../../../constants/src/latitudePromptSchema/index.ts","../../../../src/utils/adapters/openai/getOpenAIResponsesBuiltinTools.ts","../../../../src/utils/adapters/getProviderTools.ts","../../../../src/utils/adapters/adaptPromptConfigToProvider.ts","../../../../src/utils/adapters/getAdapterFromProvider.ts","../../../../src/index.ts"],"sourcesContent":["import { FinishReason, TextStreamPart, Tool } from 'ai'\nimport { Message, ToolCall } from '@latitude-data/constants/legacyCompiler'\nimport { JSONSchema7 } from 'json-schema'\nimport { z } from 'zod'\nimport {\n LegacyVercelSDKVersion4Usage as LanguageModelUsage,\n type ReplaceTextDelta,\n LegacyResponseMessage,\n} from './ai/vercelSdkV5ToV4'\nimport { ParameterType } from './config'\nimport { LatitudeEventData, LegacyChainEventTypes } from './events'\nimport { AzureConfig, LatitudePromptConfig } from './latitudePromptSchema'\nimport { ProviderLog } from './models'\n\nexport type AgentToolsMap = Record<string, string> // { [toolName]: agentPath }\n\nexport type ToolDefinition = JSONSchema7 & {\n description: string\n parameters: {\n type: 'object'\n properties: Record<string, JSONSchema7>\n required?: string[]\n additionalProperties: 
boolean\n }\n}\n\nexport type VercelProviderTool = {\n type: 'provider-defined'\n id: `${string}.${string}`\n name: string\n args: Record<string, unknown>\n inputSchema: Tool['inputSchema']\n}\n\nexport type VercelTools = Record<string, ToolDefinition | VercelProviderTool>\n\nexport type ToolDefinitionsMap = Record<string, ToolDefinition>\nexport type ToolsItem =\n | ToolDefinitionsMap // - tool_name: <tool_definition>\n | string // - latitude/* (no spaces)\n\n// Config supported by Vercel\nexport type VercelConfig = {\n provider: string\n model: string\n url?: string\n cacheControl?: boolean\n schema?: JSONSchema7\n parameters?: Record<string, { type: ParameterType }>\n tools?: VercelTools\n azure?: AzureConfig\n /**\n * DEPRECATED: Legacy before SDK v5. Use `maxOutputTokens` instead.\n */\n maxTokens?: number\n maxOutputTokens?: number\n\n /**\n * Max steps the run can take.\n */\n maxSteps?: number\n}\n\nexport type PartialPromptConfig = Omit<LatitudePromptConfig, 'provider'>\n\nexport type VercelChunk = TextStreamPart<any> // Original Vercel SDK v5 type\n\nexport type ProviderData = ReplaceTextDelta<VercelChunk>\n\nexport type ChainEventDto = ProviderData | LatitudeEventData\n\nexport type AssertedStreamType = 'text' | Record<string | symbol, unknown>\nexport type ChainCallResponseDto<S extends AssertedStreamType = 'text'> =\n S extends 'text'\n ? ChainStepTextResponse\n : S extends Record<string | symbol, unknown>\n ? ChainStepObjectResponse<S>\n : never\n\nexport type ChainEventDtoResponse =\n | Omit<ChainStepResponse<'object'>, 'providerLog'>\n | Omit<ChainStepResponse<'text'>, 'providerLog'>\n\nexport type StreamType = 'object' | 'text'\n\ntype BaseResponse = {\n text: string\n usage: LanguageModelUsage\n documentLogUuid?: string\n providerLog?: ProviderLog\n output?: LegacyResponseMessage[] // TODO: Make this non-optional when we remove __deprecated\n}\n\nexport type ChainStepTextResponse = BaseResponse & {\n streamType: 'text'\n reasoning?: string | undefined\n toolCalls: ToolCall[] | null\n}\n\nexport type ChainStepObjectResponse<S extends Record<string, unknown> = any> =\n BaseResponse & {\n streamType: 'object'\n object: S\n }\n\nexport type ChainStepResponse<T extends StreamType> = T extends 'text'\n ? ChainStepTextResponse\n : T extends 'object'\n ? 
ChainStepObjectResponse\n : never\n\nexport enum StreamEventTypes {\n Latitude = 'latitude-event',\n Provider = 'provider-event',\n}\n\nexport type LegacyChainEvent =\n | {\n data: LegacyLatitudeEventData\n event: StreamEventTypes.Latitude\n }\n | {\n data: ProviderData\n event: StreamEventTypes.Provider\n }\n\nexport type LegacyLatitudeStepEventData = {\n type: LegacyChainEventTypes.Step\n config: LatitudePromptConfig\n isLastStep: boolean\n messages: Message[]\n documentLogUuid?: string\n}\n\nexport type LegacyLatitudeStepCompleteEventData = {\n type: LegacyChainEventTypes.StepComplete\n response: ChainStepResponse<StreamType>\n documentLogUuid?: string\n}\n\nexport type LegacyLatitudeChainCompleteEventData = {\n type: LegacyChainEventTypes.Complete\n config: LatitudePromptConfig\n messages?: Message[]\n object?: any\n response: ChainStepResponse<StreamType>\n finishReason: FinishReason\n documentLogUuid?: string\n}\n\nexport type LegacyLatitudeChainErrorEventData = {\n type: LegacyChainEventTypes.Error\n error: Error\n}\n\nexport type LegacyLatitudeEventData =\n | LegacyLatitudeStepEventData\n | LegacyLatitudeStepCompleteEventData\n | LegacyLatitudeChainCompleteEventData\n | LegacyLatitudeChainErrorEventData\n\nexport type RunSyncAPIResponse<S extends AssertedStreamType = 'text'> = {\n uuid: string\n conversation: Message[]\n response: ChainCallResponseDto<S>\n}\n\nexport type ChatSyncAPIResponse<S extends AssertedStreamType = 'text'> =\n RunSyncAPIResponse<S>\n\nexport const toolCallResponseSchema = z.object({\n id: z.string(),\n name: z.string(),\n result: z.unknown(),\n isError: z.boolean().optional(),\n text: z.string().optional(),\n})\n\nexport type ToolCallResponse = z.infer<typeof toolCallResponseSchema>\n\nexport const FINISH_REASON_DETAILS = {\n stop: {\n name: 'Stop',\n description:\n 'Generation ended naturally, either the model thought it was done, or it emitted a user-supplied stop-sequence, before hitting any limits.',\n },\n length: {\n name: 'Length',\n description:\n 'The model hit a hard token boundary in the overall context window, so output was truncated.',\n },\n 'content-filter': {\n name: 'Content Filter',\n description:\n \"The provider's safety filters flagged part of the prospective text (hate, sexual, self-harm, violence, etc.), so generation was withheld, returning early.\",\n },\n 'tool-calls': {\n name: 'Tool Calls',\n description:\n 'Instead of generating text, the assistant asked for one or more declared tools to run; your code should handle them before asking the model to continue.',\n },\n error: {\n name: 'Error',\n description:\n 'The generation terminated because the provider encountered an error. This could be due to a variety of reasons, including timeouts, server issues, or problems with the input data.',\n },\n other: {\n name: 'Other',\n description:\n 'The generation ended without a specific reason. This could be due to a variety of reasons, including timeouts, server issues, or problems with the input data.',\n },\n unknown: {\n name: 'Unknown',\n description: `The provider returned a finish-reason not yet standardized. 
Check out the provider's documentation for more information.`,\n },\n} as const satisfies {\n [R in FinishReason]: {\n name: string\n description: string\n }\n}\n\nexport type ToolResultPayload = {\n value: unknown\n isError: boolean\n}\n\nexport * from './ai/vercelSdkV5ToV4'\n\nexport const EMPTY_USAGE = () => ({\n inputTokens: 0,\n outputTokens: 0,\n promptTokens: 0,\n completionTokens: 0,\n totalTokens: 0,\n reasoningTokens: 0,\n cachedInputTokens: 0,\n})\n","export enum ParameterType {\n Text = 'text',\n Image = 'image',\n File = 'file',\n}\n\nexport const AGENT_TOOL_PREFIX = 'lat_agent'\nexport const LATITUDE_TOOL_PREFIX = 'lat_tool'\n\nexport enum LatitudeTool {\n RunCode = 'code',\n WebSearch = 'search',\n WebExtract = 'extract',\n Think = 'think',\n TODO = 'todo',\n}\n\nexport enum LatitudeToolInternalName {\n RunCode = 'lat_tool_run_code',\n WebSearch = 'lat_tool_web_search',\n WebExtract = 'lat_tool_web_extract',\n Think = 'think',\n TODO = 'todo_write',\n}\n\nexport const NOT_SIMULATABLE_LATITUDE_TOOLS = [\n LatitudeTool.Think,\n LatitudeTool.TODO,\n] as LatitudeTool[]\n\nexport const MAX_STEPS_CONFIG_NAME = 'maxSteps'\nexport const DEFAULT_MAX_STEPS = 20\nexport const ABSOLUTE_MAX_STEPS = 150\n\nconst capitalize = (str: string) => str.charAt(0).toUpperCase() + str.slice(1)\n\nexport type DiffValue = {\n newValue?: string\n oldValue?: string\n}\n\nexport const humanizeTool = (tool: string, suffix: boolean = true) => {\n if (tool.startsWith(AGENT_TOOL_PREFIX)) {\n const name = tool.replace(AGENT_TOOL_PREFIX, '').trim().split('_').join(' ')\n return suffix ? `${name} agent` : name\n }\n\n if (tool.startsWith(LATITUDE_TOOL_PREFIX)) {\n const name = tool\n .replace(LATITUDE_TOOL_PREFIX, '')\n .trim()\n .split('_')\n .join(' ')\n return suffix ? `${name} tool` : name\n }\n\n const name = tool.trim().split('_').map(capitalize).join(' ')\n return suffix ? 
`${name} tool` : name\n}\n","import { z } from 'zod'\n\nconst actualOutputConfiguration = z.object({\n messageSelection: z.enum(['last', 'all']), // Which assistant messages to select\n contentFilter: z\n .enum(['text', 'reasoning', 'image', 'file', 'tool_call'])\n .optional(),\n parsingFormat: z.enum(['string', 'json']),\n fieldAccessor: z.string().optional(),\n})\nexport type ActualOutputConfiguration = z.infer<\n typeof actualOutputConfiguration\n>\n\nconst expectedOutputConfiguration = z.object({\n parsingFormat: z.enum(['string', 'json']),\n fieldAccessor: z.string().optional(), // Field accessor to get the output from if it's a key-value format\n})\nexport type ExpectedOutputConfiguration = z.infer<\n typeof expectedOutputConfiguration\n>\n\nexport const ACCESSIBLE_OUTPUT_FORMATS = ['json']\n\nexport const baseEvaluationConfiguration = z.object({\n reverseScale: z.boolean(), // If true, lower is better, otherwise, higher is better\n actualOutput: actualOutputConfiguration,\n expectedOutput: expectedOutputConfiguration.optional(),\n})\nexport const baseEvaluationResultMetadata = z.object({\n // configuration: Configuration snapshot is defined in every metric specification\n actualOutput: z.string(),\n expectedOutput: z.string().optional(),\n datasetLabel: z.string().optional(),\n})\nexport const baseEvaluationResultError = z.object({\n message: z.string(),\n})\n","import { z } from 'zod'\nimport { EvaluationResultSuccessValue, EvaluationType } from './index'\nimport {\n baseEvaluationConfiguration,\n baseEvaluationResultError,\n baseEvaluationResultMetadata,\n} from './shared'\n\nconst compositeEvaluationConfiguration = baseEvaluationConfiguration.extend({\n evaluationUuids: z.array(z.string()),\n minThreshold: z.number().optional(), // Threshold percentage\n maxThreshold: z.number().optional(), // Threshold percentage\n defaultTarget: z.boolean().optional(), // Default for optimizations and distillations\n})\nconst compositeEvaluationResultMetadata = baseEvaluationResultMetadata.extend({\n results: z.record(\n z.string(), // Evaluation uuid\n z.object({\n uuid: z.string(), // Result uuid (for side effects)\n name: z.string(), // Evaluation name\n score: z.number(), // Normalized score\n reason: z.string(),\n passed: z.boolean(),\n }),\n ),\n})\nconst compositeEvaluationResultError = baseEvaluationResultError.extend({\n errors: z\n .record(\n z.string(), // Evaluation uuid\n z.object({\n uuid: z.string(), // Result uuid (for side effects)\n name: z.string(), // Evaluation name\n message: z.string(),\n }),\n )\n .optional(),\n})\n\n// AVERAGE\n\nconst compositeEvaluationAverageConfiguration =\n compositeEvaluationConfiguration.extend({})\nconst compositeEvaluationAverageResultMetadata =\n compositeEvaluationResultMetadata.extend({\n configuration: compositeEvaluationAverageConfiguration,\n })\nconst compositeEvaluationAverageResultError =\n compositeEvaluationResultError.extend({})\nexport const CompositeEvaluationAverageSpecification = {\n name: 'Average',\n description: 'Combines scores evenly. 
The resulting score is the average',\n configuration: compositeEvaluationAverageConfiguration,\n resultMetadata: compositeEvaluationAverageResultMetadata,\n resultError: compositeEvaluationAverageResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Composite,\n CompositeEvaluationMetric.Average\n >,\n ) => {\n let reason = ''\n\n const reasons = Object.entries(result.metadata.results).map(\n ([_, result]) => `${result.name}: ${result.reason}`,\n )\n\n reason = reasons.join('\\n\\n')\n\n return reason\n },\n requiresExpectedOutput: false,\n supportsLiveEvaluation: false,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type CompositeEvaluationAverageConfiguration = z.infer<\n typeof CompositeEvaluationAverageSpecification.configuration\n>\nexport type CompositeEvaluationAverageResultMetadata = z.infer<\n typeof CompositeEvaluationAverageSpecification.resultMetadata\n>\nexport type CompositeEvaluationAverageResultError = z.infer<\n typeof CompositeEvaluationAverageSpecification.resultError\n>\n\n// WEIGHTED\n\nconst compositeEvaluationWeightedConfiguration =\n compositeEvaluationConfiguration.extend({\n weights: z.record(\n z.string(), // Evaluation uuid\n z.number(), // Weight in percentage\n ),\n })\nconst compositeEvaluationWeightedResultMetadata =\n compositeEvaluationResultMetadata.extend({\n configuration: compositeEvaluationWeightedConfiguration,\n })\nconst compositeEvaluationWeightedResultError =\n compositeEvaluationResultError.extend({})\nexport const CompositeEvaluationWeightedSpecification = {\n name: 'Weighted',\n description:\n 'Combines scores using custom weights. The resulting score is the weighted blend',\n configuration: compositeEvaluationWeightedConfiguration,\n resultMetadata: compositeEvaluationWeightedResultMetadata,\n resultError: compositeEvaluationWeightedResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Composite,\n CompositeEvaluationMetric.Weighted\n >,\n ) => {\n let reason = ''\n\n const reasons = Object.entries(result.metadata.results).map(\n ([_, result]) => `${result.name}: ${result.reason}`,\n )\n\n reason = reasons.join('\\n\\n')\n\n return reason\n },\n requiresExpectedOutput: false,\n supportsLiveEvaluation: false,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type CompositeEvaluationWeightedConfiguration = z.infer<\n typeof CompositeEvaluationWeightedSpecification.configuration\n>\nexport type CompositeEvaluationWeightedResultMetadata = z.infer<\n typeof CompositeEvaluationWeightedSpecification.resultMetadata\n>\nexport type CompositeEvaluationWeightedResultError = z.infer<\n typeof CompositeEvaluationWeightedSpecification.resultError\n>\n\n// CUSTOM\n\nconst compositeEvaluationCustomConfiguration =\n compositeEvaluationConfiguration.extend({\n formula: z.string(),\n })\nconst compositeEvaluationCustomResultMetadata =\n compositeEvaluationResultMetadata.extend({\n configuration: compositeEvaluationCustomConfiguration,\n })\nconst compositeEvaluationCustomResultError =\n compositeEvaluationResultError.extend({})\nexport const CompositeEvaluationCustomSpecification = {\n name: 'Custom',\n description:\n 'Combines scores using a custom formula. 
The resulting score is the result of the expression',\n configuration: compositeEvaluationCustomConfiguration,\n resultMetadata: compositeEvaluationCustomResultMetadata,\n resultError: compositeEvaluationCustomResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Composite,\n CompositeEvaluationMetric.Custom\n >,\n ) => {\n let reason = ''\n\n const reasons = Object.entries(result.metadata.results).map(\n ([_, result]) => `${result.name}: ${result.reason}`,\n )\n\n reason = reasons.join('\\n\\n')\n\n return reason\n },\n requiresExpectedOutput: false,\n supportsLiveEvaluation: false,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type CompositeEvaluationCustomConfiguration = z.infer<\n typeof CompositeEvaluationCustomSpecification.configuration\n>\nexport type CompositeEvaluationCustomResultMetadata = z.infer<\n typeof CompositeEvaluationCustomSpecification.resultMetadata\n>\nexport type CompositeEvaluationCustomResultError = z.infer<\n typeof CompositeEvaluationCustomSpecification.resultError\n>\n\n/* ------------------------------------------------------------------------- */\n\nexport enum CompositeEvaluationMetric {\n Average = 'average',\n Weighted = 'weighted',\n Custom = 'custom',\n}\n\n// prettier-ignore\nexport type CompositeEvaluationConfiguration<M extends CompositeEvaluationMetric = CompositeEvaluationMetric> =\n M extends CompositeEvaluationMetric.Average ? CompositeEvaluationAverageConfiguration :\n M extends CompositeEvaluationMetric.Weighted ? CompositeEvaluationWeightedConfiguration :\n M extends CompositeEvaluationMetric.Custom ? CompositeEvaluationCustomConfiguration :\n never;\n\n// prettier-ignore\nexport type CompositeEvaluationResultMetadata<M extends CompositeEvaluationMetric = CompositeEvaluationMetric> = \n M extends CompositeEvaluationMetric.Average ? CompositeEvaluationAverageResultMetadata :\n M extends CompositeEvaluationMetric.Weighted ? CompositeEvaluationWeightedResultMetadata :\n M extends CompositeEvaluationMetric.Custom ? CompositeEvaluationCustomResultMetadata :\n never;\n\n// prettier-ignore\nexport type CompositeEvaluationResultError<M extends CompositeEvaluationMetric = CompositeEvaluationMetric> = \n M extends CompositeEvaluationMetric.Average ? CompositeEvaluationAverageResultError :\n M extends CompositeEvaluationMetric.Weighted ? CompositeEvaluationWeightedResultError :\n M extends CompositeEvaluationMetric.Custom ? 
CompositeEvaluationCustomResultError :\n never;\n\nexport const CompositeEvaluationSpecification = {\n name: 'Composite Score',\n description: 'Evaluate responses combining several evaluations at once',\n configuration: compositeEvaluationConfiguration,\n resultMetadata: compositeEvaluationResultMetadata,\n resultError: compositeEvaluationResultError,\n // prettier-ignore\n metrics: {\n [CompositeEvaluationMetric.Average]: CompositeEvaluationAverageSpecification,\n [CompositeEvaluationMetric.Weighted]: CompositeEvaluationWeightedSpecification,\n [CompositeEvaluationMetric.Custom]: CompositeEvaluationCustomSpecification,\n },\n} as const\n","import { z } from 'zod'\nimport { EvaluationResultSuccessValue, EvaluationType } from './index'\nimport {\n baseEvaluationConfiguration,\n baseEvaluationResultError,\n baseEvaluationResultMetadata,\n} from './shared'\n\nconst humanEvaluationConfiguration = baseEvaluationConfiguration.extend({\n enableControls: z.boolean().optional(), // UI annotation controls\n criteria: z.string().optional(),\n})\nconst humanEvaluationResultMetadata = baseEvaluationResultMetadata.extend({\n reason: z.string().optional(),\n})\nconst humanEvaluationResultError = baseEvaluationResultError.extend({})\n\n// BINARY\n\nconst humanEvaluationBinaryConfiguration = humanEvaluationConfiguration.extend({\n passDescription: z.string().optional(),\n failDescription: z.string().optional(),\n})\nconst humanEvaluationBinaryResultMetadata =\n humanEvaluationResultMetadata.extend({\n configuration: humanEvaluationBinaryConfiguration,\n })\nconst humanEvaluationBinaryResultError = humanEvaluationResultError.extend({})\nexport const HumanEvaluationBinarySpecification = {\n name: 'Binary',\n description:\n 'Judges whether the response meets the criteria. The resulting score is \"passed\" or \"failed\"',\n configuration: humanEvaluationBinaryConfiguration,\n resultMetadata: humanEvaluationBinaryResultMetadata,\n resultError: humanEvaluationBinaryResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Human,\n HumanEvaluationMetric.Binary\n >,\n ) => {\n return result.metadata.reason\n },\n requiresExpectedOutput: false,\n supportsLiveEvaluation: false,\n supportsBatchEvaluation: false,\n supportsManualEvaluation: true,\n} as const\nexport type HumanEvaluationBinaryConfiguration = z.infer<\n typeof HumanEvaluationBinarySpecification.configuration\n>\nexport type HumanEvaluationBinaryResultMetadata = z.infer<\n typeof HumanEvaluationBinarySpecification.resultMetadata\n>\nexport type HumanEvaluationBinaryResultError = z.infer<\n typeof HumanEvaluationBinarySpecification.resultError\n>\n\n// RATING\n\nconst humanEvaluationRatingConfiguration = humanEvaluationConfiguration.extend({\n minRating: z.number(),\n minRatingDescription: z.string().optional(),\n maxRating: z.number(),\n maxRatingDescription: z.string().optional(),\n minThreshold: z.number().optional(), // Threshold in rating range\n maxThreshold: z.number().optional(), // Threshold in rating range\n})\nconst humanEvaluationRatingResultMetadata =\n humanEvaluationResultMetadata.extend({\n configuration: humanEvaluationRatingConfiguration,\n })\nconst humanEvaluationRatingResultError = humanEvaluationResultError.extend({})\nexport const HumanEvaluationRatingSpecification = {\n name: 'Rating',\n description:\n 'Judges the response by rating it under a criteria. 
The resulting score is the rating',\n configuration: humanEvaluationRatingConfiguration,\n resultMetadata: humanEvaluationRatingResultMetadata,\n resultError: humanEvaluationRatingResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Human,\n HumanEvaluationMetric.Rating\n >,\n ) => {\n return result.metadata.reason\n },\n requiresExpectedOutput: false,\n supportsLiveEvaluation: false,\n supportsBatchEvaluation: false,\n supportsManualEvaluation: true,\n} as const\nexport type HumanEvaluationRatingConfiguration = z.infer<\n typeof HumanEvaluationRatingSpecification.configuration\n>\nexport type HumanEvaluationRatingResultMetadata = z.infer<\n typeof HumanEvaluationRatingSpecification.resultMetadata\n>\nexport type HumanEvaluationRatingResultError = z.infer<\n typeof HumanEvaluationRatingSpecification.resultError\n>\n\n/* ------------------------------------------------------------------------- */\n\nexport enum HumanEvaluationMetric {\n Binary = 'binary',\n Rating = 'rating',\n}\n\n// prettier-ignore\nexport type HumanEvaluationConfiguration<M extends HumanEvaluationMetric = HumanEvaluationMetric> =\n M extends HumanEvaluationMetric.Binary ? HumanEvaluationBinaryConfiguration :\n M extends HumanEvaluationMetric.Rating ? HumanEvaluationRatingConfiguration :\n never;\n\n// prettier-ignore\nexport type HumanEvaluationResultMetadata<M extends HumanEvaluationMetric = HumanEvaluationMetric> =\n M extends HumanEvaluationMetric.Binary ? HumanEvaluationBinaryResultMetadata :\n M extends HumanEvaluationMetric.Rating ? HumanEvaluationRatingResultMetadata :\n never;\n\n// prettier-ignore\nexport type HumanEvaluationResultError<M extends HumanEvaluationMetric = HumanEvaluationMetric> =\n M extends HumanEvaluationMetric.Binary ? HumanEvaluationBinaryResultError :\n M extends HumanEvaluationMetric.Rating ? HumanEvaluationRatingResultError :\n never;\n\nexport const HumanEvaluationSpecification = {\n name: 'Human-in-the-Loop',\n description: 'Evaluate responses using a human as a judge',\n configuration: humanEvaluationConfiguration,\n resultMetadata: humanEvaluationResultMetadata,\n resultError: humanEvaluationResultError,\n // prettier-ignore\n metrics: {\n [HumanEvaluationMetric.Binary]: HumanEvaluationBinarySpecification,\n [HumanEvaluationMetric.Rating]: HumanEvaluationRatingSpecification,\n },\n} as const\n","import { z } from 'zod'\nimport { EvaluationResultSuccessValue, EvaluationType } from './index'\nimport {\n baseEvaluationConfiguration,\n baseEvaluationResultError,\n baseEvaluationResultMetadata,\n} from './shared'\n\nconst llmEvaluationConfiguration = baseEvaluationConfiguration.extend({\n provider: z.string(),\n model: z.string(),\n})\nconst llmEvaluationResultMetadata = baseEvaluationResultMetadata.extend({\n evaluationLogId: z.number(),\n reason: z.string(),\n tokens: z.number(),\n cost: z.number(),\n duration: z.number(),\n})\nconst llmEvaluationResultError = baseEvaluationResultError.extend({\n runErrorId: z.number().optional(),\n})\n\n// BINARY\n\nconst llmEvaluationBinaryConfiguration = llmEvaluationConfiguration.extend({\n criteria: z.string(),\n passDescription: z.string(),\n failDescription: z.string(),\n})\nconst llmEvaluationBinaryResultMetadata = llmEvaluationResultMetadata.extend({\n configuration: llmEvaluationBinaryConfiguration,\n})\nconst llmEvaluationBinaryResultError = llmEvaluationResultError.extend({})\nexport const LlmEvaluationBinarySpecification = {\n name: 'Binary',\n description:\n 'Judges whether the response meets the criteria. 
The resulting score is \"passed\" or \"failed\"',\n configuration: llmEvaluationBinaryConfiguration,\n resultMetadata: llmEvaluationBinaryResultMetadata,\n resultError: llmEvaluationBinaryResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Llm,\n LlmEvaluationMetric.Binary\n >,\n ) => {\n return result.metadata.reason\n },\n requiresExpectedOutput: false,\n supportsLiveEvaluation: true,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type LlmEvaluationBinaryConfiguration = z.infer<\n typeof LlmEvaluationBinarySpecification.configuration\n>\nexport type LlmEvaluationBinaryResultMetadata = z.infer<\n typeof LlmEvaluationBinarySpecification.resultMetadata\n>\nexport type LlmEvaluationBinaryResultError = z.infer<\n typeof LlmEvaluationBinarySpecification.resultError\n>\n\n// RATING\n\nconst llmEvaluationRatingConfiguration = llmEvaluationConfiguration.extend({\n criteria: z.string(),\n minRating: z.number(),\n minRatingDescription: z.string(),\n maxRating: z.number(),\n maxRatingDescription: z.string(),\n minThreshold: z.number().optional(), // Threshold in rating range\n maxThreshold: z.number().optional(), // Threshold in rating range\n})\nconst llmEvaluationRatingResultMetadata = llmEvaluationResultMetadata.extend({\n configuration: llmEvaluationRatingConfiguration,\n})\nconst llmEvaluationRatingResultError = llmEvaluationResultError.extend({})\nexport const LlmEvaluationRatingSpecification = {\n name: 'Rating',\n description:\n 'Judges the response by rating it under a criteria. The resulting score is the rating',\n configuration: llmEvaluationRatingConfiguration,\n resultMetadata: llmEvaluationRatingResultMetadata,\n resultError: llmEvaluationRatingResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Llm,\n LlmEvaluationMetric.Rating\n >,\n ) => {\n return result.metadata.reason\n },\n requiresExpectedOutput: false,\n supportsLiveEvaluation: true,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type LlmEvaluationRatingConfiguration = z.infer<\n typeof LlmEvaluationRatingSpecification.configuration\n>\nexport type LlmEvaluationRatingResultMetadata = z.infer<\n typeof LlmEvaluationRatingSpecification.resultMetadata\n>\nexport type LlmEvaluationRatingResultError = z.infer<\n typeof LlmEvaluationRatingSpecification.resultError\n>\n\n// COMPARISON\n\nconst llmEvaluationComparisonConfiguration = llmEvaluationConfiguration.extend({\n criteria: z.string(),\n passDescription: z.string(),\n failDescription: z.string(),\n minThreshold: z.number().optional(), // Threshold percentage\n maxThreshold: z.number().optional(), // Threshold percentage\n})\nconst llmEvaluationComparisonResultMetadata =\n llmEvaluationResultMetadata.extend({\n configuration: llmEvaluationComparisonConfiguration,\n })\nconst llmEvaluationComparisonResultError = llmEvaluationResultError.extend({})\nexport const LlmEvaluationComparisonSpecification = {\n name: 'Comparison',\n description:\n 'Judges the response by comparing the criteria to the expected output. 
The resulting score is the percentage of compared criteria that is met',\n configuration: llmEvaluationComparisonConfiguration,\n resultMetadata: llmEvaluationComparisonResultMetadata,\n resultError: llmEvaluationComparisonResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Llm,\n LlmEvaluationMetric.Comparison\n >,\n ) => {\n return result.metadata.reason\n },\n requiresExpectedOutput: true,\n supportsLiveEvaluation: false,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type LlmEvaluationComparisonConfiguration = z.infer<\n typeof LlmEvaluationComparisonSpecification.configuration\n>\nexport type LlmEvaluationComparisonResultMetadata = z.infer<\n typeof LlmEvaluationComparisonSpecification.resultMetadata\n>\nexport type LlmEvaluationComparisonResultError = z.infer<\n typeof LlmEvaluationComparisonSpecification.resultError\n>\n\n// CUSTOM\n\nconst llmEvaluationCustomConfiguration = llmEvaluationConfiguration.extend({\n prompt: z.string(),\n minScore: z.number(),\n maxScore: z.number(),\n minThreshold: z.number().optional(), // Threshold percentage\n maxThreshold: z.number().optional(), // Threshold percentage\n})\nconst llmEvaluationCustomResultMetadata = llmEvaluationResultMetadata.extend({\n configuration: llmEvaluationCustomConfiguration,\n})\nconst llmEvaluationCustomResultError = llmEvaluationResultError.extend({})\nexport const LlmEvaluationCustomSpecification = {\n name: 'Custom',\n description:\n 'Judges the response under a criteria using a custom prompt. The resulting score is the value of criteria that is met',\n configuration: llmEvaluationCustomConfiguration,\n resultMetadata: llmEvaluationCustomResultMetadata,\n resultError: llmEvaluationCustomResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Llm,\n LlmEvaluationMetric.Custom\n >,\n ) => {\n return result.metadata.reason\n },\n requiresExpectedOutput: false,\n supportsLiveEvaluation: true,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type LlmEvaluationCustomConfiguration = z.infer<\n typeof LlmEvaluationCustomSpecification.configuration\n>\nexport type LlmEvaluationCustomResultMetadata = z.infer<\n typeof LlmEvaluationCustomSpecification.resultMetadata\n>\nexport type LlmEvaluationCustomResultError = z.infer<\n typeof LlmEvaluationCustomSpecification.resultError\n>\n\nexport const LLM_EVALUATION_CUSTOM_PROMPT_DOCUMENTATION = `\n/*\n IMPORTANT: The evaluation MUST return an object with the score and reason fields.\n\n These are the available variables:\n - {{ actualOutput }} (string): The actual output to evaluate\n - {{ expectedOutput }} (string/undefined): The, optional, expected output to compare against\n - {{ conversation }} (string): The full conversation of the evaluated log\n\n - {{ messages }} (array of objects): All the messages of the conversation\n - {{ toolCalls }} (array of objects): All the tool calls of the conversation\n - {{ cost }} (number): The cost, in cents, of the evaluated log\n - {{ tokens }} (number): The tokens of the evaluated log\n - {{ duration }} (number): The duration, in seconds, of the evaluated log\n\n More info on messages and tool calls format in: https://docs.latitude.so/promptl/syntax/messages\n\n - {{ prompt }} (string): The prompt of the evaluated log\n - {{ config }} (object): The configuration of the evaluated log\n - {{ parameters }} (object): The parameters of the evaluated log\n\n More info on configuration and parameters 
format in: https://docs.latitude.so/promptl/syntax/configuration\n*/\n`.trim()\n\n// CUSTOM LABELED\n\nexport const LlmEvaluationCustomLabeledSpecification = {\n ...LlmEvaluationCustomSpecification,\n name: 'Custom (Labeled)',\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Llm,\n LlmEvaluationMetric.CustomLabeled\n >,\n ) => {\n return result.metadata.reason\n },\n requiresExpectedOutput: true,\n supportsLiveEvaluation: false,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\n\n/* ------------------------------------------------------------------------- */\n\nexport enum LlmEvaluationMetric {\n Binary = 'binary',\n Rating = 'rating',\n Comparison = 'comparison',\n Custom = 'custom',\n CustomLabeled = 'custom_labeled',\n}\n\nexport type LlmEvaluationMetricAnyCustom =\n | LlmEvaluationMetric.Custom\n | LlmEvaluationMetric.CustomLabeled\n\n// prettier-ignore\nexport type LlmEvaluationConfiguration<M extends LlmEvaluationMetric = LlmEvaluationMetric> =\n M extends LlmEvaluationMetric.Binary ? LlmEvaluationBinaryConfiguration :\n M extends LlmEvaluationMetric.Rating ? LlmEvaluationRatingConfiguration :\n M extends LlmEvaluationMetric.Comparison ? LlmEvaluationComparisonConfiguration :\n M extends LlmEvaluationMetric.Custom ? LlmEvaluationCustomConfiguration :\n M extends LlmEvaluationMetric.CustomLabeled ? LlmEvaluationCustomConfiguration :\n never;\n\n// prettier-ignore\nexport type LlmEvaluationResultMetadata<M extends LlmEvaluationMetric = LlmEvaluationMetric> =\n M extends LlmEvaluationMetric.Binary ? LlmEvaluationBinaryResultMetadata :\n M extends LlmEvaluationMetric.Rating ? LlmEvaluationRatingResultMetadata :\n M extends LlmEvaluationMetric.Comparison ? LlmEvaluationComparisonResultMetadata :\n M extends LlmEvaluationMetric.Custom ? LlmEvaluationCustomResultMetadata :\n M extends LlmEvaluationMetric.CustomLabeled ? LlmEvaluationCustomResultMetadata :\n never;\n\n// prettier-ignore\nexport type LlmEvaluationResultError<M extends LlmEvaluationMetric = LlmEvaluationMetric> =\n M extends LlmEvaluationMetric.Binary ? LlmEvaluationBinaryResultError :\n M extends LlmEvaluationMetric.Rating ? LlmEvaluationRatingResultError :\n M extends LlmEvaluationMetric.Comparison ? LlmEvaluationComparisonResultError :\n M extends LlmEvaluationMetric.Custom ? LlmEvaluationCustomResultError :\n M extends LlmEvaluationMetric.CustomLabeled ? 
LlmEvaluationCustomResultError :\n never;\n\nexport const LlmEvaluationSpecification = {\n name: 'LLM-as-a-Judge',\n description: 'Evaluate responses using an LLM as a judge',\n configuration: llmEvaluationConfiguration,\n resultMetadata: llmEvaluationResultMetadata,\n resultError: llmEvaluationResultError,\n // prettier-ignore\n metrics: {\n [LlmEvaluationMetric.Binary]: LlmEvaluationBinarySpecification,\n [LlmEvaluationMetric.Rating]: LlmEvaluationRatingSpecification,\n [LlmEvaluationMetric.Comparison]: LlmEvaluationComparisonSpecification,\n [LlmEvaluationMetric.Custom]: LlmEvaluationCustomSpecification,\n [LlmEvaluationMetric.CustomLabeled]: LlmEvaluationCustomLabeledSpecification,\n },\n} as const\n\nexport const LLM_EVALUATION_PROMPT_PARAMETERS = [\n 'actualOutput',\n 'expectedOutput',\n 'conversation',\n 'cost',\n 'tokens',\n 'duration',\n 'config',\n 'toolCalls',\n 'messages',\n 'prompt',\n 'parameters',\n 'context',\n 'response',\n] as const\n\nexport type LlmEvaluationPromptParameter =\n (typeof LLM_EVALUATION_PROMPT_PARAMETERS)[number]\n","import { z } from 'zod'\nimport { EvaluationResultSuccessValue, EvaluationType } from './index'\nimport {\n baseEvaluationConfiguration,\n baseEvaluationResultError,\n baseEvaluationResultMetadata,\n} from './shared'\n\nconst ruleEvaluationConfiguration = baseEvaluationConfiguration.extend({})\nconst ruleEvaluationResultMetadata = baseEvaluationResultMetadata.extend({\n reason: z.string().optional(),\n})\nconst ruleEvaluationResultError = baseEvaluationResultError.extend({})\n\n// EXACT MATCH\n\nconst ruleEvaluationExactMatchConfiguration =\n ruleEvaluationConfiguration.extend({\n caseInsensitive: z.boolean(),\n })\nconst ruleEvaluationExactMatchResultMetadata =\n ruleEvaluationResultMetadata.extend({\n configuration: ruleEvaluationExactMatchConfiguration,\n })\nconst ruleEvaluationExactMatchResultError = ruleEvaluationResultError.extend({})\nexport const RuleEvaluationExactMatchSpecification = {\n name: 'Exact Match',\n description:\n 'Checks if the response is exactly the same as the expected output. 
The resulting score is \"matched\" or \"unmatched\"',\n configuration: ruleEvaluationExactMatchConfiguration,\n resultMetadata: ruleEvaluationExactMatchResultMetadata,\n resultError: ruleEvaluationExactMatchResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Rule,\n RuleEvaluationMetric.ExactMatch\n >,\n ) => {\n let reason = ''\n\n if (result.score === 1) {\n reason = `Response is`\n } else {\n reason = `Response is not`\n }\n\n reason += ` exactly the same as '${result.metadata.expectedOutput}'`\n\n if (result.metadata.configuration.caseInsensitive) {\n reason += ' (comparison is case-insensitive)'\n }\n\n if (result.metadata.reason) {\n reason += `, because: ${result.metadata.reason}`\n }\n\n return reason + '.'\n },\n requiresExpectedOutput: true,\n supportsLiveEvaluation: false,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type RuleEvaluationExactMatchConfiguration = z.infer<\n typeof RuleEvaluationExactMatchSpecification.configuration\n>\nexport type RuleEvaluationExactMatchResultMetadata = z.infer<\n typeof RuleEvaluationExactMatchSpecification.resultMetadata\n>\nexport type RuleEvaluationExactMatchResultError = z.infer<\n typeof RuleEvaluationExactMatchSpecification.resultError\n>\n\n// REGULAR EXPRESSION\n\nconst ruleEvaluationRegularExpressionConfiguration =\n ruleEvaluationConfiguration.extend({\n pattern: z.string(),\n })\nconst ruleEvaluationRegularExpressionResultMetadata =\n ruleEvaluationResultMetadata.extend({\n configuration: ruleEvaluationRegularExpressionConfiguration,\n })\nconst ruleEvaluationRegularExpressionResultError =\n ruleEvaluationResultError.extend({})\nexport const RuleEvaluationRegularExpressionSpecification = {\n name: 'Regular Expression',\n description:\n 'Checks if the response matches the regular expression. 
The resulting score is \"matched\" or \"unmatched\"',\n configuration: ruleEvaluationRegularExpressionConfiguration,\n resultMetadata: ruleEvaluationRegularExpressionResultMetadata,\n resultError: ruleEvaluationRegularExpressionResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Rule,\n RuleEvaluationMetric.RegularExpression\n >,\n ) => {\n let reason = ''\n\n if (result.score === 1) {\n reason = `Response matches`\n } else {\n reason = `Response does not match`\n }\n\n reason += ` the regular expression \\`/${result.metadata.configuration.pattern}/gm\\``\n\n if (result.metadata.reason) {\n reason += `, because: ${result.metadata.reason}`\n }\n\n return reason + '.'\n },\n requiresExpectedOutput: false,\n supportsLiveEvaluation: true,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type RuleEvaluationRegularExpressionConfiguration = z.infer<\n typeof RuleEvaluationRegularExpressionSpecification.configuration\n>\nexport type RuleEvaluationRegularExpressionResultMetadata = z.infer<\n typeof RuleEvaluationRegularExpressionSpecification.resultMetadata\n>\nexport type RuleEvaluationRegularExpressionResultError = z.infer<\n typeof RuleEvaluationRegularExpressionSpecification.resultError\n>\n\n// SCHEMA VALIDATION\n\nconst ruleEvaluationSchemaValidationConfiguration =\n ruleEvaluationConfiguration.extend({\n format: z.enum(['json']),\n schema: z.string(),\n })\nconst ruleEvaluationSchemaValidationResultMetadata =\n ruleEvaluationResultMetadata.extend({\n configuration: ruleEvaluationSchemaValidationConfiguration,\n })\nconst ruleEvaluationSchemaValidationResultError =\n ruleEvaluationResultError.extend({})\nexport const RuleEvaluationSchemaValidationSpecification = {\n name: 'Schema Validation',\n description:\n 'Checks if the response follows the schema. 
The resulting score is \"valid\" or \"invalid\"',\n configuration: ruleEvaluationSchemaValidationConfiguration,\n resultMetadata: ruleEvaluationSchemaValidationResultMetadata,\n resultError: ruleEvaluationSchemaValidationResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Rule,\n RuleEvaluationMetric.SchemaValidation\n >,\n ) => {\n let reason = ''\n\n if (result.score === 1) {\n reason = `Response follows`\n } else {\n reason = `Response does not follow`\n }\n\n reason += ` the ${result.metadata.configuration.format.toUpperCase()} schema:\\n\\`\\`\\`\\n${result.metadata.configuration.schema}\\n\\`\\`\\``\n\n if (result.metadata.reason) {\n reason += `\\nbecause: ${result.metadata.reason}`\n }\n\n return reason + '.'\n },\n requiresExpectedOutput: false,\n supportsLiveEvaluation: true,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type RuleEvaluationSchemaValidationConfiguration = z.infer<\n typeof RuleEvaluationSchemaValidationSpecification.configuration\n>\nexport type RuleEvaluationSchemaValidationResultMetadata = z.infer<\n typeof RuleEvaluationSchemaValidationSpecification.resultMetadata\n>\nexport type RuleEvaluationSchemaValidationResultError = z.infer<\n typeof RuleEvaluationSchemaValidationSpecification.resultError\n>\n\n// LENGTH COUNT\n\nconst ruleEvaluationLengthCountConfiguration =\n ruleEvaluationConfiguration.extend({\n algorithm: z.enum(['character', 'word', 'sentence']),\n minLength: z.number().optional(),\n maxLength: z.number().optional(),\n })\nconst ruleEvaluationLengthCountResultMetadata =\n ruleEvaluationResultMetadata.extend({\n configuration: ruleEvaluationLengthCountConfiguration,\n })\nconst ruleEvaluationLengthCountResultError = ruleEvaluationResultError.extend(\n {},\n)\nexport const RuleEvaluationLengthCountSpecification = {\n name: 'Length Count',\n description:\n 'Checks if the response is of a certain length. The resulting score is the length of the response',\n configuration: ruleEvaluationLengthCountConfiguration,\n resultMetadata: ruleEvaluationLengthCountResultMetadata,\n resultError: ruleEvaluationLengthCountResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Rule,\n RuleEvaluationMetric.LengthCount\n >,\n ) => {\n let reason = `Response length is ${result.score} ${result.metadata.configuration.algorithm}s`\n\n if (result.hasPassed) {\n reason += ', which is'\n } else {\n reason += ', which is not'\n }\n\n reason += ` between ${result.metadata.configuration.minLength ?? 0} and ${result.metadata.configuration.maxLength ?? 
Infinity} ${result.metadata.configuration.algorithm}s`\n\n if (result.metadata.configuration.reverseScale) {\n reason += ' (shorter is better)'\n } else {\n reason += ' (longer is better)'\n }\n\n if (result.metadata.reason) {\n reason += `, because: ${result.metadata.reason}`\n }\n\n return reason + '.'\n },\n requiresExpectedOutput: false,\n supportsLiveEvaluation: true,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type RuleEvaluationLengthCountConfiguration = z.infer<\n typeof RuleEvaluationLengthCountSpecification.configuration\n>\nexport type RuleEvaluationLengthCountResultMetadata = z.infer<\n typeof RuleEvaluationLengthCountSpecification.resultMetadata\n>\nexport type RuleEvaluationLengthCountResultError = z.infer<\n typeof RuleEvaluationLengthCountSpecification.resultError\n>\n\n// LEXICAL OVERLAP\n\nconst ruleEvaluationLexicalOverlapConfiguration =\n ruleEvaluationConfiguration.extend({\n algorithm: z.enum(['substring', 'levenshtein_distance', 'rouge']),\n minOverlap: z.number().optional(), // Percentage of overlap\n maxOverlap: z.number().optional(), // Percentage of overlap\n })\nconst ruleEvaluationLexicalOverlapResultMetadata =\n ruleEvaluationResultMetadata.extend({\n configuration: ruleEvaluationLexicalOverlapConfiguration,\n })\nconst ruleEvaluationLexicalOverlapResultError =\n ruleEvaluationResultError.extend({})\nexport const RuleEvaluationLexicalOverlapSpecification = {\n name: 'Lexical Overlap',\n description:\n 'Checks if the response contains the expected output. The resulting score is the percentage of overlap',\n configuration: ruleEvaluationLexicalOverlapConfiguration,\n resultMetadata: ruleEvaluationLexicalOverlapResultMetadata,\n resultError: ruleEvaluationLexicalOverlapResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Rule,\n RuleEvaluationMetric.LexicalOverlap\n >,\n ) => {\n let reason = `Response lexical overlap with '${result.metadata.expectedOutput}' is ${result.score.toFixed(0)}%`\n\n if (result.hasPassed) {\n reason += ', which is'\n } else {\n reason += ', which is not'\n }\n\n reason += ` between ${(result.metadata.configuration.minOverlap ?? 0).toFixed(0)}% and ${(result.metadata.configuration.maxOverlap ?? 
100).toFixed(0)}%`\n\n if (result.metadata.configuration.reverseScale) {\n reason += ' (lower is better)'\n } else {\n reason += ' (higher is better)'\n }\n\n if (result.metadata.reason) {\n reason += `, because: ${result.metadata.reason}`\n }\n\n return reason + '.'\n },\n requiresExpectedOutput: true,\n supportsLiveEvaluation: false,\n supportsBatchEvaluation: true,\n supportsManualEvaluation: false,\n} as const\nexport type RuleEvaluationLexicalOverlapConfiguration = z.infer<\n typeof RuleEvaluationLexicalOverlapSpecification.configuration\n>\nexport type RuleEvaluationLexicalOverlapResultMetadata = z.infer<\n typeof RuleEvaluationLexicalOverlapSpecification.resultMetadata\n>\nexport type RuleEvaluationLexicalOverlapResultError = z.infer<\n typeof RuleEvaluationLexicalOverlapSpecification.resultError\n>\n\n// SEMANTIC SIMILARITY\n\nconst ruleEvaluationSemanticSimilarityConfiguration =\n ruleEvaluationConfiguration.extend({\n algorithm: z.enum(['cosine_distance']),\n minSimilarity: z.number().optional(), // Percentage of similarity\n maxSimilarity: z.number().optional(), // Percentage of similarity\n })\nconst ruleEvaluationSemanticSimilarityResultMetadata =\n ruleEvaluationResultMetadata.extend({\n configuration: ruleEvaluationSemanticSimilarityConfiguration,\n })\nconst ruleEvaluationSemanticSimilarityResultError =\n ruleEvaluationResultError.extend({})\nexport const RuleEvaluationSemanticSimilaritySpecification = {\n name: 'Semantic Similarity',\n description:\n 'Checks if the response is semantically similar to the expected output. The resulting score is the percentage of similarity',\n configuration: ruleEvaluationSemanticSimilarityConfiguration,\n resultMetadata: ruleEvaluationSemanticSimilarityResultMetadata,\n resultError: ruleEvaluationSemanticSimilarityResultError,\n resultReason: (\n result: EvaluationResultSuccessValue<\n EvaluationType.Rule,\n RuleEvaluationMetric.SemanticSimilarity\n >,\n ) => {\n let reason = `Response sema