@salesforce/agents

Client-side APIs for working with Salesforce agents

import { Connection } from '@salesforce/core';
import { Duration } from '@salesforce/kit';
import { DeployResult, FileProperties } from '@salesforce/source-deploy-retrieve';

export type TestStatus = 'NEW' | 'IN_PROGRESS' | 'COMPLETED' | 'ERROR' | 'TERMINATED';

export type AgentTestStartResponse = {
  runId: string;
  status: TestStatus;
};

export type AgentTestStatusResponse = {
  status: TestStatus;
  startTime: string;
  endTime?: string;
  errorMessage?: string;
};

export type TestCaseResult = {
  status: TestStatus;
  startTime: string;
  endTime?: string;
  inputs: {
    utterance: string;
  };
  generatedData: {
    actionsSequence: string[];
    outcome: string;
    topic: string;
  };
  testResults: Array<{
    name: string;
    actualValue: string;
    expectedValue: string;
    score: number;
    result: null | 'PASS' | 'FAILURE';
    metricLabel: 'Accuracy' | 'Precision';
    metricExplainability: string;
    status: TestStatus;
    startTime: string;
    endTime?: string;
    errorCode?: string;
    errorMessage?: string;
  }>;
  testNumber: number;
};

export type AgentTestResultsResponse = {
  status: TestStatus;
  startTime: string;
  endTime?: string;
  errorMessage?: string;
  subjectName: string;
  testCases: TestCaseResult[];
};

export type AvailableDefinition = Omit<FileProperties, 'manageableState' | 'namespacePrefix'>;

export type TestCase = {
  utterance: string;
  expectedActions: string[] | undefined;
  expectedOutcome: string | undefined;
  expectedTopic: string | undefined;
};

export type TestSpec = {
  name: string;
  description?: string;
  subjectType: string;
  subjectName: string;
  subjectVersion?: string;
  testCases: TestCase[];
};

export declare const AgentTestCreateLifecycleStages: {
  CreatingLocalMetadata: string;
  Waiting: string;
  DeployingMetadata: string;
  Done: string;
};

/**
 * AgentTester class to test Agents
 */
export declare class AgentTester {
  private connection;
  private maybeMock;
  constructor(connection: Connection);
  /**
   * List the AiEvaluationDefinitions available in the org.
   */
  list(): Promise<AvailableDefinition[]>;
  /**
   * Initiates an AI evaluation run.
   *
   * @param aiEvalDefName - The name of the AI evaluation definition to run.
   * @returns Promise that resolves with the response from starting the test.
   */
  start(aiEvalDefName: string): Promise<AgentTestStartResponse>;
  /**
   * Get the status of a test run
   *
   * @param {string} jobId
   * @returns {Promise<AgentTestStatusResponse>}
   */
  status(jobId: string): Promise<AgentTestStatusResponse>;
  /**
   * Poll for a test run to complete
   *
   * @param {string} jobId
   * @param {Duration} timeout
   * @returns {Promise<AgentTestResultsResponse>}
   */
  poll(jobId: string, { timeout }?: {
    timeout?: Duration;
  }): Promise<AgentTestResultsResponse>;
  /**
   * Request test run details
   *
   * @param {string} jobId
   * @returns {Promise<AgentTestResultsResponse>}
   */
  results(jobId: string): Promise<AgentTestResultsResponse>;
  /**
   * Cancel an in-progress test run
   *
   * @param {string} jobId
   * @returns {Promise<{success: boolean}>}
   */
  cancel(jobId: string): Promise<{
    success: boolean;
  }>;
  /**
   * Creates and deploys an AiEvaluationDefinition from a specification file.
   *
   * @param apiName - The API name of the AiEvaluationDefinition to create
   * @param specFilePath - The path to the specification file to create the definition from
   * @param options - Configuration options for creating the definition
   * @param options.outputDir - The directory where the AiEvaluationDefinition file will be written
   * @param options.preview - If true, writes the AiEvaluationDefinition file to <api-name>-preview-<timestamp>.xml in the current working directory and does not deploy it
   *
   * @returns Promise containing:
   * - path: The filesystem path to the created AiEvaluationDefinition file
   * - contents: The AiEvaluationDefinition contents as a string
   * - deployResult: The deployment result (if not in preview mode)
   *
   * @throws {SfError} When deployment fails
   */
  create(apiName: string, specFilePath: string, options: {
    outputDir: string;
    preview?: boolean;
  }): Promise<{
    path: string;
    contents: string;
    deployResult?: DeployResult;
  }>;
}

export declare function convertTestResultsToFormat(results: AgentTestResultsResponse, format: 'json' | 'junit' | 'tap'): Promise<string>;

/**
 * Normalizes test results by decoding HTML entities in utterances and test result values.
 *
 * @param results - The agent test results response object to normalize
 * @returns A new AgentTestResultsResponse with decoded HTML entities
 *
 * @example
 * const results = {
 *   testCases: [{
 *     inputs: { utterance: "&quot;hello&quot;" },
 *     testResults: [{
 *       actualValue: "&amp;test",
 *       expectedValue: "&lt;value&gt;"
 *     }]
 *   }]
 * };
 * const normalized = normalizeResults(results);
 */
export declare function normalizeResults(results: AgentTestResultsResponse): AgentTestResultsResponse;

export declare function humanFriendlyName(name: string): string;

/**
 * Generate a test specification file in YAML format.
 * This function takes a test specification object, cleans it by removing undefined and empty string values,
 * converts it to YAML format, and writes it to the specified output file.
 *
 * @param spec - The test specification object to be converted to YAML.
 * @param outputFile - The file path where the YAML output should be written.
 * @throws {Error} - May throw an error if file operations fail.
 * @returns A Promise that resolves when the file has been written.
 */
export declare function writeTestSpec(spec: TestSpec, outputFile: string): Promise<void>;

/**
 * Generates a TestSpec object from an AI Evaluation Definition XML file.
 *
 * @param path - The file path to the AI Evaluation Definition XML file.
 * @returns Promise that resolves to a TestSpec object containing the parsed evaluation definition data.
 * @description Reads and parses an XML file containing AIEvaluationDefinition, converting it into a structured TestSpec format.
 *
 * @throws {Error} If the file cannot be read or parsed.
 */
export declare function generateTestSpecFromAiEvalDefinition(path: string): Promise<TestSpec>;
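
A minimal end-to-end sketch of the run lifecycle these declarations describe (start, poll, convert). This is not from the package docs: the username user@example.com and the definition name My_Eval are placeholders, and the flow assumes that definition is already deployed to the org.

// Sketch: run an existing AiEvaluationDefinition and print JUnit output.
// 'user@example.com' and 'My_Eval' are placeholders, not part of this package.
import { AuthInfo, Connection } from '@salesforce/core';
import { Duration } from '@salesforce/kit';
import { AgentTester, convertTestResultsToFormat, normalizeResults } from '@salesforce/agents';

const connection = await Connection.create({
  authInfo: await AuthInfo.create({ username: 'user@example.com' }),
});
const tester = new AgentTester(connection);

// Kick off the evaluation run, then block until it completes or the timeout elapses.
const { runId } = await tester.start('My_Eval');
const results = await tester.poll(runId, { timeout: Duration.minutes(10) });

// Decode HTML entities in utterances/values, then emit JUnit XML for CI.
const junit = await convertTestResultsToFormat(normalizeResults(results), 'junit');
console.log(junit);

For non-blocking checks, status(runId) and results(runId) can be called directly instead of poll, per the JSDoc above.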
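
A second sketch for the spec-authoring utilities: build a TestSpec in code, write it to YAML with writeTestSpec, then hand the YAML file to AgentTester.create. Every name, path, and field value here is hypothetical (including the subjectType value, which should be checked against what the org expects).

// Sketch: author a test spec and create an AiEvaluationDefinition from it.
// All names and paths below are placeholders.
import { writeTestSpec, type TestSpec } from '@salesforce/agents';

const spec: TestSpec = {
  name: 'My_Eval',
  subjectType: 'AGENT',          // assumed value, verify against your org
  subjectName: 'My_Agent',
  testCases: [
    {
      utterance: 'What is the status of my order?',
      expectedTopic: 'Order_Management',
      expectedActions: ['IdentifyRecordByName', 'GetOrderStatus'],
      expectedOutcome: 'The agent reports the current order status.',
    },
  ],
};

// Undefined/empty fields are stripped before the YAML is written.
await writeTestSpec(spec, 'specs/my-eval.yaml');

// With a Connection in hand (see the previous sketch), deploy the definition:
// const { path, deployResult } = await tester.create('My_Eval', 'specs/my-eval.yaml', {
//   outputDir: 'force-app/main/default/aiEvaluationDefinitions',
// });

generateTestSpecFromAiEvalDefinition inverts this: given a deployed definition's XML file, it reconstructs the TestSpec, which makes the round trip between YAML specs and org metadata possible.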