genkit-cli
CLI for interacting with the Google Genkit AI framework
JavaScript
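// TypeScript-emitted CommonJS interop helpers (__createBinding, __setModuleDefault,
// __importStar) used below for the namespace import of 'colorette'.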
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
var desc = Object.getOwnPropertyDescriptor(m, k);
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
desc = { enumerable: true, get: function() { return m[k]; } };
}
Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
var ownKeys = function(o) {
ownKeys = Object.getOwnPropertyNames || function (o) {
var ar = [];
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
return ar;
};
return ownKeys(o);
};
return function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
__setModuleDefault(result, mod);
return result;
};
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.evalRun = void 0;
const eval_1 = require("@genkit-ai/tools-common/eval");
const utils_1 = require("@genkit-ai/tools-common/utils");
const clc = __importStar(require("colorette"));
const commander_1 = require("commander");
const manager_utils_1 = require("../utils/manager-utils");
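// Registers the `eval:run` command: it resolves evaluator actions, confirms LLM usage,
// runs the evaluation over the dataset, and optionally exports the results.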
exports.evalRun = new commander_1.Command('eval:run')
.description('Evaluate the provided dataset against configured evaluators')
.argument('<dataset>', 'Dataset to evaluate on (currently only supports JSON)')
.option('--output <filename>', 'Name of the output file to write evaluation results to. Defaults to JSON output.')
.option('--output-format <format>', 'The output file format (csv, json)', 'json')
.option('--evaluators <evaluators>', 'Comma-separated list of evaluators to use (defaults to all)')
.option('--batchSize <batchSize>', 'Batch size to use for parallel evals (defaults to 1, no parallelization)', Number.parseInt)
.option('--force', 'Automatically accept all interactive prompts')
.action(async (dataset, options) => {
await (0, manager_utils_1.runWithManager)(await (0, utils_1.findProjectRoot)(), async (manager) => {
if (!dataset) {
throw new Error('No input data passed. Specify input data using the <dataset> argument');
}
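// Resolve evaluator actions: all registered evaluators by default, or only those
// named via --evaluators.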
let evaluatorActions;
if (!options.evaluators) {
evaluatorActions = await (0, eval_1.getAllEvaluatorActions)(manager);
}
else {
const evalActionKeys = options.evaluators
.split(',')
.map((k) => `/evaluator/${k}`);
evaluatorActions = await (0, eval_1.getMatchingEvaluatorActions)(manager, evalActionKeys);
}
if (!evaluatorActions.length) {
throw new Error(options.evaluators
? `No matching evaluators found for '${options.evaluators}'`
: `No evaluators found in your app`);
}
utils_1.logger.info(`Using evaluators: ${evaluatorActions.map((action) => action.name).join(',')}`);
if (!options.force) {
const confirmed = await (0, utils_1.confirmLlmUse)(evaluatorActions);
if (!confirmed) {
throw new Error('User declined using billed evaluators.');
}
}
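// Load the dataset file and run the evaluation, optionally batched via --batchSize.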
const evalDataset = await (0, utils_1.loadEvaluationDatasetFile)(dataset);
const evalRun = await (0, eval_1.runEvaluation)({
manager,
evaluatorActions,
evalDataset,
batchSize: options.batchSize,
});
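// If --output was given, export the results in the requested format (csv or json).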
if (options.output) {
const exportFn = (0, eval_1.getExporterForString)(options.outputFormat);
await exportFn(evalRun, options.output);
}
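// If the Dev UI is running, link directly to the evaluation results page.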
const toolsInfo = manager.getMostRecentDevUI();
if (toolsInfo) {
utils_1.logger.info(clc.green(`\nView the evaluation results at: ${toolsInfo.url}/evaluate/${evalRun.key.evalRunId}`));
}
else {
utils_1.logger.info(`Successfully ran evaluation, with evalRunId: ${evalRun.key.evalRunId}`);
}
});
});
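// Example invocation (a sketch, not part of this file; assumes the CLI binary is
// installed as `genkit` and that `testInputs.json` is a hypothetical dataset file):
//
//   genkit eval:run testInputs.json --output results.json --output-format json --batchSize 4
//
// Omitting --evaluators runs every evaluator registered in the app; pass --force to
// skip the interactive confirmation before billed (LLM-based) evaluators are used.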
//# sourceMappingURL=eval-run.js.map