@llumiverse/core
Provides a universal API to LLMs. Support for additional LLMs can be added by writing a driver.
"use strict";
/**
 * Classes that handle the execution of an interaction in an execution environment.
 * The base abstract class is implemented by each environment
 * (e.g., OpenAI, HuggingFace, etc.).
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.AbstractDriver = void 0;
exports.createLogger = createLogger;
const CompletionStream_js_1 = require("./CompletionStream.js");
const index_js_1 = require("./formatters/index.js");
const common_1 = require("@llumiverse/common");
const validation_js_1 = require("./validation.js");
// Helper to create logger methods that support both message-only and object-first signatures
function createConsoleLoggerMethod(consoleMethod) {
return ((objOrMsg, msgOrNever, ...args) => {
        if (typeof objOrMsg === 'string') {
            // Message-only: logger.info("message", ...args)
            // Skip the second parameter when it is undefined, otherwise console
            // methods would print a spurious trailing "undefined".
            if (msgOrNever === undefined) {
                consoleMethod(objOrMsg, ...args);
            }
            else {
                consoleMethod(objOrMsg, msgOrNever, ...args);
            }
        }
else if (msgOrNever !== undefined) {
// Object-first: logger.info({ obj }, "message", ...args)
consoleMethod(msgOrNever, objOrMsg, ...args);
}
else {
// Object-only: logger.info({ obj })
consoleMethod(objOrMsg, ...args);
}
});
}
const ConsoleLogger = {
debug: createConsoleLoggerMethod(console.debug.bind(console)),
info: createConsoleLoggerMethod(console.info.bind(console)),
warn: createConsoleLoggerMethod(console.warn.bind(console)),
error: createConsoleLoggerMethod(console.error.bind(console)),
};
const noop = () => void 0;
const NoopLogger = {
debug: noop,
info: noop,
warn: noop,
error: noop,
};
function createLogger(logger) {
if (logger === "console") {
return ConsoleLogger;
}
else if (logger) {
return logger;
}
else {
return NoopLogger;
}
}
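// Usage sketch for createLogger (illustrative only, not part of the compiled module):
//   createLogger("console")    -> ConsoleLogger, backed by the global console
//   createLogger(customLogger) -> any object with debug/info/warn/error methods is used as-is
//   createLogger(undefined)    -> NoopLogger, which silently discards all messages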
/**
* To be implemented by each driver
*/
class AbstractDriver {
options;
logger;
constructor(opts) {
this.options = opts;
this.logger = createLogger(opts.logger);
}
async createTrainingPrompt(options) {
const prompt = await this.createPrompt(options.segments, { result_schema: options.schema, model: options.model });
return JSON.stringify({
prompt,
completion: typeof options.completion === 'string' ? options.completion : JSON.stringify(options.completion)
});
}
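    // Output sketch for createTrainingPrompt (hypothetical values):
    //   await driver.createTrainingPrompt({ model: "some-model", segments, schema, completion: { answer: 42 } })
    //   -> '{"prompt":...,"completion":"{\"answer\":42}"}'
    // Non-string completions are stringified, so each returned line can serve as a JSONL training record.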
startTraining(_dataset, _options) {
throw new Error("Method not implemented.");
}
cancelTraining(_jobId) {
throw new Error("Method not implemented.");
}
getTrainingJob(_jobId) {
throw new Error("Method not implemented.");
}
validateResult(result, options) {
if (!result.tool_use && !result.error && options.result_schema) {
try {
result.result = (0, validation_js_1.validateResult)(result.result, options.result_schema);
}
catch (error) {
const errorMessage = `[${this.provider}] [${options.model}] ${error.code ? '[' + error.code + '] ' : ''}Result validation error: ${error.message}`;
this.logger.error({ err: error, data: result.result }, errorMessage);
result.error = {
code: error.code || error.name,
message: error.message,
data: result.result,
};
}
}
}
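    // Behavior sketch for validateResult (hypothetical schema):
    //   options.result_schema = { type: "object", required: ["city"], properties: { city: { type: "string" } } }
    //   A conforming result is parsed and written back to result.result; a non-conforming
    //   one does not throw: result.error is populated with { code, message, data } instead.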
async execute(segments, options) {
const prompt = await this.createPrompt(segments, options);
return this._execute(prompt, options).catch((error) => {
error.prompt = prompt;
throw error;
});
}
async _execute(prompt, options) {
this.logger.debug(`[${this.provider}] Executing prompt on ${options.model}`);
try {
const start = Date.now();
let result;
switch (options.output_modality) {
case common_1.Modalities.text:
result = await this.requestTextCompletion(prompt, options);
this.validateResult(result, options);
break;
case common_1.Modalities.image:
result = await this.requestImageGeneration(prompt, options);
break;
default:
throw new Error(`Unsupported modality: ${options['output_modality'] ?? "No modality specified"}`);
}
const execution_time = Date.now() - start;
return { ...result, prompt, execution_time };
}
catch (error) {
error.prompt = prompt;
throw error;
}
}
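    // Usage sketch for execute (hypothetical model id):
    //   const run = await driver.execute(segments, { model: "some-model", output_modality: "text" });
    //   run.result         -> the completion, validated against result_schema when one is provided
    //   run.prompt         -> the formatted prompt that was sent
    //   run.execution_time -> wall-clock duration in milliseconds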
    // By default, when the driver cannot stream we fall back to a blocking execution
    // that emits the entire response as a single event.
async stream(segments, options) {
const prompt = await this.createPrompt(segments, options);
const canStream = await this.canStream(options);
if (options.output_modality === common_1.Modalities.text && canStream) {
return new CompletionStream_js_1.DefaultCompletionStream(this, prompt, options);
}
else {
return new CompletionStream_js_1.FallbackCompletionStream(this, prompt, options);
}
}
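    // Consumption sketch for stream (assumes the returned CompletionStream is async-iterable):
    //   const stream = await driver.stream(segments, { model: "some-model", output_modality: "text" });
    //   for await (const chunk of stream) {
    //       process.stdout.write(chunk);
    //   }
    //   // With FallbackCompletionStream the whole response arrives as a single chunk.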
    /**
     * Override this method to provide a custom prompt formatter.
     * @param segments the prompt segments to format
     * @param opts the execution options (may include a result_schema)
     * @returns the formatted prompt
     */
async formatPrompt(segments, opts) {
return (0, index_js_1.formatTextPrompt)(segments, opts.result_schema);
}
async createPrompt(segments, opts) {
return await (opts.format ? opts.format(segments, opts.result_schema) : this.formatPrompt(segments, opts));
}
    /**
     * Must be overridden if the implementation cannot stream.
     * Some implementations may be able to stream for certain models but not for others.
     * Override this method and return false if the current model does not support streaming.
     * The default implementation returns true, so streaming is assumed to be supported.
     * If this method returns false, the streaming execution falls back to a blocking
     * execution that streams the entire response as a single event.
     * @param options the execution options containing the target model name.
     * @returns true if the execution can be streamed, false otherwise.
     */
canStream(_options) {
return Promise.resolve(true);
}
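    // Override sketch for canStream (hypothetical model check):
    //   canStream(options) {
    //       return Promise.resolve(!options.model.includes("non-streaming-variant"));
    //   }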
    /**
     * Get the list of models that can be trained.
     * The default implementation returns an empty array.
     * @returns the trainable models, if any
     */
async listTrainableModels() {
return [];
}
    async requestImageGeneration(_prompt, _options) {
        // Cannot be declared abstract: abstract methods would have to be implemented
        // by every derived class, including drivers that only support text.
        throw new Error("Image generation not implemented.");
    }
}
exports.AbstractDriver = AbstractDriver;
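// Minimal driver sketch (illustrative only; "EchoDriver", "echo", and the returned
// fields are hypothetical). A concrete driver extends AbstractDriver and implements
// at least requestTextCompletion:
//
//   class EchoDriver extends AbstractDriver {
//       provider = "echo";
//       async requestTextCompletion(prompt, _options) {
//           // A real driver would call the provider's completion API here.
//           return { result: String(prompt) };
//       }
//   }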
//# sourceMappingURL=Driver.js.map