woolball-client
Version:
Client-side library for Woolball enabling secure browser resource sharing for distributed AI task processing
84 lines (83 loc) • 3.58 kB
JavaScript
;
// TypeScript-compiler interop helper: wraps a CommonJS module so that
// `require(...)` results can be consumed as if they had a `default` export.
// `(this && this.__importDefault)` reuses a previously defined helper when
// several compiled files share one scope; otherwise it defines it inline.
var __importDefault = (this && this.__importDefault) || function (mod) {
// ES-module namespaces already carry `default`; plain CJS exports are wrapped.
return (mod && mod.__esModule) ? mod : { "default": mod };
};
// Mark this module as transpiled-from-ESM so interop helpers treat it correctly.
Object.defineProperty(exports, "__esModule", { value: true });
// Pre-declare exports (assigned below / hoisted function declarations).
exports.TASK_CONFIGS = void 0;
exports.isTaskAvailableInEnvironment = isTaskAvailableInEnvironment;
exports.getTaskHandler = getTaskHandler;
exports.getTaskExecutionType = getTaskExecutionType;
// Project-local dependencies: direct task processors and the stringified worker source.
const tasks_1 = require("../utils/tasks");
const worker_string_1 = __importDefault(require("./worker-string"));
// AI tasks: automatic-speech-recognition, text-to-speech, translation, text-generation, image-text-to-text
// Canvas tasks: char-to-image, html-to-image
/**
* Centralized task configuration following the rules:
* - Browser: AI tasks via worker, canvas tasks direct
* - Extension: AI tasks direct, canvas tasks unavailable
* - Node: AI tasks via node_worker, canvas tasks unavailable
*/
// All AI tasks share one per-environment shape (worker / direct / node_worker),
// so build their entries in a loop instead of five hand-copied literals —
// this removes the duplication that previously let entries drift apart.
const AI_TASKS = [
    'automatic-speech-recognition',
    'text-to-speech',
    'translation',
    'text-generation',
    'image-text-to-text',
];
// Canvas tasks run directly in the browser only; no extension/node support.
const CANVAS_TASKS = ['char-to-image', 'html-to-image'];
exports.TASK_CONFIGS = {};
for (const task of AI_TASKS) {
    exports.TASK_CONFIGS[task] = {
        // Browser executes AI tasks inside a worker built from the worker source string.
        browser: { type: 'worker', handler: worker_string_1.default },
        // Extension runs the processor directly on the page ('browser' execution type).
        extension: { type: 'browser', handler: tasks_1.taskProcessors[task] },
        // Node runs the same processor inside a node worker.
        node: { type: 'node_worker', handler: tasks_1.taskProcessors[task] },
    };
}
for (const task of CANVAS_TASKS) {
    exports.TASK_CONFIGS[task] = {
        browser: { type: 'browser', handler: tasks_1.taskProcessors[task] },
        // Intentionally no 'extension' / 'node' keys: canvas tasks are unavailable there.
    };
}
/**
 * Determine whether a task can run in a given environment.
 *
 * @param {string} taskType - Key into TASK_CONFIGS (e.g. 'translation').
 * @param {string} environment - 'browser' | 'extension' | 'node'.
 * @returns {boolean} true when TASK_CONFIGS has an entry for the task
 *   and that entry defines a config for the environment.
 */
function isTaskAvailableInEnvironment(taskType, environment) {
    const taskConfig = exports.TASK_CONFIGS[taskType];
    if (!taskConfig) {
        return false;
    }
    return taskConfig[environment] !== undefined;
}
/**
 * Look up the handler registered for a task in a given environment.
 *
 * @param {string} taskType - Key into TASK_CONFIGS.
 * @param {string} environment - 'browser' | 'extension' | 'node'.
 * @returns {*} The environment-specific handler, or null when the task
 *   is unknown or unavailable in that environment.
 */
function getTaskHandler(taskType, environment) {
    // Fall back to an empty object so unknown tasks resolve to undefined.
    const envConfig = (exports.TASK_CONFIGS[taskType] || {})[environment];
    return envConfig ? envConfig.handler : null;
}
/**
 * Look up how a task executes ('worker' | 'browser' | 'node_worker') in a
 * given environment.
 *
 * @param {string} taskType - Key into TASK_CONFIGS.
 * @param {string} environment - 'browser' | 'extension' | 'node'.
 * @returns {string|null} The execution type, or null when the task is
 *   unknown or unavailable in that environment.
 */
function getTaskExecutionType(taskType, environment) {
    // Fall back to an empty object so unknown tasks resolve to undefined.
    const envConfig = (exports.TASK_CONFIGS[taskType] || {})[environment];
    return envConfig ? envConfig.type : null;
}