/**
 * woolball-client
 * Client-side library for Woolball enabling secure browser resource sharing
 * for distributed AI task processing.
 */
;
/**
* WebLLM processor for handling LLM tasks
* Based on the documentation: https://webllm.mlc.ai/docs/user/basic_usage.html
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.getWebLLMProcessor = exports.WebLLMProcessor = void 0;
/**
* Main WebLLM processor class that handles interactions with the MLC WebLLM library
*/
class WebLLMProcessor {
    /**
     * @param {{ initProgressCallback?: (progress: any) => void }} [config]
     *   Optional settings; `initProgressCallback` receives model-loading
     *   progress reports while initialize() downloads/compiles the model.
     */
    constructor(config) {
        this.engine = null;        // MLCEngine instance once initialize() succeeds
        this.model = null;         // model id currently loaded into the engine
        this.webllmModule = null;  // cached dynamic import of @mlc-ai/web-llm
        this.initProgress = config?.initProgressCallback;
    }
    /**
     * Initializes the WebLLM engine with the specified model.
     * Calling again with the same model is a no-op; a different model id
     * creates a fresh engine.
     *
     * @param {string} model - WebLLM model id to load.
     * @throws {Error} If the library import or engine creation fails.
     */
    async initialize(model) {
        if (this.engine && this.model === model) {
            console.log(`[WebLLM] Engine already initialized with model ${model}`);
            return;
        }
        try {
            console.log(`[WebLLM] Initializing engine with model ${model}`);
            // Dynamically import the WebLLM library once and cache the module.
            if (!this.webllmModule) {
                this.webllmModule = await import('@mlc-ai/web-llm');
            }
            // Create the engine with the selected model; fall back to a
            // console logger when no progress callback was configured.
            this.engine = await this.webllmModule.CreateMLCEngine(model, {
                initProgressCallback: this.initProgress || ((progress) => {
                    console.log(`[WebLLM] Model loading progress:`, progress);
                })
            });
            this.model = model;
            console.log(`[WebLLM] Engine initialized successfully with model ${model}`);
        }
        catch (error) {
            console.error(`[WebLLM] Error initializing engine:`, error);
            throw new Error(`Failed to initialize WebLLM engine: ${error}`);
        }
    }
    /**
     * Generate text using the WebLLM chat-completion API.
     *
     * @param {{ messages: any[], temperature?: number, max_new_tokens?: number,
     *           stream?: boolean, stream_options?: any }} options
     *   OpenAI-style request; `max_new_tokens` maps to WebLLM's `max_tokens`.
     * @returns {Promise<any>} The completed reply object, or — when
     *   `options.stream` is true — an AsyncGenerator of chunks for the
     *   caller to consume.
     * @throws {Error} If the engine is not initialized or generation fails.
     */
    async generateText(options) {
        if (!this.engine) {
            throw new Error('WebLLM engine not initialized. Call initialize() first.');
        }
        try {
            console.log(`[WebLLM] Generating text with options:`, options);
            const { messages, temperature = 1, max_new_tokens, stream = false, stream_options } = options;
            // Build a single request object; streaming-only keys are added
            // conditionally so the non-streaming request carries no `stream`
            // key at all (matches the engine's non-streaming code path).
            const request = {
                messages,
                temperature,
                max_tokens: max_new_tokens,
                ...(stream ? { stream: true, stream_options } : {})
            };
            return await this.engine.chat.completions.create(request);
        }
        catch (error) {
            console.error(`[WebLLM] Error generating text:`, error);
            throw new Error(`Failed to generate text with WebLLM: ${error}`);
        }
    }
    /**
     * Release the loaded model and drop engine references.
     * Previously this only nulled the references, leaving the engine's
     * GPU/WebGPU resources allocated; MLCEngine.unload() frees them.
     * Best-effort: cleanup never throws.
     */
    async cleanup() {
        if (this.engine) {
            try {
                // Optional call: tolerate engine implementations without unload().
                await this.engine.unload?.();
            }
            catch (error) {
                console.error(`[WebLLM] Error unloading engine:`, error);
            }
        }
        this.engine = null;
        this.model = null;
    }
}
exports.WebLLMProcessor = WebLLMProcessor;
// Module-level singleton holder.
let instance = null;
/**
 * Get or create a WebLLM processor instance.
 * NOTE(review): `config` is only honored on the very first call — later
 * callers receive the existing instance unchanged; verify this matches
 * caller expectations.
 */
function getWebLLMProcessor(config) {
    if (instance === null) {
        instance = new WebLLMProcessor(config);
    }
    return instance;
}
exports.getWebLLMProcessor = getWebLLMProcessor;