// react-native-executorch: an easy way to run AI models in React Native with ExecuTorch.
import { LLMController } from '../../controllers/LLMController';
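// LLMModule: a static facade over a single LLMController instance. Every method
// delegates to the controller stored on the class itself.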
export class LLMModule {
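// Creates the LLMController with the supplied callbacks, then loads the model,
// tokenizer, and tokenizer config from the given sources. Download progress is
// reported through onDownloadProgressCallback.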
static async load({
modelSource,
tokenizerSource,
tokenizerConfigSource,
onDownloadProgressCallback,
tokenCallback,
responseCallback,
messageHistoryCallback
}) {
this.controller = new LLMController({
tokenCallback: tokenCallback,
responseCallback: responseCallback,
messageHistoryCallback: messageHistoryCallback,
onDownloadProgressCallback: onDownloadProgressCallback
});
await this.controller.load({
modelSource,
tokenizerSource,
tokenizerConfigSource
});
}
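// Replaces the token streaming callback on the active controller.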
static setTokenCallback({
tokenCallback
}) {
this.controller.setTokenCallback(tokenCallback);
}
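// Forwards the chat and tool-calling configuration to the controller.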
static configure({
chatConfig,
toolsConfig
}) {
this.controller.configure({
chatConfig,
toolsConfig
});
}
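// Runs the model on raw input via the controller and returns the generated response.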
static async forward(input) {
await this.controller.forward(input);
return this.controller.response;
}
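// Generates a completion for the given message array (optionally with tools)
// and returns the controller's response.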
static async generate(messages, tools) {
await this.controller.generate(messages, tools);
return this.controller.response;
}
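// Sends a single message through the controller and returns the updated message history.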
static async sendMessage(message) {
await this.controller.sendMessage(message);
return this.controller.messageHistory;
}
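// Removes the message at the given index and returns the updated message history.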
static async deleteMessage(index) {
await this.controller.deleteMessage(index);
return this.controller.messageHistory;
}
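// Interrupts any in-progress generation.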
static interrupt() {
this.controller.interrupt();
}
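// Releases the underlying controller and its loaded model resources.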
static delete() {
this.controller.delete();
}
}
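// Example usage (a sketch: the model/tokenizer URLs and the message shape below
// are assumptions for illustration, not values shipped with this module):
//
//   await LLMModule.load({
//     modelSource: 'https://example.com/model.pte',                       // hypothetical model URL
//     tokenizerSource: 'https://example.com/tokenizer.json',              // hypothetical tokenizer URL
//     tokenizerConfigSource: 'https://example.com/tokenizer_config.json', // hypothetical config URL
//     tokenCallback: token => console.log(token),
//     onDownloadProgressCallback: progress => console.log(progress),
//   });
//   const response = await LLMModule.generate([
//     { role: 'user', content: 'Hello!' },                                // assumed message shape
//   ]);
//   LLMModule.delete();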
//# sourceMappingURL=LLMModule.js.map