react-native-executorch
An easy way to run AI models in React Native with ExecuTorch.
LLMModule.js (JavaScript):
import { LLM } from '../../native/RnExecutorchModules';
import { fetchResource } from '../../utils/fetchResource';
import {
  DEFAULT_CONTEXT_WINDOW_LENGTH,
  DEFAULT_MESSAGE_HISTORY,
  DEFAULT_SYSTEM_PROMPT,
} from '../../constants/llamaDefaults';

export class LLMModule {
  // No-op by default; replaced via onDownloadProgress().
  static onDownloadProgressCallback = _downloadProgress => {};

  // Fetches the tokenizer and model files (reporting download progress for
  // the model), then loads both into the native ExecuTorch runtime.
  static async load(
    modelSource,
    tokenizerSource,
    systemPrompt = DEFAULT_SYSTEM_PROMPT,
    messageHistory = DEFAULT_MESSAGE_HISTORY,
    contextWindowLength = DEFAULT_CONTEXT_WINDOW_LENGTH
  ) {
    try {
      const tokenizerFileUri = await fetchResource(tokenizerSource);
      const modelFileUri = await fetchResource(modelSource, this.onDownloadProgressCallback);
      await LLM.loadLLM(modelFileUri, tokenizerFileUri, systemPrompt, messageHistory, contextWindowLength);
    } catch (err) {
      throw new Error(err.message);
    }
  }

  // Runs inference on the loaded model; generated tokens are delivered
  // through the onToken subscription.
  static async generate(input) {
    try {
      await LLM.runInference(input);
    } catch (err) {
      throw new Error(err.message);
    }
  }

  // Registers a callback that receives model download progress updates.
  static onDownloadProgress(callback) {
    this.onDownloadProgressCallback = callback;
  }

  // Subscribes to tokens emitted during generation.
  static onToken(callback) {
    return LLM.onToken(callback);
  }

  // Stops an in-progress generation.
  static interrupt() {
    LLM.interrupt();
  }

  // Unloads the model and releases native resources.
  static delete() {
    LLM.deleteModule();
  }
}
//# sourceMappingURL=LLMModule.js.map
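For illustration, a minimal usage sketch of the API above. It assumes LLMModule is re-exported from the package root; the model and tokenizer URIs are placeholders, and whether onToken returns a removable subscription depends on the native binding, so the cleanup is hedged accordingly.

import { LLMModule } from 'react-native-executorch'; // assumes a root re-export

async function runDemo() {
  // Report model download progress.
  LLMModule.onDownloadProgress(progress => {
    console.log('download progress:', progress);
  });

  // Placeholder URIs; point these at real model and tokenizer files.
  await LLMModule.load(
    'https://example.com/llama.pte',
    'https://example.com/tokenizer.bin'
  );

  // Stream tokens as they are generated.
  const subscription = LLMModule.onToken(token => {
    console.log(token);
  });

  await LLMModule.generate('Write a haiku about mobile inference.');

  // Clean up: drop the token listener (if a subscription object was
  // returned) and free the native model.
  subscription?.remove?.();
  LLMModule.delete();
}

Note that generate() resolves only when inference finishes; partial output arrives exclusively through the onToken callback, which is why the listener is registered before generation starts.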