react-native-executorch
Version: (not specified in this extract)
An easy way to run AI models in React Native with ExecuTorch
29 lines (28 loc) • 1.18 kB
TypeScript
import { ResourceSource } from '../../types/common';
import { ChatConfig, LLMTool, Message, ToolsConfig } from '../../types/llm';
/**
 * Imperative API for loading and running a large language model (LLM)
 * with ExecuTorch in React Native.
 *
 * NOTE(review): this is an ambient declaration (.d.ts) — the notes below
 * describe the declared contract only; runtime behavior is inferred from
 * signatures and should be confirmed against the implementation.
 */
export declare class LLMModule {
    // Internal controller backing this module; type not visible here.
    private controller;
    /**
     * All callbacks are optional, as is the options object itself.
     *
     * @param tokenCallback - presumably invoked per generated token for
     *   streaming output — confirm in implementation.
     * @param responseCallback - invoked with a full response string.
     * @param messageHistoryCallback - invoked with the updated message history.
     */
    constructor({ tokenCallback, responseCallback, messageHistoryCallback, }?: {
        tokenCallback?: (token: string) => void;
        responseCallback?: (response: string) => void;
        messageHistoryCallback?: (messageHistory: Message[]) => void;
    });
    /**
     * Loads the model and tokenizer assets.
     *
     * @param model - sources for the model binary, tokenizer, and tokenizer
     *   config (`ResourceSource` is project-defined; see ../../types/common).
     * @param onDownloadProgressCallback - optional progress callback;
     *   presumably receives a fraction in [0, 1] — confirm with caller docs.
     * @returns promise that resolves once loading completes.
     */
    load(model: {
        modelSource: ResourceSource;
        tokenizerSource: ResourceSource;
        tokenizerConfigSource: ResourceSource;
    }, onDownloadProgressCallback?: (progress: number) => void): Promise<void>;
    /**
     * Replaces the per-token callback after construction. Note the callback
     * is required here, unlike in the constructor options.
     */
    setTokenCallback({ tokenCallback, }: {
        tokenCallback: (token: string) => void;
    }): void;
    /**
     * Applies chat and/or tool configuration. `chatConfig` is a `Partial`,
     * so unspecified fields presumably keep their previous/default values —
     * confirm in implementation.
     */
    configure({ chatConfig, toolsConfig, }: {
        chatConfig?: Partial<ChatConfig>;
        toolsConfig?: ToolsConfig;
    }): void;
    /** Runs the model on a raw string input; resolves with the output text. */
    forward(input: string): Promise<string>;
    /**
     * Generates a completion for a message list, optionally exposing tools
     * to the model; resolves with the generated text.
     */
    generate(messages: Message[], tools?: LLMTool[]): Promise<string>;
    /**
     * Sends a user message; resolves with the resulting message history
     * (presumably including the assistant reply — confirm in implementation).
     */
    sendMessage(message: string): Promise<Message[]>;
    /** Deletes the message at `index` and returns the updated history (synchronous). */
    deleteMessage(index: number): Message[];
    /** Interrupts an in-progress generation, if any. */
    interrupt(): void;
    /** Releases the module's resources; presumably the instance is unusable afterwards. */
    delete(): void;
}