UNPKG

react-native-executorch

Version:

An easy way to run AI models in React Native with ExecuTorch.

49 lines 1.97 kB
import { ResourceSource } from '../types/common';
import { ChatConfig, LLMTool, Message, ToolsConfig } from '../types/llm';
/**
 * Controller type for running an on-device LLM through the native
 * ExecuTorch module: loading model/tokenizer resources, streaming tokens,
 * and maintaining a chat message history.
 *
 * NOTE(review): this is a generated declaration file (see the source-map
 * reference at the bottom); the runtime behavior lives in the corresponding
 * implementation file. Comments on individual members below are inferred
 * from names/types where marked — confirm against the implementation.
 */
export declare class LLMController {
    private nativeModule;
    private chatConfig;
    private toolsConfig;
    private tokenizerConfig;
    private onToken;
    private _response;
    private _isReady;
    private _isGenerating;
    private _messageHistory;
    private tokenCallback;
    private responseCallback;
    private messageHistoryCallback;
    private isReadyCallback;
    private isGeneratingCallback;
    private onDownloadProgressCallback;
    /**
     * All callbacks are optional. Presumably each fires when the matching
     * piece of controller state changes (streamed token, accumulated
     * response, message history, readiness flag, generation flag, download
     * progress in [0, 1]) — TODO confirm semantics in the implementation.
     */
    constructor({ tokenCallback, responseCallback, messageHistoryCallback, isReadyCallback, isGeneratingCallback, onDownloadProgressCallback, }: {
        tokenCallback?: (token: string) => void;
        responseCallback?: (response: string) => void;
        messageHistoryCallback?: (messageHistory: Message[]) => void;
        isReadyCallback?: (isReady: boolean) => void;
        isGeneratingCallback?: (isGenerating: boolean) => void;
        onDownloadProgressCallback?: (downloadProgress: number) => void;
    });
    /** Current response text (presumably the accumulated generation output). */
    get response(): string;
    /** Whether the controller is ready (presumably after `load()` resolves). */
    get isReady(): boolean;
    /** Whether a generation is currently in progress. */
    get isGenerating(): boolean;
    /** Current conversation history as a list of `Message` objects. */
    get messageHistory(): Message[];
    /**
     * Loads the model, tokenizer, and tokenizer config from the given
     * resource sources. Resolves once loading completes; download progress
     * is presumably reported via `onDownloadProgressCallback` — verify.
     */
    load({ modelSource, tokenizerSource, tokenizerConfigSource, }: {
        modelSource: ResourceSource;
        tokenizerSource: ResourceSource;
        tokenizerConfigSource: ResourceSource;
    }): Promise<void>;
    /** Replaces the per-token callback supplied at construction time. */
    setTokenCallback(tokenCallback: (token: string) => void): void;
    /**
     * Updates chat and/or tools configuration. `chatConfig` is a
     * `Partial`, so only the provided fields are presumably overridden.
     */
    configure({ chatConfig, toolsConfig, }: {
        chatConfig?: Partial<ChatConfig>;
        toolsConfig?: ToolsConfig;
    }): void;
    /** Releases the controller (presumably frees native model resources). */
    delete(): void;
    /** Runs generation on a raw input string (presumably already templated). */
    forward(input: string): Promise<void>;
    /** Interrupts an in-flight generation. */
    interrupt(): void;
    /** Generates a completion for the given messages, optionally with tools. */
    generate(messages: Message[], tools?: LLMTool[]): Promise<void>;
    /**
     * Sends a single message string (presumably appends it to the history
     * and triggers generation of a reply — confirm against implementation).
     */
    sendMessage(message: string): Promise<void>;
    /** Removes the message at `index` from the message history. */
    deleteMessage(index: number): void;
    private applyChatTemplate;
}
//# sourceMappingURL=LLMController.d.ts.map