react-native-executorch
Version:
An easy way to run AI models in React Native with ExecuTorch
55 lines • 1.56 kB
TypeScript
/**
 * Public surface of the LLM hook/controller returned by the library.
 * Combines reactive state (readiness, streaming output, errors) with
 * imperative controls (configure, generate, interrupt).
 */
export interface LLMType {
  /** Messages accumulated so far (user/assistant/system turns). */
  messageHistory: Message[];
  /** Full text of the response generated so far for the current turn. */
  response: string;
  /** Most recently emitted token — presumably updated as tokens stream in; confirm against implementation. */
  token: string;
  /** True once the model is loaded and able to generate. */
  isReady: boolean;
  /** True while a generation is in progress. */
  isGenerating: boolean;
  /** Model download progress — NOTE(review): likely in the 0–1 range, but not shown here; verify. */
  downloadProgress: number;
  /** Last error message, or null when no error has occurred. */
  error: string | null;
  /**
   * Applies chat, tool, and generation settings. All three groups are
   * optional; `chatConfig` accepts a partial override of {@link ChatConfig}.
   */
  configure: ({ chatConfig, toolsConfig, generationConfig, }: {
    chatConfig?: Partial<ChatConfig>;
    toolsConfig?: ToolsConfig;
    generationConfig?: GenerationConfig;
  }) => void;
  /** Returns how many tokens have been generated for the current response. */
  getGeneratedTokenCount: () => number;
  /**
   * Runs generation over an explicit message list, optionally exposing
   * tools to the model. Resolves when generation completes.
   */
  generate: (messages: Message[], tools?: LLMTool[]) => Promise<void>;
  /** Appends a user message and generates a reply using the internal history. */
  sendMessage: (message: string) => Promise<void>;
  /** Removes the message at `index` from the history. */
  deleteMessage: (index: number) => void;
  /** Stops the in-flight generation, if any. */
  interrupt: () => void;
}
export type MessageRole = 'user' | 'assistant' | 'system';
/** A single chat turn: who said it and what was said. */
export interface Message {
  /** Role of the author of this message. */
  role: MessageRole;
  /** Plain-text content of the message. */
  content: string;
}
/**
 * A tool invocation emitted by the model, delivered to
 * {@link ToolsConfig.executeToolCallback}.
 */
export interface ToolCall {
  /** Name of the tool to invoke. */
  toolName: string;
  /**
   * Arguments for the call. Typed as lowercase `object` (any non-primitive)
   * rather than the boxed `Object` wrapper type, which incorrectly accepted
   * primitives such as numbers and strings. The concrete shape depends on
   * the tool's own schema.
   */
  arguments: object;
}
export type LLMTool = Object;
/** Settings that shape the conversation fed to the model. */
export interface ChatConfig {
  /** Messages seeded into the history before the first user turn. */
  initialMessageHistory: Message[];
  /** Context window size — NOTE(review): unclear from here whether this counts messages or tokens; verify against implementation. */
  contextWindowLength: number;
  /** System prompt prepended to the conversation. */
  systemPrompt: string;
}
/** Configuration for tool calling during generation. */
export interface ToolsConfig {
  /** Tool definitions made available to the model. */
  tools: LLMTool[];
  /**
   * Invoked for each tool call the model emits. Resolve with a string
   * result to feed back to the model, or null — presumably when the tool
   * produces no output; confirm against implementation.
   */
  executeToolCallback: (call: ToolCall) => Promise<string | null>;
  /** Whether tool calls should be surfaced in the visible output. */
  displayToolCalls?: boolean;
}
/** Sampling / streaming knobs for generation. All fields optional — defaults are applied by the implementation, not visible here. */
export interface GenerationConfig {
  /** Sampling temperature. */
  temperature?: number;
  /** Top-p (nucleus) sampling threshold — NOTE(review): unconventional casing (`topp` vs `topP`); kept as-is since renaming would break the public API. */
  topp?: number;
  /** Number of tokens emitted per output batch — presumably controls streaming granularity; verify. */
  outputTokenBatchSize?: number;
  /** Interval between output batches — NOTE(review): units (ms?) not shown here; confirm. */
  batchTimeInterval?: number;
}
/**
 * Standard special-token strings used by tokenizers
 * (begin/end-of-sequence, unknown, separator, padding, classification,
 * and mask tokens). Concrete values are defined in the implementation.
 */
export declare const SPECIAL_TOKENS: {
  BOS_TOKEN: string;
  EOS_TOKEN: string;
  UNK_TOKEN: string;
  SEP_TOKEN: string;
  PAD_TOKEN: string;
  CLS_TOKEN: string;
  MASK_TOKEN: string;
};
//# sourceMappingURL=llm.d.ts.map