// react-native-executorch: an easy way to run AI models in React Native with ExecuTorch.
// useLLM.js
import { useCallback, useEffect, useMemo, useState } from 'react';
import { LLMController } from '../../controllers/LLMController';
/**
 * Hook version of LLMModule.
 *
 * Wraps an LLMController in React state: loads the model and tokenizer
 * (unless `preventLoad` is set), streams generated tokens into `token` and
 * `response`, and deletes the controller on unmount. A usage sketch follows
 * the hook body below.
 */
export const useLLM = ({
  modelSource,
  tokenizerSource,
  tokenizerConfigSource,
  preventLoad = false
}) => {
  const [token, setToken] = useState('');
  const [response, setResponse] = useState('');
  const [messageHistory, setMessageHistory] = useState([]);
  const [isReady, setIsReady] = useState(false);
  const [isGenerating, setIsGenerating] = useState(false);
  const [downloadProgress, setDownloadProgress] = useState(0);
  const [error, setError] = useState(null);

  // Append each streamed token to the running response.
  const tokenCallback = useCallback(newToken => {
    setToken(newToken);
    setResponse(prevResponse => prevResponse + newToken);
  }, []);

  // One controller per mount; recreated only if the (stable) token callback changes.
  const model = useMemo(() => new LLMController({
    tokenCallback,
    messageHistoryCallback: setMessageHistory,
    isReadyCallback: setIsReady,
    isGeneratingCallback: setIsGenerating,
    onDownloadProgressCallback: setDownloadProgress
  }), [tokenCallback]);
  useEffect(() => {
    setDownloadProgress(0);
    setError(null);
    if (!preventLoad) {
      // The try/catch must live inside the async IIFE: wrapping the IIFE
      // invocation itself would never catch a rejected model.load promise.
      (async () => {
        try {
          await model.load({
            modelSource,
            tokenizerSource,
            tokenizerConfigSource
          });
        } catch (e) {
          setError(e);
        }
      })();
    }
    return () => {
      model.delete();
    };
  }, [modelSource, tokenizerSource, tokenizerConfigSource, preventLoad, model]);
  // Memoize the returned functions so consumers get stable identities.
  const configure = useCallback(({
    chatConfig,
    toolsConfig
  }) => model.configure({
    chatConfig,
    toolsConfig
  }), [model]);

  // Clear the previously streamed response before starting a new generation.
  const generate = useCallback((messages, tools) => {
    setResponse('');
    return model.generate(messages, tools);
  }, [model]);

  const sendMessage = useCallback(message => {
    setResponse('');
    return model.sendMessage(message);
  }, [model]);

  const deleteMessage = useCallback(index => model.deleteMessage(index), [model]);
  const interrupt = useCallback(() => model.interrupt(), [model]);
  return {
    messageHistory,
    response,
    token,
    isReady,
    isGenerating,
    downloadProgress,
    error,
    configure,
    generate,
    sendMessage,
    deleteMessage,
    interrupt
  };
};
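
/*
 * Usage sketch. The URLs below are hypothetical placeholders; in a real app
 * they would point at your exported ExecuTorch .pte model and tokenizer files.
 *
 *   const { isReady, downloadProgress, response, sendMessage } = useLLM({
 *     modelSource: 'https://example.com/model.pte',                    // hypothetical
 *     tokenizerSource: 'https://example.com/tokenizer.json',           // hypothetical
 *     tokenizerConfigSource: 'https://example.com/tokenizer_config.json',
 *   });
 *
 *   // Watch downloadProgress while the model loads; once isReady is true:
 *   await sendMessage('Hello!'); // tokens stream into `response` as they arrive
 */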
//# sourceMappingURL=useLLM.js.map