react-native-executorch
An easy way to run AI models in React Native with ExecuTorch
JavaScript
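// RnExecutorchModules.js: resolves the package's native modules (falling back to a
// linking-error Proxy when a module is missing) and exposes thin async wrapper classes.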
import { Platform } from 'react-native';
const LINKING_ERROR = `The package 'react-native-executorch' doesn't seem to be linked. Make sure: \n\n` + Platform.select({
  ios: "- You have run 'pod install'\n",
  default: ''
}) + '- You rebuilt the app after installing the package\n' + '- You are not using Expo Go\n';
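// Each native spec below falls back to a Proxy that throws LINKING_ERROR on any
// property access, so a missing native module fails with a clear message at call time.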
const LLMSpec = require('./NativeLLM').default;
const LLM = LLMSpec ? LLMSpec : new Proxy({}, {
  get() {
    throw new Error(LINKING_ERROR);
  }
});
const ETModuleSpec = require('./NativeETModule').default;
const ETModule = ETModuleSpec ? ETModuleSpec : new Proxy({}, {
  get() {
    throw new Error(LINKING_ERROR);
  }
});
const ClassificationSpec = require('./NativeClassification').default;
const Classification = ClassificationSpec ? ClassificationSpec : new Proxy({}, {
  get() {
    throw new Error(LINKING_ERROR);
  }
});
const ObjectDetectionSpec = require('./NativeObjectDetection').default;
const ObjectDetection = ObjectDetectionSpec ? ObjectDetectionSpec : new Proxy({}, {
  get() {
    throw new Error(LINKING_ERROR);
  }
});
const StyleTransferSpec = require('./NativeStyleTransfer').default;
const StyleTransfer = StyleTransferSpec ? StyleTransferSpec : new Proxy({}, {
  get() {
    throw new Error(LINKING_ERROR);
  }
});
const SpeechToTextSpec = require('./NativeSpeechToText').default;
const SpeechToText = SpeechToTextSpec ? SpeechToTextSpec : new Proxy({}, {
  get() {
    throw new Error(LINKING_ERROR);
  }
});
const OCRSpec = require('./NativeOCR').default;
const OCR = OCRSpec ? OCRSpec : new Proxy({}, {
  get() {
    throw new Error(LINKING_ERROR);
  }
});
const VerticalOCRSpec = require('./NativeVerticalOCR').default;
const VerticalOCR = VerticalOCRSpec ? VerticalOCRSpec : new Proxy({}, {
  get() {
    throw new Error(LINKING_ERROR);
  }
});
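// Async wrapper around the ObjectDetection native module.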
class _ObjectDetectionModule {
  async forward(input) {
    return await ObjectDetection.forward(input);
  }
  async loadModule(modelSource) {
    return await ObjectDetection.loadModule(modelSource);
  }
}
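// Async wrapper around the StyleTransfer native module.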
class _StyleTransferModule {
  async forward(input) {
    return await StyleTransfer.forward(input);
  }
  async loadModule(modelSource) {
    return await StyleTransfer.loadModule(modelSource);
  }
}
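// Async wrapper around the SpeechToText native module; exposes generate() plus
// separate encode()/decode() steps.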
class _SpeechToTextModule {
  async generate(waveform) {
    return await SpeechToText.generate(waveform);
  }
  async loadModule(modelName, modelSources) {
    return await SpeechToText.loadModule(modelName, modelSources);
  }
  async encode(input) {
    return await SpeechToText.encode(input);
  }
  async decode(prevTokens, encoderOutput) {
    return await SpeechToText.decode(prevTokens, encoderOutput || []);
  }
}
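// Async wrapper around the Classification native module.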
class _ClassificationModule {
  async forward(input) {
    return await Classification.forward(input);
  }
  async loadModule(modelSource) {
    return await Classification.loadModule(modelSource);
  }
}
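// Async wrapper around the OCR native module (detector plus large/medium/small recognizer sources).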
class _OCRModule {
  async forward(input) {
    return await OCR.forward(input);
  }
  async loadModule(detectorSource, recognizerSourceLarge, recognizerSourceMedium, recognizerSourceSmall, symbols) {
    return await OCR.loadModule(detectorSource, recognizerSourceLarge, recognizerSourceMedium, recognizerSourceSmall, symbols);
  }
}
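// Async wrapper around the VerticalOCR native module (large/medium detector sources,
// a recognizer, symbols, and an independentCharacters flag).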
class _VerticalOCRModule {
  async forward(input) {
    return await VerticalOCR.forward(input);
  }
  async loadModule(detectorLargeSource, detectorMediumSource, recognizerSource, symbols, independentCharacters) {
    return await VerticalOCR.loadModule(detectorLargeSource, detectorMediumSource, recognizerSource, symbols, independentCharacters);
  }
}
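// Async wrapper around the generic ETModule binding; forward() takes raw inputs
// together with their shapes and input types.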
class _ETModule {
  async forward(inputs, shapes, inputTypes) {
    return await ETModule.forward(inputs, shapes, inputTypes);
  }
  async loadModule(modelSource) {
    return await ETModule.loadModule(modelSource);
  }
  async loadMethod(methodName) {
    return await ETModule.loadMethod(methodName);
  }
}
export {
  LLM,
  ETModule,
  Classification,
  ObjectDetection,
  StyleTransfer,
  SpeechToText,
  OCR,
  VerticalOCR,
  _ETModule,
  _ClassificationModule,
  _StyleTransferModule,
  _ObjectDetectionModule,
  _SpeechToTextModule,
  _OCRModule,
  _VerticalOCRModule
};
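// Illustrative usage sketch (not part of this file): the underscore-prefixed wrappers
// are plain classes, so a caller might do something like the following. The model
// source and input below are hypothetical placeholders, not values defined here.
//
//   const classification = new _ClassificationModule();
//   await classification.loadModule(modelSource);      // e.g. a bundled or remote model file
//   const result = await classification.forward(imageSource);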
//# sourceMappingURL=RnExecutorchModules.js.map