react-native-executorch
Version:
An easy way to run AI models in React Native with ExecuTorch
28 lines (27 loc) • 1.25 kB
JavaScript
import { ResourceFetcher } from '../../utils/ResourceFetcher';
import { DeeplabLabel } from '../../types/imageSegmentation';
import { ETError, getError } from '../../Error';
import { BaseNonStaticModule } from '../BaseNonStaticModule';
export class ImageSegmentationModule extends BaseNonStaticModule {
  /**
   * Fetches the model file (downloading it if necessary) and loads it into
   * the native image-segmentation runtime.
   *
   * @param {{ modelSource: *; }} model - Object whose `modelSource` is handed
   *   to `ResourceFetcher.fetch`. (Exact source type is defined by the
   *   ResourceFetcher contract — not visible from this file.)
   * @param {(progress: number) => void} [onDownloadProgressCallback] -
   *   Progress callback forwarded to the fetcher; defaults to a no-op.
   * @returns {Promise<void>}
   * @throws {Error} 'Download interrupted.' when the fetcher returns null or
   *   an empty path list.
   */
  async load(model, onDownloadProgressCallback = () => {}) {
    const paths = await ResourceFetcher.fetch(
      onDownloadProgressCallback,
      model.modelSource
    );
    if (paths === null || paths.length < 1) {
      throw new Error('Download interrupted.');
    }
    // length >= 1 is guaranteed above; `?? ''` only guards a sparse/undefined slot.
    this.nativeModule = global.loadImageSegmentation(paths[0] ?? '');
  }

  /**
   * Runs segmentation on an image and remaps the native result's string keys
   * onto DeeplabLabel enum values.
   *
   * @param {*} imageSource - Image input passed straight through to the
   *   native `generate` call.
   * @param {string[]} [classesOfInterest] - DeeplabLabel key names to request;
   *   translated to their numeric enum values for the native side. Defaults
   *   to an empty list when null/undefined.
   * @param {boolean} [resize] - Forwarded to native; defaults to false when
   *   null/undefined.
   * @returns {Promise<Object>} Dict keyed by DeeplabLabel enum value; native
   *   keys not present in DeeplabLabel are dropped.
   * @throws {Error} When the module has not been loaded via `load()`.
   */
  async forward(imageSource, classesOfInterest, resize) {
    // `== null` deliberately matches both null and undefined.
    if (this.nativeModule == null) {
      throw new Error(getError(ETError.ModuleNotLoaded));
    }
    const stringDict = await this.nativeModule.generate(
      imageSource,
      (classesOfInterest ?? []).map((label) => DeeplabLabel[label]),
      resize ?? false
    );
    const enumDict = {};
    for (const key of Object.keys(stringDict)) {
      // Object.hasOwn checks own properties only; the previous
      // `key in DeeplabLabel` also matched inherited prototype members
      // (e.g. 'constructor'), which would index a function into enumDict.
      if (Object.hasOwn(DeeplabLabel, key)) {
        enumDict[DeeplabLabel[key]] = stringDict[key];
      }
    }
    return enumDict;
  }
}