// react-native-executorch: an easy way to run AI models in React Native with ExecuTorch.
import { ResourceFetcher } from '../../utils/ResourceFetcher';
import { DeeplabLabel } from '../../types/imageSegmentation';
import { ETError, getError } from '../../Error';
import { BaseModule } from '../BaseModule';
export class ImageSegmentationModule extends BaseModule {
  async load(model, onDownloadProgressCallback = () => {}) {
    // Download (or resolve from cache) the model file, reporting progress
    // through the optional callback.
    const paths = await ResourceFetcher.fetch(
      onDownloadProgressCallback,
      model.modelSource
    );
    if (paths === null || paths.length < 1) {
      throw new Error('Download interrupted.');
    }
    // Hand the local model path to the native ExecuTorch runtime.
    this.nativeModule = global.loadImageSegmentation(paths[0] || '');
  }

  async forward(imageSource, classesOfInterest, resize) {
    if (this.nativeModule == null) {
      throw new Error(getError(ETError.ModuleNotLoaded));
    }
    // The native side expects class names, so map DeeplabLabel enum values
    // to their string keys before calling generate().
    const stringDict = await this.nativeModule.generate(
      imageSource,
      (classesOfInterest || []).map((label) => DeeplabLabel[label]),
      resize || false
    );
    // Convert the string-keyed result back into a DeeplabLabel-keyed map.
    const enumDict = {};
    for (const key in stringDict) {
      if (key in DeeplabLabel) {
        enumDict[DeeplabLabel[key]] = stringDict[key];
      }
    }
    return enumDict;
  }
}
//# sourceMappingURL=ImageSegmentationModule.js.map
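// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the compiled module above). The
// package-level import path, the model URL, the image URI, and the
// DeeplabLabel member names below are assumptions for illustration only.
// ---------------------------------------------------------------------------
import { ImageSegmentationModule, DeeplabLabel } from 'react-native-executorch';

async function runSegmentation() {
  const segmentation = new ImageSegmentationModule();

  // load() downloads (or reuses a cached copy of) the model file and binds
  // the native ExecuTorch runtime; the callback reports download progress.
  await segmentation.load(
    { modelSource: 'https://example.com/deeplab_v3.pte' }, // hypothetical URL
    (progress) => console.log('model download progress:', progress)
  );

  // forward() resolves to a map keyed by DeeplabLabel values. Passing a list
  // of labels restricts the per-class outputs, and `true` asks the native
  // side to resize the result to the original image dimensions.
  const masks = await segmentation.forward(
    'file:///path/to/photo.jpg', // hypothetical local image URI
    [DeeplabLabel.PERSON, DeeplabLabel.DOG], // assumed enum member names
    true
  );

  const personMask = masks[DeeplabLabel.PERSON];
  console.log('person mask size:', personMask?.length);
}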