import { ResourceFetcher } from '../../utils/ResourceFetcher';
import { BaseModule } from '../BaseModule';
import { Buffer } from 'buffer';
import { PNG } from 'pngjs/browser';
// Text-to-image pipeline (tokenizer + encoder + UNet + decoder) running on the
// native ExecuTorch runtime; produces a base64-encoded PNG.
export class TextToImageModule extends BaseModule {
  // Wrap the user-provided callback so it can be passed to the native side;
  // it is invoked with the current denoising step index during generation.
  constructor(inferenceCallback) {
    super();
    this.inferenceCallback = stepIdx => {
      inferenceCallback?.(stepIdx);
    };
  }
  async load(model, onDownloadProgressCallback = () => {}) {
    // Download (or resolve from cache) every resource the pipeline needs.
    const results = await ResourceFetcher.fetch(
      onDownloadProgressCallback,
      model.tokenizerSource, model.schedulerSource,
      model.encoderSource, model.unetSource, model.decoderSource
    );
    if (!results) {
      throw new Error('Failed to fetch one or more resources.');
    }
    const [tokenizerPath, schedulerPath, encoderPath, unetPath, decoderPath] = results;
    if (!tokenizerPath || !schedulerPath || !encoderPath || !unetPath || !decoderPath) {
      throw new Error('Download interrupted.');
    }
    // The scheduler ships as a JSON config; read it from the downloaded file.
    const response = await fetch('file://' + schedulerPath);
    const schedulerConfig = await response.json();
    // Create the native pipeline from the model parts and the scheduler's beta schedule.
    this.nativeModule = global.loadTextToImage(
      tokenizerPath, encoderPath, unetPath, decoderPath,
      schedulerConfig.beta_start, schedulerConfig.beta_end,
      schedulerConfig.num_train_timesteps, schedulerConfig.steps_offset
    );
  }
  async forward(input, imageSize = 512, numSteps = 5, seed) {
    // Run the native generation loop; -1 is passed when no seed is provided.
    const output = await this.nativeModule.generate(
      input, imageSize, numSteps, seed ? seed : -1, this.inferenceCallback
    );
    const outputArray = new Uint8Array(output);
    if (!outputArray.length) {
      return '';
    }
    // Wrap the raw pixel buffer in a PNG and return it as a base64 string.
    const png = new PNG({
      width: imageSize,
      height: imageSize
    });
    png.data = Buffer.from(outputArray);
    const pngBuffer = PNG.sync.write(png, {
      colorType: 6 // truecolor with alpha (RGBA)
    });
    return pngBuffer.toString('base64');
  }
  // Stop an in-progress generation on the native side.
  interrupt() {
    this.nativeModule.interrupt();
  }
}
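// Usage sketch (not part of this module): the model descriptor below is hypothetical,
// but mirrors the source fields read in load() above; real model descriptors are
// provided by react-native-executorch.
//
// const t2i = new TextToImageModule(step => console.log('denoising step', step));
// await t2i.load({
//   tokenizerSource: '...',
//   schedulerSource: '...',
//   encoderSource: '...',
//   unetSource: '...',
//   decoderSource: '...',
// }, progress => console.log('download progress', progress));
// const base64Png = await t2i.forward('a watercolor fox', 512, 20);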
//# sourceMappingURL=TextToImageModule.js.map