whisper-node-server
Version:
Local audio transcription on CPU. Node.js bindings for OpenAI's Whisper. Modified from node-whisper.
93 lines • 3.75 kB
JavaScript
;
// TypeScript interop helper: wrap a CommonJS export so it can be consumed
// as if it had an ES-module default export. Reuses an existing helper on
// `this` when one is already defined (standard tsc emit pattern).
var __importDefault = (this && this.__importDefault) || function (mod) {
    if (mod && mod.__esModule) {
        return mod;
    }
    return { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.MODELS_LIST = exports.createCppCommand = void 0;
// todo: remove all imports from file
const fs_1 = require("fs");
const path_1 = __importDefault(require("path"));
const constants_1 = require("./constants");
// return as syntax for whisper.cpp command
// Build the shell command line for a whisper.cpp binary.
// - `filePath`: audio file to transcribe (ignored in server mode)
// - `modelName` XOR `modelPath`: which ggml model to load (see modelPathOrName)
// - `options`: flag options translated by getFlags
// - `isServer`/`port`: run the HTTP `server` binary instead of `main`
// Returns the command string; throws (via modelPathOrName) on bad model input.
const createCppCommand = ({ filePath, modelName = null, modelPath = null, options = null, isServer = false, port = 8080 }) => {
    const flags = getFlags(options);
    const model = modelPathOrName(modelName, modelPath);
    // whisper.cpp builds on Windows produce `main.exe` / `server.exe`.
    const exeExt = process.platform === 'win32' ? '.exe' : '';
    // FIX: quote the model and audio paths — unquoted, any path containing
    // spaces is split into multiple shell arguments and the command fails.
    if (isServer) {
        return `server${exeExt} ${flags} -m "${model}" --host 0.0.0.0 --port ${port}`;
    }
    else {
        return `main${exeExt} ${flags} -m "${model}" -f "${filePath}"`;
    }
};
exports.createCppCommand = createCppCommand;
// Resolve the model file to pass to whisper.cpp from a model name (`mn`)
// or an explicit file path (`mp`). Exactly one may be provided; with
// neither, falls back to the package's default model.
// FIX: throw Error objects (with stack traces) instead of bare strings;
// messages are unchanged so existing string-matching callers still work
// via `err.message`.
const modelPathOrName = (mn, mp) => {
    if (mn && mp)
        throw new Error("Submit a modelName OR a modelPath. NOT BOTH!");
    // Use default model if none specified
    if (!mn && !mp) {
        // NOTE(review): assumes the process cwd lies under a 'whisper.cpp'
        // directory and rebuilds the path up to it — confirm this holds for
        // all install layouts.
        const nodeModulesPath = path_1.default.join(process.cwd().split('whisper.cpp')[0], 'whisper.cpp');
        console.log("[whisper-node-server] No 'modelName' or 'modelPath' provided. Using default model:", constants_1.DEFAULT_MODEL, "\n");
        const modelPath = path_1.default.join(nodeModulesPath, 'models', exports.MODELS_LIST[constants_1.DEFAULT_MODEL]);
        console.log(modelPath);
        if (!(0, fs_1.existsSync)(modelPath)) {
            throw new Error(`'${constants_1.DEFAULT_MODEL}' not downloaded! Run 'npx whisper-node-server download'`);
        }
        return modelPath;
    }
    // Use custom model path
    if (mp)
        return mp;
    // Use model from models directory
    if (mn && exports.MODELS_LIST[mn]) {
        // NOTE(review): relative 'models/...' path — resolution depends on the
        // caller's cwd, unlike the default-model branch above; verify intended.
        const modelPath = path_1.default.join('models', exports.MODELS_LIST[mn]);
        if (!(0, fs_1.existsSync)(modelPath)) {
            throw new Error(`'${mn}' not found! Run 'npx whisper-node-server download'`);
        }
        return modelPath;
    }
    if (mn)
        throw new Error(`modelName "${mn}" not found in list of models. Check your spelling OR use a custom modelPath.`);
    throw new Error(`modelName OR modelPath required! You submitted modelName: '${mn}', modelPath: '${mp}'`);
};
// option flags list: https://github.com/ggerganov/whisper.cpp/blob/master/README.md?plain=1#L91
// Translate the options object into a whisper.cpp flag string.
// Returns '' when no options are given. Flag order matches the option
// checks below (language, timestamps, then output-file flags).
const getFlags = (opts) => {
    if (!opts)
        return '';
    const parts = [];
    // Spoken-language hint forwarded to whisper.cpp.
    if (opts.language) {
        parts.push(`-l ${opts.language}`);
    }
    // Word-level timestamps — presumably via whisper.cpp's max-line-length
    // flag with one token per line.
    if (opts.word_timestamps) {
        parts.push('-ml 1');
    }
    if (opts.timestamp_size) {
        parts.push(`-ts ${opts.timestamp_size}`);
    }
    // Optional transcript output files (plain text, SRT, VTT).
    const outputFlags = [
        ['gen_file_txt', '-otxt'],
        ['gen_file_subtitle', '-osrt'],
        ['gen_file_vtt', '-ovtt'],
    ];
    for (const [option, flag] of outputFlags) {
        if (opts[option]) {
            parts.push(flag);
        }
    }
    return parts.join(' ');
};
// model list: https://github.com/ggerganov/whisper.cpp/#more-audio-samples
// Map of public model names to the ggml weight filenames expected under the
// models/ directory (fetched by `npx whisper-node-server download`).
// NOTE(review): "large-v2" and "large-v3" are absent while "large-v3-turbo"
// is present — presumably intentional; confirm against the download script.
exports.MODELS_LIST = {
    "tiny": "ggml-tiny.bin",
    "tiny.en": "ggml-tiny.en.bin",
    "base": "ggml-base.bin",
    "base.en": "ggml-base.en.bin",
    "small": "ggml-small.bin",
    "small.en": "ggml-small.en.bin",
    "medium": "ggml-medium.bin",
    "medium.en": "ggml-medium.en.bin",
    "large-v1": "ggml-large-v1.bin",
    "large": "ggml-large.bin",
    "large-v3-turbo": "ggml-large-v3-turbo.bin",
};
//# sourceMappingURL=whisper.js.map