
speechflow


Speech Processing Flow Graph

"use strict"; /* ** SpeechFlow - Speech Processing Flow Graph ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com> ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only> */ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { if (k2 === undefined) k2 = k; var desc = Object.getOwnPropertyDescriptor(m, k); if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { desc = { enumerable: true, get: function() { return m[k]; } }; } Object.defineProperty(o, k2, desc); }) : (function(o, m, k, k2) { if (k2 === undefined) k2 = k; o[k2] = m[k]; })); var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { Object.defineProperty(o, "default", { enumerable: true, value: v }); }) : function(o, v) { o["default"] = v; }); var __importStar = (this && this.__importStar) || (function () { var ownKeys = function(o) { ownKeys = Object.getOwnPropertyNames || function (o) { var ar = []; for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; return ar; }; return ownKeys(o); }; return function (mod) { if (mod && mod.__esModule) return mod; var result = {}; if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]); __setModuleDefault(result, mod); return result; }; })(); var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); /* standard dependencies */ const node_stream_1 = __importDefault(require("node:stream")); /* internal dependencies */ const speechflow_node_1 = __importDefault(require("./speechflow-node")); const util = __importStar(require("./speechflow-util")); /* SpeechFlow node for gain adjustment in audio-to-audio passing */ class SpeechFlowNodeA2AGain extends speechflow_node_1.default { /* declare official node name */ static name = "a2a-gain"; /* internal state */ destroyed = false; /* construct node */ constructor(id, cfg, opts, args) { super(id, cfg, opts, args); /* declare node configuration parameters */ this.configure({ db: { type: "number", val: 0, pos: 0, match: (n) => n >= -60 && n <= 60 } }); /* declare node input/output format */ this.input = "audio"; this.output = "audio"; } /* open node */ async open() { /* clear destruction flag */ this.destroyed = false; /* adjust gain */ const adjustGain = (chunk, db) => { const dv = new DataView(chunk.payload.buffer, chunk.payload.byteOffset, chunk.payload.byteLength); const gainFactor = util.dB2lin(db); for (let i = 0; i < dv.byteLength; i += 2) { let sample = dv.getInt16(i, true); sample *= gainFactor; sample = Math.max(Math.min(sample, 32767), -32768); dv.setInt16(i, sample, true); } }; /* establish a transform stream */ const self = this; this.stream = new node_stream_1.default.Transform({ readableObjectMode: true, writableObjectMode: true, decodeStrings: false, transform(chunk, encoding, callback) { if (self.destroyed) { callback(new Error("stream already destroyed")); return; } if (!Buffer.isBuffer(chunk.payload)) callback(new Error("invalid chunk payload type")); else if (chunk.payload.byteLength % 2 !== 0) callback(new Error("invalid audio buffer size (not 16-bit aligned)")); else { /* adjust chunk */ adjustGain(chunk, self.params.db); this.push(chunk); callback(); } }, final(callback) { if (self.destroyed) { callback(); return; } this.push(null); callback(); } }); } 
/* close node */ async close() { /* indicate destruction */ this.destroyed = true; /* close stream */ if (this.stream !== null) { this.stream.destroy(); this.stream = null; } } } exports.default = SpeechFlowNodeA2AGain; //# sourceMappingURL=speechflow-node-a2a-gain.js.map
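For readers interested only in the signal math, the following is a minimal standalone sketch of what adjustGain() does, stripped of the stream plumbing. The helper names applyGainDb and dB2lin are illustrative only and not part of the SpeechFlow API; the real node delegates the conversion to util.dB2lin() from ./speechflow-util, which is assumed here to be the standard amplitude conversion 10^(dB/20). The payload is assumed to be little-endian signed 16-bit PCM, as the DataView access in adjustGain() suggests.

/*  standalone sketch (assumptions: dB2lin = 10^(dB/20), 16-bit LE PCM)  */
const dB2lin = (db) => Math.pow(10, db / 20);

const applyGainDb = (pcm, db) => {
    /*  view the Buffer as a sequence of signed 16-bit little-endian samples  */
    const dv = new DataView(pcm.buffer, pcm.byteOffset, pcm.byteLength);
    const factor = dB2lin(db);
    for (let i = 0; i < dv.byteLength; i += 2) {
        const sample = dv.getInt16(i, true) * factor;
        /*  clamp to the int16 range before writing back  */
        dv.setInt16(i, Math.max(Math.min(sample, 32767), -32768), true);
    }
    return pcm;
};

/*  example: boost one quarter-scale sample by +6 dB (factor of about 2)  */
const buf = Buffer.alloc(2);
buf.writeInt16LE(8192, 0);
applyGainDb(buf, 6);
console.log(buf.readInt16LE(0)); /* about 16345 */

The clamping step mirrors the node's own behavior: positive gain can push samples past the 16-bit range, so values are saturated at ±32767/−32768 rather than allowed to wrap around.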