// audio.libx.js
// Comprehensive audio library with progressive streaming, recording capabilities,
// real-time processing, and intelligent caching for web applications.
import { ProcessingError } from './types.js';
export class RealtimeAudioProcessor {
  /**
   * Real-time audio analysis / effects pipeline built on the Web Audio API.
   *
   * Node graph after initialize():
   *   MediaStreamSource -> gain -> filter -> [optional effect chain] -> analyser
   *   and, in parallel from the chain tail, -> output gain -> destination
   *   (speaker monitoring).
   *
   * @param {Object}  [options]
   * @param {boolean} [options.enableSilenceDetection=true] emit onSilenceDetected per frame
   * @param {number}  [options.silenceThresholdDb=-50] dB level below which a frame counts as silence
   * @param {boolean} [options.enableLevelMonitoring=true] emit onLevelUpdate callbacks
   * @param {number}  [options.levelUpdateInterval=100] minimum ms between onLevelUpdate calls
   * @param {boolean} [options.enableEffects=false] insert the configured effects into the chain
   * @param {Array}   [options.effects] effect descriptors: { type, enabled, parameters }
   */
  constructor(options = {}) {
    this._audioContext = null;
    this._sourceNode = null;
    this._analyserNode = null;
    this._gainNode = null;
    this._outputGainNode = null;
    this._filterNode = null;
    // type -> AudioNode, or { input, output } for composite effects (e.g. echo).
    this._effectNodes = new Map();
    this._isProcessing = false;
    this._animationFrame = null;
    this._callbacks = {};
    // Last monitor volume explicitly set; restored when unmuting.
    this._outputVolume = 0.3;
    // Timestamp of the last onLevelUpdate emission (levelUpdateInterval throttle).
    this._lastLevelEmit = 0;
    // Cached MediaStreamDestination so repeated createProcessedStream() calls
    // don't stack extra analyser connections.
    this._streamDestination = null;
    this._options = {
      enableSilenceDetection: options.enableSilenceDetection ?? true,
      silenceThresholdDb: options.silenceThresholdDb ?? -50,
      enableLevelMonitoring: options.enableLevelMonitoring ?? true,
      levelUpdateInterval: options.levelUpdateInterval ?? 100,
      enableEffects: options.enableEffects ?? false,
      effects: options.effects ?? [],
    };
  }

  /**
   * Create the AudioContext, attach the media stream, and build the node graph.
   * @param {MediaStream} mediaStream - live input (e.g. from getUserMedia)
   * @throws {ProcessingError} wrapping any underlying Web Audio failure
   */
  async initialize(mediaStream) {
    try {
      this._audioContext = new AudioContext();
      // Autoplay policies may start the context suspended; resume before wiring.
      if (this._audioContext.state === 'suspended') {
        await this._audioContext.resume();
      }
      this._sourceNode = this._audioContext.createMediaStreamSource(mediaStream);
      await this._setupProcessingChain();
      console.log('RealtimeAudioProcessor initialized with sample rate:', this._audioContext.sampleRate);
    } catch (error) {
      console.error('Failed to initialize RealtimeAudioProcessor:', error);
      throw new ProcessingError('Failed to initialize RealtimeAudioProcessor', undefined, error);
    }
  }

  /**
   * Create the permanent nodes (gain, filter, analyser, output gain) and
   * delegate the actual wiring to _rebuildProcessingChain().
   * @throws {ProcessingError} if initialize() has not run yet
   */
  async _setupProcessingChain() {
    if (!this._audioContext || !this._sourceNode) {
      throw new ProcessingError('AudioContext or source node not available');
    }
    this._gainNode = this._audioContext.createGain();
    this._gainNode.gain.value = 1.0;
    // Allpass at unity keeps the filter in the graph as a no-op until setFilter().
    this._filterNode = this._audioContext.createBiquadFilter();
    this._filterNode.type = 'allpass';
    this._filterNode.frequency.value = 1000;
    this._analyserNode = this._audioContext.createAnalyser();
    this._analyserNode.fftSize = 2048;
    this._analyserNode.smoothingTimeConstant = 0.8;
    this._outputGainNode = this._audioContext.createGain();
    this._outputGainNode.gain.value = this._outputVolume;
    this._outputGainNode.connect(this._audioContext.destination);
    await this._rebuildProcessingChain();
    console.log('Processing chain setup complete');
  }

  /**
   * Insert every enabled effect between inputNode and the rest of the chain.
   * Effects that fail to build are skipped with a warning rather than aborting.
   * @param {AudioNode} inputNode - tail of the chain so far
   * @returns {Promise<AudioNode>} the new chain tail
   */
  async _applyEffects(inputNode) {
    if (!this._audioContext || !this._options.effects) {
      return inputNode;
    }
    let currentNode = inputNode;
    for (const effect of this._options.effects) {
      if (!effect.enabled) {
        continue;
      }
      try {
        const created = await this._createEffectNode(effect);
        if (created) {
          // An effect is either a plain AudioNode or an { input, output } pair.
          const entry = created.input ?? created;
          const exit = created.output ?? created;
          currentNode.connect(entry);
          currentNode = exit;
          this._effectNodes.set(effect.type, created);
        }
      } catch (error) {
        console.warn(`Failed to create effect ${effect.type}:`, error);
      }
    }
    return currentNode;
  }

  /**
   * Build a single effect from its descriptor.
   * @param {{type: string, parameters: Object}} effect
   * @returns {Promise<AudioNode|{input: AudioNode, output: AudioNode}|null>}
   */
  async _createEffectNode(effect) {
    if (!this._audioContext) {
      return null;
    }
    switch (effect.type) {
      case 'gain': {
        const gainNode = this._audioContext.createGain();
        gainNode.gain.value = effect.parameters.gain ?? 1.0;
        return gainNode;
      }
      case 'filter': {
        const filterNode = this._audioContext.createBiquadFilter();
        filterNode.type = effect.parameters.type ?? 'lowpass';
        filterNode.frequency.value = effect.parameters.frequency ?? 1000;
        filterNode.Q.value = effect.parameters.Q ?? 1;
        return filterNode;
      }
      case 'reverb':
        return await this._createReverbNode(effect.parameters);
      case 'echo':
        return this._createEchoNode(effect.parameters);
      default:
        console.warn(`Unknown effect type: ${effect.type}`);
        return null;
    }
  }

  /**
   * Convolution reverb using a synthetic exponentially-decaying noise impulse.
   * @param {{duration?: number, decay?: number}} parameters - seconds / decay exponent
   * @returns {Promise<ConvolverNode|null>} null on failure (logged, non-fatal)
   */
  async _createReverbNode(parameters) {
    if (!this._audioContext) {
      return null;
    }
    try {
      const convolver = this._audioContext.createConvolver();
      const length = this._audioContext.sampleRate * (parameters.duration ?? 2);
      const impulse = this._audioContext.createBuffer(2, length, this._audioContext.sampleRate);
      const decay = parameters.decay ?? 0.5;
      for (let channel = 0; channel < 2; channel++) {
        const channelData = impulse.getChannelData(channel);
        for (let i = 0; i < length; i++) {
          channelData[i] = (Math.random() * 2 - 1) * Math.pow(1 - i / length, decay);
        }
      }
      convolver.buffer = impulse;
      return convolver;
    } catch (error) {
      console.warn('Failed to create reverb effect:', error);
      return null;
    }
  }

  /**
   * Feedback-delay echo. Returns an { input, output } composite:
   *   input -> output                 (dry path)
   *   input -> delay <-> feedback     (echo loop)
   *   delay -> wet -> output          (wet mix)
   * The previous implementation returned only the output gain, so the delay
   * never received any input and the effect was silent.
   * @param {{delay?: number, feedback?: number, wet?: number}} parameters
   * @returns {{input: GainNode, output: GainNode}|null} null on failure
   */
  _createEchoNode(parameters) {
    if (!this._audioContext) {
      return null;
    }
    try {
      const input = this._audioContext.createGain();
      const output = this._audioContext.createGain();
      const delay = this._audioContext.createDelay();
      const feedback = this._audioContext.createGain();
      const wet = this._audioContext.createGain();
      delay.delayTime.value = parameters.delay ?? 0.3;
      feedback.gain.value = parameters.feedback ?? 0.3;
      wet.gain.value = parameters.wet ?? 0.5;
      input.connect(output);
      input.connect(delay);
      delay.connect(feedback);
      feedback.connect(delay);
      delay.connect(wet);
      wet.connect(output);
      return { input, output };
    } catch (error) {
      console.warn('Failed to create echo effect:', error);
      return null;
    }
  }

  /** Start the per-frame analysis loop. No-op if already running or not initialized. */
  startProcessing() {
    if (this._isProcessing || !this._analyserNode) {
      return;
    }
    this._isProcessing = true;
    this._processAudioFrame();
    console.log('Real-time processing started');
  }

  /** Stop the analysis loop and cancel any pending animation frame. */
  stopProcessing() {
    this._isProcessing = false;
    if (this._animationFrame) {
      cancelAnimationFrame(this._animationFrame);
      this._animationFrame = null;
    }
    console.log('Real-time processing stopped');
  }

  /**
   * One iteration of the analysis loop: read analyser data, compute RMS level,
   * run silence detection, and invoke registered callbacks. Re-schedules itself
   * via requestAnimationFrame while processing is active.
   */
  _processAudioFrame() {
    if (!this._isProcessing || !this._analyserNode || !this._audioContext) {
      return;
    }
    const bufferLength = this._analyserNode.frequencyBinCount;
    const dataArray = new Uint8Array(bufferLength);
    const floatArray = new Float32Array(bufferLength);
    this._analyserNode.getByteFrequencyData(dataArray);
    this._analyserNode.getFloatFrequencyData(floatArray);
    let sum = 0;
    for (let i = 0; i < bufferLength; i++) {
      sum += dataArray[i] * dataArray[i];
    }
    const rms = Math.sqrt(sum / bufferLength);
    const level = rms / 255; // normalize byte-range RMS to 0..1
    const dbLevel = 20 * Math.log10(level + 0.0001); // epsilon avoids log10(0)
    let isSilence = false;
    if (this._options.enableSilenceDetection) {
      // ?? (not ||) so an explicit threshold of 0 dB is honored.
      isSilence = dbLevel < (this._options.silenceThresholdDb ?? -50);
      if (this._callbacks.onSilenceDetected) {
        this._callbacks.onSilenceDetected(isSilence);
      }
    }
    // Throttle level updates to levelUpdateInterval ms instead of every frame.
    const now = Date.now();
    if (this._options.enableLevelMonitoring &&
        this._callbacks.onLevelUpdate &&
        now - this._lastLevelEmit >= (this._options.levelUpdateInterval ?? 100)) {
      this._lastLevelEmit = now;
      this._callbacks.onLevelUpdate(level);
    }
    const realtimeData = {
      audioData: floatArray,
      sampleRate: this._audioContext.sampleRate,
      channels: 1,
      level,
      isSilence,
      timestamp: now,
    };
    // Sampled (~1%) diagnostic logging to avoid flooding the console at 60 fps.
    if (Math.random() < 0.01) {
      console.log('Processing audio frame:', {
        level: level.toFixed(3),
        dbLevel: dbLevel.toFixed(1),
        isSilence,
        bufferLength,
        hasCallbacks: !!this._callbacks.onAudioData,
      });
    }
    if (this._callbacks.onAudioData) {
      this._callbacks.onAudioData(realtimeData);
    }
    this._animationFrame = requestAnimationFrame(() => this._processAudioFrame());
  }

  /** Register a per-frame callback receiving { audioData, sampleRate, level, ... }. */
  onAudioData(callback) {
    this._callbacks.onAudioData = callback;
  }

  /** Register a per-frame callback receiving a boolean silence flag. */
  onSilenceDetected(callback) {
    this._callbacks.onSilenceDetected = callback;
  }

  /** Register a throttled callback receiving the normalized (0..1) level. */
  onLevelUpdate(callback) {
    this._callbacks.onLevelUpdate = callback;
  }

  /**
   * Merge new options; rebuilds the processing chain when the effect list changes.
   * @param {Object} options - partial options (same shape as the constructor's)
   */
  updateOptions(options) {
    this._options = { ...this._options, ...options };
    if (options.effects && this._audioContext) {
      // Fire-and-forget rebuild, but never leave the rejection unhandled.
      this._rebuildProcessingChain().catch((error) => {
        console.warn('Failed to rebuild processing chain after options update:', error);
      });
    }
  }

  /**
   * Enable or disable the effects section of the chain and rewire accordingly.
   * @param {boolean} enabled
   */
  setEffectsEnabled(enabled) {
    this._options.enableEffects = enabled;
    console.log(`Effects ${enabled ? 'enabled' : 'disabled'}. Rebuilding processing chain.`);
    this._rebuildProcessingChain().catch((error) => {
      console.warn('Failed to rebuild processing chain for effects toggle:', error);
    });
  }

  /** Disconnect and forget every effect node (plain or composite). */
  _disconnectEffects() {
    for (const node of this._effectNodes.values()) {
      try {
        (node.input ?? node).disconnect();
        if (node.output) {
          node.output.disconnect();
        }
      } catch (error) {
        // Node was already disconnected; safe to ignore.
      }
    }
    this._effectNodes.clear();
  }

  /**
   * Tear down and re-wire the full graph:
   * source -> gain -> filter -> [effects] -> analyser, and the chain tail
   * also feeds the output gain for monitoring.
   */
  async _rebuildProcessingChain() {
    if (!this._audioContext || !this._sourceNode || !this._gainNode || !this._filterNode || !this._analyserNode || !this._outputGainNode) {
      console.warn('Cannot rebuild chain: essential nodes are missing.');
      return;
    }
    console.log('Disconnecting all nodes before rebuilding...');
    this._sourceNode.disconnect();
    this._gainNode.disconnect();
    this._filterNode.disconnect();
    this._analyserNode.disconnect();
    this._disconnectEffects();
    let currentNode = this._sourceNode;
    currentNode.connect(this._gainNode);
    currentNode = this._gainNode;
    currentNode.connect(this._filterNode);
    currentNode = this._filterNode;
    if (this._options.enableEffects) {
      console.log('Applying effects...');
      currentNode = await this._applyEffects(currentNode);
    }
    currentNode.connect(this._analyserNode);
    currentNode.connect(this._outputGainNode);
    console.log('Processing chain rebuilt successfully.');
  }

  /**
   * Set the input (pre-effect) gain, clamped to [0, 2].
   * @param {number} volume
   */
  setVolume(volume) {
    if (this._gainNode) {
      this._gainNode.gain.value = Math.max(0, Math.min(2, volume));
    }
  }

  /**
   * Set the monitor (speaker) gain, clamped to [0, 1]. Remembered for unmute.
   * @param {number} volume
   */
  setOutputVolume(volume) {
    this._outputVolume = Math.max(0, Math.min(1, volume));
    if (this._outputGainNode) {
      this._outputGainNode.gain.value = this._outputVolume;
    }
  }

  /**
   * Mute/unmute monitoring. Unmuting restores the last volume set via
   * setOutputVolume (previously this reset to a hard-coded 0.3).
   * @param {boolean} muted
   */
  setOutputMuted(muted) {
    if (this._outputGainNode) {
      this._outputGainNode.gain.value = muted ? 0 : this._outputVolume;
    }
  }

  /**
   * Reconfigure the permanent biquad filter in the chain.
   * @param {BiquadFilterType} type
   * @param {number} frequency - Hz
   * @param {number} [Q=1]
   */
  setFilter(type, frequency, Q = 1) {
    if (this._filterNode) {
      this._filterNode.type = type;
      this._filterNode.frequency.value = frequency;
      this._filterNode.Q.value = Q;
    }
  }

  /**
   * Enable/disable one configured effect by type and rewire the chain.
   * @param {string} effectType
   * @param {boolean} enabled
   */
  toggleEffect(effectType, enabled) {
    const effect = this._options.effects?.find((e) => e.type === effectType);
    if (effect) {
      effect.enabled = enabled;
      this._rebuildProcessingChain().catch((error) => {
        console.warn('Failed to rebuild processing chain after effect toggle:', error);
      });
    }
  }

  /**
   * Synchronously compute the current normalized RMS level (0..1).
   * @returns {number} 0 when not initialized
   */
  getCurrentLevel() {
    if (!this._analyserNode) {
      return 0;
    }
    const bufferLength = this._analyserNode.frequencyBinCount;
    const dataArray = new Uint8Array(bufferLength);
    this._analyserNode.getByteFrequencyData(dataArray);
    let sum = 0;
    for (let i = 0; i < bufferLength; i++) {
      sum += dataArray[i] * dataArray[i];
    }
    const rms = Math.sqrt(sum / bufferLength);
    return rms / 255;
  }

  /**
   * Snapshot of the byte frequency spectrum.
   * @returns {Uint8Array|null} null when not initialized
   */
  getFrequencyData() {
    if (!this._analyserNode) {
      return null;
    }
    const bufferLength = this._analyserNode.frequencyBinCount;
    const dataArray = new Uint8Array(bufferLength);
    this._analyserNode.getByteFrequencyData(dataArray);
    return dataArray;
  }

  /**
   * Snapshot of the time-domain waveform.
   * @returns {Uint8Array|null} null when not initialized
   */
  getWaveformData() {
    if (!this._analyserNode) {
      return null;
    }
    const bufferLength = this._analyserNode.frequencyBinCount;
    const dataArray = new Uint8Array(bufferLength);
    this._analyserNode.getByteTimeDomainData(dataArray);
    return dataArray;
  }

  /**
   * Get a MediaStream carrying the processed audio (post-analyser).
   * The destination is created once and cached so repeated calls do not
   * stack additional analyser connections.
   * @returns {MediaStream|null} null when not initialized or on failure
   */
  createProcessedStream() {
    if (!this._audioContext || !this._analyserNode) {
      return null;
    }
    try {
      if (!this._streamDestination) {
        this._streamDestination = this._audioContext.createMediaStreamDestination();
        this._analyserNode.connect(this._streamDestination);
      }
      return this._streamDestination.stream;
    } catch (error) {
      console.warn('Failed to create processed stream:', error);
      return null;
    }
  }

  /**
   * Describe runtime capabilities and current state. Safe to call in
   * environments without a global `window` (the previous version threw a
   * ReferenceError there).
   * @returns {Object}
   */
  getCapabilities() {
    return {
      isSupported: typeof AudioContext !== 'undefined' ||
        (typeof window !== 'undefined' && typeof window.webkitAudioContext !== 'undefined'),
      hasAnalyser: this._analyserNode !== null,
      isProcessing: this._isProcessing,
      supportedEffects: ['gain', 'filter', 'reverb', 'echo'],
      sampleRate: this._audioContext?.sampleRate,
      currentOptions: { ...this._options },
    };
  }

  /**
   * Stop processing, disconnect every node, close the AudioContext, and
   * clear all references/callbacks. Safe to call multiple times.
   */
  dispose() {
    this.stopProcessing();
    // Best-effort disconnects: nodes may already be detached.
    for (const node of [this._sourceNode, this._gainNode, this._outputGainNode, this._filterNode, this._analyserNode]) {
      if (node) {
        try {
          node.disconnect();
        } catch (error) {
          // Already disconnected; ignore.
        }
      }
    }
    this._disconnectEffects();
    if (this._audioContext && this._audioContext.state !== 'closed') {
      // close() returns a promise; swallow rejection so dispose never throws.
      this._audioContext.close().catch(() => {});
    }
    this._audioContext = null;
    this._sourceNode = null;
    this._analyserNode = null;
    this._gainNode = null;
    this._outputGainNode = null;
    this._filterNode = null;
    this._streamDestination = null;
    this._callbacks = {};
    console.log('RealtimeAudioProcessor disposed');
  }
}
//# sourceMappingURL=RealtimeAudioProcessor.js.map