extendable-media-recorder
An extendable drop-in replacement for the native MediaRecorder.
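A minimal usage sketch for the package as a whole. This is an assumption based on the package description, not something defined in this file: the MediaRecorder and register exports and the separate extendable-media-recorder-wav-encoder package (with its connect export) are the documented public surface, whereas the module below is internal.

import { MediaRecorder, register } from 'extendable-media-recorder';
import { connect } from 'extendable-media-recorder-wav-encoder';

// Register a WAV encoder once, before any recorder is created.
await register(await connect());

const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const mediaRecorder = new MediaRecorder(stream, { mimeType: 'audio/wav' });

mediaRecorder.addEventListener('dataavailable', ({ data }) => {
    // data is a Blob containing the encoded audio.
});
mediaRecorder.start();

The rest of this page is the source of web-audio-media-recorder.js, the package's Web Audio based recorder factory.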
import { encode, instantiate } from 'media-encoder-host';
import { addRecorderAudioWorkletModule, createRecorderAudioWorkletNode } from 'recorder-audio-worklet';
import { AudioBuffer, AudioBufferSourceNode, AudioWorkletNode, MediaStreamAudioSourceNode, MinimalAudioContext, addAudioWorkletModule } from 'standardized-audio-context';
const ERROR_MESSAGE = 'Missing AudioWorklet support. Maybe this is not running in a secure context.';
// @todo This should live in a separate file.
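// Instantiates an encoder for the given mimeType at the context's sample rate and creates the
// audio nodes needed for a recording: a source node for the short silent buffer that is played
// before recording starts, a source node for the MediaStream and the recorder worklet node.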
const createPromisedAudioNodesEncoderInstanceIdAndPort = async (audioBuffer, audioContext, channelCount, mediaStream, mimeType) => {
const { encoderInstanceId, port } = await instantiate(mimeType, audioContext.sampleRate);
if (AudioWorkletNode === undefined) {
throw new Error(ERROR_MESSAGE);
}
const audioBufferSourceNode = new AudioBufferSourceNode(audioContext, { buffer: audioBuffer });
const mediaStreamAudioSourceNode = new MediaStreamAudioSourceNode(audioContext, { mediaStream });
const recorderAudioWorkletNode = createRecorderAudioWorkletNode(AudioWorkletNode, audioContext, { channelCount });
return { audioBufferSourceNode, encoderInstanceId, mediaStreamAudioSourceNode, port, recorderAudioWorkletNode };
};
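// Builds a MediaRecorder-like recorder on top of the Web Audio API: the MediaStream is fed into a
// recorder AudioWorklet whose output is handed to an encoder instantiated via media-encoder-host
// (the encoder's MessagePort is passed to the worklet node when recording starts).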
export const createWebAudioMediaRecorderFactory = (createBlobEvent, createInvalidModificationError, createInvalidStateError, createNotSupportedError) => {
return (eventTarget, mediaStream, mimeType) => {
const sampleRate = mediaStream.getAudioTracks()[0]?.getSettings().sampleRate;
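// Create a dedicated context that matches the track's sample rate (if known) and allocate a short
// silent buffer of at least 1024 frames or one base-latency block, whichever is larger. It gets
// played through the recorder node in start(); recording only begins once it has ended.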
const audioContext = new MinimalAudioContext({ latencyHint: 'playback', sampleRate });
const length = Math.max(1024, Math.ceil(audioContext.baseLatency * audioContext.sampleRate));
const audioBuffer = new AudioBuffer({ length, sampleRate: audioContext.sampleRate });
const bufferedArrayBuffers = [];
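// Load the recorder worklet processor into the context right away; start() waits for this promise
// before wiring up the audio graph.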
const promisedAudioWorkletModule = addRecorderAudioWorkletModule((url) => {
if (addAudioWorkletModule === undefined) {
throw new Error(ERROR_MESSAGE);
}
return addAudioWorkletModule(audioContext, url);
});
let abortRecording = null;
let intervalId = null;
let promisedAudioNodesAndEncoderInstanceId = null;
let promisedPartialRecording = null;
let isAudioContextRunning = true;
const dispatchDataAvailableEvent = (arrayBuffers) => {
eventTarget.dispatchEvent(createBlobEvent('dataavailable', { data: new Blob(arrayBuffers, { type: mimeType }) }));
};
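// Fetches the next chunk of encoded data for the current timeslice and reschedules itself. Chunks
// that arrive after the recording has been stopped are buffered and emitted together with the
// final dataavailable event instead.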
const requestNextPartialRecording = async (encoderInstanceId, timeslice) => {
const arrayBuffers = await encode(encoderInstanceId, timeslice);
if (promisedAudioNodesAndEncoderInstanceId === null) {
bufferedArrayBuffers.push(...arrayBuffers);
}
else {
dispatchDataAvailableEvent(arrayBuffers);
promisedPartialRecording = requestNextPartialRecording(encoderInstanceId, timeslice);
}
};
const resume = () => {
isAudioContextRunning = true;
return audioContext.resume();
};
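// Stops the recording: removes the track listeners and the polling interval, stops the worklet
// node, flushes the encoder and emits the remaining data followed by a stop event.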
const stop = () => {
if (promisedAudioNodesAndEncoderInstanceId === null) {
return;
}
if (abortRecording !== null) {
mediaStream.removeEventListener('addtrack', abortRecording);
mediaStream.removeEventListener('removetrack', abortRecording);
}
if (intervalId !== null) {
clearInterval(intervalId);
}
promisedAudioNodesAndEncoderInstanceId.then(async ({ encoderInstanceId, mediaStreamAudioSourceNode, recorderAudioWorkletNode }) => {
if (promisedPartialRecording !== null) {
promisedPartialRecording.catch(() => {
/* @todo Only catch the errors caused by a duplicate call to encode. */
});
promisedPartialRecording = null;
}
await recorderAudioWorkletNode.stop();
mediaStreamAudioSourceNode.disconnect(recorderAudioWorkletNode);
const arrayBuffers = await encode(encoderInstanceId, null);
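// Only suspend the context if no new recording has been started in the meantime.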
if (promisedAudioNodesAndEncoderInstanceId === null) {
await suspend();
}
dispatchDataAvailableEvent([...bufferedArrayBuffers, ...arrayBuffers]);
bufferedArrayBuffers.length = 0;
eventTarget.dispatchEvent(new Event('stop'));
});
promisedAudioNodesAndEncoderInstanceId = null;
};
const suspend = () => {
isAudioContextRunning = false;
return audioContext.suspend();
};
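// The context is kept suspended whenever nothing is being recorded; it is resumed in start() and resume().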
suspend();
return {
get mimeType() {
return mimeType;
},
get state() {
return promisedAudioNodesAndEncoderInstanceId === null ? 'inactive' : isAudioContextRunning ? 'recording' : 'paused';
},
pause() {
if (promisedAudioNodesAndEncoderInstanceId === null) {
throw createInvalidStateError();
}
if (isAudioContextRunning) {
suspend();
eventTarget.dispatchEvent(new Event('pause'));
}
},
resume() {
if (promisedAudioNodesAndEncoderInstanceId === null) {
throw createInvalidStateError();
}
if (!isAudioContextRunning) {
resume();
eventTarget.dispatchEvent(new Event('resume'));
}
},
start(timeslice) {
if (promisedAudioNodesAndEncoderInstanceId !== null) {
throw createInvalidStateError();
}
if (mediaStream.getVideoTracks().length > 0) {
throw createNotSupportedError();
}
eventTarget.dispatchEvent(new Event('start'));
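// Derive the channel count from the first audio track's settings, defaulting to stereo.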
const audioTracks = mediaStream.getAudioTracks();
const channelCount = audioTracks.length === 0 ? 2 : (audioTracks[0].getSettings().channelCount ?? 2);
promisedAudioNodesAndEncoderInstanceId = Promise.all([
resume(),
promisedAudioWorkletModule.then(() => createPromisedAudioNodesEncoderInstanceIdAndPort(audioBuffer, audioContext, channelCount, mediaStream, mimeType))
]).then(async ([, { audioBufferSourceNode, encoderInstanceId, mediaStreamAudioSourceNode, port, recorderAudioWorkletNode }]) => {
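// Wire up the graph and play the silent buffer into the recorder node; the actual recording
// only starts once that buffer has finished playing.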
mediaStreamAudioSourceNode.connect(recorderAudioWorkletNode);
await new Promise((resolve) => {
audioBufferSourceNode.onended = resolve;
audioBufferSourceNode.connect(recorderAudioWorkletNode);
audioBufferSourceNode.start(audioContext.currentTime + length / audioContext.sampleRate);
});
audioBufferSourceNode.disconnect(recorderAudioWorkletNode);
await recorderAudioWorkletNode.record(port);
if (timeslice !== undefined) {
promisedPartialRecording = requestNextPartialRecording(encoderInstanceId, timeslice);
}
return { encoderInstanceId, mediaStreamAudioSourceNode, recorderAudioWorkletNode };
});
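// Abort the recording whenever the stream's track list changes. The addtrack/removetrack
// listeners cover mutations that fire events, while the 1 second poll below catches changes that don't.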
const tracks = mediaStream.getTracks();
abortRecording = () => {
stop();
eventTarget.dispatchEvent(new ErrorEvent('error', { error: createInvalidModificationError() }));
};
mediaStream.addEventListener('addtrack', abortRecording);
mediaStream.addEventListener('removetrack', abortRecording);
intervalId = setInterval(() => {
const currentTracks = mediaStream.getTracks();
if ((currentTracks.length !== tracks.length || currentTracks.some((track, index) => track !== tracks[index])) &&
abortRecording !== null) {
abortRecording();
}
}, 1000);
},
stop
};
};
};
//# sourceMappingURL=web-audio-media-recorder.js.map