// @snap/camera-kit (Camera Kit Web): MediaStreamSource.js
import { __awaiter } from "tslib";
import { Subject, map } from "rxjs";
import { copyDefinedProperties } from "../common/copyDefinedProperties";
import { ensureError, stringifyErrorMessage } from "../common/errorHelpers";
import { Transform2D } from "../transforms/Transform2D";
import { debounceTimeAfter } from "../observable-operators/debounceTimeAfter";
import { CameraKitSource, defaultDeviceInfo } from "./CameraKitSource";
const defaultOptions = Object.assign(Object.assign({}, defaultDeviceInfo), { transform: Transform2D.Identity, disableSourceAudio: false });
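// Detach an AudioWorkletNode: close its message port, drop the message
// handler, and disconnect it from the audio graph.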
function closeWorklet(worklet) {
if (!worklet)
return;
worklet.port.close();
worklet.port.onmessage = null;
worklet.disconnect();
}
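// Close an AudioContext, tolerating a missing or already-closed context.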
function closeAudioContext(audioContext) {
return __awaiter(this, void 0, void 0, function* () {
if (!audioContext || audioContext.state === "closed")
return;
return audioContext.close();
});
}
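// Report the first audio processing error immediately; errors raised within
// the following one-second window are collapsed into a single deduplicated
// report so a failing pipeline does not flood the error reporter.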
function handleAudioProcessingErrors(errors, reportError) {
return errors
.pipe(debounceTimeAfter(1, 1000), map((event) => {
if (event.type === "initial") {
reportError(new Error("The first audio processing error before debouncing.", { cause: event.value }));
}
else if (event.type === "debounced") {
const errorMessages = [...new Set(event.values.map(stringifyErrorMessage))].join("\n");
reportError(new Error(`Debounced ${event.values.length} audio processing errors.`, {
cause: new Error(errorMessages),
}));
}
}))
.subscribe();
}
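// Wrap a MediaStream in a CameraKitSource. The camera type is detected from
// the video track's facingMode unless overridden via options, and microphone
// audio (when present and not disabled) is forwarded to LensCore through an
// AudioWorklet.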
export function createMediaStreamSource(stream, options = {}) {
var _a;
const { facingMode } = stream.getVideoTracks().length > 0 ? stream.getVideoTracks()[0].getSettings() : { facingMode: undefined };
const detectedCameraType = facingMode === "user" || facingMode === "environment" ? facingMode : undefined;
const optionsWithDefaults = Object.assign(Object.assign(Object.assign({}, defaultOptions), copyDefinedProperties(options)), { cameraType: (_a = options.cameraType) !== null && _a !== void 0 ? _a : detectedCameraType });
const enableSourceAudio = stream.getAudioTracks().length > 0 && !optionsWithDefaults.disableSourceAudio;
const simulateStereoAudio = true;
const sampleRate = 44100;
let audioContext = undefined;
let audioSource = undefined;
let worklet = undefined;
let microphoneRecorderUrl;
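// Build the AudioWorklet processor source inline and serve it from a Blob URL
// so no separate worklet file needs to be shipped. The processor simply posts
// each block of input samples back to the main thread.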
if (enableSourceAudio) {
const microphoneRecorderWorklet = `
class MicrophoneWorkletProcessor extends AudioWorkletProcessor {
process(inputs, outputs, parameters) {
this.port.postMessage({
eventType: 'data',
buffer: inputs
});
return true;
}
}
registerProcessor('microphone-worklet', MicrophoneWorkletProcessor);`;
const microphoneRecorderBlob = new Blob([microphoneRecorderWorklet], {
type: "application/javascript",
});
microphoneRecorderUrl = URL.createObjectURL(microphoneRecorderBlob);
}
let audioProcessingErrorSubscription = undefined;
return new CameraKitSource({ media: stream }, {
onAttach: (source, lensCore, reportError) => __awaiter(this, void 0, void 0, function* () {
yield source.setTransform(optionsWithDefaults.transform);
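// Forward microphone audio to LensCore when the stream has an audio track
// and source audio has not been disabled.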
if (enableSourceAudio) {
const audioProcessingErrors = new Subject();
audioProcessingErrorSubscription = handleAudioProcessingErrors(audioProcessingErrors, reportError);
yield lensCore.setAudioParameters({
parameters: {
numChannels: simulateStereoAudio ? 2 : 1,
sampleRate,
},
});
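// Tear down any audio graph left over from a previous attachment before
// building a new one.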
try {
closeWorklet(worklet);
audioSource === null || audioSource === void 0 ? void 0 : audioSource.disconnect();
yield closeAudioContext(audioContext);
}
catch (error) {
reportError(ensureError(error));
}
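// Create a fresh audio graph for this attachment.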
// Pass sampleRate explicitly so the context matches the rate given to LensCore.
audioContext = new AudioContext({ sampleRate });
audioSource = audioContext.createMediaStreamSource(stream);
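// Keep a non-undefined reference for use inside the async callback below.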
const scopedAudioSource = audioSource;
audioContext.audioWorklet
.addModule(microphoneRecorderUrl)
.then(() => {
if (audioContext) {
worklet = new AudioWorkletNode(audioContext, "microphone-worklet");
scopedAudioSource.connect(worklet);
worklet.connect(audioContext.destination);
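// Each message from the worklet carries one render quantum of input samples.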
worklet.port.onmessage = (e) => {
if (e.data.eventType === "data") {
const leftSamples = e.data.buffer[0][0];
if (!leftSamples)
return;
let inputBuffers = [leftSamples];
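// LensCore was configured for two channels above: use the real right
// channel when the input has one, otherwise duplicate the left channel.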
if (simulateStereoAudio) {
const rightSamples = e.data.buffer[0].length > 1 ? e.data.buffer[0][1] : leftSamples.slice();
inputBuffers.push(rightSamples);
}
lensCore
.processAudioSampleBuffer({ input: inputBuffers })
.catch((error) => audioProcessingErrors.next(error));
}
};
}
})
.catch((error) => {
reportError(error);
});
}
}),
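// Release the worklet, the media stream source node, the audio context, and
// the error subscription when the source is detached.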
onDetach: (reportError) => __awaiter(this, void 0, void 0, function* () {
if (worklet) {
closeWorklet(worklet);
worklet = undefined;
}
if (audioSource) {
audioSource.disconnect();
audioSource = undefined;
}
if (audioContext) {
yield closeAudioContext(audioContext).catch(reportError);
audioContext = undefined;
}
if (audioProcessingErrorSubscription) {
audioProcessingErrorSubscription.unsubscribe();
audioProcessingErrorSubscription = undefined;
}
}),
}, optionsWithDefaults);
}
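// Usage sketch (illustrative, not part of this module): assumes a Camera Kit
// session created elsewhere, e.g. via bootstrapCameraKit() and
// cameraKit.createSession(); the `session` name below is an assumption.
//
//   const stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
//   const source = createMediaStreamSource(stream, { cameraType: "user" });
//   await session.setSource(source);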
//# sourceMappingURL=MediaStreamSource.js.map