@trap_stevo/legendarybuilderproreact-ui
The legendary UI & utility API that makes your application a legendary application. ~ Created by Steven Compton
import { useState, useEffect, useRef } from "react";
import lamejs from "@breezystack/lamejs";
import { BlobToBase64 } from "./HUDUniversalHUDUtilityManager.js";
// Convert a recorded WebM blob into a WAV blob by decoding it to PCM
// and re-packaging the samples with a RIFF/WAVE header.
export async function convertWebMToWav(webmBlob) {
  const arrayBuffer = await webmBlob.arrayBuffer();
  // Safari still exposes the prefixed constructor.
  const audioContext = new (window.AudioContext || window.webkitAudioContext)();
  try {
    // Decode the compressed WebM audio into a raw PCM AudioBuffer.
    const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
    const wavBuffer = audioBufferToWav(audioBuffer);
    return new Blob([wavBuffer], { type: "audio/wav" });
  } finally {
    // Release the AudioContext so repeated conversions do not leak audio resources.
    await audioContext.close();
  }
}
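/*
 Usage sketch for convertWebMToWav. `recordedWebMBlob` is a hypothetical
 Blob, e.g. one assembled from MediaRecorder chunks:

   const wavBlob = await convertWebMToWav(recordedWebMBlob);
   const url = URL.createObjectURL(wavBlob);
   new Audio(url).play();
*/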
// Serialize an AudioBuffer as a 16-bit PCM WAV file in an ArrayBuffer.
export function audioBufferToWav(buffer) {
  const numOfChan = buffer.numberOfChannels;
  // 44-byte RIFF/WAVE header plus two bytes per sample per channel.
  const length = buffer.length * numOfChan * 2 + 44;
  const bufferArray = new ArrayBuffer(length);
  const view = new DataView(bufferArray);
  const channels = [];
  const sampleRate = buffer.sampleRate;
  let offset = 0;
  let pos = 0;
  function writeUint16(data) {
    view.setUint16(pos, data, true);
    pos += 2;
  }
  function writeUint32(data) {
    view.setUint32(pos, data, true);
    pos += 4;
  }
  // RIFF container header.
  writeUint32(0x46464952);                 // "RIFF"
  writeUint32(length - 8);                 // file length minus the 8-byte RIFF preamble
  writeUint32(0x45564157);                 // "WAVE"
  // "fmt " sub-chunk describing the PCM stream.
  writeUint32(0x20746d66);                 // "fmt "
  writeUint32(16);                         // sub-chunk size for linear PCM
  writeUint16(1);                          // audio format 1 = uncompressed PCM
  writeUint16(numOfChan);
  writeUint32(sampleRate);
  writeUint32(sampleRate * 2 * numOfChan); // byte rate
  writeUint16(numOfChan * 2);              // block align
  writeUint16(16);                         // bits per sample
  // "data" sub-chunk holding the interleaved samples.
  writeUint32(0x61746164);                 // "data"
  writeUint32(length - pos - 4);           // data chunk length
  for (let i = 0; i < numOfChan; i++) {
    channels.push(buffer.getChannelData(i));
  }
  // Interleave the channels, clamping each float sample and scaling
  // it into signed 16-bit range with rounding.
  while (pos < length) {
    for (let i = 0; i < numOfChan; i++) {
      let sample = Math.max(-1, Math.min(1, channels[i][offset]));
      sample = (0.5 + sample * 32767) | 0;
      view.setInt16(pos, sample, true);
      pos += 2;
    }
    offset++;
  }
  return bufferArray;
}
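/*
 Usage sketch for audioBufferToWav, inside an async function, given a
 hypothetical `arrayBuffer` of compressed audio:

   const ctx = new (window.AudioContext || window.webkitAudioContext)();
   const audioBuffer = await ctx.decodeAudioData(arrayBuffer);
   const wavBlob = new Blob([audioBufferToWav(audioBuffer)], { type: "audio/wav" });
*/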
// Encode an AudioBuffer to MP3 with lamejs, returning the blob and the
// bitrate that was actually used.
export function audioBufferToMP3(audioBuffer, inputBitrate = null) {
  // Fall back to a heuristic bitrate when the caller does not supply one.
  const bitrate = inputBitrate || detectBitrate(audioBuffer);
  const numChannels = audioBuffer.numberOfChannels;
  const sampleRate = audioBuffer.sampleRate;
  const mp3Encoder = new lamejs.Mp3Encoder(numChannels, sampleRate, bitrate);
  // lamejs expects signed 16-bit PCM, so clamp and scale the Web Audio floats.
  const convertTo16BitPCM = (channelData) => {
    const pcm = new Int16Array(channelData.length);
    for (let i = 0; i < channelData.length; i++) {
      const sample = Math.max(-1, Math.min(1, channelData[i]));
      pcm[i] = sample < 0 ? sample * 0x8000 : sample * 0x7FFF;
    }
    return pcm;
  };
  const leftChannel = convertTo16BitPCM(audioBuffer.getChannelData(0));
  const rightChannel = numChannels > 1 ? convertTo16BitPCM(audioBuffer.getChannelData(1)) : null;
  const blockSize = 1152; // samples per MP3 frame
  const mp3Data = [];
  // Feed the encoder one frame-sized chunk at a time.
  for (let i = 0; i < leftChannel.length; i += blockSize) {
    const leftChunk = leftChannel.subarray(i, i + blockSize);
    const rightChunk = rightChannel ? rightChannel.subarray(i, i + blockSize) : null;
    const mp3Chunk = numChannels > 1 ? mp3Encoder.encodeBuffer(leftChunk, rightChunk) : mp3Encoder.encodeBuffer(leftChunk);
    if (mp3Chunk.length > 0) {
      mp3Data.push(mp3Chunk);
    }
  }
  // Flush whatever the encoder is still buffering.
  const mp3End = mp3Encoder.flush();
  if (mp3End.length > 0) {
    mp3Data.push(mp3End);
  }
  const mp3Blob = new Blob(mp3Data, {
    type: "audio/mp3" // widely accepted alias of the registered "audio/mpeg" type
  });
  return { mp3Blob, bitrate };
}
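/*
 Usage sketch for audioBufferToMP3, reusing the hypothetical decoded
 `audioBuffer` from above; the bitrate argument is optional:

   const { mp3Blob, bitrate } = audioBufferToMP3(audioBuffer);      // heuristic bitrate
   const { mp3Blob: hqBlob } = audioBufferToMP3(audioBuffer, 320);  // explicit 320 kbps
*/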
// Pick a reasonable MP3 bitrate from the buffer's sample rate,
// duration, and channel count.
export const detectBitrate = (audioBuffer) => {
  const numChannels = audioBuffer.numberOfChannels;
  const sampleRate = audioBuffer.sampleRate;
  const durationInSeconds = audioBuffer.length / sampleRate;
  // Start from a common default, raise it for high sample rates,
  // and trim it for short clips.
  let baseBitrate = 128;
  if (sampleRate >= 48000) {
    baseBitrate = 192;
  } else if (durationInSeconds < 60) {
    baseBitrate = 96;
  }
  // Stereo gets a little extra headroom.
  if (numChannels > 1) {
    baseBitrate += 32;
  }
  return baseBitrate;
};
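// Example: a 48 kHz stereo buffer yields 192 + 32 = 224 kbps, while a
// 44.1 kHz mono clip under a minute yields 96 kbps.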
// React hook that records microphone audio, converts the take to WAV on
// stop, and optionally streams live frequency/waveform data for visualizers.
export const useAudioRecorder = ({
  returnFrequencyData = false,
  returnWaveformData = false,
  returnAudioBase64Data = true
} = {}) => {
  const [recording, setRecording] = useState(false);
  const [frequencyData, setFrequencyData] = useState(null);
  const [waveformData, setWaveformData] = useState(null);
  const [audioBase64, setAudioBase64] = useState(null);
  const [audioBlob, setAudioBlob] = useState(null);
  const [audioUrl, setAudioUrl] = useState(null);
  const mediaRecorderRef = useRef(null);
  const audioContextRef = useRef(null);
  const analyserRef = useRef(null);
  const sourceRef = useRef(null);
  const streamRef = useRef(null);
  const chunksRef = useRef([]);
  const startRecording = async (onRecordingStop = null) => {
    try {
      // Revoke the previous take's object URL before starting a new one.
      if (audioUrl) {
        URL.revokeObjectURL(audioUrl);
        setAudioUrl(null);
      }
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      streamRef.current = stream;
      const audioContext = new (window.AudioContext || window.webkitAudioContext)();
      audioContextRef.current = audioContext;
      const mediaRecorder = new MediaRecorder(stream, { mimeType: "audio/webm" });
      mediaRecorderRef.current = mediaRecorder;
      chunksRef.current = [];
      // Only wire up an analyser when the caller asked for visualization data.
      if (returnFrequencyData || returnWaveformData) {
        const source = audioContext.createMediaStreamSource(stream);
        sourceRef.current = source;
        const analyser = audioContext.createAnalyser();
        analyser.fftSize = 1024;
        analyserRef.current = analyser;
        source.connect(analyser);
      }
      mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data);
        }
      };
      mediaRecorder.onstop = async () => {
        // Assemble the recorded chunks and convert the WebM take to WAV.
        const webMAudioBlob = new Blob(chunksRef.current, { type: "audio/webm" });
        const wavBlob = await convertWebMToWav(webMAudioBlob);
        const audioURL = URL.createObjectURL(wavBlob);
        setAudioBlob(wavBlob);
        setAudioUrl(audioURL);
        if (typeof onRecordingStop === "function") {
          let wavBase64 = null;
          if (returnAudioBase64Data) {
            wavBase64 = await BlobToBase64(wavBlob);
            setAudioBase64(wavBase64);
          }
          onRecordingStop(wavBlob, audioURL, wavBase64);
        }
      };
      mediaRecorder.start();
      setRecording(true);
    } catch (error) {
      console.log("Error accessing microphone ~", error);
    }
  };
  const stopRecording = () => {
    if (mediaRecorderRef.current) {
      mediaRecorderRef.current.stop();
      setRecording(false);
    }
    if (audioContextRef.current) {
      audioContextRef.current.close();
    }
    if (streamRef.current) {
      streamRef.current.getTracks().forEach((track) => track.stop());
    }
  };
  // While recording, poll the analyser every 50 ms and publish fresh
  // visualization frames as plain arrays.
  useEffect(() => {
    let interval;
    if (recording && analyserRef.current) {
      interval = setInterval(() => {
        if (returnFrequencyData) {
          const buffer = new Uint8Array(analyserRef.current.frequencyBinCount);
          analyserRef.current.getByteFrequencyData(buffer);
          setFrequencyData([...buffer]);
        }
        if (returnWaveformData) {
          const waveformBuffer = new Uint8Array(analyserRef.current.fftSize);
          analyserRef.current.getByteTimeDomainData(waveformBuffer);
          setWaveformData([...waveformBuffer]);
        }
      }, 50);
    }
    return () => clearInterval(interval);
  }, [recording, returnFrequencyData, returnWaveformData]);
  return {
    recording,
    audioUrl,
    audioBlob,
    audioBase64,
    frequencyData,
    waveformData,
    startRecording,
    stopRecording
  };
};
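/*
 Usage sketch for useAudioRecorder inside a component. `RecorderButton`
 is a hypothetical consumer, not part of this package:

   const RecorderButton = () => {
     const { recording, audioUrl, startRecording, stopRecording } =
       useAudioRecorder({ returnWaveformData: true });
     return (
       <>
         <button onClick={() => (recording ? stopRecording() : startRecording())}>
           {recording ? "Stop" : "Record"}
         </button>
         {audioUrl && <audio src={audioUrl} controls />}
       </>
     );
   };
*/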