/*
 * react-voice-recorder-pro (CommonJS build)
 * A React hook-based voice recording library with real-time audio
 * visualization and broad browser support.
 */
'use strict';
var react = require('react');
// Import React hooks
/**
* Custom hook for managing Web Audio API AudioContext lifecycle
*
* Key features:
* - Automatically manages AudioContext creation and cleanup
* - Real-time tracking of context execution state via statechange events
* - Provides resume functionality for iOS/Safari user gesture requirements
* - Supports webkitAudioContext for browser compatibility
*/
function useAudioContext() {
    // The live AudioContext instance (null until created on mount)
    const [audioContext, setAudioContext] = react.useState(null);
    // Whether the context is currently in the 'running' state
    const [isRunning, setIsRunning] = react.useState(false);
    // Stable handle on the context for cleanup and resume()
    const ctxRef = react.useRef(null);
    react.useEffect(() => {
        // Prefer the standard constructor, fall back to the WebKit-prefixed one
        const win = window;
        const AudioContextCtor = (window.AudioContext ??
            win.webkitAudioContext);
        const context = new AudioContextCtor();
        ctxRef.current = context;
        setAudioContext(context);
        setIsRunning(context.state === 'running');
        // Keep isRunning in sync with the context's execution state
        const handleStateChange = () => {
            setIsRunning(context.state === 'running');
        };
        context.addEventListener('statechange', handleStateChange);
        return () => {
            context.removeEventListener('statechange', handleStateChange);
            // close() may reject (e.g. already closed); safe to ignore here
            context.close().catch(() => { });
        };
    }, []); // create exactly one context per mounted component
    /**
     * Stable async function that resumes a suspended context.
     * Needed on iOS/Safari, where audio must be started from a user gesture.
     */
    const resume = react.useMemo(() => {
        return async () => {
            const context = ctxRef.current;
            if (!context)
                return;
            if (context.state !== 'running')
                await context.resume();
        };
    }, []);
    return { audioContext, isRunning, resume };
}
// Import React hooks
/**
* Custom hook that calculates RMS level (0..1) based on time-domain waveform of microphone stream
*
* Key features:
* - Configures AnalyserNode internally and polls in real-time with requestAnimationFrame
* - Provides basic analysis parameters like smoothing, fftSize for audio analysis quality control
* - Safely cleans up on stream/context changes or component unmount to prevent memory leaks
* - Accurate audio level measurement based on RMS (Root Mean Square)
*/
function useAudioMeter(params) {
    // Analysis parameters with defaults
    const { audioContext, stream, smoothing = 0.8, fftSize = 2048 } = params;
    // Current RMS level of the microphone signal, in the range 0..1
    const [level, setLevel] = react.useState(0);
    // Audio-graph nodes and the requestAnimationFrame id, held for teardown
    const sourceRef = react.useRef(null);
    const analyserRef = react.useRef(null);
    const rafRef = react.useRef(null);
    /**
     * Stable teardown: stops the polling loop, disconnects the audio nodes,
     * clears the refs, and zeroes the reported level.
     */
    const cleanup = react.useMemo(() => {
        return () => {
            if (rafRef.current)
                cancelAnimationFrame(rafRef.current);
            rafRef.current = null;
            if (analyserRef.current)
                analyserRef.current.disconnect();
            if (sourceRef.current)
                sourceRef.current.disconnect();
            analyserRef.current = null;
            sourceRef.current = null;
            setLevel(0);
        };
    }, []);
    react.useEffect(() => {
        // Nothing to analyze until both a context and a stream exist
        if (!audioContext || !stream)
            return;
        // Drop any graph left over from a previous context/stream pair
        cleanup();
        // Build the graph: microphone stream -> source node -> analyser
        const micSource = audioContext.createMediaStreamSource(stream);
        const analyser = audioContext.createAnalyser();
        analyser.smoothingTimeConstant = smoothing; // temporal smoothing of the analysis
        analyser.fftSize = fftSize;
        micSource.connect(analyser);
        sourceRef.current = micSource;
        analyserRef.current = analyser;
        // Reusable buffer for the time-domain (waveform) samples
        const samples = new Uint8Array(analyser.frequencyBinCount);
        // Runs every animation frame: read the waveform and compute its RMS
        const tick = () => {
            analyser.getByteTimeDomainData(samples);
            let energy = 0;
            for (const sample of samples) {
                // Byte samples are centered at 128; map to the -1..1 range
                const normalized = (sample - 128) / 128;
                energy += normalized * normalized;
            }
            // RMS = square root of the mean of the squared samples
            setLevel(Math.sqrt(energy / samples.length));
            // Schedule the next frame
            rafRef.current = requestAnimationFrame(tick);
        };
        // Kick off the polling loop
        rafRef.current = requestAnimationFrame(tick);
        // Tear down when inputs change or the component unmounts
        return cleanup;
    }, [audioContext, stream, smoothing, fftSize, cleanup]);
    return { level };
}
// Import React hooks
/**
* Custom hook for managing audio playback
*
* Key features:
* - Manages play/pause state of audio files
* - Provides HTML Audio element reference
* - Automatic pause handling on playback completion
* - Player state reset functionality
*/
function useAudioPlayer() {
    // Whether the attached audio element is currently playing
    const [isPlaying, setIsPlaying] = react.useState(false);
    // Ref the consumer attaches to an <audio> element
    const audioRef = react.useRef(null);
    /**
     * Toggles playback of the attached audio element.
     *
     * Fix: HTMLMediaElement.play() returns a Promise in modern browsers and
     * can reject (autoplay policy, invalid source). The original ignored the
     * rejection, leaving isPlaying stuck at true; here the flag is rolled
     * back when playback fails to start.
     */
    const playPause = react.useCallback(() => {
        const audio = audioRef.current;
        // Do nothing if no audio element is attached
        if (!audio)
            return;
        if (isPlaying) {
            // Pause if currently playing
            audio.pause();
            setIsPlaying(false);
        }
        else {
            // Optimistically mark as playing, then revert on rejection
            setIsPlaying(true);
            const playResult = audio.play();
            // Older browsers may return undefined instead of a Promise
            if (playResult && typeof playResult.catch === 'function') {
                playResult.catch(() => setIsPlaying(false));
            }
        }
    }, [isPlaying]); // recreated whenever isPlaying changes
    /**
     * onEnded handler: flips back to the paused state when playback finishes.
     * Attach as onEnded={handleAudioEnded} on the <audio> element.
     */
    const handleAudioEnded = react.useCallback(() => {
        setIsPlaying(false);
    }, []);
    /**
     * Resets the player state (pausing first if currently playing).
     * Called when a new recording starts.
     */
    const resetPlayer = react.useCallback(() => {
        if (isPlaying && audioRef.current) {
            audioRef.current.pause();
        }
        setIsPlaying(false);
    }, [isPlaying]);
    // Return player-related state and functions
    return {
        isPlaying, // Whether currently playing
        audioRef, // HTML Audio element reference
        playPause, // Play/pause toggle function
        resetPlayer, // Player state reset function
        handleAudioEnded, // Playback completion handler (used in component)
    };
}
// Import React hooks
/**
* Custom hook for audio recording based on MediaRecorder API
*
* Key features:
* - Provides start/stop/pause/resume recording functionality
* - Collects audio data chunks through dataavailable events
* - Creates final audio file in Blob format
* - Automatically cleans up previous recorder when stream changes to prevent memory leaks
* - Error handling and state management
*/
function useMediaRecorder(stream, // Media stream to record
options // MediaRecorder options (MIME type etc.)
) {
    // Recording state flags
    const [isRecording, setIsRecording] = react.useState(false);
    const [isPaused, setIsPaused] = react.useState(false);
    const [chunks, setChunks] = react.useState([]); // Recorded audio data chunks
    const [error, setError] = react.useState(null); // Error message
    // MediaRecorder instance (recreated whenever the stream changes)
    const recorderRef = react.useRef(null);
    // Mirror of `chunks` kept in a ref so stop() can read all chunks
    // synchronously. React state updates are asynchronous, so building the
    // Blob from the `chunks` state (or a closure over it) inside onstop
    // missed the final dataavailable chunk and could see a stale array.
    const chunksRef = react.useRef([]);
    // MIME type of the recording (default: 'audio/webm')
    const mimeType = options?.mimeType || 'audio/webm';
    react.useEffect(() => {
        // No stream: drop any recorder and exit
        if (!stream) {
            recorderRef.current = null;
            return;
        }
        try {
            const rec = new MediaRecorder(stream, { mimeType });
            // Collect data chunks: the ref receives them synchronously,
            // the state copy keeps consumers of `chunks` re-rendering.
            rec.ondataavailable = (e) => {
                if (e.data && e.data.size > 0) {
                    chunksRef.current.push(e.data);
                    setChunks((prev) => [...prev, e.data]);
                }
            };
            // Keep the state flags in sync with the recorder's lifecycle events
            rec.onstart = () => {
                setIsRecording(true);
            };
            rec.onpause = () => {
                setIsPaused(true);
            };
            rec.onresume = () => {
                setIsPaused(false);
            };
            rec.onstop = () => {
                setIsRecording(false);
                setIsPaused(false);
            };
            // Surface recording errors (the original had a ternary whose
            // branches were identical, so this is the same message)
            rec.onerror = () => {
                setError('Recording error');
            };
            recorderRef.current = rec;
        }
        catch (err) {
            // MediaRecorder creation failed (e.g. unsupported MIME type)
            setError(err instanceof Error ? err.message : 'MediaRecorder creation failed');
            recorderRef.current = null;
        }
        // Cleanup: stop any active recorder when the stream changes or on unmount
        return () => {
            const r = recorderRef.current;
            if (r && r.state !== 'inactive')
                r.stop();
            recorderRef.current = null;
        };
    }, [stream, mimeType]); // runs whenever stream or mimeType changes
    /**
     * Starts a new recording: clears the previous error and chunks, then
     * starts the recorder with a 100ms timeslice so data streams in while
     * recording instead of arriving in one chunk at the end.
     */
    const start = react.useCallback(() => {
        setError(null);
        setChunks([]);
        chunksRef.current = [];
        setIsPaused(false);
        const r = recorderRef.current;
        if (!r)
            return;
        // Only start when the recorder is idle
        if (r.state === 'inactive') {
            try {
                r.start(100);
            }
            catch (error) {
                setError('Recording start failed');
            }
        }
    }, []);
    /**
     * Pauses recording (only valid while actively recording).
     */
    const pause = react.useCallback(() => {
        const r = recorderRef.current;
        if (!r)
            return;
        if (r.state === 'recording') {
            r.pause();
        }
    }, []);
    /**
     * Resumes recording (only valid while paused).
     * State flag is updated via the onresume event.
     */
    const resume = react.useCallback(() => {
        const r = recorderRef.current;
        if (!r)
            return;
        if (r.state === 'paused') {
            r.resume();
        }
    }, []);
    /**
     * Stops the recording and resolves with the final audio Blob
     * (or null if there is no active recorder).
     */
    const stop = react.useCallback(() => {
        return new Promise((resolve) => {
            const r = recorderRef.current;
            if (!r)
                return resolve(null);
            if (r.state !== 'inactive') {
                // Replace the onstop handler: it must both reset the state
                // flags (the effect-installed handler is overwritten here —
                // the original forgot this, leaving isRecording stuck true)
                // and build the Blob. Read chunksRef so the chunk delivered
                // by the final dataavailable event (fired just before onstop)
                // is included.
                r.onstop = () => {
                    setIsRecording(false);
                    setIsPaused(false);
                    resolve(new Blob(chunksRef.current, { type: mimeType }));
                };
                r.stop();
            }
            else {
                // Already inactive: nothing to stop
                resolve(null);
            }
        });
    }, [mimeType]); // no stale-closure dependency on chunks anymore
    /**
     * Clears collected chunks and the paused flag.
     */
    const reset = react.useCallback(() => {
        setChunks([]);
        chunksRef.current = [];
        setIsPaused(false);
    }, []);
    // Return MediaRecorder state and control functions
    return {
        isRecording, // Whether currently recording
        isPaused, // Whether recording is paused
        chunks, // Recorded audio data chunks
        mimeType, // MIME type of recorded file
        start, // Function to start recording
        pause, // Function to pause recording
        resume, // Function to resume recording
        stop, // Function to stop recording and return Blob
        reset, // Function to reset recording state
        error, // Recording-related error message
    };
}
// Import React hooks
function useMicrophone() {
    // Media stream obtained from the microphone (null when disabled)
    const [stream, setStream] = react.useState(null);
    // Whether a microphone stream is currently active
    const [isEnabled, setIsEnabled] = react.useState(false);
    // Permission state: 'granted' | 'denied' | 'prompt' | 'unknown'
    const [permission, setPermission] = react.useState('unknown');
    // Last microphone-related error message
    const [error, setError] = react.useState(null);
    // Stream reference (used for cleanup)
    const streamRef = react.useRef(null);
    react.useEffect(() => {
        /**
         * Tracks microphone permission via the Permissions API when available.
         * Browsers without it (notably older iOS Safari) fall back to 'prompt'.
         *
         * Fix: the original left status.onchange attached after unmount, so a
         * later permission change called setPermission on an unmounted
         * component and leaked the subscription. The listener is now guarded
         * by the mounted flag and detached in cleanup.
         */
        let mounted = true; // false once this effect is cleaned up
        let permissionStatus = null; // kept so the onchange listener can be detached
        const nav = navigator;
        if (nav.permissions?.query) {
            nav.permissions
                .query({ name: 'microphone' })
                .then((status) => {
                    if (!mounted)
                        return;
                    permissionStatus = status;
                    setPermission(status.state);
                    // Update whenever the permission state changes, but never
                    // after unmount
                    status.onchange = () => {
                        if (mounted)
                            setPermission(status.state);
                    };
                })
                .catch(() => {
                    if (mounted)
                        setPermission('unknown');
                });
        }
        else {
            // No Permissions API: assume the browser will prompt on request
            setPermission('prompt');
        }
        return () => {
            mounted = false;
            if (permissionStatus)
                permissionStatus.onchange = null;
        };
    }, []); // run only once on mount
    /**
     * Enables the microphone: requests a stream with basic noise/echo
     * mitigation and updates state on success. Any existing stream is
     * stopped first so a re-request behaves consistently across browsers.
     */
    const enable = react.useCallback(async () => {
        try {
            setError(null);
            // Stop and clear any existing stream before re-requesting
            if (streamRef.current) {
                streamRef.current.getTracks().forEach((t) => t.stop());
                streamRef.current = null;
                setStream(null);
                setIsEnabled(false);
            }
            // Request the microphone with quality/processing constraints
            const nextStream = await navigator.mediaDevices.getUserMedia({
                audio: {
                    echoCancellation: true, // Enable echo cancellation
                    noiseSuppression: true, // Enable noise suppression
                    autoGainControl: true, // Enable automatic gain control
                    sampleRate: 44100, // Request a high sample rate
                    channelCount: 1, // Mono channel
                },
            });
            // Stream acquired: publish it
            streamRef.current = nextStream;
            setStream(nextStream);
            setIsEnabled(true);
        }
        catch (err) {
            // Map browser-specific errors to user-friendly messages
            let message = 'Microphone permission or device error';
            if (err && typeof err === 'object') {
                const e = err;
                if (e.name === 'NotAllowedError' || e.name === 'SecurityError') {
                    message =
                        'Permission denied. Please allow microphone in browser site settings.';
                }
                else if (e.name === 'NotFoundError') {
                    message = 'Microphone device not found.';
                }
                else if (e.name === 'NotReadableError') {
                    message = 'Another app is using the microphone or it is not accessible.';
                }
            }
            setError(message);
            setIsEnabled(false);
        }
    }, []);
    /**
     * Disables the microphone by stopping every track of the current stream.
     */
    const disable = react.useCallback(() => {
        const current = streamRef.current;
        if (current) {
            current.getTracks().forEach((t) => t.stop());
        }
        streamRef.current = null;
        setStream(null);
        setIsEnabled(false);
    }, []);
    react.useEffect(() => {
        /**
         * Unmount safety net: stop all tracks if the microphone is still on,
         * so the device indicator turns off and nothing leaks.
         */
        return () => {
            const current = streamRef.current;
            if (current)
                current.getTracks().forEach((t) => t.stop());
        };
    }, []); // run only on unmount
    // Return microphone state and control functions
    return { stream, isEnabled, permission, error, enable, disable };
}
// Import React hooks
/**
* Custom hook for tracking recording progress time
*
* Key features:
* - Automatically starts/stops timer based on recording start/stop
* - Supports pause/resume functionality for accurate recording time tracking
* - Formats time in HH:MM:SS format for user display
* - Safely cleans up timer on component unmount
*/
function useRecordingTimer(isRecording, // Whether currently recording
isPaused = false // Whether recording is paused
) {
    // Elapsed recording time in whole seconds
    const [elapsedTime, setElapsedTime] = react.useState(0);
    // setInterval id for the 100ms UI update tick
    const intervalRef = react.useRef(null);
    // Date.now() at which the current recording session started
    const startTimeRef = react.useRef(null);
    // Accumulated paused duration (ms) within the current session
    const pausedTimeRef = react.useRef(0);
    // Date.now() at which the current pause began (null when not paused)
    const pauseStartTimeRef = react.useRef(null);
    react.useEffect(() => {
        if (isRecording && !isPaused) {
            // Recording started or resumed
            if (startTimeRef.current === null) {
                // New session: remember the start instant
                startTimeRef.current = Date.now();
            }
            else if (pauseStartTimeRef.current !== null) {
                // Resumed from pause: add the pause to the accumulated total
                pausedTimeRef.current += Date.now() - pauseStartTimeRef.current;
                pauseStartTimeRef.current = null;
            }
            // Tick every 100ms so the displayed seconds update smoothly
            intervalRef.current = setInterval(() => {
                if (startTimeRef.current) {
                    // Actual recording time = wall time minus accumulated pauses
                    const totalElapsed = Date.now() - startTimeRef.current;
                    setElapsedTime(Math.floor((totalElapsed - pausedTimeRef.current) / 1000));
                }
            }, 100);
        }
        else if (isPaused) {
            // Paused: stop ticking and remember when the pause began
            if (intervalRef.current) {
                clearInterval(intervalRef.current);
                intervalRef.current = null;
            }
            if (pauseStartTimeRef.current === null) {
                pauseStartTimeRef.current = Date.now();
            }
        }
        else {
            // Stopped: stop ticking and clear the session's timing refs so the
            // next recording starts from zero even when reset() is not called.
            // (Previously the old start time survived, so a second recording
            // reported time that included the stopped gap.)
            // elapsedTime keeps its final value for display until reset().
            if (intervalRef.current) {
                clearInterval(intervalRef.current);
                intervalRef.current = null;
            }
            startTimeRef.current = null;
            pausedTimeRef.current = 0;
            pauseStartTimeRef.current = null;
        }
        // Cleanup: always clear the interval on dependency change/unmount
        return () => {
            if (intervalRef.current) {
                clearInterval(intervalRef.current);
                intervalRef.current = null;
            }
        };
    }, [isRecording, isPaused]); // runs whenever recording/pause state changes
    /**
     * Formats a duration as a zero-padded HH:MM:SS string.
     * @param seconds - Time to format (in whole seconds)
     * @returns Time string in HH:MM:SS format
     */
    const formatTime = (seconds) => {
        const hours = Math.floor(seconds / 3600); // 3600 seconds = 1 hour
        const mins = Math.floor((seconds % 3600) / 60);
        const secs = seconds % 60;
        return `${hours.toString().padStart(2, '0')}:${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`;
    };
    /**
     * Resets the timer to zero and clears all session timing refs.
     */
    const reset = () => {
        setElapsedTime(0);
        startTimeRef.current = null;
        pausedTimeRef.current = 0;
        pauseStartTimeRef.current = null;
    };
    // Return timer-related state and functions
    return {
        elapsedTime, // Elapsed time (in seconds)
        formattedTime: formatTime(elapsedTime), // Formatted time string (HH:MM:SS)
        reset, // Function to reset timer
    };
}
/**
* All-in-one custom hook for voice recording
*
* Key features:
* - Microphone permission management and stream control
* - Real-time audio level measurement and visualization
* - Recording start/stop/pause/resume functionality
* - Recording time tracking and formatting
* - Recorded audio playback functionality
* - Error handling and state management
* - iOS/Safari compatibility support
*/
function useVoiceRecorder(options = {}) {
    const { mimeType = 'audio/webm', smoothing = 0.8, fftSize = 2048, autoEnableMicrophone = false, autoPlayAfterRecording = false, } = options;
    // Compose the low-level hooks: context, microphone, meter, player,
    // recorder, and timer
    const { audioContext, isRunning, resume: resumeAudioContext } = useAudioContext();
    const { stream, isEnabled: isMicrophoneEnabled, permission, error: micError, enable: enableMicrophone, disable: disableMicrophone } = useMicrophone();
    const { level: audioLevel } = useAudioMeter({ audioContext, stream, smoothing, fftSize });
    const { isPlaying, audioRef, playPause, resetPlayer, handleAudioEnded } = useAudioPlayer();
    const { isRecording, isPaused, chunks, error: recorderError, start, pause, resume, stop, reset: resetRecorder } = useMediaRecorder(stream, { mimeType });
    const { elapsedTime, formattedTime, reset: resetTimer } = useRecordingTimer(isRecording, isPaused);
    // Local state: the final recording Blob, its object URL, and a combined error
    const [recordedBlob, setRecordedBlob] = react.useState(null);
    const [audioUrl, setAudioUrl] = react.useState(null);
    const [error, setError] = react.useState(null);
    // Previous object URL, revoked before a new one is created
    const previousUrlRef = react.useRef(null);
    // Merge microphone/recorder/local errors into one value; mic and
    // recorder errors take precedence over the locally set one
    react.useEffect(() => {
        const combinedError = micError || recorderError || error;
        setError(combinedError);
    }, [micError, recorderError, error]);
    // Optionally request the microphone as soon as permission is 'prompt'
    react.useEffect(() => {
        if (autoEnableMicrophone && !isMicrophoneEnabled && permission === 'prompt') {
            enableMicrophone().catch(() => {
                // Error is already surfaced via micError
            });
        }
    }, [autoEnableMicrophone, isMicrophoneEnabled, permission, enableMicrophone]);
    // Optionally start playback shortly after a recording becomes available.
    // NOTE(review): the 100ms delay appears intended to let the <audio>
    // element pick up the new src first; browser autoplay policies may still
    // block playback here — confirm in target browsers
    react.useEffect(() => {
        if (autoPlayAfterRecording && recordedBlob && !isPlaying) {
            const timer = setTimeout(() => {
                playPause();
            }, 100);
            return () => clearTimeout(timer);
        }
    }, [autoPlayAfterRecording, recordedBlob, isPlaying, playPause]);
    // Keep audioUrl in sync with recordedBlob, revoking stale object URLs
    // so Blob memory is not leaked
    react.useEffect(() => {
        if (recordedBlob) {
            // Revoke the previous URL before minting a new one
            if (previousUrlRef.current) {
                URL.revokeObjectURL(previousUrlRef.current);
            }
            const url = URL.createObjectURL(recordedBlob);
            setAudioUrl(url);
            previousUrlRef.current = url;
        }
        else {
            // No Blob: drop any outstanding URL
            if (previousUrlRef.current) {
                URL.revokeObjectURL(previousUrlRef.current);
                previousUrlRef.current = null;
            }
            setAudioUrl(null);
        }
    }, [recordedBlob]);
    // Revoke the last object URL on unmount
    react.useEffect(() => {
        return () => {
            if (previousUrlRef.current) {
                URL.revokeObjectURL(previousUrlRef.current);
            }
        };
    }, []);
    /**
     * Starts recording: enables the microphone and resumes the AudioContext
     * first if needed (the resume satisfies iOS/Safari user-gesture rules),
     * then starts the MediaRecorder.
     */
    const startRecording = react.useCallback(async () => {
        try {
            setError(null);
            // Enable microphone if disabled
            if (!isMicrophoneEnabled) {
                await enableMicrophone();
            }
            // Resume audio context if not running
            if (!isRunning) {
                await resumeAudioContext();
            }
            // Start recording
            start();
        }
        catch (err) {
            setError('Failed to start recording.');
        }
    }, [isMicrophoneEnabled, isRunning, enableMicrophone, resumeAudioContext, start]);
    /**
     * Stops recording, stores the resulting Blob, and returns it
     * (null on failure).
     */
    const stopRecording = react.useCallback(async () => {
        try {
            const blob = await stop();
            setRecordedBlob(blob);
            return blob;
        }
        catch (err) {
            setError('Failed to stop recording.');
            return null;
        }
    }, [stop]);
    // Thin wrappers so consumers get stable, simply-named callbacks
    const pauseRecording = react.useCallback(() => {
        pause();
    }, [pause]);
    const resumeRecording = react.useCallback(() => {
        resume();
    }, [resume]);
    /**
     * Resets recorder, player, timer, stored Blob, and error state.
     */
    const reset = react.useCallback(() => {
        resetRecorder();
        resetPlayer();
        resetTimer();
        setRecordedBlob(null);
        setError(null);
    }, [resetRecorder, resetPlayer, resetTimer]);
    // Disabling the microphone also resets all recording state
    const disableMicrophoneOverride = react.useCallback(() => {
        disableMicrophone();
        reset();
    }, [disableMicrophone, reset]);
    // Return the hook's public surface
    return {
        // State
        isRecording,
        isPaused,
        isMicrophoneEnabled,
        isPlaying,
        permission,
        audioLevel,
        elapsedTime,
        formattedTime,
        recordedBlob,
        audioUrl,
        error,
        // Control functions
        startRecording,
        pauseRecording,
        resumeRecording,
        stopRecording,
        enableMicrophone,
        disableMicrophone: disableMicrophoneOverride,
        playPause,
        reset,
        resumeAudioContext,
        // References
        audioRef,
    };
}
// Audio container formats this library knows how to emit
const SUPPORTED_AUDIO_FORMATS = [
    { mimeType: 'audio/webm', extension: 'webm', supported: true },
    { mimeType: 'audio/mp4', extension: 'mp4', supported: true },
    { mimeType: 'audio/wav', extension: 'wav', supported: true },
    { mimeType: 'audio/ogg', extension: 'ogg', supported: true },
];
/**
 * Returns the subset of SUPPORTED_AUDIO_FORMATS that this browser's
 * MediaRecorder can actually record to.
 */
function getSupportedAudioFormats() {
    return SUPPORTED_AUDIO_FORMATS.filter((format) => {
        // Without isTypeSupported we cannot probe, so report nothing
        if (!MediaRecorder.isTypeSupported)
            return false;
        return MediaRecorder.isTypeSupported(format.mimeType);
    });
}
/**
 * Picks the best recording MIME type available in this browser,
 * preferring webm, then mp4, wav, ogg; falls back to 'audio/webm'.
 */
function getBestAudioFormat() {
    const available = new Set(getSupportedAudioFormats().map((f) => f.mimeType));
    const priority = ['audio/webm', 'audio/mp4', 'audio/wav', 'audio/ogg'];
    for (const candidate of priority) {
        if (available.has(candidate))
            return candidate;
    }
    // Nothing reported as supported: use the library default
    return 'audio/webm';
}
/**
* Download Blob
*/
/**
 * Triggers a browser download of the given Blob.
 * @param blob - Data to download
 * @param filename - Suggested file name (default: 'recording')
 */
function downloadBlob(blob, filename = 'recording') {
    // Create a temporary object URL and a hidden anchor to trigger the download
    const objectUrl = URL.createObjectURL(blob);
    const anchor = document.createElement('a');
    anchor.href = objectUrl;
    anchor.download = filename;
    document.body.appendChild(anchor);
    anchor.click();
    document.body.removeChild(anchor);
    // Release the object URL so the Blob can be garbage-collected
    URL.revokeObjectURL(objectUrl);
}
/**
* Convert Blob to Base64 string
*/
/**
 * Converts a Blob into its base64 payload (without the data-URL prefix).
 * @param blob - Blob to encode
 * @returns Promise resolving to the base64 string
 */
function blobToBase64(blob) {
    return new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onerror = reject;
        reader.onload = () => {
            // reader.result is a data URL ("data:<mime>;base64,...."):
            // strip the prefix and keep only the base64 payload
            const dataUrl = reader.result;
            resolve(dataUrl.split(',')[1]);
        };
        reader.readAsDataURL(blob);
    });
}
/**
* Convert Base64 string to Blob
*/
/**
 * Converts a base64 string back into a Blob.
 * @param base64 - Base64 payload (no data-URL prefix)
 * @param mimeType - MIME type for the resulting Blob (default: 'audio/webm')
 * @returns Blob containing the decoded bytes
 */
function base64ToBlob(base64, mimeType = 'audio/webm') {
    // Decode the base64 payload into raw bytes
    const binary = atob(base64);
    const bytes = new Uint8Array(binary.length);
    for (let i = 0; i < binary.length; i++) {
        bytes[i] = binary.charCodeAt(i);
    }
    return new Blob([bytes], { type: mimeType });
}
/**
* Format file size in human-readable form
*/
/**
 * Formats a byte count as a human-readable size string (e.g. "1.5 KB").
 * @param bytes - Non-negative byte count
 * @returns Size string with up to two decimal places and a unit
 */
function formatFileSize(bytes) {
    if (bytes === 0)
        return '0 Bytes';
    const k = 1024;
    const sizes = ['Bytes', 'KB', 'MB', 'GB'];
    // Clamp the unit index: the original indexed past the end of `sizes` for
    // values >= 1 TB (yielding "1 undefined") and produced a negative index
    // for fractional byte counts below 1.
    const i = Math.min(sizes.length - 1, Math.max(0, Math.floor(Math.log(bytes) / Math.log(k))));
    return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
}
/**
* Format time in human-readable form
*/
/**
 * Formats a duration as "H:MM:SS" (or "M:SS" when under an hour).
 * @param seconds - Duration in seconds (fractions are truncated)
 * @returns Human-readable duration string
 */
function formatDuration(seconds) {
    // Break the total into hour/minute/second components
    const hours = Math.floor(seconds / 3600);
    const minutes = Math.floor((seconds % 3600) / 60);
    const secs = Math.floor(seconds % 60);
    const paddedSecs = secs.toString().padStart(2, '0');
    if (hours > 0) {
        const paddedMinutes = minutes.toString().padStart(2, '0');
        return `${hours}:${paddedMinutes}:${paddedSecs}`;
    }
    return `${minutes}:${paddedSecs}`;
}
/**
* Convert audio level for visualization
*/
/**
 * Normalizes an audio level for visualization: clamps into [0, 1] and
 * collapses values below the noise floor to exact silence.
 * (Note: this is a clamp + threshold, not a log-scale conversion.)
 * @param level - Raw level
 * @param minLevel - Noise floor below which the level is reported as 0
 * @returns Level in [0, 1], or 0 when below minLevel
 */
function normalizeAudioLevel(level, minLevel = 0.01) {
    const clamped = Math.min(1, Math.max(0, level));
    return clamped < minLevel ? 0 : clamped;
}
/**
* Check if browser supports MediaRecorder
*/
/**
 * Reports whether this environment has a usable MediaRecorder
 * (constructor present and isTypeSupported available for probing).
 */
function isMediaRecorderSupported() {
    if (typeof MediaRecorder === 'undefined')
        return false;
    return Boolean(MediaRecorder.isTypeSupported);
}
/**
* Check if browser supports getUserMedia
*/
/**
 * Reports whether this browser exposes getUserMedia on mediaDevices.
 */
function isGetUserMediaSupported() {
    const devices = navigator.mediaDevices;
    return Boolean(devices && devices.getUserMedia);
}
/**
* Check if browser supports Web Audio API
*/
/**
 * Reports whether this browser supports the Web Audio API
 * (standard or WebKit-prefixed AudioContext).
 */
function isWebAudioSupported() {
    const w = window;
    return Boolean(w.AudioContext || w.webkitAudioContext);
}
/**
* Get device information
*/
/**
 * Derives coarse device/browser flags from the user-agent string.
 * Note: pure UA sniffing — Chrome UAs also contain "Safari", hence the
 * exclusion tests below.
 */
function getDeviceInfo() {
    const userAgent = navigator.userAgent;
    const matches = (pattern) => pattern.test(userAgent);
    return {
        userAgent,
        isMobile: matches(/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i),
        isIOS: matches(/iPad|iPhone|iPod/),
        isAndroid: matches(/Android/),
        isSafari: matches(/Safari/) && !matches(/Chrome/),
        isChrome: matches(/Chrome/) && !matches(/Edge/),
        isFirefox: matches(/Firefox/),
    };
}
// Public API: constants and utility functions
exports.SUPPORTED_AUDIO_FORMATS = SUPPORTED_AUDIO_FORMATS;
exports.base64ToBlob = base64ToBlob;
exports.blobToBase64 = blobToBase64;
exports.downloadBlob = downloadBlob;
exports.formatDuration = formatDuration;
exports.formatFileSize = formatFileSize;
exports.getBestAudioFormat = getBestAudioFormat;
exports.getDeviceInfo = getDeviceInfo;
exports.getSupportedAudioFormats = getSupportedAudioFormats;
exports.isGetUserMediaSupported = isGetUserMediaSupported;
exports.isMediaRecorderSupported = isMediaRecorderSupported;
exports.isWebAudioSupported = isWebAudioSupported;
exports.normalizeAudioLevel = normalizeAudioLevel;
// Public API: React hooks
exports.useAudioContext = useAudioContext;
exports.useAudioMeter = useAudioMeter;
exports.useAudioPlayer = useAudioPlayer;
exports.useMediaRecorder = useMediaRecorder;
exports.useMicrophone = useMicrophone;
exports.useRecordingTimer = useRecordingTimer;
exports.useVoiceRecorder = useVoiceRecorder;
//# sourceMappingURL=index.js.map