@lvyanxiang/react-native-audio-waveform
A React Native library for extracting audio waveform data from audio and video files. Supports iOS and Android platforms with streaming processing for large files.
import { Platform } from 'react-native';
import NativeAudioWaveform from './NativeAudioWaveform';
import { processAudioToWaveform, calculateRanges } from './audioProcessing';
import { convertPCMToFloat32, applyDecodingOptions, extractTimeSegment } from './utils';
// Export types for advanced users
export * from './types';
/**
 * Convert audio/video to waveform data - the only function you need!
 *
 * @param fileUri - Path to the audio/video file
 * @param options - Optional configuration
* @returns Promise with waveform data and metadata
*
* @example
* ```typescript
* import { audioToWaveform } from '@lvyanxiang/react-native-audio-waveform';
*
 * // Basic usage - get the waveform data
 * const result = await audioToWaveform('file:///path/to/audio.mp3');
 * console.log(result.waveform.dataPoints); // waveform data points
 * console.log(result.metadata.durationMs); // audio duration in ms
 *
 * // Custom configuration
 * const detailed = await audioToWaveform('file:///path/to/audio.mp3', {
 *   segmentDurationMs: 50, // finer-grained waveform
 *   numberOfPoints: 100,   // or a fixed number of points
 *   startTimeMs: 0,        // start time (ms)
 *   endTimeMs: 30000       // end time (ms)
 * });
* });
* ```
*/
export async function audioToWaveform(fileUri, options) {
try {
    // Validate arguments
    if (!fileUri || typeof fileUri !== 'string') {
      throw new Error('Invalid file URI');
}
const {
segmentDurationMs = 100,
numberOfPoints,
startTimeMs,
endTimeMs,
includeMetadata = true,
enableStreaming = true,
      streamingThreshold = 10 * 1024 * 1024, // 10 MB
      chunkSize = 1000
} = options || {};
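    // Option semantics (defaults above): segmentDurationMs sets the waveform
    // resolution; numberOfPoints, when provided, yields a fixed-size result
    // instead; startTimeMs/endTimeMs crop the source; streamingThreshold is
    // the file size in bytes above which the chunked path is used.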
    // Build the native call options
const nativeOptions = {
fileUri,
segmentDurationMs,
numberOfPoints,
startTimeMs,
endTimeMs,
includeMetadata,
enableStreaming,
streamingThreshold,
chunkSize
};
    // Try the native implementation first
if (Platform.OS !== 'web') {
try {
return await NativeAudioWaveform.audioToWaveform(nativeOptions);
} catch (nativeError) {
        console.warn('Native processing failed, falling back to JS:', nativeError);
}
}
    // JavaScript fallback implementation
return await audioToWaveformJS(nativeOptions);
} catch (error) {
    throw new Error(`Audio to waveform failed: ${error}`);
}
}
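/*
 * Illustration only (hypothetical helper, not part of this library's API):
 * one way a caller might scale the returned data points for drawing bars,
 * using the amplitudeRange reported alongside the waveform.
 */
function normalizeForRendering(result, barMaxHeight = 48) {
  const { dataPoints, amplitudeRange } = result.waveform;
  const span = amplitudeRange.max - amplitudeRange.min || 1; // avoid divide-by-zero
  return dataPoints.map(point => ({
    ...point,
    // Linear map of amplitude into [0, barMaxHeight] for a bar chart
    barHeight: ((point.amplitude - amplitudeRange.min) / span) * barMaxHeight
  }));
}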
/**
* JavaScript implementation (fallback)
*/
async function audioToWaveformJS(options) {
const startTime = Date.now();
  // Remote files can't be sized before download, so stream them when streaming
  // is enabled ('http' also matches 'https' URIs)
  const isStreaming = options.enableStreaming && options.fileUri.startsWith('http');
if (isStreaming) {
return await audioToWaveformJSStreaming(options);
}
// Fetch audio file
const response = await fetch(options.fileUri);
if (!response.ok) {
throw new Error(`Failed to fetch audio file: ${response.statusText}`);
}
const arrayBuffer = await response.arrayBuffer();
// Check file size for streaming threshold
const fileSize = arrayBuffer.byteLength;
const shouldUseStreaming = options.enableStreaming && fileSize > (options.streamingThreshold || 10 * 1024 * 1024);
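  // (audioToWaveformJSStreaming re-fetches the URI; acceptable for a fallback path.)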
if (shouldUseStreaming) {
return await audioToWaveformJSStreaming(options);
}
// Convert to Float32Array (assuming 16-bit WAV for simplicity)
const {
audioData,
sampleRate,
channels
} = await convertPCMToFloat32(arrayBuffer, 16);
// Apply decoding options
const {
processedData,
sampleRate: finalSampleRate,
channels: finalChannels
} = await applyDecodingOptions(audioData, sampleRate, channels, undefined);
// Extract time segment if specified
let segmentData = processedData;
if (options.startTimeMs !== undefined || options.endTimeMs !== undefined) {
segmentData = extractTimeSegment(processedData, finalSampleRate, options.startTimeMs, options.endTimeMs);
}
// Process to waveform
let dataPoints;
if (options.numberOfPoints) {
// Fixed number of points mode
const totalDuration = segmentData.length / finalSampleRate * 1000;
const segmentDuration = totalDuration / options.numberOfPoints;
dataPoints = processAudioToWaveform(segmentData, finalSampleRate, segmentDuration);
// Limit to exact number of points
dataPoints = dataPoints.slice(0, options.numberOfPoints);
} else {
// Segment duration mode
dataPoints = processAudioToWaveform(segmentData, finalSampleRate, options.segmentDurationMs);
}
// Calculate ranges
const {
amplitudeRange,
rmsRange
} = calculateRanges(dataPoints);
const extractionTimeMs = Date.now() - startTime;
const durationMs = segmentData.length / finalSampleRate * 1000;
const waveform = {
    segmentDurationMs: options.numberOfPoints ? durationMs / dataPoints.length : options.segmentDurationMs,
durationMs,
bitDepth: 16,
samples: segmentData.length,
numberOfChannels: finalChannels,
sampleRate: finalSampleRate,
dataPoints,
amplitudeRange,
rmsRange,
extractionTimeMs,
isStreaming: false
};
let metadata;
if (options.includeMetadata) {
metadata = {
durationMs,
sampleRate: finalSampleRate,
channels: finalChannels,
bitDepth: 16,
format: 'unknown'
};
}
return {
waveform,
metadata
};
}
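/*
 * For reference, a minimal sketch of what convertPCMToFloat32 is assumed to do
 * with 16-bit little-endian PCM: scale Int16 samples into [-1, 1). The real
 * implementation lives in './utils' and may differ (e.g. WAV header parsing);
 * the sampleRate/channels returned here are placeholder assumptions.
 */
function convertPCM16Sketch(arrayBuffer) {
  const int16 = new Int16Array(arrayBuffer);
  const audioData = new Float32Array(int16.length);
  for (let i = 0; i < int16.length; i++) {
    audioData[i] = int16[i] / 32768; // Int16 range [-32768, 32767] -> [-1, 1)
  }
  return { audioData, sampleRate: 44100, channels: 1 }; // assumed defaults
}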
/**
* JavaScript streaming implementation for large files
*/
async function audioToWaveformJSStreaming(options) {
const startTime = Date.now();
  // In streaming mode we simulate streaming by processing the decoded audio in
  // chunks. This is a simplified implementation; proper streaming would fetch
  // and decode incrementally (see the sketch after this function).
const response = await fetch(options.fileUri);
if (!response.ok) {
throw new Error(`Failed to fetch audio file: ${response.statusText}`);
}
const arrayBuffer = await response.arrayBuffer();
const {
audioData,
sampleRate,
channels
} = await convertPCMToFloat32(arrayBuffer, 16);
// Calculate total duration and segments
const totalDuration = audioData.length / sampleRate * 1000;
const segmentDurationMs = options.segmentDurationMs || 100;
const totalSegments = Math.ceil(totalDuration / segmentDurationMs);
// Process in chunks to simulate streaming
const chunkSize = options.chunkSize || 1000;
const dataPoints = [];
let minAmplitude = Infinity;
let maxAmplitude = -Infinity;
let minRms = Infinity;
let maxRms = -Infinity;
for (let chunkStart = 0; chunkStart < totalSegments; chunkStart += chunkSize) {
const chunkEnd = Math.min(chunkStart + chunkSize, totalSegments);
for (let i = chunkStart; i < chunkEnd; i++) {
const startSample = Math.floor(i * segmentDurationMs / 1000 * sampleRate);
const endSample = Math.min(startSample + Math.floor(segmentDurationMs / 1000 * sampleRate), audioData.length);
if (startSample >= audioData.length) break;
      // Compute peak amplitude and RMS for this segment
      let maxAmp = 0;
      let sumSquares = 0;
      for (let j = startSample; j < endSample; j++) {
        const sample = Math.abs(audioData[j]);
        maxAmp = Math.max(maxAmp, sample);
        sumSquares += sample * sample;
      }
      // Dynamic range compression similar to expo-audio-stream: the square-root
      // curve lifts quieter peaks more than loud ones above the 0.1 threshold
      const compressedAmplitude = maxAmp > 0.1 ? Math.sqrt(maxAmp) : maxAmp;
      const rms = Math.sqrt(sumSquares / (endSample - startSample));
      const dB = rms > 0 ? 20 * Math.log10(rms) : -Infinity; // dBFS (0 dB = full scale)
      const silent = rms < 0.001;
// Track ranges using compressed amplitude
minAmplitude = Math.min(minAmplitude, compressedAmplitude);
maxAmplitude = Math.max(maxAmplitude, compressedAmplitude);
minRms = Math.min(minRms, rms);
maxRms = Math.max(maxRms, rms);
dataPoints.push({
id: i,
amplitude: compressedAmplitude,
rms,
dB,
silent,
startTime: i * segmentDurationMs,
endTime: (i + 1) * segmentDurationMs,
        startPosition: startSample * 2, // byte offsets, assuming 16-bit samples
        endPosition: endSample * 2,
samples: endSample - startSample
});
}
}
const extractionTimeMs = Date.now() - startTime;
const waveform = {
segmentDurationMs,
durationMs: totalDuration,
bitDepth: 16,
samples: audioData.length,
numberOfChannels: channels,
sampleRate,
dataPoints,
amplitudeRange: {
min: minAmplitude,
max: maxAmplitude
},
rmsRange: {
min: minRms,
max: maxRms
},
extractionTimeMs,
isStreaming: true,
totalSamples: audioData.length
};
let metadata;
if (options.includeMetadata) {
metadata = {
durationMs: totalDuration,
sampleRate,
channels,
bitDepth: 16,
format: 'unknown'
};
}
return {
waveform,
metadata
};
}
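/*
 * Sketch of genuinely incremental fetching with the Fetch streams API, as the
 * comment in audioToWaveformJSStreaming suggests. Hypothetical helper, not part
 * of this library's API; it only streams raw bytes (response.body is available
 * on web targets, not guaranteed in React Native), and compressed formats would
 * still need a streaming decoder such as the Web Audio API.
 */
async function fetchInChunks(fileUri, onChunk) {
  const response = await fetch(fileUri);
  if (!response.ok || !response.body) {
    throw new Error(`Failed to stream audio file: ${response.statusText}`);
  }
  const reader = response.body.getReader();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    onChunk(value); // value is a Uint8Array chunk of the file
  }
}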
//# sourceMappingURL=index.js.map