// Package: @diffusionstudio/core-v4
// Version: (not recorded in this header)
// Description: 2D motion graphics and video rendering engine
// 41 lines (40 loc) • 1.69 kB • TypeScript declaration file
import { Input } from 'mediabunny';
import { BaseSource, SourceOptions, SourceInitOptions } from '../source';
import { WebAudioDecoder } from '../../services/audio-decoder';
import { ClipType } from '../../clips';
import { Timestamp, Transcript } from '../../models';
import { AudioSlice, SamplerOptions, SilenceDetectionOptions } from './types';
import { Time } from '../../types';
/**
 * Declaration of an audio media source backed by an HTML media element
 * and a Web Audio decoder. Exposes decoding to an `AudioBuffer`,
 * silence detection, windowed waveform sampling, and raw sample
 * iteration over a time range.
 *
 * NOTE(review): this is a `.d.ts` — behavioral notes below are inferred
 * from the signatures and existing doc comments; confirm against the
 * implementation.
 */
export declare class AudioSource extends BaseSource {
/** Clip type discriminator for this source — presumably an audio clip type; verify against `ClipType`. */
readonly type: ClipType;
/** Backing media element; a video element is allowed, presumably when the audio track comes from a video file — TODO confirm. */
element: HTMLAudioElement | HTMLVideoElement;
/** Decoder used to turn the source's encoded audio into PCM (see `decode`). */
decoder: WebAudioDecoder;
/** Total duration of the media, if known — likely populated during `init`; confirm in implementation. */
duration?: Timestamp;
/** Lazily-created mediabunny `Input` used for demuxing; optional until first needed. */
demuxer?: Promise<Input>;
/** Optional transcript associated with this audio source. */
transcript?: Transcript;
/**
 * Creates the source from common source options.
 * @param options - Shared source construction options (see `SourceOptions`).
 */
constructor(options: SourceOptions);
/**
 * Asynchronously initializes the source (e.g. loading/probing the media
 * — exact work not visible from this declaration).
 * @param options - Optional initialization options.
 */
init(options?: SourceInitOptions): Promise<void>;
/**
 * Decodes the source into a Web Audio `AudioBuffer`.
 * @param numberOfChannels - Desired channel count; implementation default not visible here.
 * @param sampleRate - Desired output sample rate; implementation default not visible here.
 * @param cache - Whether to cache the decoded buffer — presumably for reuse across calls; confirm.
 * @returns The decoded PCM audio.
 */
decode(numberOfChannels?: number, sampleRate?: number, cache?: boolean): Promise<AudioBuffer>;
/**
 * Find silences in the audio clip. Results are cached.
 *
 * Uses a default sample rate of 3000 when none is specified.
 * @param options - Silence detection options.
 * @returns An array of the silences (in ms) in the clip.
 */
silences(options?: SilenceDetectionOptions): Promise<AudioSlice[]>;
/**
 * Sampler that uses a window size to calculate the max value of the
 * samples in each window (e.g. for waveform visualization).
 * @param options - Sampling options (window `length`, `start`/`stop` bounds).
 * @returns An array of the max values of the samples in the window.
 */
sample({ length, start, stop, }?: SamplerOptions): Promise<Float32Array>;
/**
 * Streams raw mediabunny `AudioSample`s that fall within the given
 * time range.
 * @param options - Inclusive-looking `startTime`/`endTime` bounds — exact inclusivity not visible here; confirm.
 */
samplesInRange(options: {
startTime: Time;
endTime: Time;
}): AsyncGenerator<import('mediabunny').AudioSample, void, unknown>;
/**
 * Reads basic audio metadata from the source.
 * @returns Sample rate and channel count, or `null` — presumably when
 * the track has no audio or metadata is unavailable; confirm.
 */
getMetadata(): Promise<{
sampleRate: number;
numberOfChannels: number;
} | null>;
}