UNPKG

webcodecs-encoder

Version:

A TypeScript library for browser environments to encode video (H.264/AVC, VP9, VP8) and audio (AAC, Opus) using the WebCodecs API and mux them into MP4 or WebM containers with real-time streaming support. New function-first API design.

1 line 136 kB
{"version":3,"sources":["../src/index.ts","../src/types.ts","../src/utils/config-parser.ts","../src/worker/worker-communicator.ts","../src/utils/video-frame-converter.ts","../src/core/encode.ts","../src/stream/encode-stream.ts","../src/utils/can-encode.ts","../src/factory/encoder.ts"],"sourcesContent":["// メイン関数ファーストAPI\nexport { encode } from \"./core/encode\";\nexport { encodeStream } from \"./stream/encode-stream\";\nexport { canEncode } from \"./utils/can-encode\";\n\n// 高度な使用向け:カスタムエンコーダーファクトリ\nexport { createEncoder, encoders, examples } from \"./factory/encoder\";\nexport type { EncoderFactory } from \"./factory/encoder\";\n\n// Legacy class-based API removed in v0.2.4 - use encodeStream() for MediaStream encoding\n\n// 型定義\nexport type {\n VideoSource,\n Frame,\n EncodeOptions,\n QualityPreset,\n VideoConfig,\n AudioConfig,\n AvcBitstreamFormatOption,\n HevcBitstreamFormatOption,\n AacBitstreamFormatOption,\n ProgressInfo,\n EncodeErrorType,\n VideoFile,\n} from \"./types\";\n\nexport { EncodeError } from './types';\n\n// 内部実装用(高度な使用のみ)\nexport type {\n EncoderConfig,\n ProcessingStage,\n EncoderErrorType,\n WorkerMessage,\n MainThreadMessage,\n VideoEncoderGetter,\n AudioEncoderGetter,\n AudioDataGetter,\n} from './types';\n","// Type definitions for new function-first API\n\n// Basic frame type\nexport type Frame = VideoFrame | HTMLCanvasElement | OffscreenCanvas | ImageBitmap | ImageData;\n\n// Video file type\nexport interface VideoFile {\n file: File | Blob;\n type: string;\n}\n\n// Video source type (all input formats)\nexport type VideoSource = \n | Frame[] // Static frame array\n | AsyncIterable<Frame> // Streaming frames\n | MediaStream // Camera/screen sharing\n | VideoFile; // Existing video file\n\n// Quality presets\nexport type QualityPreset = 'low' | 'medium' | 'high' | 'lossless';\n\nexport type AvcBitstreamFormatOption = \"annexb\" | \"avc\";\nexport type HevcBitstreamFormatOption = \"annexb\" | \"hevc\";\nexport type 
AacBitstreamFormatOption = \"aac\" | \"adts\";\n\n// Video configuration\nexport interface VideoConfig {\n codec?: 'avc' | 'hevc' | 'vp9' | 'vp8' | 'av1';\n /** Override codec string passed to VideoEncoder (e.g. \"avc1.640028\"). */\n codecString?: string;\n bitrate?: number;\n /**\n * Optional quantizer hint. Browser support varies by codec/platform.\n * When set, it is forwarded to VideoEncoderConfig.\n */\n quantizer?: number;\n /** AVC-specific options. */\n avc?: {\n format?: AvcBitstreamFormatOption;\n };\n /** HEVC-specific options. */\n hevc?: {\n format?: HevcBitstreamFormatOption;\n };\n hardwareAcceleration?: 'no-preference' | 'prefer-hardware' | 'prefer-software';\n latencyMode?: 'quality' | 'realtime';\n keyFrameInterval?: number;\n}\n\n// Audio configuration\nexport type AudioCodec =\n | 'aac'\n | 'opus'\n | 'flac'\n | 'mp3'\n | 'vorbis'\n | 'pcm'\n | 'ulaw'\n | 'alaw';\n\nexport interface AudioConfig {\n codec?: AudioCodec;\n /** Override codec string passed to AudioEncoder (e.g. \"mp4a.40.2\"). */\n codecString?: string;\n bitrate?: number;\n sampleRate?: number;\n channels?: number;\n bitrateMode?: 'constant' | 'variable';\n /** AAC-specific options. 
*/\n aac?: {\n format?: AacBitstreamFormatOption;\n };\n}\n\n// Progress information\nexport interface ProgressInfo {\n percent: number;\n processedFrames: number;\n totalFrames?: number;\n fps: number;\n stage: string;\n estimatedRemainingMs?: number;\n}\n\n// Encode options\nexport interface EncodeOptions {\n // Basic settings (auto-detectable)\n width?: number;\n height?: number;\n frameRate?: number;\n\n // Quality preset\n quality?: QualityPreset;\n\n // Detailed settings (optional)\n video?: VideoConfig | false; // false to disable video\n audio?: AudioConfig | false; // false to disable audio\n container?: 'mp4' | 'webm';\n\n // Timestamp handling\n firstTimestampBehavior?: \"offset\" | \"strict\";\n\n // Latency mode (top level)\n latencyMode?: \"quality\" | \"realtime\";\n\n // Backpressure control\n maxVideoQueueSize?: number; // Default: 30 frames\n maxAudioQueueSize?: number; // Default: 30 chunks\n backpressureStrategy?: \"drop\" | \"wait\"; // Default: \"drop\"\n\n // Callbacks\n onProgress?: (progress: ProgressInfo) => void;\n onError?: (error: EncodeError) => void;\n}\n\n// Error types\nexport type EncodeErrorType =\n | 'not-supported'\n | 'initialization-failed'\n | 'configuration-error'\n | 'invalid-input' // Input source or frame data is invalid\n | 'encoding-failed'\n | 'video-encoding-error'\n | 'audio-encoding-error'\n | 'muxing-failed'\n | 'cancelled'\n | 'timeout'\n | 'worker-error'\n | 'filesystem-error' // VideoFile access errors\n | 'unknown';\n\n// Custom error class\nexport class EncodeError extends Error {\n type: EncodeErrorType;\n cause?: unknown;\n\n constructor(type: EncodeErrorType, message: string, cause?: unknown) {\n super(message);\n this.name = 'EncodeError';\n this.type = type;\n this.cause = cause;\n Object.setPrototypeOf(this, EncodeError.prototype);\n }\n}\n\n// --- Internal implementation type definitions (worker communication, etc.) 
---\n\n// Basic configuration type for worker communication (internal implementation)\nexport interface EncoderConfig {\n width: number;\n height: number;\n frameRate: number;\n videoBitrate: number; // bps\n audioBitrate: number; // bps\n /**\n * Controls bitrate distribution for AAC. \"constant\" produces constant\n * bitrate (CBR) output while \"variable\" enables variable bitrate (VBR).\n * Not all browsers respect this setting. Chrome 119+ improves CBR support.\n */\n audioBitrateMode?: \"constant\" | \"variable\";\n sampleRate: number; // Hz\n channels: number; // e.g., 1 for mono, 2 for stereo\n container?: \"mp4\" | \"webm\"; // Default: 'mp4'. Set 'webm' for WebM output.\n codec?: {\n video?: \"avc\" | \"hevc\" | \"vp9\" | \"vp8\" | \"av1\"; // Default: 'avc' (H.264)\n audio?: AudioCodec; // Default: 'aac'\n };\n /**\n * Optional codec string overrides passed directly to the encoders.\n * For example: `{ video: 'avc1.640028', audio: 'mp4a.40.2' }`.\n */\n codecString?: {\n video?: string;\n audio?: string;\n };\n latencyMode?: \"quality\" | \"realtime\"; // Default: 'quality'\n /** Preference for hardware or software encoding. */\n hardwareAcceleration?:\n | \"prefer-hardware\"\n | \"prefer-software\"\n | \"no-preference\";\n /** Drop new video frames when the number of queued frames exceeds `maxQueueDepth`. */\n dropFrames?: boolean;\n /** Maximum number of queued video frames before dropping. Defaults to `Infinity`. */\n maxQueueDepth?: number;\n /** Total frames for progress calculation if known in advance. */\n totalFrames?: number;\n /** Force a key frame every N video frames. 
*/\n keyFrameInterval?: number;\n /**\n * How to handle the first timestamp of a track.\n * 'offset': Offsets all timestamps so the first one is 0.\n * 'strict': Requires the first timestamp to be 0 (default).\n */\n firstTimestampBehavior?: \"offset\" | \"strict\";\n /** Backpressure control for video queue */\n maxVideoQueueSize?: number;\n /** Backpressure control for audio queue */\n maxAudioQueueSize?: number;\n /** Backpressure strategy: drop frames or wait */\n backpressureStrategy?: \"drop\" | \"wait\";\n /** Additional VideoEncoder configuration overrides. */\n videoEncoderConfig?: Partial<VideoEncoderConfig>;\n /** Additional AudioEncoder configuration overrides. */\n audioEncoderConfig?: Partial<AudioEncoderConfig>;\n}\n\n// Processing stage definitions\nexport enum ProcessingStage {\n Initializing = \"initializing\",\n VideoEncoding = \"video-encoding\",\n AudioEncoding = \"audio-encoding\",\n Muxing = \"muxing\",\n Finalizing = \"finalizing\",\n}\n\n// Encoder error types (internal implementation)\nexport enum EncoderErrorType {\n NotSupported = \"not-supported\",\n InitializationFailed = \"initialization-failed\",\n ConfigurationError = \"configuration-error\",\n InvalidInput = \"invalid-input\", // Input source or frame data is invalid\n EncodingFailed = \"encoding-failed\", // Generic encoding error\n VideoEncodingError = \"video-encoding-error\", // Specific video encoding error\n AudioEncodingError = \"audio-encoding-error\", // Specific audio encoding error\n MuxingFailed = \"muxing-failed\",\n Cancelled = \"cancelled\",\n Timeout = \"timeout\",\n WorkerError = \"worker-error\",\n FilesystemError = \"filesystem-error\", // VideoFile access errors\n Unknown = \"unknown\",\n}\n\n// --- Worker communication message types ---\n\n// Messages TO the Worker\nexport interface InitializeWorkerMessage {\n type: \"initialize\";\n config: EncoderConfig;\n totalFrames?: number; // For progress calculation\n}\n\nexport interface AddVideoFrameMessage {\n type: 
\"addVideoFrame\";\n frame: VideoFrame;\n timestamp: number; // microseconds\n}\n\nexport interface AddAudioDataMessage {\n type: \"addAudioData\";\n // Array of Float32Array for each channel (non-interleaved).\n // The ArrayBuffer of each Float32Array should be transferred.\n audioData?: Float32Array[];\n /** Optional AudioData object to be encoded directly. */\n audio?: AudioData;\n timestamp: number; // microseconds\n format: AudioSampleFormat; // e.g., \"f32-planar\" or \"s16\" etc. (AudioSampleFormat from WebCodecs)\n sampleRate: number;\n numberOfFrames: number;\n numberOfChannels: number;\n}\n\nexport interface FinalizeWorkerMessage {\n type: \"finalize\";\n}\n\nexport interface CancelWorkerMessage {\n type: \"cancel\";\n}\n\nexport type WorkerMessage =\n | InitializeWorkerMessage\n | AddVideoFrameMessage\n | AddAudioDataMessage\n | FinalizeWorkerMessage\n | CancelWorkerMessage;\n\n// Messages FROM the Worker\nexport interface WorkerInitializedMessage {\n type: \"initialized\";\n actualVideoCodec?: string;\n actualAudioCodec?: string;\n}\n\nexport interface ProgressMessage {\n type: \"progress\";\n processedFrames: number;\n totalFrames?: number;\n}\n\nexport interface WorkerFinalizedMessage {\n type: \"finalized\";\n output: Uint8Array | null; // MP4 file data or null when streaming\n}\n\nexport interface QueueSizeMessage {\n type: \"queueSize\";\n videoQueueSize: number;\n audioQueueSize: number;\n}\n\nexport interface WorkerDataChunkMessage {\n type: \"dataChunk\";\n chunk: Uint8Array;\n isHeader?: boolean; // Indicates if this chunk is a header (e.g., moov for MP4, EBML for WebM)\n offset?: number; // For MP4 fragmented streaming\n container: \"mp4\" | \"webm\"; // To inform the main thread which muxer this chunk belongs to\n}\n\nexport interface WorkerErrorMessage {\n type: \"error\";\n errorDetail: {\n message: string;\n type: EncoderErrorType;\n stack?: string;\n };\n}\n\nexport interface WorkerCancelledMessage {\n type: \"cancelled\";\n}\n\nexport 
type MainThreadMessage =\n | WorkerInitializedMessage\n | ProgressMessage\n | WorkerFinalizedMessage\n | QueueSizeMessage\n | WorkerDataChunkMessage\n | WorkerErrorMessage\n | WorkerCancelledMessage;\n\n// --- Helper Types for environment-dependent constructors ---\nexport type VideoEncoderConstructor = typeof VideoEncoder;\nexport type AudioEncoderConstructor = typeof AudioEncoder;\nexport type AudioDataConstructor = typeof AudioData;\n\nexport type VideoEncoderGetter = () => VideoEncoderConstructor | undefined;\nexport type AudioEncoderGetter = () => AudioEncoderConstructor | undefined;\nexport type AudioDataGetter = () => AudioDataConstructor | undefined;\n","/**\n * 設定の推定・変換・マージ処理のユーティリティ\n */\n\nimport {\n EncodeOptions,\n VideoSource,\n Frame,\n QualityPreset,\n EncoderConfig,\n VideoFile,\n} from \"../types\";\n\n/**\n * VideoSourceから設定を推定し、EncodeOptionsとマージして最終的なEncoderConfigを生成\n */\nexport async function inferAndBuildConfig(\n source: VideoSource,\n options?: EncodeOptions,\n): Promise<EncoderConfig> {\n // ソースから基本的な設定を推定\n const inferredConfig = await inferConfigFromSource(source);\n\n // ユーザー指定のオプションをマージ\n const mergedOptions = mergeWithUserOptions(inferredConfig, options);\n\n // 品質プリセットを適用\n const configWithPreset = applyQualityPreset(mergedOptions, options?.quality);\n\n // 最終的なEncoderConfigに変換\n return convertToEncoderConfig(configWithPreset);\n}\n\n/**\n * VideoSourceから基本設定を推定\n */\nasync function inferConfigFromSource(\n source: VideoSource,\n): Promise<Partial<EncodeOptions>> {\n const config: Partial<EncodeOptions> = {\n frameRate: 30, // デフォルト値\n container: \"mp4\", // デフォルト値\n };\n\n try {\n // 最初のフレームを取得して解像度を推定\n const firstFrame = await getFirstFrame(source);\n if (firstFrame) {\n const dimensions = getFrameDimensions(firstFrame);\n config.width = dimensions.width;\n config.height = dimensions.height;\n }\n\n if (isVideoFileSource(source)) {\n await enrichConfigFromVideoFile(config, source);\n }\n\n // MediaStreamの場合はビデオ・オーディオトラックの有無も確認\n 
if (source instanceof MediaStream) {\n const videoTracks = source.getVideoTracks();\n const audioTracks = source.getAudioTracks();\n\n // ビデオトラックがない場合\n if (videoTracks.length === 0) {\n config.video = false; // ビデオなし\n }\n\n if (audioTracks.length === 0) {\n config.audio = false; // オーディオなし\n } else {\n // MediaStreamTrackからオーディオ設定を推定\n const audioTrack = audioTracks[0];\n const settings = audioTrack.getSettings();\n config.audio = {\n sampleRate: settings.sampleRate || 48000,\n channels: settings.channelCount || 2,\n };\n }\n }\n } catch (error) {\n // 推定に失敗した場合はデフォルト値を使用\n config.width = 640;\n config.height = 480;\n }\n\n return config;\n}\n\n/**\n * ユーザー指定のオプションをマージ\n */\nfunction mergeWithUserOptions(\n inferredConfig: Partial<EncodeOptions>,\n userOptions?: EncodeOptions,\n): EncodeOptions {\n const mergeNestedConfig = <T extends Record<string, any>>(\n inferredValue: T | false | undefined,\n userValue: T | false | undefined,\n ): T | false | undefined => {\n if (userValue === false) {\n return false;\n }\n\n if (userValue === undefined) {\n if (inferredValue === false) {\n return false;\n }\n if (inferredValue && typeof inferredValue === \"object\") {\n return { ...inferredValue };\n }\n return inferredValue;\n }\n\n if (inferredValue === false || inferredValue == null) {\n return { ...userValue } as T;\n }\n\n return {\n ...(inferredValue as T),\n ...(userValue as T),\n };\n };\n\n return {\n // 推定された設定をベースに\n ...inferredConfig,\n // ユーザー指定の設定で上書き\n ...userOptions,\n // ネストしたオブジェクトは個別にマージ\n video: mergeNestedConfig(\n inferredConfig.video as any,\n userOptions?.video as any,\n ) as any,\n audio: mergeNestedConfig(\n inferredConfig.audio as any,\n userOptions?.audio as any,\n ) as any,\n };\n}\n\n/**\n * 品質プリセットを適用\n */\nfunction applyQualityPreset(\n config: EncodeOptions,\n quality?: QualityPreset,\n): EncodeOptions {\n if (!quality) return config;\n\n const width = config.width || 640;\n const height = config.height || 480;\n const pixels = width * 
height;\n\n // 解像度とフレームレートに基づいてビットレートを計算\n const basePixelsPerSecond = pixels * (config.frameRate || 30);\n\n let videoBitrate: number;\n let audioBitrate: number;\n\n switch (quality) {\n case \"low\":\n videoBitrate = Math.max(500_000, basePixelsPerSecond * 0.1);\n audioBitrate = 64_000;\n break;\n case \"medium\":\n videoBitrate = Math.max(1_000_000, basePixelsPerSecond * 0.2);\n audioBitrate = 128_000;\n break;\n case \"high\":\n videoBitrate = Math.max(2_000_000, basePixelsPerSecond * 0.4);\n audioBitrate = 192_000;\n break;\n case \"lossless\":\n videoBitrate = Math.max(10_000_000, basePixelsPerSecond * 1.0);\n audioBitrate = 320_000;\n break;\n default:\n return config;\n }\n\n const mergedAudio =\n config.audio === false\n ? false\n : {\n ...(config.audio as any),\n };\n\n if (mergedAudio && typeof mergedAudio === \"object\") {\n const codec = (mergedAudio.codec || \"aac\") as any;\n if (\n codec !== \"pcm\" &&\n codec !== \"ulaw\" &&\n codec !== \"alaw\" &&\n mergedAudio.bitrate == null\n ) {\n mergedAudio.bitrate = audioBitrate;\n }\n }\n\n return {\n ...config,\n video:\n config.video === false\n ? false\n : {\n ...(config.video as any),\n bitrate: (config.video as any)?.bitrate || videoBitrate,\n },\n audio: mergedAudio,\n };\n}\n\n/**\n * EncodeOptionsから内部のEncoderConfigに変換\n */\nfunction convertToEncoderConfig(options: EncodeOptions): EncoderConfig {\n const videoOptions =\n options.video && typeof options.video === \"object\" ? options.video : null;\n\n const config: EncoderConfig = {\n width: options.video === false ? 0 : options.width || 640,\n height: options.video === false ? 0 : options.height || 480,\n frameRate: options.frameRate || 30,\n videoBitrate:\n options.video === false ? 0 : videoOptions?.bitrate || 1_000_000,\n audioBitrate: 0,\n sampleRate: 0,\n channels: 0,\n container: options.container || \"mp4\",\n codec: {\n video: options.video === false ? 
undefined : videoOptions?.codec || \"avc\",\n audio: undefined,\n },\n latencyMode:\n options.video === false\n ? \"quality\"\n : options.latencyMode || videoOptions?.latencyMode || \"quality\",\n hardwareAcceleration:\n options.video === false\n ? \"no-preference\"\n : videoOptions?.hardwareAcceleration || \"no-preference\",\n keyFrameInterval:\n options.video === false ? undefined : videoOptions?.keyFrameInterval,\n audioBitrateMode: undefined,\n firstTimestampBehavior: options.firstTimestampBehavior || \"offset\",\n maxVideoQueueSize: options.maxVideoQueueSize || 30,\n maxAudioQueueSize: options.maxAudioQueueSize || 30,\n backpressureStrategy: options.backpressureStrategy || \"drop\",\n };\n\n if (options.video !== false && videoOptions?.codecString) {\n config.codecString = {\n ...(config.codecString ?? {}),\n video: videoOptions.codecString,\n };\n }\n\n if (options.video !== false && videoOptions) {\n const videoEncoderConfig: Partial<VideoEncoderConfig> = {};\n if (typeof videoOptions.quantizer === \"number\") {\n (videoEncoderConfig as any).quantizer = videoOptions.quantizer;\n }\n if (config.codec?.video === \"avc\" && videoOptions.avc?.format) {\n (videoEncoderConfig as any).avc = { format: videoOptions.avc.format };\n }\n if (config.codec?.video === \"hevc\" && videoOptions.hevc?.format) {\n (videoEncoderConfig as any).hevc = { format: videoOptions.hevc.format };\n }\n if (Object.keys(videoEncoderConfig).length > 0) {\n config.videoEncoderConfig = videoEncoderConfig;\n }\n }\n\n if (options.audio !== false) {\n const audioOptions = (options.audio as any) || {};\n const requestedCodec = (audioOptions.codec || \"aac\") as any;\n const isTelephonyCodec =\n requestedCodec === \"ulaw\" || requestedCodec === \"alaw\";\n const isPcmCodec = requestedCodec === \"pcm\";\n\n const defaultSampleRate =\n audioOptions.sampleRate || (isTelephonyCodec ? 8000 : 48000);\n const defaultChannels = audioOptions.channels || (isTelephonyCodec ? 
1 : 2);\n\n let defaultBitrate: number | undefined = audioOptions.bitrate;\n if (defaultBitrate == null) {\n if (isPcmCodec) {\n defaultBitrate = defaultSampleRate * defaultChannels * 16; // Approximate bits per second\n } else if (isTelephonyCodec) {\n defaultBitrate = 64_000;\n } else if (requestedCodec === \"mp3\") {\n defaultBitrate = 128_000;\n } else if (requestedCodec === \"flac\") {\n defaultBitrate = 512_000;\n } else if (requestedCodec === \"vorbis\") {\n defaultBitrate = 128_000;\n } else {\n defaultBitrate = 128_000;\n }\n }\n\n config.sampleRate = defaultSampleRate;\n config.channels = defaultChannels;\n config.audioBitrate = defaultBitrate;\n config.codec = {\n ...config.codec,\n audio: requestedCodec,\n };\n config.audioBitrateMode =\n audioOptions.bitrateMode ||\n (requestedCodec === \"aac\" ? \"variable\" : \"constant\");\n\n if (audioOptions.codecString) {\n config.codecString = {\n ...(config.codecString ?? {}),\n audio: audioOptions.codecString,\n };\n }\n\n const audioEncoderConfig: Partial<AudioEncoderConfig> = {};\n if (requestedCodec === \"aac\" && audioOptions.aac?.format) {\n (audioEncoderConfig as any).aac = { format: audioOptions.aac.format };\n }\n if (Object.keys(audioEncoderConfig).length > 0) {\n config.audioEncoderConfig = audioEncoderConfig;\n }\n }\n\n if (options.audio === false) {\n config.codec = {\n ...config.codec,\n audio: undefined,\n };\n }\n\n return config;\n}\n\n/**\n * VideoSourceから最初のフレームを取得(AsyncIterableの場合、元のイテレータを消費しない)\n */\nasync function getFirstFrame(source: VideoSource): Promise<Frame | null> {\n if (Array.isArray(source)) {\n return source.length > 0 ? 
source[0] : null;\n }\n\n if (source instanceof MediaStream) {\n // MediaStreamから最初のフレームを取得するのは複雑なので、\n // VideoTrackの設定から解像度を推定\n const videoTracks = source.getVideoTracks();\n if (videoTracks.length > 0) {\n const settings = videoTracks[0].getSettings();\n if (settings.width && settings.height) {\n // 仮想的なフレームサイズ情報として返す\n return {\n displayWidth: settings.width,\n displayHeight: settings.height,\n } as any;\n }\n }\n return null;\n }\n\n if (source && typeof (source as any)[Symbol.asyncIterator] === \"function\") {\n // AsyncIterableは先頭フレームを安全にプレビューする手段がないため\n // ここでは推定を行わず、後続処理でデフォルト値にフォールバックする\n return null;\n }\n\n // VideoFileの場合は実装が必要(今回は簡略化)\n return null;\n}\n\nasync function enrichConfigFromVideoFile(\n config: Partial<EncodeOptions>,\n videoFile: VideoFile,\n): Promise<void> {\n if (typeof document === \"undefined\" || typeof URL === \"undefined\") {\n return;\n }\n\n const file = videoFile.file;\n if (!(typeof Blob !== \"undefined\" && file instanceof Blob)) {\n return;\n }\n\n const video = document.createElement(\"video\");\n video.preload = \"metadata\";\n\n let objectUrl: string | null = null;\n try {\n objectUrl = URL.createObjectURL(file);\n video.src = objectUrl;\n\n await new Promise<void>((resolve, reject) => {\n const cleanup = () => {\n video.onloadedmetadata = null;\n video.onerror = null;\n };\n video.onloadedmetadata = () => {\n cleanup();\n resolve();\n };\n video.onerror = () => {\n cleanup();\n reject(new Error(\"Failed to load video metadata\"));\n };\n });\n\n if (video.videoWidth && video.videoHeight) {\n config.width = video.videoWidth;\n config.height = video.videoHeight;\n }\n\n if (!config.container && typeof videoFile.type === \"string\") {\n if (videoFile.type.includes(\"webm\")) {\n config.container = \"webm\";\n } else if (videoFile.type.includes(\"mp4\")) {\n config.container = \"mp4\";\n }\n }\n } catch (error) {\n console.warn(\"Failed to infer metadata from VideoFile\", error);\n } finally {\n if (objectUrl) {\n 
URL.revokeObjectURL(objectUrl);\n }\n video.src = \"\";\n video.remove?.();\n }\n}\n\nfunction isVideoFileSource(source: VideoSource): source is VideoFile {\n if (!source || typeof source !== \"object\") {\n return false;\n }\n\n const maybeVideoFile = source as Partial<VideoFile> & { file?: unknown };\n if (!(\"file\" in maybeVideoFile)) {\n return false;\n }\n\n const file = maybeVideoFile.file;\n if (typeof Blob !== \"undefined\" && file instanceof Blob) {\n return true;\n }\n return false;\n}\n\n/**\n * フレームから解像度を取得\n */\nfunction getFrameDimensions(frame: Frame | null): {\n width: number;\n height: number;\n} {\n if (!frame) {\n return { width: 640, height: 480 };\n }\n\n if (frame instanceof VideoFrame) {\n return {\n width: frame.displayWidth || frame.codedWidth,\n height: frame.displayHeight || frame.codedHeight,\n };\n }\n\n if (frame instanceof HTMLCanvasElement || frame instanceof OffscreenCanvas) {\n return { width: frame.width, height: frame.height };\n }\n\n if (frame instanceof ImageBitmap) {\n return { width: frame.width, height: frame.height };\n }\n\n if (frame instanceof ImageData) {\n return { width: frame.width, height: frame.height };\n }\n\n // 仮想的なフレーム情報の場合\n if (\"displayWidth\" in frame && \"displayHeight\" in frame) {\n return {\n width: (frame as any).displayWidth,\n height: (frame as any).displayHeight,\n };\n }\n\n return { width: 640, height: 480 };\n}\n","/**\n * Worker creation and management\n */\n\nimport { EncodeError } from \"../types\";\n\nfunction resolveWorkerUrl(): string {\n const processUrl =\n typeof process !== \"undefined\"\n ? process.env?.WEBCODECS_WORKER_URL\n : undefined;\n const windowUrl =\n typeof window !== \"undefined\"\n ? 
(window as any).__WEBCODECS_WORKER_URL__\n : undefined;\n\n const configuredUrl =\n (typeof windowUrl === \"string\" && windowUrl.trim()) ||\n (typeof processUrl === \"string\" && processUrl.trim());\n if (configuredUrl) {\n return configuredUrl;\n }\n\n if (typeof document !== \"undefined\" && document.baseURI) {\n return new URL(\"webcodecs-worker.js\", document.baseURI).toString();\n }\n\n return \"/webcodecs-worker.js\";\n}\n\n/**\n * Create external worker\n */\nfunction createExternalWorker(): Worker {\n try {\n return new Worker(resolveWorkerUrl(), { type: \"module\" });\n } catch (error) {\n throw new EncodeError(\n \"initialization-failed\",\n \"Failed to create external worker. Make sure webcodecs-worker.js is available and WEBCODECS_WORKER_URL is configured when needed.\",\n error,\n );\n }\n}\n\n/**\n * Create inline worker (for test environments)\n */\nfunction createInlineWorker(): { worker: Worker; blobUrl: string } {\n try {\n const workerSource = getWorkerSource();\n const blob = new Blob([workerSource], { type: \"application/javascript\" });\n const blobUrl = URL.createObjectURL(blob);\n\n const worker = new Worker(blobUrl, { type: \"module\" });\n\n return { worker, blobUrl };\n } catch (error) {\n throw new EncodeError(\n \"initialization-failed\",\n \"Failed to create inline worker\",\n error,\n );\n }\n}\n\n/**\n * Create appropriate worker\n */\nexport function createWorker(): Worker | { worker: Worker; blobUrl: string } {\n const isTestEnvironment = detectTestEnvironment();\n const isProductionEnvironment = detectProductionEnvironment();\n const inlineOverride = hasInlineWorkerOverride();\n const inlineDisabled = isInlineWorkerDisabled();\n\n if (inlineOverride) {\n if (isProductionEnvironment && !allowInlineOverrideInProduction()) {\n throw new Error(\n \"[WorkerCommunicator] Inline worker override is disabled in production environments.\",\n );\n }\n console.warn(\"[WorkerCommunicator] Using inline worker (override).\");\n return 
createInlineWorker();\n }\n\n if (isTestEnvironment && !inlineDisabled) {\n console.warn(\n \"[WorkerCommunicator] Using inline worker (test environment).\",\n );\n return createInlineWorker();\n }\n\n try {\n return createExternalWorker();\n } catch (error) {\n if (!inlineDisabled && !isProductionEnvironment) {\n console.warn(\n \"[WorkerCommunicator] Failed to create external worker. Falling back to inline worker.\",\n error,\n );\n return createInlineWorker();\n }\n\n if (!inlineDisabled) {\n console.error(\n \"[WorkerCommunicator] Failed to create external worker in a production-like environment.\",\n error,\n );\n }\n\n throw error;\n }\n}\n\nfunction detectTestEnvironment(): boolean {\n if (typeof process !== \"undefined\") {\n if (process.env?.VITEST === \"true\") return true;\n if (process.env?.JEST_WORKER_ID !== undefined) return true;\n if (process.env?.NODE_ENV === \"test\") return true;\n if (process.env?.npm_lifecycle_event?.includes(\"test\")) return true;\n }\n\n if (typeof globalThis !== \"undefined\" && (globalThis as any).vi) return true;\n\n if (typeof global !== \"undefined\") {\n const nodeEnv = (global as any).process?.env?.NODE_ENV;\n if (nodeEnv === \"test\") return true;\n }\n\n if (typeof window !== \"undefined\") {\n if (window.navigator?.userAgent?.includes(\"jsdom\")) return true;\n }\n\n return false;\n}\n\nfunction detectProductionEnvironment(): boolean {\n if (typeof process !== \"undefined\") {\n const nodeEnv = process.env?.NODE_ENV;\n if (!nodeEnv) {\n const lifecycle = process.env?.npm_lifecycle_event ?? \"\";\n return /build|start|serve|preview/i.test(lifecycle);\n }\n return [\"production\", \"prod\", \"staging\", \"preview\"].includes(nodeEnv);\n }\n\n if (typeof window !== \"undefined\") {\n const protocol = window.location?.protocol;\n const hostname = window.location?.hostname ?? 
\"\";\n const isLocalHost =\n hostname === \"\" ||\n hostname === \"localhost\" ||\n hostname === \"127.0.0.1\" ||\n hostname.endsWith(\".localhost\");\n\n return protocol === \"https:\" && !isLocalHost;\n }\n\n return false;\n}\n\nfunction hasInlineWorkerOverride(): boolean {\n return (\n (typeof process !== \"undefined\" &&\n process.env?.WEBCODECS_USE_INLINE_WORKER === \"true\") ||\n (typeof window !== \"undefined\" &&\n (window as any).__WEBCODECS_USE_INLINE_WORKER__ === true)\n );\n}\n\nfunction allowInlineOverrideInProduction(): boolean {\n return (\n (typeof process !== \"undefined\" &&\n process.env?.WEBCODECS_ALLOW_INLINE_IN_PROD === \"true\") ||\n (typeof window !== \"undefined\" &&\n (window as any).__WEBCODECS_ALLOW_INLINE_IN_PROD__ === true)\n );\n}\n\nfunction isInlineWorkerDisabled(): boolean {\n return (\n (typeof process !== \"undefined\" &&\n process.env?.WEBCODECS_DISABLE_INLINE_WORKER === \"true\") ||\n (typeof window !== \"undefined\" &&\n (window as any).__WEBCODECS_DISABLE_INLINE_WORKER__ === true)\n );\n}\n\n/**\n * Generate inline worker source code (testing only)\n */\nfunction getWorkerSource(): string {\n return `\n // ⚠️ TESTING ONLY - DO NOT USE IN PRODUCTION ⚠️\n // WebCodecs Encoder Worker (Inline Mock Implementation)\n // This is a minimal mock for testing purposes only.\n // Real encoding should use the external webcodecs-worker.js file.\n \n console.warn('⚠️ Using inline mock worker - FOR TESTING ONLY');\n \n let config = null;\n let processedFrames = 0;\n \n self.onmessage = async function(event) {\n const { type, ...data } = event.data;\n \n try {\n switch (type) {\n case 'initialize':\n config = data.config;\n processedFrames = 0;\n // Wait a bit before sending success response\n setTimeout(() => {\n self.postMessage({ type: 'initialized' });\n }, 50);\n break;\n \n case 'addVideoFrame':\n processedFrames++;\n // Progress update\n self.postMessage({ \n type: 'progress', \n processedFrames,\n totalFrames: data.totalFrames \n 
});\n break;\n \n case 'addAudioData':\n // Audio data processing (placeholder)\n break;\n \n case 'finalize':\n // Wait a bit before returning result\n setTimeout(() => {\n const result = new Uint8Array([0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70]); // MP4 magic number\n self.postMessage({ type: 'finalized', output: result });\n }, 100);\n break;\n \n case 'cancel':\n self.postMessage({ type: 'cancelled' });\n break;\n \n default:\n console.warn('Unknown message type:', type);\n }\n } catch (error) {\n self.postMessage({ \n type: 'error', \n errorDetail: {\n message: error.message,\n type: 'encoding-failed',\n stack: error.stack\n }\n });\n }\n };\n `;\n}\n\n/**\n * Worker communication helper\n */\nexport class WorkerCommunicator {\n private worker: Worker;\n private messageHandlers: Map<string, (data: any) => void> = new Map();\n private workerBlobUrl: string | null = null;\n private pendingWorkerError: { errorDetail: any } | null = null;\n\n constructor() {\n const workerResult = createWorker();\n if (typeof workerResult === \"object\" && \"worker\" in workerResult) {\n // Inline worker case\n this.worker = workerResult.worker;\n this.workerBlobUrl = workerResult.blobUrl;\n } else {\n // External worker case\n this.worker = workerResult;\n }\n this.worker.onmessage = this.handleMessage.bind(this);\n this.worker.onerror = this.handleWorkerError.bind(this);\n }\n\n private handleMessage(event: MessageEvent): void {\n const { type, ...data } = event.data;\n const handler = this.messageHandlers.get(type);\n if (handler) {\n handler(data);\n }\n }\n\n private handleWorkerError(event: ErrorEvent): void {\n if (typeof event.preventDefault === \"function\") {\n event.preventDefault();\n }\n\n const payload = {\n errorDetail: {\n message: event.message\n ? 
`Worker error: ${event.message}`\n : \"Worker error\",\n type: \"worker-error\",\n stack: (event as any).error?.stack,\n },\n };\n\n const handler = this.messageHandlers.get(\"error\");\n if (handler) {\n handler(payload);\n return;\n }\n\n this.pendingWorkerError = payload;\n console.error(\"Worker error before error handler registration:\", event);\n }\n\n /**\n * Register message handler\n */\n on(type: string, handler: (data: any) => void): void {\n this.messageHandlers.set(type, handler);\n if (type === \"error\" && this.pendingWorkerError) {\n const pending = this.pendingWorkerError;\n this.pendingWorkerError = null;\n handler(pending);\n }\n }\n\n /**\n * Unregister message handler\n */\n off(type: string): void {\n this.messageHandlers.delete(type);\n }\n\n /**\n * Send message to worker\n */\n send(type: string, data: any = {}): void {\n // Detect transferable objects for optimization\n const transferables: Transferable[] = [];\n\n // Safari compatibility: VideoFrame and AudioData should NOT be transferred\n // as they cause issues in Safari when used as transferable objects\n const isSafari =\n typeof navigator !== \"undefined\" &&\n /^((?!chrome|android).)*safari/i.test(navigator.userAgent);\n\n // Optimize transfer only if ArrayBuffer is included\n if (data.buffer instanceof ArrayBuffer) {\n transferables.push(data.buffer);\n }\n\n // Deep scan for nested ArrayBuffers, but skip VideoFrame/AudioData\n this.collectTransferables(data, transferables, isSafari);\n\n // Use optimized transfer if transferable objects exist\n if (transferables.length > 0) {\n try {\n this.worker.postMessage({ type, ...data }, transferables);\n } catch (error) {\n // Safari fallback: if transferable fails, send without transferables\n console.warn(\n \"Transferable object transfer failed, falling back to clone:\",\n error,\n );\n this.worker.postMessage({ type, ...data });\n }\n } else {\n this.worker.postMessage({ type, ...data });\n }\n }\n\n /**\n * Recursively collect 
transferable objects while avoiding problematic types\n */\n private collectTransferables(\n obj: any,\n transferables: Transferable[],\n isSafari: boolean,\n ): void {\n if (!obj || typeof obj !== \"object\") return;\n\n // Skip VideoFrame and AudioData objects as they cause Safari issues\n if (typeof VideoFrame !== \"undefined\" && obj instanceof VideoFrame) return;\n if (typeof AudioData !== \"undefined\" && obj instanceof AudioData) return;\n\n // Safari-specific: avoid transferring certain objects\n if (isSafari) {\n // Be more conservative with Safari - only transfer obvious ArrayBuffers\n if (obj instanceof ArrayBuffer && !transferables.includes(obj)) {\n transferables.push(obj);\n }\n return;\n }\n\n // For other browsers, collect more transferable types\n if (obj instanceof ArrayBuffer && !transferables.includes(obj)) {\n transferables.push(obj);\n } else if (obj instanceof MessagePort && !transferables.includes(obj)) {\n transferables.push(obj);\n } else if (\n typeof ImageBitmap !== \"undefined\" &&\n obj instanceof ImageBitmap &&\n !transferables.includes(obj)\n ) {\n transferables.push(obj);\n }\n\n // Recursively check object properties\n for (const key in obj) {\n if (Object.prototype.hasOwnProperty.call(obj, key)) {\n this.collectTransferables(obj[key], transferables, isSafari);\n }\n }\n }\n\n /**\n * Terminate communication\n */\n terminate(): void {\n this.messageHandlers.clear();\n if (this.worker) {\n this.worker.terminate();\n }\n if (this.workerBlobUrl) {\n URL.revokeObjectURL(this.workerBlobUrl);\n this.workerBlobUrl = null;\n }\n }\n}\n","/**\n * VideoFrame変換ユーティリティ\n */\n\nimport { EncodeError, Frame } from \"../types\";\n\n/**\n * FrameをVideoFrameに変換\n */\nexport async function convertToVideoFrame(\n frame: Frame,\n timestamp: number,\n): Promise<VideoFrame> {\n if (frame instanceof VideoFrame) {\n // Always create a new VideoFrame to ensure clear ownership\n // The caller owns the returned VideoFrame and must close it\n return new 
VideoFrame(frame, { timestamp });\n }\n\n // 他のFrame型をVideoFrameに変換\n if (frame instanceof HTMLCanvasElement) {\n return new VideoFrame(frame, { timestamp });\n }\n\n if (frame instanceof OffscreenCanvas) {\n return new VideoFrame(frame, { timestamp });\n }\n\n if (frame instanceof ImageBitmap) {\n return new VideoFrame(frame, { timestamp });\n }\n\n if (frame instanceof ImageData) {\n // ImageDataの場合、BufferInitを使用\n return new VideoFrame(frame.data, {\n format: \"RGBA\",\n codedWidth: frame.width,\n codedHeight: frame.height,\n timestamp,\n });\n }\n\n // テスト環境でのモックオブジェクトの場合、プロパティベースで判定\n if (frame && typeof frame === \"object\") {\n // ImageDataに似たオブジェクト\n if (\"width\" in frame && \"height\" in frame && \"data\" in frame) {\n const imageDataLike = frame as {\n width: number;\n height: number;\n data: Uint8ClampedArray;\n };\n return new VideoFrame(imageDataLike.data, {\n format: \"RGBA\",\n codedWidth: imageDataLike.width,\n codedHeight: imageDataLike.height,\n timestamp,\n });\n }\n\n // Canvasに似たオブジェクト\n if (\n \"width\" in frame &&\n \"height\" in frame &&\n (\"getContext\" in frame || \"transferToImageBitmap\" in frame)\n ) {\n return new VideoFrame(frame as any, { timestamp });\n }\n\n // ImageBitmapに似たオブジェクト\n if (\n \"width\" in frame &&\n \"height\" in frame &&\n \"close\" in frame &&\n typeof (frame as any).close === \"function\"\n ) {\n return new VideoFrame(frame as any, { timestamp });\n }\n }\n\n throw new EncodeError(\n \"invalid-input\",\n `Unsupported frame type: ${typeof frame}. 
Frame must be VideoFrame, HTMLCanvasElement, OffscreenCanvas, ImageBitmap, or ImageData.`,\n );\n}\n","/**\n * Core encode function implementation\n */\n\nimport {\n VideoSource,\n EncodeOptions,\n EncodeError,\n Frame,\n ProgressInfo,\n VideoFile,\n} from \"../types\";\nimport { inferAndBuildConfig } from \"../utils/config-parser\";\nimport { WorkerCommunicator } from \"../worker/worker-communicator\";\nimport { convertToVideoFrame } from \"../utils/video-frame-converter\";\n\n/**\n * Main video encoding function\n *\n * @param source Video source to encode\n * @param options Encoding options\n * @returns Encoded binary data\n */\nexport async function encode(\n source: VideoSource,\n options?: EncodeOptions,\n): Promise<Uint8Array> {\n let communicator: WorkerCommunicator | null = null;\n\n try {\n // Configuration inference and normalization\n const config = await inferAndBuildConfig(source, options);\n\n // Start communication with worker\n communicator = new WorkerCommunicator();\n\n // Execute encoding process\n const result = await performEncoding(communicator, source, config, options);\n\n return result;\n } catch (error) {\n // Unified error handling\n const encodeError =\n error instanceof EncodeError\n ? error\n : new EncodeError(\n \"encoding-failed\",\n `Encoding failed: ${error instanceof Error ? 
error.message : String(error)}`,\n error,\n );\n\n if (options?.onError) {\n options.onError(encodeError);\n }\n\n throw encodeError;\n } finally {\n // Resource cleanup\n if (communicator) {\n communicator.terminate();\n }\n }\n}\n\n/**\n * Execute the actual encoding process\n */\nasync function performEncoding(\n communicator: WorkerCommunicator,\n source: VideoSource,\n config: any,\n options?: EncodeOptions,\n): Promise<Uint8Array> {\n return new Promise<Uint8Array>((resolve, reject) => {\n let processedFrames = 0;\n let totalFrames: number | undefined;\n const startTime = Date.now();\n\n // Calculate totalFrames upfront for progress tracking\n calculateTotalFrames(source, config)\n .then((frames) => {\n totalFrames = frames;\n })\n .catch((error) => {\n console.warn(\"Failed to calculate total frames:\", error);\n });\n\n // Update progress information\n const updateProgress = (stage: string) => {\n if (options?.onProgress) {\n const elapsed = Date.now() - startTime;\n const fps =\n processedFrames > 0 ? (processedFrames / elapsed) * 1000 : 0;\n const percent = totalFrames ? (processedFrames / totalFrames) * 100 : 0;\n const estimatedRemainingMs =\n totalFrames && fps > 0\n ? 
((totalFrames - processedFrames) / fps) * 1000\n : undefined;\n\n const progressInfo: ProgressInfo = {\n percent,\n processedFrames,\n totalFrames,\n fps,\n stage,\n estimatedRemainingMs,\n };\n\n options.onProgress(progressInfo);\n }\n };\n\n // Handle messages from worker\n communicator.on(\"initialized\", () => {\n updateProgress(\"encoding\");\n // Start frame processing\n processVideoSource(communicator, source, config)\n .then(() => {\n updateProgress(\"finalizing\");\n communicator.send(\"finalize\");\n })\n .catch(reject);\n });\n\n communicator.on(\n \"progress\",\n (data: { processedFrames: number; totalFrames?: number }) => {\n processedFrames = data.processedFrames;\n if (data.totalFrames !== undefined) {\n totalFrames = data.totalFrames;\n }\n updateProgress(\"encoding\");\n },\n );\n\n communicator.on(\"finalized\", (data: { output: Uint8Array | null }) => {\n if (data.output) {\n updateProgress(\"finalizing\");\n resolve(data.output);\n } else {\n reject(new EncodeError(\"encoding-failed\", \"No output produced\"));\n }\n });\n\n communicator.on(\"error\", (data: { errorDetail: any }) => {\n const error = new EncodeError(\n data.errorDetail.type || \"encoding-failed\",\n data.errorDetail.message || \"Worker error\",\n data.errorDetail,\n );\n reject(error);\n });\n\n // Start encoding\n communicator.send(\"initialize\", { config, totalFrames });\n });\n}\n\n/**\n * Process VideoSource and send to worker\n */\nasync function processVideoSource(\n communicator: WorkerCommunicator,\n source: VideoSource,\n config: any,\n): Promise<void> {\n if (Array.isArray(source)) {\n // Process static frame array\n await processFrameArray(communicator, source, config);\n } else if (source instanceof MediaStream) {\n // Process MediaStream\n await processMediaStream(communicator, source, config);\n } else if (Symbol.asyncIterator in source) {\n // Process AsyncIterable\n await processAsyncIterable(communicator, source, config);\n } else {\n // Process VideoFile\n 
await processVideoFile(communicator, source as VideoFile, config);\n }\n}\n\n/**\n * Process frame array\n */\nasync function processFrameArray(\n communicator: WorkerCommunicator,\n frames: Frame[],\n config?: any,\n): Promise<void> {\n const frameRate = config?.frameRate || 30;\n for (let i = 0; i < frames.length; i++) {\n const frame = frames[i];\n const timestamp = (i * 1000000) / frameRate; // Use frameRate from config\n\n await addFrameToWorker(communicator, frame, timestamp);\n }\n}\n\n/**\n * Process AsyncIterable\n */\nasync function processAsyncIterable(\n communicator: WorkerCommunicator,\n source: AsyncIterable<Frame>,\n config?: any,\n): Promise<void> {\n let frameIndex = 0;\n const frameRate = config?.frameRate || 30;\n\n for await (const frame of source) {\n const timestamp = (frameIndex * 1000000) / frameRate; // Use frameRate from config\n await addFrameToWorker(communicator, frame, timestamp);\n frameIndex++;\n }\n}\n\n/**\n * Process MediaStream\n */\nasync function processMediaStream(\n communicator: WorkerCommunicator,\n stream: MediaStream,\n _config: any,\n): Promise<void> {\n // MediaStream processing is complex, so use MediaStreamTrackProcessor\n const videoTracks = stream.getVideoTracks();\n const audioTracks = stream.getAudioTracks();\n\n const readers: ReadableStreamDefaultReader<any>[] = [];\n const processingPromises: Promise<void>[] = [];\n\n try {\n // Process video tracks\n if (videoTracks.length > 0) {\n const videoTrack = videoTracks[0];\n const processor = new MediaStreamTrackProcessor({ track: videoTrack });\n const reader =\n processor.readable.getReader() as ReadableStreamDefaultReader<VideoFrame>;\n readers.push(reader);\n\n processingPromises.push(processVideoReader(communicator, reader));\n }\n\n // Process audio tracks\n if (audioTracks.length > 0) {\n const audioTrack = audioTracks[0];\n const processor = new MediaStreamTrackProcessor({ track: audioTrack });\n const reader =\n processor.readable.getReader() as 
ReadableStreamDefaultReader<AudioData>;\n readers.push(reader);\n\n processingPromises.push(processAudioReader(communicator, reader));\n }\n\n // Wait for all processing to complete\n await Promise.all(processingPromises);\n } finally {\n // Clean up readers\n for (const reader of readers) {\n try {\n reader.cancel();\n } catch (e) {\n // Ignore errors (may already be cancelled)\n }\n }\n }\n}\n\n/**\n * Process VideoFrame reader\n */\nasync function processVideoReader(\n communicator: WorkerCommunicator,\n reader: ReadableStreamDefaultReader<VideoFrame>,\n): Promise<void> {\n try {\n // eslint-disable-next-line no-constant-condition\n while (true) {\n const { value, done } = await reader.read();\n if (done || !value) break;\n\n try {\n await addFrameToWorker(communicator, value, value.timestamp || 0);\n } finally {\n value.close();\n }\n }\n } catch (error) {\n throw new EncodeError(\n \"video-encoding-error\",\n `Video stream processing error: ${error instanceof Error ? error.message : String(error)}`,\n error,\n );\n }\n}\n\n/**\n * Process AudioData reader\n */\nasync function processAudioReader(\n communicator: WorkerCommunicator,\n reader: ReadableStreamDefaultReader<AudioData>,\n): Promise<void> {\n try {\n // eslint-disable-next-line no-constant-condition\n while (true) {\n const { value, done } = await reader.read();\n if (done || !value) break;\n\n try {\n communicator.send(\"addAudioData\", {\n audio: value,\n timestamp: value.timestamp || 0,\n format: \"f32\",\n sampleRate: value.sampleRate,\n numberOfFrames: value.numberOfFrames,\n numberOfChannels: value.numberOfChannels,\n });\n } finally {\n value.close();\n }\n }\n } catch (error) {\n throw new EncodeError(\n \"audio-encoding-error\",\n `Audio stream processing error: ${error instanceof Error ? 
error.message : String(error)}`,\n error,\n );\n }\n}\n\n/**\n * Send frame to worker\n */\nasync function addFrameToWorker(\n communicator: WorkerCommunicator,\n frame: Frame,\n timestamp: number,\n): Promise<void> {\n // Convert frame to VideoFrame\n const videoFrame = await convertToVideoFrame(frame, timestamp);\n\n try {\n communicator.send(\"addVideoF