webcodecs-encoder

A TypeScript library for browser environments that encodes video (H.264/AVC, VP9, VP8) and audio (AAC, Opus) with the WebCodecs API and muxes them into MP4 or WebM containers, with real-time streaming support and a new function-first API design.

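For orientation before the sources: a minimal usage sketch, assuming only the exported encode function and the EncodeOptions fields defined in src/types.ts below. The canvas setup and Blob wrapping are illustrative, not part of the library.

import { encode } from "webcodecs-encoder";

// Draw 90 frames on a canvas and encode them into an MP4 blob.
async function recordCanvasAnimation(canvas: HTMLCanvasElement): Promise<Blob> {
  const ctx = canvas.getContext("2d")!;
  const frames: ImageData[] = [];
  for (let i = 0; i < 90; i++) {
    ctx.fillStyle = `hsl(${i * 4}, 80%, 50%)`;
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    frames.push(ctx.getImageData(0, 0, canvas.width, canvas.height));
  }

  // encode() accepts Frame[] | AsyncIterable<Frame> | MediaStream | VideoFile
  // and resolves to the finished container bytes.
  const mp4 = await encode(frames, {
    quality: "medium",
    container: "mp4",
    frameRate: 30,
    onProgress: (p) =>
      console.log(`${p.percent.toFixed(1)}% at ${p.fps.toFixed(0)} fps`),
  });

  return new Blob([mp4], { type: "video/mp4" });
}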
{"version":3,"sources":["../src/index.ts","../src/types.ts","../src/utils/config-parser.ts","../src/worker/worker-communicator.ts","../src/utils/video-frame-converter.ts","../src/core/encode.ts","../src/stream/encode-stream.ts","../src/utils/can-encode.ts","../src/factory/encoder.ts","../src/mediastream-recorder.ts"],"sourcesContent":["// メイン関数ファーストAPI\nexport { encode } from \"./core/encode\";\nexport { encodeStream } from \"./stream/encode-stream\";\nexport { canEncode } from \"./utils/can-encode\";\n\n// 高度な使用向け:カスタムエンコーダーファクトリ\nexport { createEncoder, encoders, examples } from \"./factory/encoder\";\nexport type { EncoderFactory } from \"./factory/encoder\";\n\n// レガシーサポート:既存のクラスベースAPI(非推奨)\nexport { MediaStreamRecorder } from \"./mediastream-recorder\";\n\n// 型定義\nexport type {\n VideoSource,\n Frame,\n EncodeOptions,\n QualityPreset,\n VideoConfig,\n AudioConfig,\n ProgressInfo,\n EncodeErrorType,\n VideoFile,\n} from \"./types\";\n\nexport { EncodeError } from './types';\n\n// 内部実装用(高度な使用のみ)\nexport type {\n EncoderConfig,\n ProcessingStage,\n EncoderErrorType,\n WorkerMessage,\n MainThreadMessage,\n VideoEncoderGetter,\n AudioEncoderGetter,\n AudioDataGetter,\n} from './types';\n","// Type definitions for new function-first API\n\n// Basic frame type\nexport type Frame = VideoFrame | HTMLCanvasElement | OffscreenCanvas | ImageBitmap | ImageData;\n\n// Video file type\nexport interface VideoFile {\n file: File | Blob;\n type: string;\n}\n\n// Video source type (all input formats)\nexport type VideoSource = \n | Frame[] // Static frame array\n | AsyncIterable<Frame> // Streaming frames\n | MediaStream // Camera/screen sharing\n | VideoFile; // Existing video file\n\n// Quality presets\nexport type QualityPreset = 'low' | 'medium' | 'high' | 'lossless';\n\n// Video configuration\nexport interface VideoConfig {\n codec?: 'avc' | 'hevc' | 'vp9' | 'vp8' | 'av1';\n bitrate?: number;\n hardwareAcceleration?: 'no-preference' | 'prefer-hardware' | 'prefer-software';\n latencyMode?: 'quality' | 'realtime';\n keyFrameInterval?: number;\n}\n\n// Audio configuration\nexport interface AudioConfig {\n codec?: 'aac' | 'opus';\n bitrate?: number;\n sampleRate?: number;\n channels?: number;\n bitrateMode?: 'constant' | 'variable';\n}\n\n// Progress information\nexport interface ProgressInfo {\n percent: number;\n processedFrames: number;\n totalFrames?: number;\n fps: number;\n stage: string;\n estimatedRemainingMs?: number;\n}\n\n// Encode options\nexport interface EncodeOptions {\n // Basic settings (auto-detectable)\n width?: number;\n height?: number;\n frameRate?: number;\n\n // Quality preset\n quality?: QualityPreset;\n\n // Detailed settings (optional)\n video?: VideoConfig | false; // false to disable video\n audio?: AudioConfig | false; // false to disable audio\n container?: 'mp4' | 'webm';\n\n // Timestamp handling\n firstTimestampBehavior?: \"offset\" | \"strict\";\n\n // Latency mode (top level)\n latencyMode?: \"quality\" | \"realtime\";\n\n // Backpressure control\n maxVideoQueueSize?: number; // Default: 30 frames\n maxAudioQueueSize?: number; // Default: 30 chunks\n backpressureStrategy?: \"drop\" | \"wait\"; // Default: \"drop\"\n\n // Callbacks\n onProgress?: (progress: ProgressInfo) => void;\n onError?: (error: EncodeError) => void;\n}\n\n// Error types\nexport type EncodeErrorType =\n | 'not-supported'\n | 'initialization-failed'\n | 'configuration-error'\n | 'invalid-input' // Input source or frame data is invalid\n | 'encoding-failed'\n | 'video-encoding-error'\n | 
'audio-encoding-error'\n | 'muxing-failed'\n | 'cancelled'\n | 'timeout'\n | 'worker-error'\n | 'filesystem-error' // VideoFile access errors\n | 'unknown';\n\n// Custom error class\nexport class EncodeError extends Error {\n type: EncodeErrorType;\n cause?: unknown;\n\n constructor(type: EncodeErrorType, message: string, cause?: unknown) {\n super(message);\n this.name = 'EncodeError';\n this.type = type;\n this.cause = cause;\n Object.setPrototypeOf(this, EncodeError.prototype);\n }\n}\n\n// --- Internal implementation type definitions (worker communication, etc.) ---\n\n// Basic configuration type for worker communication (internal implementation)\nexport interface EncoderConfig {\n width: number;\n height: number;\n frameRate: number;\n videoBitrate: number; // bps\n audioBitrate: number; // bps\n /**\n * Controls bitrate distribution for AAC. \"constant\" produces constant\n * bitrate (CBR) output while \"variable\" enables variable bitrate (VBR).\n * Not all browsers respect this setting. Chrome 119+ improves CBR support.\n */\n audioBitrateMode?: \"constant\" | \"variable\";\n sampleRate: number; // Hz\n channels: number; // e.g., 1 for mono, 2 for stereo\n container?: \"mp4\" | \"webm\"; // Default: 'mp4'. Set 'webm' for WebM output.\n codec?: {\n video?: \"avc\" | \"hevc\" | \"vp9\" | \"vp8\" | \"av1\"; // Default: 'avc' (H.264)\n audio?: \"aac\" | \"opus\"; // Default: 'aac'\n };\n /**\n * Optional codec string overrides passed directly to the encoders.\n * For example: `{ video: 'avc1.640028', audio: 'mp4a.40.2' }`.\n */\n codecString?: {\n video?: string;\n audio?: string;\n };\n latencyMode?: \"quality\" | \"realtime\"; // Default: 'quality'\n /** Preference for hardware or software encoding. */\n hardwareAcceleration?:\n | \"prefer-hardware\"\n | \"prefer-software\"\n | \"no-preference\";\n /** Drop new video frames when the number of queued frames exceeds `maxQueueDepth`. */\n dropFrames?: boolean;\n /** Maximum number of queued video frames before dropping. Defaults to `Infinity`. */\n maxQueueDepth?: number;\n /** Total frames for progress calculation if known in advance. */\n totalFrames?: number;\n /** Force a key frame every N video frames. */\n keyFrameInterval?: number;\n /**\n * How to handle the first timestamp of a track.\n * 'offset': Offsets all timestamps so the first one is 0.\n * 'strict': Requires the first timestamp to be 0 (default).\n */\n firstTimestampBehavior?: \"offset\" | \"strict\";\n /** Backpressure control for video queue */\n maxVideoQueueSize?: number;\n /** Backpressure control for audio queue */\n maxAudioQueueSize?: number;\n /** Backpressure strategy: drop frames or wait */\n backpressureStrategy?: \"drop\" | \"wait\";\n /** Additional VideoEncoder configuration overrides. */\n videoEncoderConfig?: Partial<VideoEncoderConfig>;\n /** Additional AudioEncoder configuration overrides. 
*/\n audioEncoderConfig?: Partial<AudioEncoderConfig>;\n}\n\n// Processing stage definitions\nexport enum ProcessingStage {\n Initializing = \"initializing\",\n VideoEncoding = \"video-encoding\",\n AudioEncoding = \"audio-encoding\",\n Muxing = \"muxing\",\n Finalizing = \"finalizing\",\n}\n\n// Encoder error types (internal implementation)\nexport enum EncoderErrorType {\n NotSupported = \"not-supported\",\n InitializationFailed = \"initialization-failed\",\n ConfigurationError = \"configuration-error\",\n InvalidInput = \"invalid-input\", // Input source or frame data is invalid\n EncodingFailed = \"encoding-failed\", // Generic encoding error\n VideoEncodingError = \"video-encoding-error\", // Specific video encoding error\n AudioEncodingError = \"audio-encoding-error\", // Specific audio encoding error\n MuxingFailed = \"muxing-failed\",\n Cancelled = \"cancelled\",\n Timeout = \"timeout\",\n WorkerError = \"worker-error\",\n FilesystemError = \"filesystem-error\", // VideoFile access errors\n Unknown = \"unknown\",\n}\n\n// --- Worker communication message types ---\n\n// Messages TO the Worker\nexport interface InitializeWorkerMessage {\n type: \"initialize\";\n config: EncoderConfig;\n totalFrames?: number; // For progress calculation\n}\n\nexport interface AddVideoFrameMessage {\n type: \"addVideoFrame\";\n frame: VideoFrame;\n timestamp: number; // microseconds\n}\n\nexport interface AddAudioDataMessage {\n type: \"addAudioData\";\n // Array of Float32Array for each channel (non-interleaved).\n // The ArrayBuffer of each Float32Array should be transferred.\n audioData?: Float32Array[];\n /** Optional AudioData object to be encoded directly. */\n audio?: AudioData;\n timestamp: number; // microseconds\n format: AudioSampleFormat; // e.g., \"f32-planar\" or \"s16\" etc. 
(AudioSampleFormat from WebCodecs)\n sampleRate: number;\n numberOfFrames: number;\n numberOfChannels: number;\n}\n\nexport interface FinalizeWorkerMessage {\n type: \"finalize\";\n}\n\nexport interface CancelWorkerMessage {\n type: \"cancel\";\n}\n\nexport type WorkerMessage =\n | InitializeWorkerMessage\n | AddVideoFrameMessage\n | AddAudioDataMessage\n | FinalizeWorkerMessage\n | CancelWorkerMessage;\n\n// Messages FROM the Worker\nexport interface WorkerInitializedMessage {\n type: \"initialized\";\n actualVideoCodec?: string;\n actualAudioCodec?: string;\n}\n\nexport interface ProgressMessage {\n type: \"progress\";\n processedFrames: number;\n totalFrames?: number;\n}\n\nexport interface WorkerFinalizedMessage {\n type: \"finalized\";\n output: Uint8Array | null; // MP4 file data or null when streaming\n}\n\nexport interface QueueSizeMessage {\n type: \"queueSize\";\n videoQueueSize: number;\n audioQueueSize: number;\n}\n\nexport interface WorkerDataChunkMessage {\n type: \"dataChunk\";\n chunk: Uint8Array;\n isHeader?: boolean; // Indicates if this chunk is a header (e.g., moov for MP4, EBML for WebM)\n offset?: number; // For MP4 fragmented streaming\n container: \"mp4\" | \"webm\"; // To inform the main thread which muxer this chunk belongs to\n}\n\nexport interface WorkerErrorMessage {\n type: \"error\";\n errorDetail: {\n message: string;\n type: EncoderErrorType;\n stack?: string;\n };\n}\n\nexport interface WorkerCancelledMessage {\n type: \"cancelled\";\n}\n\nexport type MainThreadMessage =\n | WorkerInitializedMessage\n | ProgressMessage\n | WorkerFinalizedMessage\n | QueueSizeMessage\n | WorkerDataChunkMessage\n | WorkerErrorMessage\n | WorkerCancelledMessage;\n\n// --- Helper Types for environment-dependent constructors ---\nexport type VideoEncoderConstructor = typeof VideoEncoder;\nexport type AudioEncoderConstructor = typeof AudioEncoder;\nexport type AudioDataConstructor = typeof AudioData;\n\nexport type VideoEncoderGetter = () => VideoEncoderConstructor | undefined;\nexport type AudioEncoderGetter = () => AudioEncoderConstructor | undefined;\nexport type AudioDataGetter = () => AudioDataConstructor | undefined;\n","/**\n * 設定の推定・変換・マージ処理のユーティリティ\n */\n\nimport {\n EncodeOptions,\n VideoSource,\n Frame,\n QualityPreset,\n EncoderConfig,\n} from \"../types\";\n\n/**\n * VideoSourceから設定を推定し、EncodeOptionsとマージして最終的なEncoderConfigを生成\n */\nexport async function inferAndBuildConfig(\n source: VideoSource,\n options?: EncodeOptions,\n): Promise<EncoderConfig> {\n // ソースから基本的な設定を推定\n const inferredConfig = await inferConfigFromSource(source);\n\n // ユーザー指定のオプションをマージ\n const mergedOptions = mergeWithUserOptions(inferredConfig, options);\n\n // 品質プリセットを適用\n const configWithPreset = applyQualityPreset(mergedOptions, options?.quality);\n\n // 最終的なEncoderConfigに変換\n return convertToEncoderConfig(configWithPreset);\n}\n\n/**\n * VideoSourceから基本設定を推定\n */\nasync function inferConfigFromSource(\n source: VideoSource,\n): Promise<Partial<EncodeOptions>> {\n const config: Partial<EncodeOptions> = {\n frameRate: 30, // デフォルト値\n container: \"mp4\", // デフォルト値\n };\n\n try {\n // 最初のフレームを取得して解像度を推定\n const firstFrame = await getFirstFrame(source);\n if (firstFrame) {\n const dimensions = getFrameDimensions(firstFrame);\n config.width = dimensions.width;\n config.height = dimensions.height;\n }\n\n // MediaStreamの場合はビデオ・オーディオトラックの有無も確認\n if (source instanceof MediaStream) {\n const videoTracks = source.getVideoTracks();\n const audioTracks = source.getAudioTracks();\n\n // ビデオトラックがない場合\n if 
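Putting the public types above together: a sketch (illustrative only) of a fully specified options object and of error handling that narrows on EncodeError.type.

import { encode, EncodeError, type EncodeOptions } from "webcodecs-encoder";

// Video-only WebM at a fixed bitrate; the audio track is disabled outright.
const options: EncodeOptions = {
  width: 1280,
  height: 720,
  frameRate: 30,
  video: { codec: "vp9", bitrate: 4_000_000, latencyMode: "realtime" },
  audio: false,
  container: "webm",
  backpressureStrategy: "wait",
};

async function safeEncode(frames: VideoFrame[]): Promise<Uint8Array | null> {
  try {
    return await encode(frames, options);
  } catch (err) {
    // EncodeError.type allows per-failure-mode handling.
    if (err instanceof EncodeError && err.type === "not-supported") {
      console.warn("WebCodecs is unavailable in this browser");
      return null;
    }
    throw err;
  }
}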
// ----- src/utils/config-parser.ts -----

/**
 * Utilities for inferring, converting, and merging configuration
 */

import {
  EncodeOptions,
  VideoSource,
  Frame,
  QualityPreset,
  EncoderConfig,
} from "../types";

/**
 * Infer settings from the VideoSource, merge them with EncodeOptions,
 * and build the final EncoderConfig
 */
export async function inferAndBuildConfig(
  source: VideoSource,
  options?: EncodeOptions,
): Promise<EncoderConfig> {
  // Infer basic settings from the source
  const inferredConfig = await inferConfigFromSource(source);

  // Merge user-specified options
  const mergedOptions = mergeWithUserOptions(inferredConfig, options);

  // Apply the quality preset
  const configWithPreset = applyQualityPreset(mergedOptions, options?.quality);

  // Convert to the final EncoderConfig
  return convertToEncoderConfig(configWithPreset);
}

/**
 * Infer basic settings from the VideoSource
 */
async function inferConfigFromSource(
  source: VideoSource,
): Promise<Partial<EncodeOptions>> {
  const config: Partial<EncodeOptions> = {
    frameRate: 30, // default
    container: "mp4", // default
  };

  try {
    // Get the first frame to infer the resolution
    const firstFrame = await getFirstFrame(source);
    if (firstFrame) {
      const dimensions = getFrameDimensions(firstFrame);
      config.width = dimensions.width;
      config.height = dimensions.height;
    }

    // For a MediaStream, also check whether video/audio tracks are present
    if (source instanceof MediaStream) {
      const videoTracks = source.getVideoTracks();
      const audioTracks = source.getAudioTracks();

      // No video track
      if (videoTracks.length === 0) {
        config.video = false; // no video
      }

      if (audioTracks.length === 0) {
        config.audio = false; // no audio
      } else {
        // Infer audio settings from the MediaStreamTrack
        const audioTrack = audioTracks[0];
        const settings = audioTrack.getSettings();
        config.audio = {
          sampleRate: settings.sampleRate || 48000,
          channels: settings.channelCount || 2,
        };
      }
    }
  } catch (error) {
    // Fall back to defaults if inference fails
    config.width = 640;
    config.height = 480;
  }

  return config;
}

/**
 * Merge user-specified options
 */
function mergeWithUserOptions(
  inferredConfig: Partial<EncodeOptions>,
  userOptions?: EncodeOptions,
): EncodeOptions {
  return {
    // Start from the inferred settings
    ...inferredConfig,
    // Override with user-specified settings
    ...userOptions,
    // Merge nested objects individually
    video: {
      ...inferredConfig.video,
      ...userOptions?.video,
    },
    audio:
      userOptions?.audio === false
        ? false
        : {
            ...(inferredConfig.audio as any),
            ...userOptions?.audio,
          },
  };
}

/**
 * Apply the quality preset
 */
function applyQualityPreset(
  config: EncodeOptions,
  quality?: QualityPreset,
): EncodeOptions {
  if (!quality) return config;

  const width = config.width || 640;
  const height = config.height || 480;
  const pixels = width * height;

  // Compute bitrate from resolution and frame rate
  const basePixelsPerSecond = pixels * (config.frameRate || 30);

  let videoBitrate: number;
  let audioBitrate: number;

  switch (quality) {
    case "low":
      videoBitrate = Math.max(500_000, basePixelsPerSecond * 0.1);
      audioBitrate = 64_000;
      break;
    case "medium":
      videoBitrate = Math.max(1_000_000, basePixelsPerSecond * 0.2);
      audioBitrate = 128_000;
      break;
    case "high":
      videoBitrate = Math.max(2_000_000, basePixelsPerSecond * 0.4);
      audioBitrate = 192_000;
      break;
    case "lossless":
      videoBitrate = Math.max(10_000_000, basePixelsPerSecond * 1.0);
      audioBitrate = 320_000;
      break;
    default:
      return config;
  }

  return {
    ...config,
    video:
      config.video === false
        ? false
        : {
            ...(config.video as any),
            bitrate: (config.video as any)?.bitrate || videoBitrate,
          },
    audio:
      config.audio === false
        ? false
        : {
            ...(config.audio as any),
            bitrate: (config.audio as any)?.bitrate || audioBitrate,
          },
  };
}

/**
 * Convert EncodeOptions to the internal EncoderConfig
 */
function convertToEncoderConfig(options: EncodeOptions): EncoderConfig {
  const config: EncoderConfig = {
    width: options.video === false ? 0 : options.width || 640,
    height: options.video === false ? 0 : options.height || 480,
    frameRate: options.frameRate || 30,
    videoBitrate:
      options.video === false
        ? 0
        : (options.video as any)?.bitrate || 1_000_000,
    audioBitrate:
      options.audio === false ? 0 : (options.audio as any)?.bitrate || 128_000,
    sampleRate:
      options.audio === false ? 0 : (options.audio as any)?.sampleRate || 48000,
    channels:
      options.audio === false ? 0 : (options.audio as any)?.channels || 2,
    container: options.container || "mp4",
    codec: {
      video:
        options.video === false
          ? undefined
          : (options.video as any)?.codec || "avc",
      audio:
        options.audio === false
          ? undefined
          : (options.audio as any)?.codec || "aac",
    },
    latencyMode:
      options.video === false
        ? "quality"
        : options.latencyMode ||
          (options.video as any)?.latencyMode ||
          "quality",
    hardwareAcceleration:
      options.video === false
        ? "no-preference"
        : (options.video as any)?.hardwareAcceleration || "no-preference",
    keyFrameInterval:
      options.video === false
        ? undefined
        : (options.video as any)?.keyFrameInterval,
    audioBitrateMode:
      options.audio === false
        ? undefined
        : (options.audio as any)?.bitrateMode || "variable",
    firstTimestampBehavior: options.firstTimestampBehavior || "strict",
    maxVideoQueueSize: options.maxVideoQueueSize || 30,
    maxAudioQueueSize: options.maxAudioQueueSize || 30,
    backpressureStrategy: options.backpressureStrategy || "drop",
  };

  return config;
}

/**
 * Get the first frame from a VideoSource
 */
async function getFirstFrame(source: VideoSource): Promise<Frame | null> {
  if (Array.isArray(source)) {
    return source.length > 0 ? source[0] : null;
  }

  if (source instanceof MediaStream) {
    // Pulling the first frame out of a MediaStream is complicated, so
    // infer the resolution from the video track settings instead
    const videoTracks = source.getVideoTracks();
    if (videoTracks.length > 0) {
      const settings = videoTracks[0].getSettings();
      if (settings.width && settings.height) {
        // Return a virtual frame-size descriptor
        return {
          displayWidth: settings.width,
          displayHeight: settings.height,
        } as any;
      }
    }
    return null;
  }

  if (Symbol.asyncIterator in source) {
    // For an AsyncIterable, take the first element
    for await (const frame of source) {
      return frame;
    }
    return null;
  }

  // VideoFile would need a real implementation (simplified here)
  return null;
}

/**
 * Get the resolution from a frame
 */
function getFrameDimensions(frame: Frame | null): {
  width: number;
  height: number;
} {
  if (!frame) {
    return { width: 640, height: 480 };
  }

  if (frame instanceof VideoFrame) {
    return {
      width: frame.displayWidth || frame.codedWidth,
      height: frame.displayHeight || frame.codedHeight,
    };
  }

  if (frame instanceof HTMLCanvasElement || frame instanceof OffscreenCanvas) {
    return { width: frame.width, height: frame.height };
  }

  if (frame instanceof ImageBitmap) {
    return { width: frame.width, height: frame.height };
  }

  if (frame instanceof ImageData) {
    return { width: frame.width, height: frame.height };
  }

  // Virtual frame descriptor case
  if ("displayWidth" in frame && "displayHeight" in frame) {
    return {
      width: (frame as any).displayWidth,
      height: (frame as any).displayHeight,
    };
  }

  return { width: 640, height: 480 };
}
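The preset arithmetic in applyQualityPreset, worked through for a common input (values follow directly from the code above):

// 1280×720 @ 30 fps with quality: "medium"
//   basePixelsPerSecond = 1280 * 720 * 30 = 27,648,000
//   videoBitrate = max(1_000_000, 27_648_000 * 0.2) = 5,529,600 bps (≈ 5.5 Mbps)
//   audioBitrate = 128_000 bps
// An explicit `bitrate` in VideoConfig/AudioConfig takes precedence over the preset.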
// ----- src/worker/worker-communicator.ts -----

/**
 * Worker creation and management
 */

import { EncodeError } from "../types";

/**
 * Create external worker
 */
function createExternalWorker(): Worker {
  try {
    // Use external worker file
    const worker = new Worker("/webcodecs-worker.js", { type: "module" });

    // Worker error handling
    worker.onerror = (event) => {
      console.error("Worker error:", event);
      throw new EncodeError("worker-error", `Worker error: ${event.message}`);
    };

    return worker;
  } catch (error) {
    throw new EncodeError(
      "initialization-failed",
      "Failed to create external worker. Make sure webcodecs-worker.js is available in your public directory.",
      error,
    );
  }
}

/**
 * Create inline worker (for test environments)
 */
function createInlineWorker(): { worker: Worker; blobUrl: string } {
  try {
    const workerSource = getWorkerSource();
    const blob = new Blob([workerSource], { type: "application/javascript" });
    const blobUrl = URL.createObjectURL(blob);

    const worker = new Worker(blobUrl, { type: "module" });

    worker.onerror = (event) => {
      console.error("Inline worker error:", event);
      throw new EncodeError(
        "worker-error",
        `Inline worker error: ${event.message}`,
      );
    };

    return { worker, blobUrl };
  } catch (error) {
    throw new EncodeError(
      "initialization-failed",
      "Failed to create inline worker",
      error,
    );
  }
}

/**
 * Create appropriate worker
 */
export function createWorker(): Worker | { worker: Worker; blobUrl: string } {
  // Enhanced production environment detection
  const isProductionEnvironment = detectProductionEnvironment();

  // Test environment or development environment detection
  const isTestEnvironment =
    // Vitest environment
    (typeof process !== "undefined" && process.env?.VITEST === "true") ||
    // Jest environment
    (typeof process !== "undefined" &&
      process.env?.JEST_WORKER_ID !== undefined) ||
    // Node.js environment
    (typeof process !== "undefined" && process.env?.NODE_ENV === "test") ||
    // Global test runner exists
    (typeof global !== "undefined" &&
      (global as any).process?.env?.NODE_ENV === "test") ||
    // vitest global function exists
    (typeof globalThis !== "undefined" && "vi" in globalThis) ||
    // jsdom environment
    (typeof window !== "undefined" &&
      window.navigator?.userAgent?.includes("jsdom")) ||
    // Variables commonly set in test environments
    (typeof process !== "undefined" &&
      process.env?.npm_lifecycle_event?.includes("test"));

  // Enhanced fallback for integration test environments
  const isIntegrationTestEnvironment =
    typeof window !== "undefined" &&
    (window.location?.hostname === "localhost" ||
      window.location?.hostname === "127.0.0.1") &&
    window.location?.port;

  // Force disable check via environment variables
  const forceDisableInlineWorker =
    (typeof process !== "undefined" &&
      process.env?.WEBCODECS_DISABLE_INLINE_WORKER === "true") ||
    (typeof window !== "undefined" &&
      (window as any).__WEBCODECS_DISABLE_INLINE_WORKER__ === true);

  // Strictly prohibit if production environment or inline worker is explicitly disabled
  if (
    (isProductionEnvironment || forceDisableInlineWorker) &&
    (isTestEnvironment || isIntegrationTestEnvironment)
  ) {
    throw new Error(
      "[WorkerCommunicator] CRITICAL SECURITY ERROR: Inline worker detected in production environment or explicitly disabled. " +
        "This is a security risk. Please ensure webcodecs-worker.js is properly deployed.",
    );
  }

  // Always use inline worker in test environments
  if (isTestEnvironment || isIntegrationTestEnvironment) {
    console.warn(
      "[WorkerCommunicator] Using inline worker for test environment",
    );
    return createInlineWorker();
  }

  // Use only external worker in production environment
  try {
    return createExternalWorker();
  } catch (error) {
    if (isProductionEnvironment) {
      throw new Error(
        "[WorkerCommunicator] PRODUCTION ERROR: External worker failed to load. " +
          "Inline worker is disabled for security reasons. " +
          "Please ensure webcodecs-worker.js is accessible at /webcodecs-worker.js",
      );
    }
    console.error(
      "[WorkerCommunicator] External worker creation failed. Inline worker is not used in production.",
      error,
    );
    throw error;
  }
}

/**
 * Detect production environment
 */
function detectProductionEnvironment(): boolean {
  // Production detection in Node.js environment
  if (typeof process !== "undefined") {
    const nodeEnv = process.env?.NODE_ENV;
    // Production-like environments (production, staging, preview, prod)
    return (
      nodeEnv === "production" ||
      nodeEnv === "staging" ||
      nodeEnv === "preview" ||
      nodeEnv === "prod"
    );
  }

  // Production detection in browser environment
  if (typeof window !== "undefined") {
    // Whether served over HTTPS
    const isHttps = window.location?.protocol === "https:";
    // Host other than localhost
    const isNotLocalhost =
      window.location?.hostname !== "localhost" &&
      window.location?.hostname !== "127.0.0.1" &&
      !window.location?.hostname?.endsWith(".localhost");

    // Exclude ports commonly used by development servers
    const isDevelopmentPort =
      window.location?.port &&
      ["3000", "3001", "4000", "5000", "5173", "8000", "8080", "9000"].includes(
        window.location.port,
      );

    // Check production domain patterns
    const hostname = window.location?.hostname || "";
    const isProductionDomain =
      hostname.includes(".com") ||
      hostname.includes(".org") ||
      hostname.includes(".net") ||
      hostname.includes("staging") ||
      hostname.includes("preview") ||
      hostname.includes("prod");

    // More strict production environment determination
    return (
      isHttps && isNotLocalhost && !isDevelopmentPort && isProductionDomain
    );
  }

  return false;
}

/**
 * Generate inline worker source code (testing only)
 */
function getWorkerSource(): string {
  return `
    // ⚠️ TESTING ONLY - DO NOT USE IN PRODUCTION ⚠️
    // WebCodecs Encoder Worker (Inline Mock Implementation)
    // This is a minimal mock for testing purposes only.
    // Real encoding should use the external webcodecs-worker.js file.

    console.warn('⚠️ Using inline mock worker - FOR TESTING ONLY');

    let config = null;
    let processedFrames = 0;

    self.onmessage = async function(event) {
      const { type, ...data } = event.data;

      try {
        switch (type) {
          case 'initialize':
            config = data.config;
            processedFrames = 0;
            // Wait a bit before sending success response
            setTimeout(() => {
              self.postMessage({ type: 'initialized' });
            }, 50);
            break;

          case 'addVideoFrame':
            processedFrames++;
            // Progress update
            self.postMessage({
              type: 'progress',
              processedFrames,
              totalFrames: data.totalFrames
            });
            break;

          case 'addAudioData':
            // Audio data processing (placeholder)
            break;

          case 'finalize':
            // Wait a bit before returning result
            setTimeout(() => {
              const result = new Uint8Array([0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x79, 0x70]); // MP4 magic number
              self.postMessage({ type: 'finalized', output: result });
            }, 100);
            break;

          case 'cancel':
            self.postMessage({ type: 'cancelled' });
            break;

          default:
            console.warn('Unknown message type:', type);
        }
      } catch (error) {
        self.postMessage({
          type: 'error',
          errorDetail: {
            message: error.message,
            type: 'encoding-failed',
            stack: error.stack
          }
        });
      }
    };
  `;
}

/**
 * Worker communication helper
 */
export class WorkerCommunicator {
  private worker: Worker;
  private messageHandlers: Map<string, (data: any) => void> = new Map();
  private workerBlobUrl: string | null = null;

  constructor() {
    const workerResult = createWorker();
    if (typeof workerResult === "object" && "worker" in workerResult) {
      // Inline worker case
      this.worker = workerResult.worker;
      this.workerBlobUrl = workerResult.blobUrl;
    } else {
      // External worker case
      this.worker = workerResult;
    }
    this.worker.onmessage = this.handleMessage.bind(this);
  }

  private handleMessage(event: MessageEvent): void {
    const { type, ...data } = event.data;
    const handler = this.messageHandlers.get(type);
    if (handler) {
      handler(data);
    }
  }

  /**
   * Register message handler
   */
  on(type: string, handler: (data: any) => void): void {
    this.messageHandlers.set(type, handler);
  }

  /**
   * Unregister message handler
   */
  off(type: string): void {
    this.messageHandlers.delete(type);
  }

  /**
   * Send message to worker
   */
  send(type: string, data: any = {}): void {
    // Detect transferable objects for optimization
    const transferables: Transferable[] = [];

    // Safari compatibility: VideoFrame and AudioData should NOT be transferred
    // as they cause issues in Safari when used as transferable objects
    const isSafari =
      typeof navigator !== "undefined" &&
      /^((?!chrome|android).)*safari/i.test(navigator.userAgent);

    // Optimize transfer only if ArrayBuffer is included
    if (data.buffer instanceof ArrayBuffer) {
      transferables.push(data.buffer);
    }

    // Deep scan for nested ArrayBuffers, but skip VideoFrame/AudioData
    this.collectTransferables(data, transferables, isSafari);

    // Use optimized transfer if transferable objects exist
    if (transferables.length > 0) {
      try {
        this.worker.postMessage({ type, ...data }, transferables);
      } catch (error) {
        // Safari fallback: if transferable fails, send without transferables
        console.warn(
          "Transferable object transfer failed, falling back to clone:",
          error,
        );
        this.worker.postMessage({ type, ...data });
      }
    } else {
      this.worker.postMessage({ type, ...data });
    }
  }

  /**
   * Recursively collect transferable objects while avoiding problematic types
   */
  private collectTransferables(
    obj: any,
    transferables: Transferable[],
    isSafari: boolean,
  ): void {
    if (!obj || typeof obj !== "object") return;

    // Skip VideoFrame and AudioData objects as they cause Safari issues
    if (typeof VideoFrame !== "undefined" && obj instanceof VideoFrame) return;
    if (typeof AudioData !== "undefined" && obj instanceof AudioData) return;

    // Safari-specific: avoid transferring certain objects
    if (isSafari) {
      // Be more conservative with Safari - only transfer obvious ArrayBuffers
      if (obj instanceof ArrayBuffer && !transferables.includes(obj)) {
        transferables.push(obj);
      }
      return;
    }

    // For other browsers, collect more transferable types
    if (obj instanceof ArrayBuffer && !transferables.includes(obj)) {
      transferables.push(obj);
    } else if (obj instanceof MessagePort && !transferables.includes(obj)) {
      transferables.push(obj);
    } else if (
      typeof ImageBitmap !== "undefined" &&
      obj instanceof ImageBitmap &&
      !transferables.includes(obj)
    ) {
      transferables.push(obj);
    }

    // Recursively check object properties
    for (const key in obj) {
      if (Object.prototype.hasOwnProperty.call(obj, key)) {
        this.collectTransferables(obj[key], transferables, isSafari);
      }
    }
  }

  /**
   * Terminate communication
   */
  terminate(): void {
    this.messageHandlers.clear();
    if (this.worker) {
      this.worker.terminate();
    }
    if (this.workerBlobUrl) {
      URL.revokeObjectURL(this.workerBlobUrl);
      this.workerBlobUrl = null;
    }
  }
}
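How a caller drives WorkerCommunicator, sketched from the on/send/terminate API above and the message names in src/types.ts. `myEncoderConfig` is a hypothetical placeholder supplied by the caller.

import { WorkerCommunicator } from "./worker-communicator";
import type { EncoderConfig } from "../types";

declare const myEncoderConfig: EncoderConfig; // hypothetical config

const comm = new WorkerCommunicator();
comm.on("initialized", () => {
  // ...send addVideoFrame / addAudioData messages here...
  comm.send("finalize");
});
comm.on("finalized", (data: { output: Uint8Array | null }) => {
  console.log("encoded bytes:", data.output?.byteLength ?? 0);
  comm.terminate();
});
comm.on("error", (data) => console.error(data.errorDetail.message));
comm.send("initialize", { config: myEncoderConfig });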
// ----- src/utils/video-frame-converter.ts -----

/**
 * VideoFrame conversion utilities
 */

import { EncodeError, Frame } from "../types";

/**
 * Convert a Frame to a VideoFrame
 */
export async function convertToVideoFrame(
  frame: Frame,
  timestamp: number,
): Promise<VideoFrame> {
  if (frame instanceof VideoFrame) {
    // Always create a new VideoFrame to ensure clear ownership
    // The caller owns the returned VideoFrame and must close it
    return new VideoFrame(frame, { timestamp });
  }

  // Convert other Frame types to VideoFrame
  if (frame instanceof HTMLCanvasElement) {
    return new VideoFrame(frame, { timestamp });
  }

  if (frame instanceof OffscreenCanvas) {
    return new VideoFrame(frame, { timestamp });
  }

  if (frame instanceof ImageBitmap) {
    return new VideoFrame(frame, { timestamp });
  }

  if (frame instanceof ImageData) {
    // For ImageData, use the BufferInit constructor form
    return new VideoFrame(frame.data, {
      format: "RGBA",
      codedWidth: frame.width,
      codedHeight: frame.height,
      timestamp,
    });
  }

  // For mock objects in test environments, detect by properties
  if (frame && typeof frame === "object") {
    // ImageData-like object
    if ("width" in frame && "height" in frame && "data" in frame) {
      const imageDataLike = frame as {
        width: number;
        height: number;
        data: Uint8ClampedArray;
      };
      return new VideoFrame(imageDataLike.data, {
        format: "RGBA",
        codedWidth: imageDataLike.width,
        codedHeight: imageDataLike.height,
        timestamp,
      });
    }

    // Canvas-like object
    if (
      "width" in frame &&
      "height" in frame &&
      ("getContext" in frame || "transferToImageBitmap" in frame)
    ) {
      return new VideoFrame(frame as any, { timestamp });
    }

    // ImageBitmap-like object
    if (
      "width" in frame &&
      "height" in frame &&
      "close" in frame &&
      typeof (frame as any).close === "function"
    ) {
      return new VideoFrame(frame as any, { timestamp });
    }
  }

  throw new EncodeError(
    "invalid-input",
    `Unsupported frame type: ${typeof frame}. Frame must be VideoFrame, HTMLCanvasElement, OffscreenCanvas, ImageBitmap, or ImageData.`,
  );
}

// ----- src/core/encode.ts -----

/**
 * Core encode function implementation
 */

import {
  VideoSource,
  EncodeOptions,
  EncodeError,
  Frame,
  ProgressInfo,
  VideoFile,
} from "../types";
import { inferAndBuildConfig } from "../utils/config-parser";
import { WorkerCommunicator } from "../worker/worker-communicator";
import { convertToVideoFrame } from "../utils/video-frame-converter";

/**
 * Main video encoding function
 *
 * @param source Video source to encode
 * @param options Encoding options
 * @returns Encoded binary data
 */
export async function encode(
  source: VideoSource,
  options?: EncodeOptions,
): Promise<Uint8Array> {
  let communicator: WorkerCommunicator | null = null;

  try {
    // Configuration inference and normalization
    const config = await inferAndBuildConfig(source, options);

    // Start communication with worker
    communicator = new WorkerCommunicator();

    // Execute encoding process
    const result = await performEncoding(communicator, source, config, options);

    return result;
  } catch (error) {
    // Unified error handling
    const encodeError =
      error instanceof EncodeError
        ? error
        : new EncodeError(
            "encoding-failed",
            `Encoding failed: ${error instanceof Error ? error.message : String(error)}`,
            error,
          );

    if (options?.onError) {
      options.onError(encodeError);
    }

    throw encodeError;
  } finally {
    // Resource cleanup
    if (communicator) {
      communicator.terminate();
    }
  }
}

/**
 * Execute the actual encoding process
 */
async function performEncoding(
  communicator: WorkerCommunicator,
  source: VideoSource,
  config: any,
  options?: EncodeOptions,
): Promise<Uint8Array> {
  return new Promise<Uint8Array>((resolve, reject) => {
    let processedFrames = 0;
    let totalFrames: number | undefined;
    const startTime = Date.now();

    // Calculate totalFrames upfront for progress tracking
    calculateTotalFrames(source, config)
      .then((frames) => {
        totalFrames = frames;
      })
      .catch((error) => {
        console.warn("Failed to calculate total frames:", error);
      });

    // Update progress information
    const updateProgress = (stage: string) => {
      if (options?.onProgress) {
        const elapsed = Date.now() - startTime;
        const fps =
          processedFrames > 0 ? (processedFrames / elapsed) * 1000 : 0;
        const percent = totalFrames ? (processedFrames / totalFrames) * 100 : 0;
        const estimatedRemainingMs =
          totalFrames && fps > 0
            ? ((totalFrames - processedFrames) / fps) * 1000
            : undefined;

        const progressInfo: ProgressInfo = {
          percent,
          processedFrames,
          totalFrames,
          fps,
          stage,
          estimatedRemainingMs,
        };

        options.onProgress(progressInfo);
      }
    };

    // Handle messages from worker
    communicator.on("initialized", () => {
      updateProgress("encoding");
      // Start frame processing
      processVideoSource(communicator, source, config)
        .then(() => {
          updateProgress("finalizing");
          communicator.send("finalize");
        })
        .catch(reject);
    });

    communicator.on(
      "progress",
      (data: { processedFrames: number; totalFrames?: number }) => {
        processedFrames = data.processedFrames;
        if (data.totalFrames !== undefined) {
          totalFrames = data.totalFrames;
        }
        updateProgress("encoding");
      },
    );

    communicator.on("finalized", (data: { output: Uint8Array | null }) => {
      if (data.output) {
        updateProgress("finalizing");
        resolve(data.output);
      } else {
        reject(new EncodeError("encoding-failed", "No output produced"));
      }
    });

    communicator.on("error", (data: { errorDetail: any }) => {
      const error = new EncodeError(
        data.errorDetail.type || "encoding-failed",
        data.errorDetail.message || "Worker error",
        data.errorDetail,
      );
      reject(error);
    });

    // Start encoding
    communicator.send("initialize", { config, totalFrames });
  });
}

/**
 * Process VideoSource and send to worker
 */
async function processVideoSource(
  communicator: WorkerCommunicator,
  source: VideoSource,
  config: any,
): Promise<void> {
  if (Array.isArray(source)) {
    // Process static frame array
    await processFrameArray(communicator, source, config);
  } else if (source instanceof MediaStream) {
    // Process MediaStream
    await processMediaStream(communicator, source, config);
  } else if (Symbol.asyncIterator in source) {
    // Process AsyncIterable
    await processAsyncIterable(communicator, source, config);
  } else {
    // Process VideoFile
    await processVideoFile(communicator, source as VideoFile, config);
  }
}

/**
 * Process frame array
 */
async function processFrameArray(
  communicator: WorkerCommunicator,
  frames: Frame[],
  config?: any,
): Promise<void> {
  const frameRate = config?.frameRate || 30;
  for (let i = 0; i < frames.length; i++) {
    const frame = frames[i];
    const timestamp = (i * 1000000) / frameRate; // Use frameRate from config

    await addFrameToWorker(communicator, frame, timestamp);
  }
}

/**
 * Process AsyncIterable
 */
async function processAsyncIterable(
  communicator: WorkerCommunicator,
  source: AsyncIterable<Frame>,
  config?: any,
): Promise<void> {
  let frameIndex = 0;
  const frameRate = config?.frameRate || 30;

  for await (const frame of source) {
    const timestamp = (frameIndex * 1000000) / frameRate; // Use frameRate from config
    await addFrameToWorker(communicator, frame, timestamp);
    frameIndex++;
  }
}

/**
 * Process MediaStream
 */
async function processMediaStream(
  communicator: WorkerCommunicator,
  stream: MediaStream,
  _config: any,
): Promise<void> {
  // MediaStream processing is complex, so use MediaStreamTrackProcessor
  const videoTracks = stream.getVideoTracks();
  const audioTracks = stream.getAudioTracks();

  const readers: ReadableStreamDefaultReader<any>[] = [];
  const processingPromises: Promise<void>[] = [];

  try {
    // Process video tracks
    if (videoTracks.length > 0) {
      const videoTrack = videoTracks[0];
      const processor = new MediaStreamTrackProcessor({ track: videoTrack });
      const reader =
        processor.readable.getReader() as ReadableStreamDefaultReader<VideoFrame>;
      readers.push(reader);

      processingPromises.push(processVideoReader(communicator, reader));
    }

    // Process audio tracks
    if (audioTracks.length > 0) {
      const audioTrack = audioTracks[0];
      const processor = new MediaStreamTrackProcessor({ track: audioTrack });
      const reader =
        processor.readable.getReader() as ReadableStreamDefaultReader<AudioData>;
      readers.push(reader);

      processingPromises.push(processAudioReader(communicator, reader));
    }

    // Wait for all processing to complete
    await Promise.all(processingPromises);
  } finally {
    // Clean up readers
    for (const reader of readers) {
      try {
        reader.cancel();
      } catch (e) {
        // Ignore errors (may already be cancelled)
      }
    }

    // Stop tracks
    for (const track of [...videoTracks, ...audioTracks]) {
      track.stop();
    }
  }
}

/**
 * Process VideoFrame reader
 */
async function processVideoReader(
  communicator: WorkerCommunicator,
  reader: ReadableStreamDefaultReader<VideoFrame>,
): Promise<void> {
  try {
    // eslint-disable-next-line no-constant-condition
    while (true) {
      const { value, done } = await reader.read();
      if (done || !value) break;

      try {
        await addFrameToWorker(communicator, value, value.timestamp || 0);
      } finally {
        value.close();
      }
    }
  } catch (error) {
    throw new EncodeError(
      "video-encoding-error",
      `Video stream processing error: ${error instanceof Error ? error.message : String(error)}`,
      error,
    );
  }
}

/**
 * Process AudioData reader
 */
async function processAudioReader(
  communicator: WorkerCommunicator,
  reader: ReadableStreamDefaultReader<AudioData>,
): Promise<void> {
  try {
    // eslint-disable-next-line no-constant-condition
    while (true) {
      const { value, done } = await reader.read();
      if (done || !value) break;

      try {
        communicator.send("addAudioData", {
          audio: value,
          timestamp: value.timestamp || 0,
          format: "f32",
          sampleRate: value.sampleRate,
          numberOfFrames: value.numberOfFrames,
          numberOfChannels: value.numberOfChannels,
        });
      } finally {
        value.close();
      }
    }
  } catch (error) {
    throw new EncodeError(
      "audio-encoding-error",
      `Audio stream processing error: ${error instanceof Error ? error.message : String(error)}`,
      error,
    );
  }
}

/**
 * Send frame to worker
 */
async function addFrameToWorker(
  communicator: WorkerCommunicator,
  frame: Frame,
  timestamp: number,
): Promise<void> {
  // Convert frame to VideoFrame
  const videoFrame = await convertToVideoFrame(frame, timestamp);

  try {
    communicator.send("addVideoFrame", {
      frame: videoFrame,
      timestamp,
    });
  } finally {
    // convertToVideoFrame always returns a new VideoFrame that we own
    videoFrame.close();
  }
}

/**
 * Process VideoFile and extract frames
 */
async function processVideoFile(
  communicator: WorkerCommunicator,
  videoFile: VideoFile,
  config: any,
): Promise<void> {
  try {
    // Create HTML5 Video element and load file
    const video = document.createElement("video");
    video.muted = true;
    video.preload = "metadata";

    // Set file as object URL
    const objectUrl = URL.createObjectURL(videoFile.file);
    video.src = objectUrl;

    await new Promise<void>((resolve, reject) => {
      video.onloadedmetadata = () => resolve();
      video.onerror = () => reject(new Error("Failed to load video file"));
    });

    // Get video information
    const { duration, videoWidth, videoHeight } = video;
    const frameRate = config.frameRate || 30;
    const totalFrames = Math.floor(duration * frameRate);

    // Prepare audio processing (if audio is enabled in config)
    let audioContext: AudioContext | null = null;
    let audioBuffer: AudioBuffer | null = null;

    if (config.audioBitrate > 0 && typeof AudioContext !== "undefined") {
      try {
        audioContext = new AudioContext();

        // Load audio data from file
        const arrayBuffer = await videoFile.file.arrayBuffer();
        audioBuffer = await audioContext.decodeAudioData(arrayBuffer);

        // Split and send audio data
        await processAudioFromFile(
          communicator,
          audioBuffer,
          duration,
          frameRate,
        );
      } catch (audioError) {
        // Log and continue if audio processing fails
        console.warn("Failed to process audio from VideoFile:", audioError);
      }
    }

    // Create canvas to extract frames
    const canvas = document.createElement("canvas");
    canvas.width = videoWidth;
    canvas.height = videoHeight;
    const ctx = canvas.getContext("2d");

    if (!ctx) {
      throw new EncodeError(
        "initialization-failed",
        "Failed to get canvas context",
      );
    }

    // Process each frame of the video
    for (let frameIndex = 0; frameIndex < totalFrames; frameIndex++) {
      try {
        const timestamp = frameIndex / frameRate;

        // Seek to specified time in video
        video.currentTime = timestamp;

        await new Promise<void>((resolve, reject) => {
          const onSeeked = () => {
            video.removeEventListener("seeked", onSeeked);
            resolve();
          };
          video.addEventListener("seeked", onSeeked, { once: true });
          video.onerror = () => reject(new Error("Video seek failed"));
        });

        // Draw current frame to canvas
        ctx.drawImage(video, 0, 0, videoWidth, videoHeight);

        // Create VideoFrame
        const videoFrame = new VideoFrame(canvas, {
          timestamp: frameIndex * (1000000 / frameRate), // microseconds
        });

        // Send to worker
        await addFrameToWorker(
          communicator,
          videoFrame,
          frameIndex * (1000000 / frameRate),
        );

        // Close frame to prevent memory leaks
        videoFrame.close();
      } catch (frameError) {
        throw new EncodeError(
          "video-encoding-error",
          `Failed to process frame ${frameIndex}: ${frameError instanceof Error ? frameError.message : String(frameError)}`,
          frameError,
        );
      }
    }

    // Clean up resources
    URL.revokeObjectURL(objectUrl);
    video.remove();

    if (audioContext) {
      audioContext.close();
    }
  } catch (error) {
    throw new EncodeError(
      "invalid-input",
      `VideoFile processing failed: ${error instanceof Error ? error.message : String(error)}`,
      error,
    );
  }
}

/**
 * Process audio data from AudioBuffer and send to worker
 */
async function processAudioFromFile(
  communicator: WorkerCommunicator,
  audioBuffer: AudioBuffer,
  duration: number,
  frameRate: number,
): Promise<void> {
  const sampleRate = audioBuffer.sampleRate;
  const numberOfChannels = audioBuffer.numberOfChannels;
  const totalSamples = audioBuffer.length;

  // Split audio data into appropriate chunk sizes
  // Use smaller chunk sizes for better memory efficiency
  const chunkDurationMs = Math.min(20, 1000 / frameRate); // 20ms or frame duration, whichever is smaller
  const samplesPerChunk = Math.floor((sampleRate * chunkDurationMs) / 1000);

  for (let offset = 0; offset < totalSamples; offset += samplesPerChunk) {
    const remainingSamples = Math.min(samplesPerChunk, totalSamples - offset);
    const timestamp = (offset / sampleRate) * 1000000; // microseconds

    // Get channel data
    const channelData: Float32Array[] = [];
    for (let channel = 0; channel < numberOfChannels; channel++) {
      const sourceData = audioBuffer.getChannelData(channel);
      const chunkData = new Float32Array(remainingSamples);
      chunkData.set(sourceData.subarray(offset, offset + remainingSamples));
      channelData.push(chunkData);
    }

    try {
      // Create AudioData and send to worker
      // Convert to interleaved format
      const interleavedData = new Float32Array(
        remainingSamples * numberOfChannels,
      );
      for (let frame = 0; frame < remainingSamples; frame++) {
        for (let channel = 0; channel < numberOfChannels; channel++) {
          interleavedData[frame * numberOfChannels + channel] =
            channelData[channel][frame];
        }
      }

      const audioData = new AudioData({
        format: "f32",
        sampleRate,
        numberOfFrames: remainingSamples,
        numberOfChannels,
        timestamp,
        data: interleavedData,
      });

      communicator.send("addAudioData", {
        audio: audioData,
        timestamp,
        format: "f32",
        sampleRate,
        numberOfFrames: remainingSamples,
        numberOfChannels,
      });

      audioData.close();

      // Release channel data for memory efficiency
      channelData.length = 0;
    } catch (error) {
      console.warn("Failed to create AudioData chunk:", error);
    }
  }
}

/**
 * Calculate total frames for different video sources
 */
async function calculateTotalFrames(
  source: VideoSource,
  config: any,
): Promise<number | undefined> {
  try {
    if (Array.isArr
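The listing cuts off above, before src/stream/encode-stream.ts and src/utils/can-encode.ts, so the following sketch rests on assumptions: that canEncode() accepts an options object and resolves to a boolean, and that encodeStream() yields Uint8Array container chunks. Both are inferred from the export list and from WorkerDataChunkMessage, not confirmed by this excerpt.

import { canEncode, encodeStream } from "webcodecs-encoder";

// Push encoded chunks over a WebSocket as they are produced.
async function streamCameraToServer(ws: WebSocket): Promise<void> {
  const supported = await canEncode({ container: "mp4" }); // assumed signature
  if (!supported) return;

  const camera = await navigator.mediaDevices.getUserMedia({
    video: true,
    audio: true,
  });

  for await (const chunk of encodeStream(camera, { latencyMode: "realtime" })) {
    ws.send(chunk); // each chunk is assumed to be a Uint8Array container fragment
  }
}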