@livepeer/core-web
Livepeer UI Kit's core web library for adding reactive stores to video elements.
1 line • 132 kB
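Usage sketch (TypeScript)

The embedded sources below (src/broadcast.ts and others) export createBroadcastStore, getBroadcastDeviceInfo, and addBroadcastEventListeners. The following is a minimal sketch of wiring a broadcast store to a video element; the "@livepeer/core-web/broadcast" import path, the ingest URL, and the storage/mediaStore values are assumptions, not documented on this page.

import type { MediaControllerStore } from "@livepeer/core/media";
import type { ClientStorage } from "@livepeer/core/storage";
// Assumed import path; the exports are defined in src/broadcast.ts below.
import {
  addBroadcastEventListeners,
  createBroadcastStore,
  getBroadcastDeviceInfo,
} from "@livepeer/core-web/broadcast";

// Provided elsewhere by Livepeer UI Kit; declared here so the sketch
// type-checks without inventing their constructors.
declare const storage: ClientStorage;
declare const mediaStore: MediaControllerStore;

const element = document.querySelector("video");

if (element) {
  const { store, destroy } = createBroadcastStore({
    ingestUrl: "https://ingest.example.com/whip", // hypothetical WHIP endpoint
    device: getBroadcastDeviceInfo("0.0.0"), // version string is illustrative
    storage,
    initialProps: { audio: true, video: true, mirrored: false },
  });

  // Wires hotkeys (Space, V, B, D, C, M), devicechange listeners, and the
  // store effects to the element.
  const { destroy: removeListeners } = addBroadcastEventListeners(
    element,
    store,
    mediaStore,
  );

  // Teardown when the element goes away.
  window.addEventListener("beforeunload", () => {
    removeListeners();
    destroy();
  });
}

The store persists the audio/video toggles and selected device IDs under the "livepeer-broadcast-controller" storage key, so those choices survive across broadcasts.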
Source Map (JSON)
{"version":3,"sources":["../../src/broadcast.ts","../../src/media/controls/controller.ts","../../src/hls/hls.ts","../../src/media/utils.ts","../../src/webrtc/shared.ts","../../src/media/controls/pictureInPicture.ts","../../src/webrtc/whip.ts"],"sourcesContent":["import { PERMISSIONS_ERROR_MESSAGE } from \"@livepeer/core/errors\";\nimport type { MediaControllerStore } from \"@livepeer/core/media\";\nimport type { ClientStorage } from \"@livepeer/core/storage\";\nimport { warn } from \"@livepeer/core/utils\";\nimport {\n createJSONStorage,\n persist,\n subscribeWithSelector,\n} from \"zustand/middleware\";\nimport { createStore, type StoreApi } from \"zustand/vanilla\";\nimport { isPictureInPictureSupported } from \"./media/controls\";\nimport { getRTCPeerConnectionConstructor } from \"./webrtc/shared\";\nimport {\n attachMediaStreamToPeerConnection,\n createMirroredVideoTrack,\n createNewWHIP,\n getDisplayMedia,\n getDisplayMediaExists,\n getMediaDevices,\n getUserMedia,\n // biome-ignore lint/correctness/noUnusedImports: ignored using `--suppress`\n setMediaStreamTracksStatus,\n} from \"./webrtc/whip\";\n\nconst delay = (ms: number) => {\n return new Promise((resolve) => setTimeout(resolve, ms));\n};\n\nexport type BroadcastStatus = \"live\" | \"pending\" | \"idle\";\nexport type AudioDeviceId = \"default\" | `ID${string}`;\nexport type VideoDeviceId = \"default\" | \"screen\" | `ID${string}`;\n\nexport type MediaDeviceIds = {\n audioinput: AudioDeviceId;\n videoinput: VideoDeviceId;\n};\n\nexport type MediaDeviceInfoExtended = Omit<\n MediaDeviceInfo,\n \"label\" | \"toJSON\"\n> & {\n /**\n * This is a convenience field added to MediaDeviceInfo to help easily add a device picker.\n *\n * For security reasons, the label field will blank unless an active media stream exists\n * or the user has granted persistent permission for media device access. The set of device labels\n * could otherwise be used as part of a fingerprinting mechanism to identify a user.\n *\n * When the label field is not blank, these are the same value. Otherwise, the value is a friendly default.\n */\n friendlyName: string;\n /**\n * For security reasons, the label field is blank unless an active media stream exists\n * or the user has granted persistent permission for media device access. 
The set of device labels\n * could otherwise be used as part of a fingerprinting mechanism to identify a user.\n *\n * We override it here to be null when it is blank, for easier developer usage.\n */\n label: string | null;\n};\n\nexport const getBroadcastDeviceInfo = (\n version: string,\n): BroadcastDeviceInformation => ({\n version,\n\n isMediaDevicesSupported: Boolean(getMediaDevices()),\n isRTCPeerConnectionSupported: Boolean(getRTCPeerConnectionConstructor()),\n isDisplayMediaSupported: Boolean(getDisplayMediaExists()),\n});\n\nexport type BroadcastDeviceInformation = {\n version: string;\n\n /** If the environment supports mediaDevices */\n isMediaDevicesSupported: boolean;\n /** If the environment supports RTCPeerConnection */\n isRTCPeerConnectionSupported: boolean;\n /** If the environment supports sharing display media */\n isDisplayMediaSupported: boolean;\n};\n\nexport type BroadcastControlsState = {\n /** The last time that a force renegotiation was requested (this is triggered on an error) */\n requestedForceRenegotiateLastTime: number;\n /** The last time that the device list was requested */\n requestedUpdateDeviceListLastTime: number;\n\n /** The requested audio input device ID */\n requestedAudioInputDeviceId: AudioDeviceId;\n /** The requested video input device ID */\n requestedVideoInputDeviceId: VideoDeviceId | null;\n\n /** The previous video input device ID, used when screenshare ends */\n previousVideoInputDeviceId: VideoDeviceId | null;\n\n /** The original microphone track, stored for swapping with silent track */\n microphoneTrack: MediaStreamTrack | null;\n\n /**\n * The internal list of the current media devices from the browser.\n */\n mediaDevices: MediaDeviceInfo[] | null;\n};\n\nexport type InitialBroadcastProps = {\n /**\n * The aspect ratio for the container element\n */\n aspectRatio: number | null;\n\n /**\n * Whether audio is initially enabled for the broadcast.\n *\n * Set to false to initialize the broadcast to not request an audio track.\n */\n audio: boolean | Omit<MediaTrackConstraints, \"deviceId\">;\n\n /**\n * The creatorId for the current broadcast.\n */\n creatorId: string | null;\n\n /**\n * Whether hotkeys are enabled. Defaults to `true`. Allows users to use keyboard shortcuts for broadcast control.\n *\n * This is highly recommended to adhere to ARIA guidelines.\n */\n hotkeys: boolean;\n\n /**\n * Whether the WebRTC stream should attempt to initialize immediately after the user grants\n * permission to their video/audio input.\n *\n * Defaults to `false`, where preview is shown and then once the stream is enabled, it sends\n * media to the server.\n */\n forceEnabled?: boolean;\n\n /**\n * Whether video is initially enabled for the broadcast.\n *\n * Set to false to initialize the broadcast to not request a video track.\n */\n video: boolean | Omit<MediaTrackConstraints, \"deviceId\">;\n\n /**\n * @deprecated in favor of `iceServers`\n *\n * Whether to disable ICE gathering.\n *\n * Set to true to disable ICE gathering. 
This is useful for testing purposes.\n */\n noIceGathering?: boolean;\n\n /**\n * Whether to send a silent audio track if the audio is disabled.\n *\n * Set to true to send a silent audio track if the audio is disabled.\n */\n silentAudioTrack?: boolean;\n\n /**\n * The ICE servers to use.\n *\n * If not provided, the default ICE servers will be used.\n */\n iceServers?: RTCIceServer | RTCIceServer[];\n\n /**\n * Whether the video stream should be mirrored (horizontally flipped).\n *\n * Set to true to broadcast a mirrored view.\n * Defaults to `false`.\n */\n mirrored?: boolean;\n};\n\nexport type BroadcastAriaText = {\n audioTrigger: string;\n start: string;\n screenshareTrigger: string;\n videoTrigger: string;\n};\n\nexport type BroadcastState = {\n /** The ARIA text for the current state. */\n aria: BroadcastAriaText;\n\n /** If the broadcast audio track is turned on. */\n audio: boolean;\n\n /** Whether the broadcast is currently enabled, or in \"preview\" mode. */\n enabled: boolean;\n\n /** Whether the broadcast store is hydrated. */\n hydrated: boolean;\n\n /**\n * A list of the current media devices. This will change when permissions change, or when\n * a user starts sharing their display.\n */\n mediaDevices: MediaDeviceInfoExtended[] | null;\n\n /** The MediaStream for the current broadcast. */\n mediaStream: MediaStream | null;\n\n /** Whether the broadcast component is mounted. */\n mounted: boolean;\n\n /** The RTCPeerConnection for the current broadcast. */\n peerConnection: RTCPeerConnection | null;\n\n /** The status of the current broadcast. */\n status: BroadcastStatus;\n\n /**\n * The WHIP ingest URL to use for the broadcast.\n */\n ingestUrl: string | null;\n\n /** If the broadcast video track is turned on. */\n video: boolean;\n\n /** The currently selected media devices. */\n mediaDeviceIds: MediaDeviceIds;\n\n /** The initial props passed into the component. */\n __initialProps: InitialBroadcastProps;\n /** The broadcast device information and support. */\n __device: BroadcastDeviceInformation;\n /** The controls state. 
*/\n __controls: BroadcastControlsState;\n\n __controlsFunctions: {\n requestDeviceListInfo: () => void;\n requestForceRenegotiate: () => void;\n requestMediaDeviceId: (\n deviceId: AudioDeviceId,\n type: keyof MediaDeviceIds,\n ) => void;\n rotateAudioSource: () => void;\n rotateVideoSource: () => void;\n setIngestUrl: (ingestUrl: string) => void;\n setInitialState: (\n ids: MediaDeviceIds,\n audio: boolean,\n video: boolean,\n ) => void;\n setPeerConnection: (peerConnection: RTCPeerConnection) => void;\n setStatus: (status: BroadcastStatus) => void;\n setMediaDeviceIds: (mediaDevices: Partial<MediaDeviceIds>) => void;\n toggleAudio: () => void;\n toggleDisplayMedia: () => void;\n toggleEnabled: () => void;\n toggleVideo: () => void;\n updateDeviceList: (mediaDevices: MediaDeviceInfo[]) => void;\n updateMediaStream: (mediaStream: MediaStream) => void;\n };\n};\n\nexport type BroadcastStore = StoreApi<BroadcastState> & {\n subscribe: {\n (\n listener: (\n selectedState: BroadcastState,\n previousSelectedState: BroadcastState,\n ) => void,\n ): () => void;\n <U>(\n selector: (state: BroadcastState) => U,\n listener: (selectedState: U, previousSelectedState: U) => void,\n options?: {\n equalityFn?: (a: U, b: U) => boolean;\n fireImmediately?: boolean;\n },\n ): () => void;\n };\n persist: {\n onFinishHydration: (fn: (state: BroadcastState) => void) => () => void;\n };\n};\n\nexport const createBroadcastStore = ({\n ingestUrl,\n device,\n storage,\n initialProps,\n}: {\n ingestUrl: string | null | undefined;\n device: BroadcastDeviceInformation;\n storage: ClientStorage;\n initialProps: Partial<InitialBroadcastProps>;\n}): { store: BroadcastStore; destroy: () => void } => {\n const initialControls: BroadcastControlsState = {\n requestedUpdateDeviceListLastTime: 0,\n requestedForceRenegotiateLastTime: 0,\n requestedAudioInputDeviceId: \"default\",\n requestedVideoInputDeviceId: null,\n previousVideoInputDeviceId: null,\n mediaDevices: null,\n microphoneTrack: null,\n };\n\n const store = createStore<\n BroadcastState,\n [\n [\"zustand/subscribeWithSelector\", Partial<BroadcastState>],\n [\"zustand/persist\", Partial<BroadcastState>],\n ]\n >(\n subscribeWithSelector(\n persist(\n // biome-ignore lint/correctness/noUnusedFunctionParameters: ignored using `--suppress`\n (set, get) => ({\n audio: initialProps?.audio !== false,\n video: initialProps?.video !== false,\n\n hydrated: false,\n mounted: false,\n\n enabled: initialProps?.forceEnabled ?? false,\n\n status: \"idle\",\n\n mediaStream: null,\n mediaDevices: null,\n peerConnection: null,\n\n ingestUrl: ingestUrl ?? null,\n\n mediaDeviceIds: {\n audioinput: \"default\",\n videoinput: \"default\",\n },\n\n aria: {\n audioTrigger:\n initialProps?.audio === false\n ? \"Turn audio on (space)\"\n : \"Turn audio off (space)\",\n start: \"Start broadcasting (b)\",\n screenshareTrigger: \"Share screen (d)\",\n videoTrigger:\n initialProps?.video === false\n ? \"Turn video on (v)\"\n : \"Turn video off (v)\",\n },\n\n __initialProps: {\n aspectRatio: initialProps?.aspectRatio ?? null,\n audio: initialProps?.audio ?? true,\n creatorId: initialProps.creatorId ?? null,\n forceEnabled: initialProps?.forceEnabled ?? false,\n hotkeys: initialProps.hotkeys ?? true,\n ingestUrl: ingestUrl ?? null,\n video: initialProps?.video ?? true,\n noIceGathering: initialProps?.noIceGathering ?? false,\n silentAudioTrack: initialProps?.silentAudioTrack ?? false,\n iceServers: initialProps?.iceServers,\n mirrored: initialProps?.mirrored ?? 
false,\n },\n\n __device: device,\n\n __controls: initialControls,\n\n __metadata: null,\n\n __controlsFunctions: {\n updateMediaStream: (mediaStream) =>\n set(() => ({\n mediaStream,\n })),\n\n setPeerConnection: (peerConnection) =>\n set(() => ({\n peerConnection,\n })),\n\n setIngestUrl: (ingestUrl) =>\n set(() => ({\n ingestUrl,\n })),\n\n requestForceRenegotiate: () =>\n set(({ __controls }) => ({\n __controls: {\n ...__controls,\n requestedForceRenegotiateLastTime: Date.now(),\n },\n })),\n\n rotateAudioSource: () =>\n set(({ mediaDeviceIds, mediaDevices, __controls }) => {\n if (!mediaDevices) {\n warn(\n \"Could not rotate audio source, no audio media devices detected.\",\n );\n\n return {};\n }\n\n const audioDevices = mediaDevices.filter(\n (m) => m.kind === \"audioinput\",\n );\n\n const currentAudioInputIndex = audioDevices.findIndex(\n (s) => s.deviceId === mediaDeviceIds.audioinput,\n );\n\n // Get the next audio input device\n const nextAudioInputDevice =\n audioDevices[\n (currentAudioInputIndex + 1) % audioDevices.length\n ] ?? null;\n\n return {\n __controls: {\n ...__controls,\n requestedAudioInputDeviceId:\n nextAudioInputDevice.deviceId as AudioDeviceId,\n },\n };\n }),\n\n rotateVideoSource: () =>\n set(({ mediaDeviceIds, mediaDevices, __controls }) => {\n if (!mediaDevices) {\n warn(\n \"Could not rotate video source, no video media devices detected.\",\n );\n\n return {};\n }\n\n const videoDevices = mediaDevices.filter(\n (m) => m.kind === \"videoinput\",\n );\n\n const currentVideoInputIndex = videoDevices.findIndex(\n (s) => s.deviceId === mediaDeviceIds.videoinput,\n );\n\n // Get the next video input device\n const nextVideoInputDevice =\n videoDevices[\n (currentVideoInputIndex + 1) % videoDevices.length\n ] ?? null;\n\n return {\n __controls: {\n ...__controls,\n requestedVideoInputDeviceId:\n nextVideoInputDevice.deviceId as VideoDeviceId,\n },\n };\n }),\n\n toggleDisplayMedia: () =>\n set(({ __controls, mediaDeviceIds, aria }) => {\n if (mediaDeviceIds.videoinput === \"screen\") {\n return {\n aria: {\n ...aria,\n screenshareTrigger: \"Share screen (d)\",\n },\n __controls: {\n ...__controls,\n requestedVideoInputDeviceId:\n __controls.previousVideoInputDeviceId,\n },\n };\n }\n\n return {\n aria: {\n ...aria,\n screenshareTrigger: \"Stop sharing screen (d)\",\n },\n __controls: {\n ...__controls,\n previousVideoInputDeviceId: mediaDeviceIds.videoinput,\n requestedVideoInputDeviceId: \"screen\",\n },\n };\n }),\n\n setInitialState: (deviceIds, audio, video) =>\n set(({ __controls }) => ({\n hydrated: true,\n audio,\n video,\n __controls: {\n ...__controls,\n requestedAudioInputDeviceId:\n deviceIds?.audioinput ?? \"default\",\n requestedVideoInputDeviceId:\n deviceIds?.videoinput === \"screen\"\n ? \"default\"\n : (deviceIds?.videoinput ?? \"default\"),\n },\n })),\n\n requestMediaDeviceId: (deviceId, type) =>\n set(({ __controls }) => ({\n __controls: {\n ...__controls,\n ...(type === \"videoinput\"\n ? {\n requestedVideoInputDeviceId: deviceId,\n }\n : type === \"audioinput\"\n ? 
{\n requestedAudioInputDeviceId: deviceId,\n }\n : {}),\n },\n })),\n\n setStatus: (status) =>\n set(() => ({\n status,\n })),\n\n setMediaDeviceIds: (newMediaDeviceIds) =>\n set(({ mediaDeviceIds }) => ({\n mediaDeviceIds: {\n ...mediaDeviceIds,\n ...newMediaDeviceIds,\n },\n })),\n\n updateDeviceList: (mediaDevices) =>\n set(({ __controls }) => ({\n __controls: {\n ...__controls,\n mediaDevices,\n },\n })),\n\n requestDeviceListInfo: () =>\n set(({ __controls }) => ({\n __controls: {\n ...__controls,\n requestedUpdateDeviceListLastTime: Date.now(),\n },\n })),\n\n toggleVideo: () =>\n set(({ video, aria }) => ({\n video: !video,\n aria: {\n ...aria,\n videoTrigger: !video\n ? \"Turn video off (v)\"\n : \"Turn video on (v)\",\n },\n })),\n\n toggleAudio: () =>\n set(({ audio, aria }) => ({\n audio: !audio,\n aria: {\n ...aria,\n audioTrigger: !audio\n ? \"Turn audio off (space)\"\n : \"Turn audio on (space)\",\n },\n })),\n\n toggleEnabled: () =>\n set(({ enabled, aria }) => ({\n enabled: !enabled,\n aria: {\n ...aria,\n start: enabled\n ? \"Start broadcasting (b)\"\n : \"Stop broadcasting (b)\",\n },\n })),\n },\n }),\n {\n name: \"livepeer-broadcast-controller\",\n version: 1,\n // these values are persisted across broadcasts\n partialize: ({ audio, video, mediaDeviceIds }) => ({\n audio,\n video,\n mediaDeviceIds,\n }),\n storage: createJSONStorage(() => storage),\n },\n ),\n ),\n );\n\n const destroy = store.persist.onFinishHydration(\n ({ mediaDeviceIds, audio, video }) => {\n store\n .getState()\n .__controlsFunctions.setInitialState(mediaDeviceIds, audio, video);\n },\n );\n\n return { store, destroy };\n};\n\nconst MEDIA_BROADCAST_INITIALIZED_ATTRIBUTE =\n \"data-livepeer-broadcast-initialized\";\n\nconst allKeyTriggers = [\n \"KeyL\",\n \"KeyV\",\n \"KeyB\",\n \"Space\",\n \"KeyD\",\n \"KeyC\",\n \"KeyM\",\n] as const;\ntype KeyTrigger = (typeof allKeyTriggers)[number];\n\nexport const addBroadcastEventListeners = (\n element: HTMLMediaElement,\n store: BroadcastStore,\n mediaStore: MediaControllerStore,\n) => {\n const onKeyUp = (e: KeyboardEvent) => {\n e.preventDefault();\n e.stopPropagation();\n\n const code = e.code as KeyTrigger;\n\n if (allKeyTriggers.includes(code)) {\n if (code === \"Space\" || code === \"KeyL\") {\n store.getState().__controlsFunctions.toggleAudio();\n } else if (code === \"KeyV\") {\n store.getState().__controlsFunctions.toggleVideo();\n } else if (code === \"KeyB\") {\n store.getState().__controlsFunctions.toggleEnabled();\n } else if (code === \"KeyD\") {\n store.getState().__controlsFunctions.toggleDisplayMedia();\n } else if (code === \"KeyC\") {\n store.getState().__controlsFunctions.rotateVideoSource();\n } else if (code === \"KeyM\") {\n store.getState().__controlsFunctions.rotateAudioSource();\n }\n }\n };\n\n const onDeviceChange = () => {\n store.getState().__controlsFunctions.requestDeviceListInfo();\n };\n\n const mediaDevices = getMediaDevices();\n\n mediaDevices?.addEventListener?.(\"devicechange\", onDeviceChange);\n\n const parentElementOrElement = element?.parentElement ?? 
element;\n\n if (element) {\n if (parentElementOrElement) {\n if (store.getState().__initialProps.hotkeys) {\n parentElementOrElement.addEventListener(\"keyup\", onKeyUp);\n parentElementOrElement.setAttribute(\"tabindex\", \"0\");\n }\n }\n\n element.setAttribute(MEDIA_BROADCAST_INITIALIZED_ATTRIBUTE, \"true\");\n }\n\n // add effects\n const { destroy: destroyEffects } = addEffectsToStore(\n element,\n store,\n mediaStore,\n );\n\n const removeHydrationListener = store.persist.onFinishHydration(\n ({ mediaDeviceIds, audio, video }) => {\n store\n .getState()\n .__controlsFunctions.setInitialState(mediaDeviceIds, audio, video);\n },\n );\n\n return {\n destroy: () => {\n removeHydrationListener?.();\n\n parentElementOrElement?.removeEventListener?.(\"keyup\", onKeyUp);\n\n mediaDevices?.removeEventListener?.(\"devicechange\", onDeviceChange);\n\n destroyEffects?.();\n\n element?.removeAttribute?.(MEDIA_BROADCAST_INITIALIZED_ATTRIBUTE);\n },\n };\n};\n\ntype Cleanup = () => void | Promise<void>;\n\n// Cleanup function for whip\nlet cleanupWhip: Cleanup = () => {};\n// Cleanup function for media source\nlet cleanupMediaStream: Cleanup = () => {};\n\nconst addEffectsToStore = (\n element: HTMLMediaElement,\n store: BroadcastStore,\n mediaStore: MediaControllerStore,\n) => {\n /** MEDIA STORE SYNC LISTENERS - these one-way synchronize the playback state with the broadcast state */\n\n // Subscribe to error count\n const destroyErrorCount = mediaStore.subscribe(\n ({ errorCount }) => errorCount,\n async (errorCount) => {\n if (errorCount > 0) {\n const delayTime = 500 * 2 ** (errorCount - 1);\n await delay(delayTime);\n\n store.getState().__controlsFunctions.requestForceRenegotiate();\n }\n },\n );\n\n // Subscribe to sync the mounted states\n const destroyMediaSyncMounted = mediaStore.subscribe(\n ({ mounted }) => mounted,\n async (mounted) => {\n // we use setState here so it's clear this isn't an external function\n store.setState({ mounted });\n },\n );\n\n // Subscribe to sync the error states\n const destroyMediaSyncError = mediaStore.subscribe(\n ({ error }) => error,\n async (error) => {\n if (error?.type === \"permissions\") {\n // we use setState here so it's clear this isn't an external function\n store.setState((state) => ({\n __controls: {\n ...state.__controls,\n requestedVideoInputDeviceId: state.mediaDeviceIds.videoinput,\n },\n }));\n }\n },\n );\n\n // Subscribe to media stream changes\n const destroyPictureInPictureSupportedMonitor = store.subscribe(\n (state) => state.mediaStream,\n async () => {\n const isPipSupported = isPictureInPictureSupported(element);\n\n if (!isPipSupported) {\n mediaStore.setState((state) => ({\n __device: {\n ...state.__device,\n isPictureInPictureSupported: isPipSupported,\n },\n }));\n }\n },\n {\n equalityFn: (a, b) => a?.id === b?.id,\n },\n );\n\n /** STORE LISTENERS - handle broadcast state */\n\n // Subscribe to request user media\n const destroyWhip = store.subscribe(\n ({ enabled, ingestUrl, __controls, mounted, __initialProps }) => ({\n enabled,\n ingestUrl,\n requestedForceRenegotiateLastTime:\n __controls.requestedForceRenegotiateLastTime,\n mounted,\n noIceGathering: __initialProps.noIceGathering,\n silentAudioTrack: __initialProps.silentAudioTrack,\n iceServers: __initialProps.iceServers,\n }),\n async ({ enabled, ingestUrl, noIceGathering, iceServers }) => {\n await cleanupWhip?.();\n\n if (!enabled) {\n return;\n }\n\n if (!ingestUrl) {\n warn(\n \"No ingest URL provided, cannot start stream. 
Please check the configuration passed to the Broadcast component.\",\n );\n return;\n }\n\n let unmounted = false;\n\n const onErrorComposed = (err: Error) => {\n if (!unmounted) {\n mediaStore.getState().__controlsFunctions.setLive(false);\n mediaStore.getState().__controlsFunctions?.onError?.(err);\n }\n };\n\n store.getState().__controlsFunctions.setStatus(\"pending\");\n\n const { destroy } = createNewWHIP({\n ingestUrl,\n element,\n callbacks: {\n onRTCPeerConnection: (peerConnection) => {\n store\n .getState()\n .__controlsFunctions.setPeerConnection(peerConnection);\n },\n onConnected: () => {\n store.getState().__controlsFunctions.setStatus(\"live\");\n mediaStore.getState().__controlsFunctions.onError(null);\n },\n onError: onErrorComposed,\n },\n sdpTimeout: null,\n noIceGathering,\n iceServers,\n });\n\n cleanupWhip = () => {\n unmounted = true;\n destroy?.();\n store.getState().__controlsFunctions.setStatus(\"idle\");\n };\n },\n {\n equalityFn: (a, b) =>\n a.requestedForceRenegotiateLastTime ===\n b.requestedForceRenegotiateLastTime &&\n a.ingestUrl === b.ingestUrl &&\n a.enabled === b.enabled &&\n a.mounted === b.mounted,\n },\n );\n\n // Subscribe to request user media\n const destroyRequestUserMedia = store.subscribe(\n (state) => ({\n hydrated: state.hydrated,\n mounted: state.mounted,\n video: state.video,\n audio: state.audio,\n requestedAudioDeviceId: state.__controls.requestedAudioInputDeviceId,\n requestedVideoDeviceId: state.__controls.requestedVideoInputDeviceId,\n initialAudioConfig: state.__initialProps.audio,\n initialVideoConfig: state.__initialProps.video,\n mirrored: state.__initialProps.mirrored,\n previousMediaStream: state.mediaStream,\n silentAudioTrack: state.__initialProps.silentAudioTrack,\n }),\n async ({\n hydrated,\n mounted,\n audio,\n video,\n requestedAudioDeviceId,\n requestedVideoDeviceId,\n previousMediaStream,\n initialAudioConfig,\n initialVideoConfig,\n silentAudioTrack,\n mirrored,\n }) => {\n try {\n if (!mounted || !hydrated) {\n return;\n }\n\n // Force audio to true if silentAudioTrack is enabled so we get a microphone track\n const shouldRequestAudio = audio || silentAudioTrack;\n\n if (!shouldRequestAudio && !video) {\n console.log(\n \"|||| FORCING VIDEO ENABLED to request getUserMedia ||||\",\n );\n warn(\n \"At least one of audio and video must be requested. Overriding video to be enabled so that `getUserMedia` can be requested.\",\n );\n\n store.setState({ video: true });\n video = true;\n }\n\n const audioConstraints =\n typeof initialAudioConfig !== \"boolean\" ? initialAudioConfig : null;\n const videoConstraints =\n typeof initialVideoConfig !== \"boolean\" ? initialVideoConfig : null;\n\n console.log(\n \"|||| Requesting media with audio:\",\n shouldRequestAudio,\n \"and video:\",\n video,\n \"||||\",\n );\n const stream = await (requestedVideoDeviceId === \"screen\"\n ? getDisplayMedia({\n // for now, only the microphone audio track is supported - we don't support multiple\n // discrete audio tracks\n audio: false,\n\n // we assume that if the user is requested to share screen, they want to enable video,\n // and we don't listen to the `video` enabled state\n //\n // we apply the video constraints to the video track\n video: videoConstraints ?? true,\n })\n : getUserMedia({\n // Always request audio if silentAudioTrack is enabled\n audio:\n shouldRequestAudio &&\n requestedAudioDeviceId &&\n requestedAudioDeviceId !== \"default\"\n ? {\n ...(audioConstraints ? 
audioConstraints : {}),\n deviceId: {\n ideal: requestedAudioDeviceId,\n },\n }\n : shouldRequestAudio\n ? {\n ...(audioConstraints ? audioConstraints : {}),\n }\n : false,\n video:\n video &&\n requestedVideoDeviceId &&\n requestedVideoDeviceId !== \"default\"\n ? {\n ...(videoConstraints ? videoConstraints : {}),\n deviceId: {\n ideal: requestedVideoDeviceId,\n },\n ...(mirrored ? { facingMode: \"user\" } : {}),\n }\n : video\n ? {\n ...(videoConstraints ? videoConstraints : {}),\n ...(mirrored ? { facingMode: \"user\" } : {}),\n }\n : false,\n }));\n\n if (stream) {\n const microphoneTrack = stream?.getAudioTracks()?.[0] ?? null;\n if (microphoneTrack) {\n store.setState((state) => ({\n __controls: {\n ...state.__controls,\n microphoneTrack: microphoneTrack,\n },\n }));\n }\n\n // we get the device ID from the MediaStream and update those\n const allAudioTracks = stream?.getAudioTracks() ?? [];\n const allVideoTracks = stream?.getVideoTracks() ?? [];\n\n const allAudioDeviceIds = allAudioTracks.map(\n (track) => track?.getSettings()?.deviceId,\n );\n const allVideoDeviceIds = allVideoTracks.map(\n (track) => track?.getSettings()?.deviceId,\n );\n\n const firstAudioDeviceId = (allAudioDeviceIds?.[0] ??\n null) as AudioDeviceId | null;\n const firstVideoDeviceId = (allVideoDeviceIds?.[0] ??\n null) as VideoDeviceId | null;\n\n store.getState().__controlsFunctions.setMediaDeviceIds({\n ...(firstAudioDeviceId ? { audioinput: firstAudioDeviceId } : {}),\n ...(firstVideoDeviceId\n ? {\n videoinput:\n requestedVideoDeviceId === \"screen\"\n ? \"screen\"\n : firstVideoDeviceId,\n }\n : {}),\n });\n\n // merge the new audio and/or video and the old media stream\n const mergedMediaStream = new MediaStream();\n\n const mergedAudioTrack =\n allAudioTracks?.[0] ??\n previousMediaStream?.getAudioTracks?.()?.[0] ??\n null;\n\n let mergedVideoTrack =\n allVideoTracks?.[0] ??\n previousMediaStream?.getVideoTracks?.()?.[0] ??\n null;\n\n if (\n mergedVideoTrack &&\n mirrored &&\n requestedVideoDeviceId !== \"screen\"\n ) {\n try {\n const videoSettings = mergedVideoTrack.getSettings();\n const isFrontFacing =\n videoSettings.facingMode === \"user\" ||\n !videoSettings.facingMode;\n\n if (isFrontFacing) {\n element.classList.add(\"livepeer-mirrored-video\");\n mergedVideoTrack = createMirroredVideoTrack(mergedVideoTrack);\n } else {\n element.classList.remove(\"livepeer-mirrored-video\");\n }\n } catch (err) {\n warn(\n `Failed to apply video mirroring: ${(err as Error).message}`,\n );\n }\n } else {\n element.classList.remove(\"livepeer-mirrored-video\");\n }\n\n if (mergedAudioTrack) mergedMediaStream.addTrack(mergedAudioTrack);\n if (mergedVideoTrack) mergedMediaStream.addTrack(mergedVideoTrack);\n\n store\n .getState()\n .__controlsFunctions.updateMediaStream(mergedMediaStream);\n }\n } catch (e) {\n if ((e as Error)?.name === \"NotAllowedError\") {\n mediaStore\n .getState()\n .__controlsFunctions.onError(new Error(PERMISSIONS_ERROR_MESSAGE));\n } else {\n warn((e as Error)?.message);\n }\n }\n },\n {\n equalityFn: (a, b) =>\n a.hydrated === b.hydrated &&\n a.mounted === b.mounted &&\n a.requestedAudioDeviceId === b.requestedAudioDeviceId &&\n a.requestedVideoDeviceId === b.requestedVideoDeviceId,\n },\n );\n\n // Subscribe to audio & video enabled, and media stream\n const destroyAudioVideoEnabled = store.subscribe(\n (state) => ({\n audio: state.audio,\n video: state.video,\n mediaStream: state.mediaStream,\n silentAudioTrack: state.__initialProps.silentAudioTrack,\n peerConnection: 
state.peerConnection,\n microphoneTrack: state.__controls.microphoneTrack,\n }),\n async ({\n audio,\n video,\n mediaStream,\n silentAudioTrack,\n peerConnection,\n microphoneTrack,\n }) => {\n if (!mediaStream) return;\n\n for (const videoTrack of mediaStream.getVideoTracks()) {\n videoTrack.enabled = video;\n }\n\n if (silentAudioTrack) {\n if (peerConnection) {\n const currentAudioTrack = mediaStream.getAudioTracks()[0];\n\n if (!audio && microphoneTrack) {\n // use silent track\n if (currentAudioTrack && currentAudioTrack !== microphoneTrack) {\n currentAudioTrack.enabled = true;\n } else {\n // swap in a silent track\n const silentTrack = createSilentAudioTrack();\n\n if (currentAudioTrack) {\n mediaStream.removeTrack(currentAudioTrack);\n }\n mediaStream.addTrack(silentTrack);\n\n // Replace in peer connection\n const audioSender = peerConnection\n .getSenders()\n .find((s) => s.track && s.track.kind === \"audio\");\n if (audioSender) {\n await audioSender.replaceTrack(silentTrack);\n }\n }\n } else if (audio && microphoneTrack) {\n if (currentAudioTrack === microphoneTrack) {\n microphoneTrack.enabled = true;\n } else {\n // swap back to microphone track\n if (currentAudioTrack) {\n mediaStream.removeTrack(currentAudioTrack);\n }\n mediaStream.addTrack(microphoneTrack);\n\n const audioSender = peerConnection\n .getSenders()\n .find((s) => s.track && s.track.kind === \"audio\");\n if (audioSender) {\n await audioSender.replaceTrack(microphoneTrack);\n microphoneTrack.enabled = true;\n }\n }\n }\n } else {\n for (const audioTrack of mediaStream.getAudioTracks()) {\n audioTrack.enabled = audio;\n }\n }\n } else {\n for (const audioTrack of mediaStream.getAudioTracks()) {\n audioTrack.enabled = audio;\n }\n }\n },\n {\n equalityFn: (a, b) =>\n a.audio === b.audio &&\n a.video === b.video &&\n a.mediaStream?.id === b.mediaStream?.id,\n },\n );\n\n // Subscribe to media stream\n const destroyPeerConnectionAndMediaStream = store.subscribe(\n ({ mediaStream, peerConnection }) => ({ mediaStream, peerConnection }),\n async ({ mediaStream, peerConnection }) => {\n if (!mediaStream || !peerConnection) {\n return;\n }\n\n await attachMediaStreamToPeerConnection({\n mediaStream,\n peerConnection,\n });\n },\n {\n equalityFn: (a, b) =>\n a.mediaStream?.id === b.mediaStream?.id &&\n a.peerConnection === b.peerConnection,\n },\n );\n\n // Subscribe to mediastream changes\n const destroyMediaStream = store.subscribe(\n (state) => state.mediaStream,\n async (mediaStream) => {\n await cleanupMediaStream?.();\n\n if (mediaStream) {\n element.srcObject = mediaStream;\n\n const togglePlay = () => {\n mediaStore.getState().__controlsFunctions.togglePlay(true);\n };\n\n element.addEventListener(\"loadedmetadata\", togglePlay);\n\n cleanupMediaStream = () => {\n element?.removeEventListener?.(\"loadedmetadata\", togglePlay);\n\n element.srcObject = null;\n };\n } else {\n element.srcObject = null;\n }\n },\n {\n equalityFn: (a, b) => a?.id === b?.id,\n },\n );\n\n // Subscribe to media devices\n const destroyUpdateDeviceList = store.subscribe(\n (state) => ({\n mounted: state.mounted,\n requestedUpdateDeviceListLastTime:\n state.__controls.requestedUpdateDeviceListLastTime,\n }),\n async ({ mounted }) => {\n if (!mounted) {\n return;\n }\n\n const mediaDevices = getMediaDevices();\n const devices = await mediaDevices?.enumerateDevices();\n\n if (devices) {\n store\n .getState()\n .__controlsFunctions.updateDeviceList(\n devices.filter((d) => d.deviceId),\n );\n }\n },\n {\n equalityFn: (a, b) =>\n a.mounted 
=== b.mounted &&\n a.requestedUpdateDeviceListLastTime ===\n b.requestedUpdateDeviceListLastTime,\n },\n );\n\n // Subscribe to media devices and map to friendly names\n const destroyMapDeviceListToFriendly = store.subscribe(\n (state) => ({\n mediaDeviceIds: state.mediaDeviceIds,\n mediaDevices: state.__controls.mediaDevices,\n }),\n async ({ mediaDeviceIds, mediaDevices }) => {\n if (mediaDevices) {\n const extendedDevices: MediaDeviceInfoExtended[] = mediaDevices\n .filter((d) => d.deviceId)\n .map((device, i) => ({\n deviceId: device.deviceId,\n kind: device.kind,\n groupId: device.groupId,\n label: device.label || null,\n friendlyName:\n device.label ??\n `${\n device.kind === \"audioinput\"\n ? \"Audio Source\"\n : device.kind === \"audiooutput\"\n ? \"Audio Output\"\n : \"Video Source\"\n } ${i + 1} (${\n device.deviceId === \"default\"\n ? \"default\"\n : device.deviceId.slice(0, 6)\n })`,\n }));\n\n const isScreenshare = mediaDeviceIds.videoinput === \"screen\";\n\n if (isScreenshare) {\n extendedDevices.push({\n deviceId: mediaDeviceIds.videoinput,\n label: \"Screen share\",\n groupId: \"none\",\n kind: \"videoinput\",\n friendlyName: \"Screen share\",\n });\n }\n\n store.setState({\n mediaDevices: extendedDevices,\n });\n }\n },\n {\n equalityFn: (a, b) =>\n a.mediaDeviceIds === b.mediaDeviceIds &&\n a.mediaDevices === b.mediaDevices,\n },\n );\n\n const destroyPeerConnectionAudioHandler = store.subscribe(\n (state) => ({\n peerConnection: state.peerConnection,\n audio: state.audio,\n mediaStream: state.mediaStream,\n silentAudioTrack: state.__initialProps.silentAudioTrack,\n microphoneTrack: state.__controls.microphoneTrack,\n }),\n async ({\n peerConnection,\n audio,\n mediaStream,\n silentAudioTrack,\n microphoneTrack,\n }) => {\n // Only run when the peer connection becomes available\n if (!peerConnection || !mediaStream || !silentAudioTrack) return;\n\n // swap in the silent track\n if (!audio && microphoneTrack) {\n const currentAudioTracks = mediaStream.getAudioTracks();\n const currentAudioTrack = currentAudioTracks[0];\n\n if (currentAudioTrack && currentAudioTrack !== microphoneTrack) {\n return;\n }\n\n const silentTrack = createSilentAudioTrack();\n\n for (const track of currentAudioTracks) {\n mediaStream.removeTrack(track);\n }\n\n mediaStream.addTrack(silentTrack);\n\n const audioSender = peerConnection\n .getSenders()\n .find((s) => s.track && s.track.kind === \"audio\");\n\n if (audioSender) {\n await audioSender.replaceTrack(silentTrack);\n }\n }\n },\n {\n equalityFn: (a, b) =>\n a.peerConnection === b.peerConnection && a.audio === b.audio,\n },\n );\n\n return {\n destroy: () => {\n destroyAudioVideoEnabled?.();\n destroyErrorCount?.();\n destroyMapDeviceListToFriendly?.();\n destroyMediaStream?.();\n destroyMediaSyncError?.();\n destroyMediaSyncMounted?.();\n destroyPeerConnectionAndMediaStream?.();\n destroyPeerConnectionAudioHandler?.();\n destroyPictureInPictureSupportedMonitor?.();\n destroyRequestUserMedia?.();\n destroyUpdateDeviceList?.();\n destroyWhip?.();\n },\n };\n};\n\n/**\n * Creates a silent audio track to use when audio is muted but we still want\n * to send an audio track. 
This helps maintain connection stability while muted.\n * @returns MediaStreamTrack A silent audio track\n */\nexport const createSilentAudioTrack = (): MediaStreamTrack => {\n // biome-ignore lint/suspicious/noExplicitAny: ignored using `--suppress`\n const ctx = new (window.AudioContext || (window as any).webkitAudioContext)();\n const oscillator = ctx.createOscillator();\n const dst = ctx.createMediaStreamDestination();\n\n const gainNode = ctx.createGain();\n gainNode.gain.value = 0;\n\n oscillator.type = \"sine\";\n oscillator.frequency.value = 440;\n\n oscillator.connect(gainNode);\n gainNode.connect(dst);\n\n oscillator.start();\n const track = dst.stream.getAudioTracks()[0];\n track.enabled = true;\n return track;\n};\n","import {\n ACCESS_CONTROL_ERROR_MESSAGE,\n BFRAMES_ERROR_MESSAGE,\n STREAM_OFFLINE_ERROR_MESSAGE,\n} from \"@livepeer/core/errors\";\nimport type { MediaControllerStore } from \"@livepeer/core/media\";\nimport { warn } from \"@livepeer/core/utils\";\nimport type { HlsConfig as HlsJsConfig } from \"hls.js\";\nimport { createNewHls, type HlsError } from \"../../hls/hls\";\nimport { createNewWHEP } from \"../../webrtc/whep\";\nimport {\n addFullscreenEventListener,\n enterFullscreen,\n exitFullscreen,\n isCurrentlyFullscreen,\n} from \"./fullscreen\";\nimport {\n addEnterPictureInPictureEventListener,\n addExitPictureInPictureEventListener,\n enterPictureInPicture,\n exitPictureInPicture,\n isCurrentlyPictureInPicture,\n} from \"./pictureInPicture\";\nimport { isVolumeChangeSupported } from \"./volume\";\n\nexport type HlsConfig = Partial<HlsJsConfig>;\n\nconst MEDIA_CONTROLLER_INITIALIZED_ATTRIBUTE =\n \"data-livepeer-controller-initialized\";\n\nconst allKeyTriggers = [\n \"KeyF\",\n \"KeyK\",\n \"KeyM\",\n \"KeyI\",\n \"KeyV\",\n \"KeyX\",\n \"Space\",\n \"ArrowRight\",\n \"ArrowLeft\",\n] as const;\ntype KeyTrigger = (typeof allKeyTriggers)[number];\n\nconst delay = (ms: number) => {\n return new Promise((resolve) => setTimeout(resolve, ms));\n};\n\nexport const addEventListeners = (\n element: HTMLMediaElement,\n store: MediaControllerStore,\n) => {\n const initializedState = store.getState();\n\n try {\n isVolumeChangeSupported(\n initializedState.currentSource?.type === \"audio\" ? \"audio\" : \"video\",\n ).then((result) => {\n store.setState(({ __device }) => ({\n __device: {\n ...__device,\n isVolumeChangeSupported: result,\n },\n }));\n });\n } catch (e) {\n console.error(e);\n }\n\n const onLoadedMetadata = () => {\n store.getState().__controlsFunctions.onCanPlay();\n store.getState().__controlsFunctions.requestMeasure();\n };\n\n const onLoadedData = () => {\n store.getState().__controlsFunctions.requestMeasure();\n };\n\n const onPlay = () => {\n store.getState().__controlsFunctions.onPlay();\n };\n const onPause = () => {\n store.getState().__controlsFunctions.onPause();\n };\n\n const onDurationChange = () =>\n store\n .getState()\n .__controlsFunctions.onDurationChange(element?.duration ?? 
0);\n\n const onKeyUp = (e: KeyboardEvent) => {\n e.preventDefault();\n e.stopPropagation();\n\n const code = e.code as KeyTrigger;\n\n store.getState().__controlsFunctions.updateLastInteraction();\n\n const isNotBroadcast =\n store.getState().__initialProps.hotkeys !== \"broadcast\";\n\n if (allKeyTriggers.includes(code)) {\n if ((code === \"Space\" || code === \"KeyK\") && isNotBroadcast) {\n store.getState().__controlsFunctions.togglePlay();\n } else if (code === \"ArrowRight\" && isNotBroadcast) {\n store.getState().__controlsFunctions.requestSeekForward();\n } else if (code === \"ArrowLeft\" && isNotBroadcast) {\n store.getState().__controlsFunctions.requestSeekBack();\n } else if (code === \"KeyM\" && isNotBroadcast) {\n store.getState().__controlsFunctions.requestToggleMute();\n } else if (code === \"KeyX\" && isNotBroadcast) {\n store.getState().__controlsFunctions.requestClip();\n } else if (code === \"KeyF\") {\n store.getState().__controlsFunctions.requestToggleFullscreen();\n } else if (code === \"KeyI\") {\n store.getState().__controlsFunctions.requestTogglePictureInPicture();\n }\n }\n };\n\n const onMouseUpdate = () => {\n store.getState().__controlsFunctions.updateLastInteraction();\n };\n const onTouchUpdate = async () => {\n store.getState().__controlsFunctions.updateLastInteraction();\n };\n\n const onVolumeChange = () => {\n store\n .getState()\n .__controlsFunctions.setVolume(element.muted ? 0 : (element.volume ?? 0));\n };\n\n const onRateChange = () => {\n store.getState().__controlsFunctions.setPlaybackRate(element.playbackRate);\n };\n\n const onTimeUpdate = () => {\n store.getState().__controlsFunctions.onProgress(element?.currentTime ?? 0);\n\n if (element && (element?.duration ?? 0) > 0) {\n const currentTime = element.currentTime;\n\n const buffered = [...Array(element.buffered.length)].reduce(\n (prev, _curr, i) => {\n const start = element.buffered.start(element.buffered.length - 1 - i);\n const end = element.buffered.end(element.buffered.length - 1 - i);\n\n // if the TimeRange covers the current time, then use this value\n if (start <= currentTime && end >= currentTime) {\n return end;\n }\n\n return prev;\n },\n // default to no buffering\n 0,\n );\n\n store.getState().__controlsFunctions.updateBuffered(buffered);\n }\n };\n\n const onError = async (e: ErrorEvent) => {\n const source = store.getState().currentSource;\n\n if (source?.type === \"video\") {\n const sourceElement = e.target;\n const parentElement = (sourceElement as HTMLSourceElement)?.parentElement;\n const videoUrl =\n (parentElement as HTMLVideoElement)?.currentSrc ??\n (sourceElement as HTMLVideoElement)?.currentSrc;\n\n if (videoUrl) {\n try {\n const response = await fetch(videoUrl);\n if (response.status === 404) {\n console.warn(\"Video not found\");\n return store\n .getState()\n .__controlsFunctions?.onError?.(\n new Error(STREAM_OFFLINE_ERROR_MESSAGE),\n );\n }\n if (response.status === 401) {\n console.warn(\"Unauthorized to view video\");\n return store\n .getState()\n .__controlsFunctions?.onError?.(\n new Error(ACCESS_CONTROL_ERROR_MESSAGE),\n );\n }\n } catch (err) {\n console.warn(err);\n return store\n .getState()\n .__con