@videosdk.live/react-native-sdk
Version:
<h1 align="center"> <img src="https://static.videosdk.live/videosdk_logo_website_black.png"/><br/> <p align="center"> Video SDK React Native App <br/> <a href="https://videosdk.live/">videosdk.live</a> </p> </h1>
1,611 lines (1,572 loc) • 50.3 kB
TypeScript
// Type definitions for @videosdk.live/react-native-sdk 0.0
// Project: https://docs.videosdk.live/docs/realtime-communication/sdk-reference/react-native-sdk/setup
// Definitions by: Rajan Surani <https://github.com/rajansurani>
// Ahmed Bhesaniya <https://github.com/ahmedbhesaniya97>
// Zujo Now <https://github.com/zujonow>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
import { Component } from "react";
import { ViewStyle } from "react-native";
import { Connection } from "./connection";
import { Meeting } from "./meeting";
import { Participant } from "./participant";
import { Stream } from "./stream";
import { Permission } from "./permission";
import { MediaStream, MediaStreamTrack } from "./mediaStream";
import { ReactNativeForegroundService } from "./reactNativeForegroundService";
import { CameraDeviceInfo, DeviceInfo, AudioDeviceInfo } from "./deviceInfo";
import { Character } from "./character";
/**
* @param mirror - Indicates whether the video specified by streamURL should be mirrored.
* ---
* @param objectFit - Can be 'contain' or 'cover' nothing more or less.
* ---
* @param streamURL - Required to have an actual video stream rendering.
* ---
* @param zOrder - Similar to zIndex.
* ---
*
* **Code Example**
* ```js
* <RTCView
* mirror={true}
* objectFit={'cover'}
* streamURL={new MediaStream([webcamStream.track]).toURL()}
* zOrder={0}
* />
* ```
*/
export interface RTCViewProps {
    /** URL of the stream to render; obtain via `new MediaStream([track]).toURL()`. Required. */
    streamURL: string;
    /** Whether the rendered video should be mirrored (commonly `true` for the front camera). */
    mirror?: boolean | undefined;
    /** Stacking order of the view, similar to CSS `zIndex`. */
    zOrder?: number | undefined;
    /** How the video fills the view: `"contain"` letterboxes, `"cover"` crops. */
    objectFit?: "contain" | "cover" | undefined;
    /** Standard React Native view style applied to the component. */
    style?: ViewStyle | undefined;
}
export class RTCView extends Component<RTCViewProps, any> { }
export { MediaStream, MediaStreamTrack, ReactNativeForegroundService };
/**
* @param children - Render child component.
* ---
* @param config - This is the meeting configuration object, which contains `meetingId`, `name` of the participant, `webcamEnabled`, `micEnabled` and many more.
* ---
* @param config.meetingId -
* - Unique Id of the meeting where that participant will be joining.
*
* - It will be in the format of xxx-yyy-zzz and will be generated using the [VideoSDK's Room API](https://docs.videosdk.live/api-reference/realtime-communication/create-room).
* ---
* @param config.name - Name of the participant who will be joining the meeting, this name will be displayed to other participants in the same meeting.
* ---
* @param config.micEnabled - Whether mic of the participant will be on while joining the meeting. If it is set to false, then mic of that participant will be disabled by default,
* but can be enabled or disabled later.
* ---
* @param config.webcamEnabled - Whether webcam of the participant will be on while joining the meeting. If it is set to false, then webcam of that participant will be disabled by default,
* but can be enabled or disabled later.
* ---
* @param config.participantId - You can specify your custom participantId here.
* ---
* @param config.multiStream - Sets whether to send multi-resolution streams while publishing video. Please refer to this link for more details:
* [What is multiStream ?](https://docs.videosdk.live/react-native/guide/video-and-audio-calling-api-sdk/render-media/optimize-video-track#what-is-multistream)
* ---
* @param config.maxResolution - Sets the maximum resolution (`hd` or `sd`) of the video stream published by the participant.
* ---
* @param config.mode -
*
* - There are 3 types of modes:
*
* - **SEND_AND_RECV**: Both audio and video streams will be produced and consumed in this mode.
*
* - **RECV_ONLY**: Both audio and video streams will be only consumed in this mode.
*
* - **SIGNALLING_ONLY**: Audio and video streams will not be produced or consumed in this mode.
*
* - defaultValue : **SEND_AND_RECV**
* ---
* @param config.defaultCamera -
* - This is an OPTIONAL parameter, it sets the initial camera orientation. Use `front` to initialize the camera with the front-facing (selfie) mode, or `back` to initialize it with the rear-facing (main) mode.
* - By default, it is set to `front`.
* ---
* @param config.debugMode - Enables users to view detailed error logs generated by our SDK directly on the VideoSDK's dashboard.
* ---
* @param config.customCameraVideoTrack - Set the initial custom video track using different encoding parameters, camera facing mode, and optimization mode.
* ---
* @param config.customMicrophoneAudioTrack - Set the initial custom audio track using different encoding parameters and optimization mode.
* ---
* @param config.notification - It contains the title and description for the notification that will be shown to the user when they join the meeting.
*
* **title** - Represents the title of the notification.
* **message** - Represents the description / message of the notification.
* ---
* @param token -
* - You can generate a token in two ways:
*
* 1. **Temporary Token** : You can visit [Dashboard's API Key section](https://app.videosdk.live/api-keys) and generate the temporary token from there.
* 2. **Server** : You can setup JWT in backend and make an API call to get the token from your server.
* ---
* @param joinWithoutUserInteraction -
* - This is a boolean flag, when set to true, allows a participant to join a meeting directly without explicitly calling the join() function.
*
* - This is an OPTIONAL parameter. By default, it is set to false meaning, user has to manually call the join().
*/
export function MeetingProvider({
    children,
    config,
    token,
    joinWithoutUserInteraction,
    reinitialiseMeetingOnConfigChange: _reinitialiseMeetingOnConfigChange,
    deviceInfo,
}: {
    children: any;
    config: {
        // Room id in xxx-yyy-zzz format, generated via VideoSDK's Room API.
        meetingId: string;
        autoConsume?: boolean;
        preferredProtocol?: "UDP_ONLY" | "UDP_OVER_TCP" | "TCP_ONLY";
        // Optional custom participant id; auto-generated when omitted.
        participantId?: string | undefined;
        // Display name shown to other participants in the meeting.
        name: string;
        // Initial mic state when joining; can be toggled later.
        micEnabled: boolean;
        // Initial webcam state when joining; can be toggled later.
        webcamEnabled: boolean;
        // Maximum resolution of the published video stream.
        maxResolution?: "hd" | "sd";
        // Initial custom video track (encoding, facing mode, optimization mode).
        customCameraVideoTrack?: MediaStream | undefined;
        // Initial custom audio track (encoding, optimization mode).
        customMicrophoneAudioTrack?: MediaStream | undefined;
        // Whether to send multi-resolution streams while publishing video.
        multiStream?: boolean;
        // Defaults to "SEND_AND_RECV"; see the JSDoc above for mode semantics.
        mode?: "SEND_AND_RECV" | "SIGNALLING_ONLY" | "RECV_ONLY";
        // Initial camera facing; defaults to "front".
        defaultCamera?: "front" | "back";
        // Title and message of the notification shown to the user in the meeting.
        notification?: {
            title: string;
            message: string;
        };
        metaData?: object;
    };
    // Auth token: a temporary dashboard token or one generated by your server.
    token: string;
    // When true, join() is invoked automatically without user interaction (default false).
    joinWithoutUserInteraction?: boolean;
    reinitialiseMeetingOnConfigChange?: boolean;
    deviceInfo?: object;
}): any;
/**
* @param children - Render child component.
* ---
* @param onParticipantJoined - This event callback is triggered when a new participant joins the meeting.
* ---
* @param onParticipantLeft - This event callback is triggered when a participant leaves the meeting.
* ---
* @param onSpeakerChanged -
* - This event will be emitted when the active speaker changes.
* - If you want to know which participant is actively speaking, then this event will be used.
* - If no participant is actively speaking, then this event will pass null as an event callback parameter.
* ---
* @param onPresenterChanged -
* - This event will be emitted when any participant starts or stops screen sharing.
* - It will pass participantId as an event callback parameter.
* - If a participant stops screensharing, then this event will pass null as an event callback parameter.
* ---
* @param onEntryRequested -
* - This event will be triggered when a new participant who is trying to join the meeting, is having permission ask_join in token.
* - This event will only be triggered to the participants in the meeting, who is having the permission allow_join in token.
* - This event will pass following parameters as an event parameters, participantId and name of the new participant who is trying to join the meeting, allow() and deny() to take required actions.
* ---
* @param onEntryResponded -
* - This event will be triggered when the join() request is responded.
* - This event will be triggered to the participants in the meeting, who is having the permission allow_join in token.
* - This event will be also triggered to the participant who requested to join the meeting.
* ---
* @param onMeetingJoined - This event callback is triggered when the local participant joins the meeting.
* ---
* @param onMeetingLeft - This event callback is triggered when the local participant leaves the meeting.
* ---
* @param onPausedAllStreams - This event will be emitted when all the consumer streams are paused successfully.
* ---
* @param onResumedAllStreams - This event will be emitted when all the consumer streams are resumed successfully.
* ---
* @param onRecordingStateChanged - This event will be emitted when the meeting's recording status changed.
* ---
* @param onLivestreamStateChanged - This event will be emitted when the meeting's livestream status changed.
* ---
* @param onHlsStateChanged - This event will be emitted when the meeting's HLS(Http Livestreaming) status changed.
* ---
* @param onWebcamRequested -
* - This event will be triggered to the participant B when any other participant A requests to enable webcam of participant B.
* - On accepting the request, webcam of participant B will be enabled.
* ---
* @param onMicRequested -
* - This event will be triggered to the participant B when any other participant A requests to enable mic of participant B.
* - On accepting the request, mic of participant B will be enabled.
* ---
* @param onError -
* - This event will be triggered when any error occurs.
* - It will pass code and message, as an event callback parameter.
* - To see all available error codes from SDK. [Meeting Error Codes](https://docs.videosdk.live/react-native/api/sdk-reference/error-codes)
* ---
* @param onMeetingStateChanged -
* - This event will be triggered when state of meeting changes.
* - It will pass state as an event callback parameter which will indicate current state of the meeting.
* - All available states are `CONNECTING`, `CONNECTED`, `FAILED`, `DISCONNECTED`, `CLOSING`, `CLOSED`.
* ---
* @param onParticipantModeChanged -
* - This event will be triggered when the participant's mode gets changed.
* - It will pass mode, as an event callback parameter.
* - **SEND_AND_RECV**: Both audio and video streams will be produced and consumed in this mode.
* - **RECV_ONLY**: Both audio and video streams will be only consumed in this mode.
* - **SIGNALLING_ONLY**: Audio and video streams will not be produced or consumed in this mode.
*/
export function MeetingConsumer({
    children,
    onParticipantJoined,
    onParticipantLeft,
    onSpeakerChanged,
    onPresenterChanged,
    onMainParticipantChanged,
    onEntryRequested,
    onEntryResponded,
    onRecordingStarted,
    onRecordingStopped,
    onChatMessage,
    onMeetingJoined,
    onMeetingLeft,
    onLiveStreamStarted,
    onLiveStreamStopped,
    onVideoStateChanged,
    onVideoSeeked,
    onWebcamRequested,
    onMicRequested,
    onPinStateChanged,
    onConnectionOpen,
    // NOTE: "onConnetionClose" is misspelled in the published API; kept as-is for
    // backward compatibility (renaming it would break existing callers).
    onConnetionClose,
    onSwitchMeeting,
    onError,
    onHlsStarted,
    onHlsStopped,
    onHlsStateChanged,
    onPausedAllStreams,
    onResumedAllStreams,
    onRecordingStateChanged,
    onLivestreamStateChanged,
    onMeetingStateChanged,
    onParticipantModeChanged,
}: {
    children: any;
    onParticipantJoined?: (participant: Participant) => void;
    onParticipantLeft?: (participant: Participant) => void;
    // Receives null when no participant is actively speaking.
    onSpeakerChanged?: (activeSpeakerId: string | null) => void;
    // Receives null when screen sharing stops.
    onPresenterChanged?: (presenterId: string | null) => void;
    onMainParticipantChanged?: (participant: Participant) => void;
    onEntryRequested?: ({
        participantId,
        name,
        allow,
        deny,
    }: {
        participantId: string;
        name: string;
        allow: () => void;
        deny: () => void;
    }) => void;
    onEntryResponded?: ({
        participantId,
        decision,
    }: {
        participantId: string;
        decision: string;
    }) => void;
    onRecordingStarted?: () => void;
    onRecordingStopped?: () => void;
    onChatMessage?: (data: {
        message: string;
        senderId: string;
        timestamp: string;
        senderName: string;
        payload: object;
    }) => void;
    onMeetingJoined?: () => void;
    onMeetingLeft?: () => void;
    onLiveStreamStarted?: () => void;
    onLiveStreamStopped?: () => void;
    onVideoStateChanged?: () => void;
    onVideoSeeked?: () => void;
    onWebcamRequested?: ({
        participantId,
        accept,
        reject,
    }: {
        participantId: string;
        accept: () => void;
        reject: () => void;
    }) => void;
    onMicRequested?: ({
        participantId,
        accept,
        reject,
    }: {
        participantId: string;
        accept: () => void;
        reject: () => void;
    }) => void;
    onPinStateChanged?: ({
        participantId,
        state,
        pinnedBy,
    }: {
        participantId: string;
        state: { share: boolean; cam: boolean };
        pinnedBy: string;
    }) => void;
    onConnectionOpen?: () => void;
    // Misspelled in the published API; see note above.
    onConnetionClose?: () => void;
    onSwitchMeeting?: () => void;
    onError?: ({ code, message }: { code: string; message: string }) => void;
    onHlsStarted?: ({ downstreamUrl }: { downstreamUrl: string }) => void;
    onHlsStopped?: () => void;
    onHlsStateChanged?: ({
        status,
        downstreamUrl,
        playbackHlsUrl,
        livestreamUrl,
    }: {
        status:
            | "HLS_STARTING"
            | "HLS_STARTED"
            | "HLS_PLAYABLE"
            | "HLS_STOPPING"
            | "HLS_STOPPED";
        downstreamUrl?: string;
        playbackHlsUrl?: string;
        livestreamUrl?: string;
    }) => void;
    onPausedAllStreams?: ({ kind }: { kind: "audio" | "video" | "share" | "shareAudio" | "all" }) => void;
    onResumedAllStreams?: ({ kind }: { kind: "audio" | "video" | "share" | "shareAudio" | "all" }) => void;
    onRecordingStateChanged?: ({
        status,
    }: {
        status:
            | "RECORDING_STARTING"
            | "RECORDING_STARTED"
            | "RECORDING_STOPPING"
            | "RECORDING_STOPPED";
    }) => void;
    onLivestreamStateChanged?: ({
        status,
    }: {
        status:
            | "LIVESTREAM_STARTING"
            | "LIVESTREAM_STARTED"
            | "LIVESTREAM_STOPPING"
            | "LIVESTREAM_STOPPED";
    }) => void;
    onMeetingStateChanged?: ({
        state,
    }: {
        state:
            | "CONNECTING"
            | "CONNECTED"
            | "FAILED"
            | "DISCONNECTED"
            | "CLOSING"
            | "CLOSED";
    }) => void;
    onParticipantModeChanged?: ({
        participantId,
        mode,
    }: {
        participantId: string;
        mode: "SEND_AND_RECV" | "SIGNALLING_ONLY" | "RECV_ONLY";
    }) => void;
}): any;
/**
*
* ---
* @param onAudioDeviceChanged - It's a callback which gets triggered whenever a media device such as a microphone, or speaker is connected to or removed from the system.
*
* **Code Example :**
* ```js
* function onAudioDeviceChanged(devices) {
* console.log("onAudioDeviceChanged", devices);
* }
* const {} = useMediaDevice({
* onAudioDeviceChanged
* });
* ```
* ---
* @returns This returns methods and events associated with media devices and permissions. You can refer to this [API Reference](https://docs.videosdk.live/react-native/api/sdk-reference/use-mediaDevice/introduction)
*/
export function useMediaDevice({
    onAudioDeviceChanged,
}?: {
    // NOTE(review): the `devices` argument is declared as a Promise here, so handlers
    // presumably need to await it — confirm against the SDK implementation.
    onAudioDeviceChanged?: (devices: Promise<Map<string, object>>) => void;
}): {
    /** Resolves with the connected media devices. */
    getDevices: () => Promise<Array<DeviceInfo>>;
    /** Resolves with the available audio devices. */
    getAudioDeviceList: () => Promise<Array<AudioDeviceInfo>>;
    /** Resolves with the available cameras. */
    getCameras: () => Promise<Array<CameraDeviceInfo>>;
    /** Checks the given media permission(s) without prompting the user. */
    checkPermission: (permission?: Permission) => Promise<Map<string, boolean>>;
    /** Prompts the user for the given media permission(s). */
    requestPermission: (permission?: Permission) => Promise<Map<string, boolean>>;
    checkBlueToothPermission: () => Promise<boolean>;
    requestBluetoothPermission: () => Promise<boolean>;
};
/**
*
* @param participantId - Id of the participant.
* ---
* @param onStreamEnabled - It's a callback which gets triggered whenever a participant's video, audio or screen share stream is enabled.
*
* **Code Example :**
* ```js
* function onStreamEnabled(stream) {
* console.log("onStreamEnabled", stream);
* }
* const { displayName } = useParticipant(participantId,{
* onStreamEnabled
* });
* ```
* ---
* @param onStreamDisabled - It's a callback which gets triggered whenever a participant's video, audio or screen share stream is disabled.
*
* **Code Example :**
* ```js
* function onStreamDisabled(stream) {
* console.log("onStreamDisabled", stream);
* }
* const { displayName } = useParticipant(participantId,{
* onStreamDisabled
* });
* ```
* ---
* @param onMediaStatusChanged - It's a callback which gets triggered whenever a participant's video or audio is disabled or enabled.
*
* **Code Example :**
* ```js
* function onMediaStatusChanged(stream) {
* const { kind, newStatus} = data;
* console.log("onMediaStatusChanged", kind, newStatus);
* }
* const { displayName } = useParticipant(participantId,{
* onMediaStatusChanged
* });
* ```
* ---
* @param onVideoQualityChanged -
* - It's a callback which gets triggered whenever a participant's video quality changes.
* - currentQuality and prevQuality can have values `HIGH` | `MEDIUM` | `LOW`.
*
* **Code Example :**
* ```js
* function onVideoQualityChanged(stream) {
* const { currentQuality, prevQuality } = data;
* console.log("onVideoQualityChanged", currentQuality, prevQuality );
* }
* const { displayName } = useParticipant(participantId,{
* onVideoQualityChanged
* });
* ```
* ---
* @param onE2EEStateChanged -
* - It's a callback which gets triggered To monitor encryption state changes for each participant's media stream
* - It returns the state of encryption/decryption along with the media kind (audio/video/share).
*
* **Code Example :**
* ```js
* function onE2EEStateChanged(stateInfo) {
* console.log("State Changed to", stateInfo.state, "for kind", stateInfo.kind);
* }
*
* const {displayName} = useParticipant(participantId, {
* onE2EEStateChanged,
* });
*
* ```
* ---
*
* @returns This will return particular participant properties and method. You can refer this [API Reference](https://docs.videosdk.live/react-native/api/sdk-reference/use-participant/introduction)
*/
export function useParticipant(
participantId: string,
{
onStreamEnabled,
onStreamDisabled,
onMediaStatusChanged,
onVideoQualityChanged,
onE2EEStateChanged,
}?: {
onStreamDisabled?: (stream: Stream) => void;
onStreamEnabled?: (stream: Stream) => void;
onMediaStatusChanged?: ({
kind,
peerId,
newStatus,
}: {
kind: "audio" | "video";
peerId: string;
newStatus: boolean;
}) => void;
onVideoQualityChanged?: ({
peerId,
prevQuality,
currentQuality,
}: {
peerId: string;
prevQuality: "low" | "med" | "high";
currentQuality: "low" | "med" | "high";
}) => void;
onE2EEStateChanged?: ({
state,
kind,
}: {
state: "EncryptionSuccess" | "EncryptionFailed" | "DecryptionSuccess" | "DecryptionFailed" | "InternalError" | "UnknownState",
kind: "audio" | "video" | "share" | "shareAudio";
})
}
): {
displayName: string;
participant: Participant;
webcamStream: Stream;
micStream: Stream;
screenShareStream: Stream;
screenShareAudioStream: Stream;
webcamOn: boolean;
micOn: boolean;
screenShareOn: boolean;
isLocal: boolean;
isActiveSpeaker: boolean;
isMainParticipant: boolean;
pinState: any;
mode: "SEND_AND_RECV" | "SIGNALLING_ONLY" | "RECV_ONLY";
consumeMicStreams: () => void;
consumeWebcamStreams: () => void;
stopConsumingMicStreams: () => void;
stopConsumingWebcamStreams: () => void;
setQuality: (quality: "low" | "med" | "high") => void;
setViewPort: (width: number, height: number) => void;
enableMic: () => void;
disableMic: () => void;
enableWebcam: () => void;
disableWebcam: () => void;
remove: () => void;
captureImage: ({
height,
width,
}: {
height?: number;
width?: number;
}) => Promise<string?>;
pin: (data: "SHARE_AND_CAM" | "CAM" | "SHARE") => void;
unpin: (data: "SHARE_AND_CAM" | "CAM" | "SHARE") => void;
switchTo: ({
meetingId,
payload,
token,
}: {
meetingId: string;
payload: string;
token: string;
}) => Promise<void>;
getAudioStats: () => Promise<
Array<{
bitrate: number;
rtt: number;
network: string;
codec: string;
jitter: number;
limitation: any;
totalPackets: number;
packetsLost: number;
concealmentEvents: number;
insertedSamplesForDecelaration: number;
removedSamplesForAccelaration: number;
size: any;
}>
>;
getVideoStats: () => Promise<
Array<{
bitrate: number;
rtt: number;
network: string;
codec: string;
jitter: number;
limitation: any;
totalPackets: number;
packetsLost: number;
concealmentEvents: number;
insertedSamplesForDecelaration: number;
removedSamplesForAccelaration: number;
size: any;
currentSpatialLayer: number;
currentTemporalLayer: number;
preferredSpatialLayer: number;
preferredTemporalLayer: number;
}>
>;
getShareStats: () => Promise<
Array<{
bitrate: number;
rtt: number;
network: string;
codec: string;
jitter: number;
limitation: any;
totalPackets: number;
packetsLost: number;
concealmentEvents: number;
insertedSamplesForDecelaration: number;
removedSamplesForAccelaration: number;
size: any;
currentSpatialLayer: number;
currentTemporalLayer: number;
preferredSpatialLayer: number;
preferredTemporalLayer: number;
}>
>;
};
/**
* @param onParticipantJoined - This event callback is triggered when a new participant joins the meeting.
* ---
* @param onParticipantLeft - This event callback is triggered when a participant leaves the meeting.
* ---
* @param onSpeakerChanged -
* - This event will be emitted when the active speaker changes.
* - If you want to know which participant is actively speaking, then this event will be used.
* - If no participant is actively speaking, then this event will pass null as an event callback parameter.
* ---
* @param onPresenterChanged -
* - This event will be emitted when any participant starts or stops screen sharing.
* - It will pass participantId as an event callback parameter.
* - If a participant stops screensharing, then this event will pass null as an event callback parameter.
* ---
* @param onEntryRequested -
* - This event will be triggered when a new participant who is trying to join the meeting, is having permission ask_join in token.
* - This event will only be triggered to the participants in the meeting, who is having the permission allow_join in token.
* - This event will pass following parameters as an event parameters, participantId and name of the new participant who is trying to join the meeting, allow() and deny() to take required actions.
* ---
* @param onEntryResponded -
* - This event will be triggered when the join() request is responded.
* - This event will be triggered to the participants in the meeting, who is having the permission allow_join in token.
* - This event will be also triggered to the participant who requested to join the meeting.
* ---
* @param onMeetingJoined - This event callback is triggered when the local participant joins the meeting.
* ---
* @param onMeetingLeft - This event callback is triggered when the local participant leaves the meeting.
* ---
* @param onPausedAllStreams - This event will be emitted when all the consumer streams are paused successfully.
* ---
* @param onResumedAllStreams - This event will be emitted when all the consumer streams are resumed successfully.
* ---
* @param onRecordingStateChanged - This event will be emitted when the meeting's recording status changed.
* ---
* @param onLivestreamStateChanged - This event will be emitted when the meeting's livestream status changed.
* ---
* @param onHlsStateChanged - This event will be emitted when the meeting's HLS(Http Livestreaming) status changed.
* ---
* @param onWebcamRequested -
* - This event will be triggered to the participant B when any other participant A requests to enable webcam of participant B.
* - On accepting the request, webcam of participant B will be enabled.
* ---
* @param onMicRequested -
* - This event will be triggered to the participant B when any other participant A requests to enable mic of participant B.
* - On accepting the request, mic of participant B will be enabled.
* ---
* @param onError -
* - This event will be triggered when any error occurs.
* - It will pass code and message, as an event callback parameter.
* - To see all available error codes from SDK. [Meeting Error Codes](https://docs.videosdk.live/react-native/api/sdk-reference/error-codes)
* ---
* @param onMeetingStateChanged -
* - This event will be triggered when state of meeting changes.
* - It will pass state as an event callback parameter which will indicate current state of the meeting.
* - All available states are `CONNECTING`, `CONNECTED`, `FAILED`, `DISCONNECTED`, `CLOSING`, `CLOSED`.
* ---
* @param onParticipantModeChanged -
* - This event will be triggered when the participant's mode gets changed.
* - It will pass mode, as an event callback parameter.
* - **SEND_AND_RECV**: Both audio and video streams will be produced and consumed in this mode.
* - **RECV_ONLY**: Both audio and video streams will be only consumed in this mode.
* - **SIGNALLING_ONLY**: Audio and video streams will not be produced or consumed in this mode.
*/
export function useMeeting({
    onParticipantJoined,
    onParticipantLeft,
    onSpeakerChanged,
    onPresenterChanged,
    onMainParticipantChanged,
    onEntryRequested,
    onEntryResponded,
    onRecordingStarted,
    onRecordingStopped,
    onChatMessage,
    onMeetingJoined,
    onMeetingLeft,
    onLiveStreamStarted,
    onLiveStreamStopped,
    onVideoStateChanged,
    onVideoSeeked,
    onWebcamRequested,
    onMicRequested,
    onPinStateChanged,
    onConnectionOpen,
    // NOTE: "onConnetionClose" is misspelled in the published API; kept as-is for
    // backward compatibility (renaming it would break existing callers).
    onConnetionClose,
    onSwitchMeeting,
    onError,
    onHlsStarted,
    onHlsStopped,
    onHlsStateChanged,
    onPausedAllStreams,
    onResumedAllStreams,
    onRecordingStateChanged,
    onLivestreamStateChanged,
    onMeetingStateChanged,
    onParticipantModeChanged,
    onMediaRelayStarted,
    onMediaRelayStopped,
    onMediaRelayError,
    onMediaRelayRequestResponse,
    onMediaRelayRequestReceived,
}?: {
    onParticipantJoined?: (participant: Participant) => void;
    onParticipantLeft?: (participant: Participant) => void;
    // Receives null when no participant is actively speaking.
    onSpeakerChanged?: (activeSpeakerId: string | null) => void;
    // Receives null when screen sharing stops.
    onPresenterChanged?: (presenterId: string | null) => void;
    onMainParticipantChanged?: (participant: Participant) => void;
    onEntryRequested?: ({
        participantId,
        name,
        allow,
        deny,
    }: {
        participantId: string;
        name: string;
        allow: () => void;
        deny: () => void;
    }) => void;
    onEntryResponded?: ({
        participantId,
        decision,
    }: {
        participantId: string;
        decision: string;
    }) => void;
    onRecordingStarted?: () => void;
    onRecordingStopped?: () => void;
    onChatMessage?: (data: {
        message: string;
        senderId: string;
        timestamp: string;
        senderName: string;
        payload: object;
    }) => void;
    onMeetingJoined?: () => void;
    onMeetingLeft?: () => void;
    onLiveStreamStarted?: () => void;
    onLiveStreamStopped?: () => void;
    onVideoStateChanged?: () => void;
    onVideoSeeked?: () => void;
    onWebcamRequested?: ({
        participantId,
        accept,
        reject,
    }: {
        participantId: string;
        accept: () => void;
        reject: () => void;
    }) => void;
    onMicRequested?: ({
        participantId,
        accept,
        reject,
    }: {
        participantId: string;
        accept: () => void;
        reject: () => void;
    }) => void;
    onPinStateChanged?: ({
        participantId,
        state,
        pinnedBy,
    }: {
        participantId: string;
        state: { share: boolean; cam: boolean };
        pinnedBy: string;
    }) => void;
    onConnectionOpen?: () => void;
    // Misspelled in the published API; see note above.
    onConnetionClose?: () => void;
    onSwitchMeeting?: () => void;
    onError?: ({ code, message }: { code: string; message: string }) => void;
    onHlsStarted?: ({ downstreamUrl }: { downstreamUrl: string }) => void;
    onHlsStopped?: () => void;
    onHlsStateChanged?: ({
        status,
        downstreamUrl,
        playbackHlsUrl,
        livestreamUrl,
    }: {
        status:
            | "HLS_STARTING"
            | "HLS_STARTED"
            | "HLS_PLAYABLE"
            | "HLS_STOPPING"
            | "HLS_STOPPED";
        downstreamUrl?: string;
        playbackHlsUrl?: string;
        livestreamUrl?: string;
    }) => void;
    onPausedAllStreams?: ({ kind }: { kind: "audio" | "video" | "share" | "shareAudio" | "all" }) => void;
    onResumedAllStreams?: ({ kind }: { kind: "audio" | "video" | "share" | "shareAudio" | "all" }) => void;
    onRecordingStateChanged?: ({
        status,
    }: {
        status:
            | "RECORDING_STARTING"
            | "RECORDING_STARTED"
            | "RECORDING_STOPPING"
            | "RECORDING_STOPPED";
    }) => void;
    onLivestreamStateChanged?: ({
        status,
    }: {
        status:
            | "LIVESTREAM_STARTING"
            | "LIVESTREAM_STARTED"
            | "LIVESTREAM_STOPPING"
            | "LIVESTREAM_STOPPED";
    }) => void;
    onMeetingStateChanged?: ({
        state,
    }: {
        state:
            | "CONNECTING"
            | "CONNECTED"
            | "FAILED"
            | "DISCONNECTED"
            | "CLOSING"
            | "CLOSED";
    }) => void;
    onParticipantModeChanged?: ({
        participantId,
        mode,
    }: {
        participantId: string;
        mode: "SEND_AND_RECV" | "SIGNALLING_ONLY" | "RECV_ONLY";
    }) => void;
    onMediaRelayStarted?: ({
        meetingId
    }: {
        meetingId: string
    }) => void;
    onMediaRelayStopped?: ({
        meetingId,
        reason
    }: {
        meetingId: string;
        reason: string;
    }) => void;
    onMediaRelayError?: ({
        meetingId,
        error
    }: {
        meetingId: string;
        error: string;
    }) => void;
    onMediaRelayRequestResponse?: ({
        decision,
        decidedBy,
        meetingId
    }: {
        decision: "accepted" | "declined";
        decidedBy: string;
        meetingId: string;
    }) => void;
    onMediaRelayRequestReceived?: ({
        participantId,
        meetingId,
        displayName,
        accept,
        reject
    }: {
        participantId: string;
        meetingId: string;
        displayName: string;
        accept: () => void;
        reject: () => void;
    }) => void;
}): {
    meetingId: string;
    meeting: Meeting;
    localParticipant: Participant;
    activeSpeakerId: string;
    participants: Map<string, Participant>;
    characters: Map<string, Character>;
    pinnedParticipants: Map<
        string,
        {
            cam: boolean;
            share: boolean;
        }
    >;
    presenterId: string;
    e2eeEnabled: boolean;
    localMicOn: boolean;
    localWebcamOn: boolean;
    isRecording: boolean;
    recordingState: string;
    livestreamState: string;
    hlsState: string;
    hlsUrls: {
        downstreamUrl: string;
        playbackHlsUrl: string;
        livestreamUrl: string;
    };
    localScreenShareOn: boolean;
    connections: Map<string, Connection>;
    join: () => void;
    leave: () => void;
    end: () => void;
    unmuteMic: (customAudioTrack?: MediaStream | undefined) => void;
    muteMic: () => void;
    toggleMic: (customAudioTrack?: MediaStream | undefined) => void;
    enableWebcam: (customVideoTrack?: MediaStream | undefined) => void;
    disableWebcam: () => void;
    toggleWebcam: (customVideoTrack?: MediaStream | undefined) => void;
    enableScreenShare: (customScreenShareTrack?: MediaStream | undefined) => void;
    disableScreenShare: () => void;
    toggleScreenShare: (customScreenShareTrack?: MediaStream | undefined) => void;
    pauseAllStreams: (kind?: "audio" | "video" | "share" | "shareAudio" | "all") => void;
    resumeAllStreams: (kind?: "audio" | "video" | "share" | "shareAudio" | "all") => void;
    startRecording: (
        webhookUrl?: string,
        awsDirPath?: string,
        config?: {
            layout: {
                type: "GRID" | "SPOTLIGHT" | "SIDEBAR";
                priority: "SPEAKER" | "PIN";
                gridSize: number;
            };
            orientation: "landscape" | "portrait";
            theme: "DEFAULT" | "DARK" | "LIGHT";
            quality: "low" | "med" | "high";
            mode: "video-and-audio" | "audio";
        }
    ) => void;
    stopRecording: () => void;
    switchTo: ({
        meetingId,
        token
    }: {
        meetingId: string;
        token?: string;
    }) => void;
    startLiveStream: (
        outputs: Array<{
            url: string;
            streamKey: string;
        }>,
        config?: {
            layout: {
                type: "GRID" | "SPOTLIGHT" | "SIDEBAR";
                priority: "SPEAKER" | "PIN";
                gridSize: number;
            };
            theme: "DEFAULT" | "DARK" | "LIGHT";
        }
    ) => void;
    stopLiveStream: () => void;
    startHls: (config?: {
        layout: {
            type: "GRID" | "SPOTLIGHT" | "SIDEBAR";
            priority: "SPEAKER" | "PIN";
            gridSize: number;
        };
        orientation: "landscape" | "portrait";
        theme: "DEFAULT" | "DARK" | "LIGHT";
        quality: "low" | "med" | "high";
        mode: "video-and-audio" | "audio";
    }) => void;
    stopHls: () => void;
    getMics: () => Promise<
        Array<{
            deviceId: string;
            label: string;
        }>
    >;
    getWebcams: () => Promise<
        Array<{
            deviceId: string;
            label: string;
            facingMode: "environment" | "user";
        }>
    >;
    changeMic: (object?: string | MediaStream) => void;
    changeWebcam: (object?: string | MediaStream) => void;
    startVideo: ({ link }: { link: string }) => void;
    stopVideo: () => void;
    pauseVideo: ({ currentTime }: { currentTime: number }) => void;
    resumeVideo: () => void;
    seekVideo: ({ currentTime }: { currentTime: number }) => void;
    connectTo: ({
        meetingId,
        payload,
    }: {
        meetingId: string;
        payload: string;
    }) => void;
};
/**
*
* @param topic - Represents the topic for which you are publishing and getting a message.
* ---
* @param onMessageReceived - This will be triggered when a new message is published for the subscribed topic, with the message object.
* ---
* @param onOldMessagesReceived - This will be triggered once when you subscribe to the topic and will receive all the old messages present for that topic as an array of message objects.
* ---
* @returns This will return `message` properties and `publish()` method. You can refer this [API Reference](https://docs.videosdk.live/react-native/api/sdk-reference/use-pubsub#returns)
* ---
* **usePubSub example**
* ```js
* var topic = 'CHAT';
*
* function onMessageReceived(message) {
* console.log('New Message:', message);
* }
*
* function onOldMessagesReceived(messages) {
* console.log('Old Messages:', messages);
* }
*
* const {publish, messages} = usePubSub(topic, {
* onMessageReceived,
* onOldMessagesReceived,
* });
* ```
*/
export function usePubSub(
topic: string,
{
onMessageReceived,
onOldMessagesReceived,
}?: {
onMessageReceived?: (message: {
id: string;
message: string;
senderId: string;
senderName: string;
timestamp: string;
topic: string;
payload: object;
}) => void;
onOldMessagesReceived?: (
messages: Array<{
id: string;
message: string;
senderId: string;
senderName: string;
timestamp: string;
topic: string;
payload: object;
}>
) => void;
}
): {
publish: (
message: string,
{
persist,
sendOnly,
}: {
persist: boolean;
sendOnly: Array<String>;
},
payload: object
) => void;
messages: Array<{
id: string;
message: string;
senderId: string;
senderName: string;
timestamp: string;
topic: string;
payload: object;
}>;
};
/**
 * Hook exposing whiteboard controls for the current meeting.
 * NOTE(review): like the other hooks here, this presumably must be used inside
 * the meeting context/provider — confirm against the SDK setup docs.
 *
 * @returns - This will return `startWhiteboard()`, `stopWhiteboard()` and `whiteboardUrl`.
 * ---
 * **useWhiteboard example**
 * ```javascript
 * const { startWhiteboard, stopWhiteboard, whiteboardUrl } = useWhiteboard();
 *
 * async function handleStartWhiteboard() {
 *   await startWhiteboard();
 * }
 *
 * async function handleStopWhiteboard() {
 *   await stopWhiteboard();
 * }
 * ```
 */
export function useWhiteboard(): {
  /**
   * @description Starts the whiteboard for the meeting.
   */
  startWhiteboard: () => Promise<void>;
  /**
   * @description Stops the whiteboard session for the meeting.
   */
  stopWhiteboard: () => Promise<void>;
  /**
   * @description The URL of the active whiteboard, or `null` if the whiteboard is not currently active.
   */
  whiteboardUrl: string | null;
};
/**
 * @param onTranscriptionStateChanged - This will be triggered when the realtime transcription state changes.
 * ---
 * @param onTranscriptionText - This will be triggered when realtime transcription text is published.
 * ---
 * @returns This will return the `startTranscription()` and `stopTranscription()` methods. You can refer to this [API Reference](https://docs.videosdk.live/react/api/sdk-reference/use-transcription#returns)
 * ---
 * **useTranscription example**
 * ```js
 *
 * function onTranscriptionStateChanged(data) {
 *   console.log('New State Payload:', data)
 * }
 *
 * function onTranscriptionText(data) {
 *   console.log('Transcription Text Payload:', data);
 * }
 *
 * const { startTranscription, stopTranscription } = useTranscription({
 *   onTranscriptionStateChanged,
 *   onTranscriptionText,
 * });
 *
 * async function handleStartTranscription() {
 *   await startTranscription({});
 * }
 *
 * async function handleStopTranscription() {
 *   await stopTranscription();
 * }
 * ```
 */
export function useTranscription({
  onTranscriptionStateChanged,
  onTranscriptionText,
}?: {
  onTranscriptionStateChanged?: (data: { id: string; status: string }) => void;
  onTranscriptionText?: (data: {
    participantId: string;
    participantName: string;
    text: string;
    timestamp: string;
    type: "realtime";
  }) => void;
}): {
  /**
   * @description This method is used to start the meeting transcription.
   * Note the config object itself is required; every field inside it is optional.
   * @param config.webhookUrl? Webhook URL which will be called by VideoSDK when the transcription state gets changed
   * @param config.modelConfig? modelConfig if any, which will be used while doing transcription
   * @param config.summary.enabled Enables or disables summary generation from realtime transcriptions.
   * @param config.summary.prompt Guides summary generation (optional).
   */
  startTranscription: ({
    webhookUrl,
    modelConfig,
    summary,
  }: {
    webhookUrl?: string;
    modelConfig?: object;
    summary?: {
      enabled: boolean;
      prompt?: string;
    };
  }) => void;
  stopTranscription: () => void;
};
/**
 * @param discardFrameWhenCryptorNotReady - If true, media frames will be discarded when the cryptor is not ready.
 * ---
 * @returns This hook returns `setSharedKey()` and the underlying `rtcKeyProvider`
 * instance for advanced key-management use cases.
 * ---
 * **useKeyProvider example**
 * ```js
 * const keyProvider = useKeyProvider({
 *   discardFrameWhenCryptorNotReady: true,
 * });
 *
 * await keyProvider.setSharedKey("secure-key");
 * ```
 */
export function useKeyProvider(options?: {
  discardFrameWhenCryptorNotReady?: boolean;
}): {
  /**
   * @description Sets a shared encryption key for all participants.
   * @param key - Encryption key
   */
  setSharedKey: (key: string) => Promise<void>;
  /**
   * @description Underlying RTCKeyProvider instance exposed for advanced use cases
   * (e.g. per-participant keys) — NOTE(review): typed `any` upstream; consult the
   * RTCKeyProvider API docs before relying on its shape.
   */
  rtcKeyProvider: any;
};
/**
 * Interaction modes a character (AI agent) can be configured with.
 * Values mirror the `characterMode` strings accepted by `useCharacter`.
 */
export enum CharacterMode {
  TEXT = "text",
  CO_PILOT = "co_pilot",
  AUTO_PILOT = "auto_pilot",
  VISION_PILOT = "vision_pilot",
}
/**
 * Runtime states reported for a character via `onCharacterStateChanged`
 * and the `characterState` field returned by `useCharacter`.
 */
export enum CharacterState {
  CHARACTER_SPEAKING = "CHARACTER_SPEAKING",
  CHARACTER_THINKING = "CHARACTER_THINKING",
  CHARACTER_LISTENING = "CHARACTER_LISTENING",
}
/**
 * @param onCharacterStateChanged - This will be triggered when the character state changes.
 * ---
 * @param onCharacterMessage - This will be triggered when a character response/message is published.
 * ---
 * @param onCharacterJoined - It's a callback which gets triggered when the character joins the meeting.
 * ---
 * @param onCharacterLeft - It's a callback which gets triggered when the character leaves the meeting.
 * ---
 * @param onStreamEnabled - It's a callback which gets triggered whenever a character's video, audio or screen share stream is enabled.
 * ---
 * @param onStreamDisabled - It's a callback which gets triggered whenever a character's video, audio or screen share stream is disabled.
 * ---
 * @param onMediaStatusChanged - It's a callback which gets triggered whenever a character's video or audio is disabled or enabled.
 * ---
 * @param onVideoQualityChanged -
 * - It's a callback which gets triggered whenever a character's video quality changes.
 * - currentQuality and prevQuality can have values `low` | `med` | `high`.
 * ---
 * @returns This will return the `character` object.
 * ---
 * **useCharacter example**
 * ```js
 *
 * function onCharacterStateChanged(data) {
 *   console.log('New State Payload:', data)
 * }
 *
 * function onCharacterMessage(data) {
 *   console.log('character message Payload:', data);
 * }
 *
 * const { join, leave, sendMessage, interrupt } = useCharacter(
 *   {
 *     interactionId,
 *     // OR
 *     id,
 *     displayName,
 *     characterRole,
 *     characterMode,
 *     knowledgeBases,
 *   },
 *   {
 *     onCharacterStateChanged,
 *     onCharacterMessage,
 *     onCharacterJoined,
 *     onCharacterLeft,
 *
 *     onStreamEnabled,
 *     onStreamDisabled,
 *     onMediaStatusChanged,
 *     onVideoQualityChanged
 *   }
 * );
 *
 * async function joinCharacter() {
 *   await join();
 * }
 *
 * async function removeCharacter() {
 *   await leave();
 * }
 * ```
 */
export function useCharacter(
  {
    interactionId,
    // OR
    id,
    displayName,
    characterRole,
    characterMode,
    knowledgeBases,
  }: {
    interactionId: string;
    // OR
    id: string;
    displayName: string;
    characterRole: string;
    characterMode: "text" | "co_pilot" | "auto_pilot" | "vision_pilot";
    knowledgeBases: string[];
  },
  {
    onCharacterStateChanged,
    onCharacterMessage,
    onStreamEnabled,
    onStreamDisabled,
    onMediaStatusChanged,
    onVideoQualityChanged,
  }: {
    onCharacterStateChanged?: (data: {
      id: string;
      status: CharacterState;
    }) => void;
    onCharacterMessage?: (data: {
      participantId: string;
      participantName: string;
      text: string;
      timestamp: number;
    }) => void;
    onCharacterJoined?: () => void;
    onCharacterLeft?: () => void;
    onStreamDisabled?: (stream: Stream) => void;
    onStreamEnabled?: (stream: Stream) => void;
    onMediaStatusChanged?: ({
      kind,
      peerId,
      newStatus,
    }: {
      kind: "audio" | "video";
      peerId: string;
      newStatus: boolean;
    }) => void;
    onVideoQualityChanged?: ({
      peerId,
      prevQuality,
      currentQuality,
    }: {
      peerId: string;
      prevQuality: "low" | "med" | "high";
      currentQuality: "low" | "med" | "high";
    }) => void;
  }
): {
  displayName: string;
  webcamStream: Stream;
  micStream: Stream;
  webcamOn: boolean;
  micOn: boolean;
  isActiveSpeaker: boolean;
  interactionId?: string;
  id?: string;
  characterMode?: "text" | "co_pilot" | "auto_pilot" | "vision_pilot";
  characterState?:
    | "CHARACTER_SPEAKING"
    | "CHARACTER_THINKING"
    | "CHARACTER_LISTENING";
  knowledgeBases?: string[];
  enableMic: () => void;
  disableMic: () => void;
  enableWebcam: () => void;
  disableWebcam: () => void;
  join: () => Promise<void>;
  leave: () => Promise<void>;
  sendMessage: (text: string) => Promise<void>;
  interrupt: () => Promise<void>;
};
/**
 * Hook for uploading and fetching files as base64 payloads via VideoSDK.
 *
 * @returns `uploadBase64File()` and `fetchBase64File()`.
 */
export function useFile(): {
  /**
   * @description Uploads the given base64-encoded data as a file.
   * @param config.base64Data - Base64-encoded file contents.
   * @param config.token - Auth token used for the upload request.
   * @param config.fileName - Name to store the file under.
   * @returns The uploaded file's URL, or `null` on failure.
   * NOTE(review): the original declaration used the invalid type `string?`;
   * `string | null` is assumed here — confirm against the runtime implementation.
   */
  uploadBase64File: ({
    base64Data,
    token,
    fileName,
  }: {
    base64Data: string;
    token: string;
    fileName: string;
  }) => Promise<string | null>;
  /**
   * @description Fetches the file at `url` and returns its contents as base64.
   * @param config.url - URL of the file to fetch.
   * @param config.token - Auth token used for the fetch request.
   * @returns The file contents as a base64 string, or `null` on failure.
   */
  fetchBase64File: ({
    url,
    token,
  }: {
    url: string;
    token: string;
  }) => Promise<string | null>;
};
/**
 * @param microphoneId - It will be the id of the mic from which the audio should be captured.
 * ---
 * @param encoderConfig - This will accept the voice profile you want to capture.
 * You can check out all values [here](https://docs.videosdk.live/react-native/api/sdk-reference/custom-tracks#parameters-1)
 *
 * #### Example : `speech_standard`, `high_quality`, `music_standard`
 * ---
 * @param noiseConfig - You can provide different noise configurations
 * ---
 * @param noiseConfig.noiseSuppression - It is used to improve the quality of audio by removing background noise that can interfere with the clarity of speech.
 * ---
 * @param noiseConfig.echoCancellation - It is used to remove unwanted echoes from voice.
 * ---
 * @param noiseConfig.autoGainControl - It is used to maintain a consistent level of loudness or amplitude in a voice.
 * ---
 *
 * **Code Example**
 * ```js
 * import { createMicrophoneAudioTrack } from "@videosdk.live/react-native-sdk";
 *
 * let customTrack = await createMicrophoneAudioTrack({
 *   microphoneId : 'mic-id', // OPTIONAL
 *   encoderConfig: "speech_standard", // `high_quality` | `music_standard`, Default : `speech_standard`
 *   noiseConfig: {
 *     noiseSuppression: true,
 *     echoCancellation: true,
 *     autoGainControl: true,
 *   },
 * });
 * ```
 */
export function createMicrophoneAudioTrack({
  noiseConfig,
  encoderConfig,
  microphoneId,
}: {
  noiseConfig?:
    | {
        echoCancellation: boolean;
        autoGainControl: boolean;
        noiseSuppression: boolean;
      }
    | undefined;
  encoderConfig?:
    | "speech_low_quality"
    | "speech_standard"
    | "music_standard"
    | "standard_stereo"
    | "high_quality"
    | "high_quality_stereo"
    | undefined;
  microphoneId?: string | undefined;
}): Promise<MediaStream>;
/**
 * @param cameraId - It will be the id of the camera from which the video should be captured.
 * ---
 * @param encoderConfig - This will accept the resolution (height x width) of video you want to capture.
 * You can check out all values [here](https://docs.videosdk.live/react-native/api/sdk-reference/custom-tracks#parameters)
 *
 * #### Example : `h360p_w640p`, `h720p_w1280p`, `h1080p_w1440p`
 * ---
 * @param facingMode - For Mobile browser it will specify whether to use the front or back camera for the video track.
 *
 * #### Values : `user`, `environment`
 * ---
 * @param optimizationMode - It will specify the optimization mode for the video track being generated.
 *
 * #### Values : `motion`, `text`, `detail`
 *
 * ---
 * @param multiStream - It will specify if the stream should send multiple resolution layers or a single resolution layer. Please refer to this link for more understanding
 * [What is multiStream ?](https://docs.videosdk.live/react-native/guide/video-and-audio-calling-api-sdk/render-media/optimize-video-track#what-is-multistream)
 * ---
 *
 * **Code Example**
 * ```js
 * import { createCameraVideoTrack } from "@videosdk.live/react-native-sdk";
 *
 * let customTrack = await createCameraVideoTrack({
 *   cameraId:"camera-id", // OPTIONAL
 *   optimizationMode: "motion", // "text" | "detail", Default : "motion"
 *   encoderConfig: "h480p_w640p", // "h540p_w960p" | "h720p_w1280p" ... // Default : "h360p_w640p"
 *   facingMode: "environment", // "user", Default : "environment"
 *   multiStream:true // false, Default : true
 * });
 * ```
 */
export function createCameraVideoTrack({
  cameraId,
  encoderConfig,
  facingMode,
  optimizationMode,
  multiStream,
}: {
  cameraId?: string | undefined;
  encoderConfig?:
    | "h90p_w160p"
    | "h180p_w320p"
    | "h216p_w384p"
    | "h360p_w640p"
    | "h360p_w640p_150kbps"
    | "h540p_w960p"
    | "h720p_w1280p"
    | "h1080p_w1920p"
    | "h1440p_w2560p"
    | "h2160p_w3840p"
    | "h120p_w160p"
    | "h180p_w240p"
    | "h240p_w320p"
    | "h360p_w480p"
    | "h480p_w640p"
    | "h540p_w720p"
    | "h720p_w960p"
    | "h1080p_w1440p"
    | "h1440p_w1920p"
    | undefined;
  facingMode?: "user" | "environment" | undefined;
  optimizationMode?: "text" | "motion" | "detail" | undefined;
  multiStream?: boolean | undefined;
}): Promise<MediaStream>;
/**
 * Lists available audio device names.
 * @deprecated NOTE(review): no replacement is named here — presumably the
 * device-enumeration APIs on the meeting object (e.g. `getMics`) supersede
 * this; confirm against the SDK docs.
 */
export function getAudioDeviceList(): Promise<string[]>;
/**
 * Switches the active audio route to the given device kind.
 * @param device - One of the supported audio output routes.
 */
export function switchAudioDevice(
  device: "SPEAKER_PHONE" | "EARPIECE" | "WIRED_HEADSET" | "BLUETOOTH"
): Promise<void>;
export function register(): Promise<void>;
export const Constants: {
errors: {
INVALID_API_KEY: number;
INVALID_TOKEN: number;
INVALID_MEETING_ID: number;
INVALID_PARTICIPANT_ID: number;
DUPLICATE_PARTICIPANT: number;
ACCOUNT_DEACTIVATED: number;
ACCOUNT_DISCONTINUED: number;
INVALID_PERMISSIONS: number;
MAX_PARTCIPANT_REACHED: number;
MAX_SPEAKER_REACHED: number;
START_RECORDING_FAILED: number;
STOP_RECORDING_FAILED: number;
START_LIVESTREAM_FAILED: number;
STOP_LIVESTREAM_FAILED: number;
INVALID_LIVESTREAM_CONFIG: number;
START_HLS_FAILED: number;
STOP_HLS_FAILED: number;
RECORDING_FAILED: number;
LIVESTREAM_FAILED: number;
HLS_FAILED: number;
ERROR_GET_VIDEO_MEDIA: number;
ERROR_GET_AUDIO_MEDIA: number;
ERROR_GET_DISPLAY_MEDIA: number;
ERROR_GET_VIDEO_MEDIA_PERMISSION_DENIED: number;
ERROR_GET_AUDIO_MEDIA_PERMISSION_DENIED: number;
ERROR_GET_DISPLAY_MEDIA_PERMISSION_DENIED: number;
ERROR_CUSTOM_VIDEO_TRACK_ENDED: number;
ERROR_CUSTOM_AUDIO_TRACK_ENDED: number;
ERROR_INVALID_CUSTOM_VIDEO_TRACK: number;
ERROR_INVALID_CUSTOM_AUDIO_TRACK: number;
ERROR_ACTION_PERFORMED_BEFORE_MEETING_JOINED: number;
ERROR_RN_CAMERA_ACCESS_DENIED_OR_DISMISSED: number;
ERROR_RN_CAMERA_NOT_FOUND: number;
ERROR_RN_MIC_ACCESS_DENIED_OR_DISMISSED: number;
ERROR_RN_MIC_NOT_FOUND: number;
ERROR_RN_CAMERA_ACCESS_UNAVAILABLE: number;
ERROR_RN_MIC_ACCESS_UNAVAILABLE: number;
ERROR_RN_CAMERA_TRACK_ENDED: number;
ERROR_RN_MIC_TRACK_ENDED: number;
};
recordingEvents: {
RECORDING_STARTING: string;
RECORDING_STARTED: string;
RECORDING_STOPPING: string;
RECORDING_STOPPED: string;
};
livestreamEvents: {
LIVESTREAM_STARTING: string;
LIVESTREAM_STARTED: string;
LIVESTREAM_STOPPING: string;
LIVESTREAM_STOPPED: string;
};
transcriptionEvents: {
TRANSCRIPTION_STARTING: string;
TRANSCRIPTION_STARTED: string;
TRANSCRIPTION_STOPPING: string;
TRANSCRIPTION_STOPPED: string;
};
hlsEvents: {
HLS_STARTING: string;
HLS_STARTED: string;
HLS_PLAYABLE: string;
HLS_STOPPING: string;
HLS_STOPPED: string;
};
modes: {