// @azure/communication-react
// Version: (unspecified)
// React library for building modern communication user experiences utilizing Azure Communication Services
// 627 lines • 38.1 kB — JavaScript
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// TypeScript-emitted helper that down-levels `async`/`await` into a generator-driving
// Promise loop. Reuses a pre-existing `__awaiter` on `this` if one is already defined.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    // Wrap non-promise yielded values so every step can be uniformly `.then`-ed.
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        // Drive the generator: resolve when done, otherwise await the yielded value and continue.
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
import { LocalVideoStream } from '@azure/communication-calling';
import { toFlatCommunicationIdentifier } from "../../../acs-ui-common/src";
import { _toCommunicationIdentifier } from "../../../acs-ui-common/src";
import memoizeOne from 'memoize-one';
import { disposeAllLocalPreviewViews, _isInCall, _isInLobbyOrConnecting, _isPreviewOn, getCallStateIfExist } from '../utils/callUtils';
import { Features } from '@azure/communication-calling';
/**
 * Compare two local video streams by the id of their backing camera source.
 * Returns false when either stream is missing.
 *
 * @private
 */
export const areStreamsEqual = (prevStream, newStream) => {
    if (!prevStream || !newStream) {
        return false;
    }
    return prevStream.source.id === newStream.source.id;
};
/**
 * Create the common implementation of {@link CallingHandlers} for all types of Call.
 *
 * Memoized via `memoizeOne`, so repeated invocations with the same
 * (callClient, deviceManager, call, options) tuple return the same handlers object,
 * giving React consumers stable callback identities.
 *
 * @private
 */
export const createDefaultCommonCallingHandlers = memoizeOne((callClient, deviceManager, call, options) => {
// Starts local camera video on the current call, selecting a camera first if none is chosen.
const onStartLocalVideo = () => __awaiter(void 0, void 0, void 0, function* () {
    // Before the call object creates a stream, dispose of any local preview streams.
    // @TODO: is there any way to parent the unparented view to the call object instead
    // of disposing and creating a new stream?
    yield disposeAllLocalPreviewViews(callClient);
    const callId = call?.id;
    let videoDeviceInfo = callClient.getState().deviceManager.selectedCamera;
    if (!videoDeviceInfo) {
        // No camera selected yet: fall back to the first available camera and remember it.
        const cameras = yield deviceManager?.getCameras();
        videoDeviceInfo = cameras && cameras.length > 0 ? cameras[0] : undefined;
        if (videoDeviceInfo) {
            deviceManager?.selectCamera(videoDeviceInfo);
        }
    }
    if (!callId || !videoDeviceInfo) {
        return;
    }
    const stream = new LocalVideoStream(videoDeviceInfo);
    // Only start video when an equivalent stream is not already active on the call.
    if (call && !call.localVideoStreams.find((s) => areStreamsEqual(s, stream))) {
        yield call.startVideo(stream);
    }
});
// Stops the given local video stream, but only when it is attached to an active call.
const onStopLocalVideo = (stream) => __awaiter(void 0, void 0, void 0, function* () {
    if (!call?.id) {
        return;
    }
    const matching = call.localVideoStreams.find((s) => areStreamsEqual(s, stream));
    if (matching) {
        yield call.stopVideo(stream);
    }
});
// Toggles the local camera. When in (or joining) a call this starts/stops call video;
// otherwise it creates/disposes an unparented preview view for the configuration page.
const onToggleCamera = (options) => __awaiter(void 0, void 0, void 0, function* () {
const previewOn = _isPreviewOn(callClient.getState().deviceManager);
// the disposal of the unparented views is to workaround: https://skype.visualstudio.com/SPOOL/_workitems/edit/3030558.
// The root cause of the issue is caused by never transitioning the unparented view to the
// call object when going from configuration page (disconnected call state) to connecting.
//
// Currently the only time the local video stream is moved from unparented view to the call
// object is when we transition from connecting -> call state. If the camera was on,
// inside the MediaGallery we trigger toggleCamera. This triggers onStartLocalVideo which
// destroys the unparentedView and creates a new stream in the call - so all looks well.
//
// However, if someone turns off their camera during the lobbyOrConnecting screen, the
// call.localVideoStreams will be empty (as the stream is currently stored in the unparented
// views and was never transitioned to the call object) and thus we incorrectly try to create
// a new video stream for the call object, instead of only stopping the unparented view.
//
// The correct fix for this is to ensure that callAgent.onStartCall is called with the
// localvideostream as a videoOption. That will mean call.onLocalVideoStreamsUpdated will
// be triggered when the call is in connecting state, which we can then transition the
// local video stream to the stateful call client and get into a clean state.
if (call && (_isInCall(call.state) || _isInLobbyOrConnecting(call.state))) {
const stream = call.localVideoStreams.find(stream => stream.mediaStreamType === 'Video');
const unparentedViews = callClient.getState().deviceManager.unparentedViews;
// Camera is considered "on" if either the call has a video stream or a preview view exists.
if (stream || unparentedViews.length > 0) {
unparentedViews.forEach(view => view.mediaStreamType === 'Video' && callClient.disposeView(undefined, undefined, view));
if (stream) {
yield onStopLocalVideo(stream);
}
}
else {
yield onStartLocalVideo();
}
}
else {
/**
 * This will create a unparented view to be used on the configuration page and the connecting screen
 *
 * If the device that the stream will come from is not on from permissions checks, then it will take time
 * to create the stream since device is off. If we are turn the camera on immedietly on the configuration page we see it is
 * fast but that is because the device is already primed to return a stream.
 *
 * On the connecting page the device has already turned off and the connecting window is so small we do not see the resulting
 * unparented view from the code below.
 */
const selectedCamera = callClient.getState().deviceManager.selectedCamera;
if (selectedCamera) {
if (previewOn) {
yield onDisposeLocalStreamView();
}
else {
yield callClient.createView(undefined, undefined, {
source: selectedCamera,
mediaStreamType: 'Video'
}, options);
}
}
}
});
// Selects the given microphone device; no-op when no device manager is available.
const onSelectMicrophone = (device) => __awaiter(void 0, void 0, void 0, function* () {
    if (deviceManager) {
        return deviceManager.selectMicrophone(device);
    }
});
// Selects the given speaker device; no-op when no device manager is available.
const onSelectSpeaker = (device) => __awaiter(void 0, void 0, void 0, function* () {
    if (deviceManager) {
        return deviceManager.selectSpeaker(device);
    }
});
// Selects the given camera. In-call this switches the active stream's source (with a
// throttling workaround); out of call it recreates the local preview view if one was showing.
const onSelectCamera = (device, options) => __awaiter(void 0, void 0, void 0, function* () {
if (!deviceManager) {
return;
}
if (call && _isInCall(call.state)) {
deviceManager.selectCamera(device);
const stream = call.localVideoStreams.find(stream => stream.mediaStreamType === 'Video');
yield (stream === null || stream === void 0 ? void 0 : stream.switchSource(device));
yield Promise.all([
/// TODO: TEMPORARY SOLUTION
/// The Calling SDK needs to wait until the stream is ready before resolving the switchSource promise.
/// This is a temporary solution to wait for the stream to be ready before resolving the promise.
/// This allows the onSelectCamera to be throttled to prevent the streams from getting in to a frozen state
/// if the user switches cameras too rapidly.
/// This is to be removed once the Calling SDK has issued a fix.
stream === null || stream === void 0 ? void 0 : stream.getMediaStream(),
// An extra wait here is also necessary to prevent the remote stream freezing issue.
// If this is removed, please test switching cameras rapidly won't cause stream to freeze for remote users.
// When this mitigation was introduced, the repro interval time that caused the issue was:
// - iPhone 11, safari, v8.3.1: 750ms
// - Pixel 6, chrome, Android 15: 400ms
// - Windows 11, edge: 100ms
new Promise(resolve => setTimeout(resolve, 1000))
]);
}
else {
const previewOn = _isPreviewOn(callClient.getState().deviceManager);
if (!previewOn) {
deviceManager.selectCamera(device);
return;
}
// A preview was showing: tear it down and recreate it against the newly-selected camera.
yield onDisposeLocalStreamView();
deviceManager.selectCamera(device);
yield callClient.createView(undefined, undefined, {
source: device,
mediaStreamType: 'Video'
}, options);
}
});
// Raises the local user's hand via the RaiseHand call feature.
const onRaiseHand = () => __awaiter(void 0, void 0, void 0, function* () {
    return yield call?.feature(Features.RaiseHand)?.raiseHand();
});
// Lowers the local user's hand via the RaiseHand call feature.
const onLowerHand = () => __awaiter(void 0, void 0, void 0, function* () {
    return yield call?.feature(Features.RaiseHand)?.lowerHand();
});
// Raises the hand when it is currently lowered, lowers it when currently raised.
const onToggleRaiseHand = () => __awaiter(void 0, void 0, void 0, function* () {
    const raiseHandFeature = call?.feature(Features.RaiseHand);
    const flatLocalUserId = toFlatCommunicationIdentifier(callClient.getState().userId);
    // Check whether the local user is among the currently published raised hands.
    const isLocalRaisedHand = raiseHandFeature
        ?.getRaisedHands()
        .find((publishedState) => toFlatCommunicationIdentifier(publishedState.identifier) === flatLocalUserId);
    if (isLocalRaisedHand) {
        yield raiseHandFeature?.lowerHand();
    }
    else {
        yield raiseHandFeature?.raiseHand();
    }
});
// Sends a meeting reaction when it is one of the supported types; warns otherwise.
const onReactionClick = (reaction) => __awaiter(void 0, void 0, void 0, function* () {
    const supportedReactions = ['like', 'applause', 'heart', 'laugh', 'surprised'];
    if (!supportedReactions.includes(reaction)) {
        console.warn(`Can not recognize ${reaction} as meeting reaction`);
        return;
    }
    yield call?.feature(Features.Reaction)?.sendReaction({
        reactionType: reaction
    });
});
// Mutes when unmuted and vice versa; only valid once the call is in progress.
const onToggleMicrophone = () => __awaiter(void 0, void 0, void 0, function* () {
    const callIsActive = call && (_isInCall(call.state) || _isInLobbyOrConnecting(call.state));
    if (!callIsActive) {
        throw new Error(`Please invoke onToggleMicrophone after call is started`);
    }
    if (call.isMuted) {
        return yield call.unmute();
    }
    return yield call.mute();
});
// Starts sharing the local screen on the current call.
const onStartScreenShare = () => __awaiter(void 0, void 0, void 0, function* () {
    return yield call?.startScreenSharing();
});
// Stops sharing the local screen on the current call.
const onStopScreenShare = () => __awaiter(void 0, void 0, void 0, function* () {
    return yield call?.stopScreenSharing();
});
// Starts screen share when off, stops it when on.
const onToggleScreenShare = () => __awaiter(void 0, void 0, void 0, function* () {
    if (call?.isScreenSharingOn) {
        return yield onStopScreenShare();
    }
    return yield onStartScreenShare();
});
// Hangs up the call; `forEveryone` ends the call for all participants when strictly true.
const onHangUp = (forEveryone) => __awaiter(void 0, void 0, void 0, function* () {
    return yield call?.hangUp({
        // Coerce to a strict boolean: any non-true value means "leave only".
        forEveryone: forEveryone === true
    });
});
// Resumes the call when locally held, otherwise places it on hold.
const onToggleHold = () => __awaiter(void 0, void 0, void 0, function* () {
    if (call?.state === 'LocalHold') {
        return yield call?.resume();
    }
    return yield call?.hold();
});
// Creates renderer views for the local camera stream and/or local screen-share stream.
// Returns `{ view }` for the last view created, or undefined when nothing was created.
const onCreateLocalStreamView = (options = {
    scalingMode: 'Crop',
    isMirrored: true
}) => __awaiter(void 0, void 0, void 0, function* () {
    if (!call || call.localVideoStreams.length === 0) {
        return;
    }
    const callState = callClient.getState().calls[call.id];
    if (!callState) {
        return;
    }
    const localStream = callState.localVideoStreams.find((item) => item.mediaStreamType === 'Video');
    const localScreenSharingStream = callState.localVideoStreams.find((item) => item.mediaStreamType === 'ScreenSharing');
    let createViewResult = undefined;
    if (localStream && !localStream.view) {
        createViewResult = yield callClient.createView(call.id, undefined, localStream, options);
    }
    if (localScreenSharingStream && !localScreenSharingStream.view && call.isScreenSharingOn) {
        // Hardcoded `scalingMode` since it is highly unlikely that CONTOSO would ever want to use a different scaling mode for screenshare.
        // Using `Crop` would crop the contents of screenshare and `Stretch` would warp it.
        // `Fit` is the only mode that maintains the integrity of the screen being shared.
        createViewResult = yield callClient.createView(call.id, undefined, localScreenSharingStream, {
            scalingMode: 'Fit'
        });
    }
    return createViewResult?.view ? { view: createViewResult.view } : undefined;
});
// Creates renderer views for a remote participant's video and/or screen-share streams.
// Returns `{ view }` for the last view created, or undefined when nothing was created.
const onCreateRemoteStreamView = (userId, options = {
    scalingMode: 'Crop'
}) => __awaiter(void 0, void 0, void 0, function* () {
    if (!call) {
        return;
    }
    const callState = getCallStateIfExist(callClient.getState(), call.id);
    if (!callState) {
        return;
    }
    const participant = Object.values(callState.remoteParticipants).find((p) => toFlatCommunicationIdentifier(p.identifier) === userId);
    if (!participant?.videoStreams) {
        return;
    }
    /**
     * There is a bug from the calling sdk where if a user leaves and rejoins immediately
     * it adds 2 more potential streams this remote participant can use. The old 2 streams
     * still show as available and that is how we got a frozen stream in this case. The stopgap
     * until streams accurately reflect their availability is to always prioritize the latest streams of a certain type
     * e.g findLast instead of find
     */
    const streams = Object.values(participant.videoStreams);
    // Prefer the latest available stream of each type, falling back to the latest stream at all.
    const remoteVideoStream = streams.findLast((i) => i.mediaStreamType === 'Video' && i.isAvailable) ||
        streams.findLast((i) => i.mediaStreamType === 'Video');
    const screenShareStream = streams.findLast((i) => i.mediaStreamType === 'ScreenSharing' && i.isAvailable) ||
        streams.findLast((i) => i.mediaStreamType === 'ScreenSharing');
    let createViewResult = undefined;
    if (remoteVideoStream && remoteVideoStream.isAvailable && !remoteVideoStream.view) {
        createViewResult = yield callClient.createView(call.id, participant.identifier, remoteVideoStream, options);
    }
    if (screenShareStream && screenShareStream.isAvailable && !screenShareStream.view) {
        // Hardcoded `scalingMode` since it is highly unlikely that CONTOSO would ever want to use a different scaling mode for screenshare.
        // Using `Crop` would crop the contents of screenshare and `Stretch` would warp it.
        // `Fit` is the only mode that maintains the integrity of the screen being shared.
        createViewResult = yield callClient.createView(call.id, participant.identifier, screenShareStream, {
            scalingMode: 'Fit'
        });
    }
    return createViewResult?.view ? { view: createViewResult.view } : undefined;
});
// Disposes a remote participant's active video view and screen-share view, if any.
const onDisposeRemoteStreamView = (userId) => __awaiter(void 0, void 0, void 0, function* () {
    if (!call) {
        return;
    }
    const callState = getCallStateIfExist(callClient.getState(), call.id);
    if (!callState) {
        return;
    }
    const participant = Object.values(callState.remoteParticipants).find((p) => toFlatCommunicationIdentifier(p.identifier) === userId);
    if (!participant?.videoStreams) {
        return;
    }
    const streams = Object.values(participant.videoStreams);
    const remoteVideoStream = streams.find((i) => i.mediaStreamType === 'Video');
    const screenShareStream = streams.find((i) => i.mediaStreamType === 'ScreenSharing');
    if (remoteVideoStream?.view) {
        callClient.disposeView(call.id, participant.identifier, remoteVideoStream);
    }
    if (screenShareStream?.view) {
        callClient.disposeView(call.id, participant.identifier, screenShareStream);
    }
});
// Disposes every active camera-video view belonging to the given remote participant.
const onDisposeRemoteVideoStreamView = (userId) => __awaiter(void 0, void 0, void 0, function* () {
    if (!call) {
        return;
    }
    const callState = getCallStateIfExist(callClient.getState(), call.id);
    if (!callState) {
        return;
    }
    const participant = Object.values(callState.remoteParticipants).find((p) => toFlatCommunicationIdentifier(p.identifier) === userId);
    if (!participant?.videoStreams) {
        return;
    }
    Object.values(participant.videoStreams)
        .filter((stream) => stream.mediaStreamType === 'Video' && stream.view)
        .forEach((stream) => callClient.disposeView(call.id, participant.identifier, stream));
});
// Disposes every active screen-share view belonging to the given remote participant.
const onDisposeRemoteScreenShareStreamView = (userId) => __awaiter(void 0, void 0, void 0, function* () {
    if (!call) {
        return;
    }
    const callState = getCallStateIfExist(callClient.getState(), call.id);
    if (!callState) {
        return;
    }
    const participant = Object.values(callState.remoteParticipants).find((p) => toFlatCommunicationIdentifier(p.identifier) === userId);
    if (!participant?.videoStreams) {
        return;
    }
    Object.values(participant.videoStreams)
        .filter((stream) => stream.mediaStreamType === 'ScreenSharing' && stream.view)
        .forEach((stream) => callClient.disposeView(call.id, participant.identifier, stream));
});
// Disposes the local screen-share view attached to the current call, if one exists.
const onDisposeLocalScreenShareStreamView = () => __awaiter(void 0, void 0, void 0, function* () {
    if (!call) {
        return;
    }
    const callState = getCallStateIfExist(callClient.getState(), call.id);
    if (!callState) {
        return;
    }
    const screenShareStream = callState.localVideoStreams.find((item) => item.mediaStreamType === 'ScreenSharing');
    if (screenShareStream?.view) {
        callClient.disposeView(call.id, undefined, screenShareStream);
    }
});
// Disposes the local camera view on the current call (if any), then all local preview views.
const onDisposeLocalStreamView = () => __awaiter(void 0, void 0, void 0, function* () {
    // If the user is currently in a call, dispose of the local stream view attached to that call.
    const callState = call && callClient.getState().calls[call.id];
    const localStream = callState?.localVideoStreams.find((item) => item.mediaStreamType === 'Video');
    if (call && callState && localStream) {
        callClient.disposeView(call.id, undefined, localStream);
    }
    // If the user is not in a call we currently assume any unparented view is a LocalPreview and stop all
    // since those are only used for LocalPreview currently.
    // TODO: we need to remember which LocalVideoStream was used for LocalPreview and dispose that one.
    yield disposeAllLocalPreviewViews(callClient);
});
// Sends a DTMF tone on the current call (e.g. for dial-pad interactions).
const onSendDtmfTone = (dtmfTone) => __awaiter(void 0, void 0, void 0, function* () {
    return yield call?.sendDtmf(dtmfTone);
});
//eslint-disable-next-line @typescript-eslint/no-explicit-any
// Placeholder handler for operations that concrete call types must override.
const notImplemented = () => {
    throw new Error('Not implemented, cannot call a method from an abstract object');
};
// Stops all video background effects on the active local video stream (call or unparented).
const onRemoveVideoBackgroundEffects = () => __awaiter(void 0, void 0, void 0, function* () {
    const stream = call?.localVideoStreams.find((s) => s.mediaStreamType === 'Video') ||
        deviceManager?.getUnparentedVideoStreams().find((s) => s.mediaStreamType === 'Video');
    if (!stream) {
        return;
    }
    if (!options?.onResolveVideoBackgroundEffectsDependency) {
        throw new Error(`Video background effects dependency not resolved`);
    }
    return stream.feature(Features.VideoEffects).stopEffects();
});
// Applies a background-blur effect to the active local video stream (call or unparented).
const onBlurVideoBackground = (backgroundBlurConfig) => __awaiter(void 0, void 0, void 0, function* () {
    const stream = call?.localVideoStreams.find((s) => s.mediaStreamType === 'Video') ||
        deviceManager?.getUnparentedVideoStreams().find((s) => s.mediaStreamType === 'Video');
    if (!stream) {
        return;
    }
    if (!options?.onResolveVideoBackgroundEffectsDependency) {
        throw new Error(`Video background effects dependency not resolved`);
    }
    // Lazily resolve the (potentially heavy) effects dependency only when needed.
    const dependency = yield options.onResolveVideoBackgroundEffectsDependency();
    const createEffect = dependency?.createBackgroundBlurEffect;
    return createEffect && stream.feature(Features.VideoEffects).startEffects(createEffect(backgroundBlurConfig));
});
// Applies a background-replacement effect to the active local video stream (call or unparented).
const onReplaceVideoBackground = (backgroundReplacementConfig) => __awaiter(void 0, void 0, void 0, function* () {
    const stream = call?.localVideoStreams.find((s) => s.mediaStreamType === 'Video') ||
        deviceManager?.getUnparentedVideoStreams().find((s) => s.mediaStreamType === 'Video');
    if (!stream) {
        return;
    }
    if (!options?.onResolveVideoBackgroundEffectsDependency) {
        throw new Error(`Video background effects dependency not resolved`);
    }
    // Lazily resolve the (potentially heavy) effects dependency only when needed.
    const dependency = yield options.onResolveVideoBackgroundEffectsDependency();
    const createEffect = dependency?.createBackgroundReplacementEffect;
    return createEffect && stream.feature(Features.VideoEffects).startEffects(createEffect(backgroundReplacementConfig));
});
// Starts deep noise suppression on the local audio stream when the platform supports it.
const onStartNoiseSuppressionEffect = () => __awaiter(void 0, void 0, void 0, function* () {
    // Lazily resolve the noise-suppression dependency only when one was provided.
    const dependency = options?.onResolveDeepNoiseSuppressionDependency
        ? yield options.onResolveDeepNoiseSuppressionDependency()
        : undefined;
    const audioEffects = dependency?.deepNoiseSuppressionEffect;
    const stream = call?.localAudioStreams.find((s) => s.mediaStreamType === 'Audio');
    if (!stream || !audioEffects?.noiseSuppression) {
        return;
    }
    const audioEffectsFeature = stream.feature(Features.AudioEffects);
    const isNoiseSuppressionSupported = yield audioEffectsFeature.isSupported(audioEffects.noiseSuppression);
    if (isNoiseSuppressionSupported) {
        return yield audioEffectsFeature.startEffects(audioEffects);
    }
    console.warn('Deep Noise Suppression is not supported on this platform.');
});
// Stops the noise-suppression audio effect on the local audio stream, if it could have been started.
const onStopNoiseSuppressionEffect = () => __awaiter(void 0, void 0, void 0, function* () {
    const stream = call?.localAudioStreams.find((s) => s.mediaStreamType === 'Audio');
    if (!stream || !options?.onResolveDeepNoiseSuppressionDependency) {
        return;
    }
    // Only noise suppression is stopped; other audio effects are left untouched.
    return yield stream.feature(Features.AudioEffects).stopEffects({
        noiseSuppression: true
    });
});
// Starts closed captions on the current call.
const onStartCaptions = (options) => __awaiter(void 0, void 0, void 0, function* () {
    yield call?.feature(Features.Captions).captions?.startCaptions(options);
});
// Stops closed captions on the current call.
const onStopCaptions = () => __awaiter(void 0, void 0, void 0, function* () {
    yield call?.feature(Features.Captions).captions?.stopCaptions();
});
// Sets the spoken language used for captioning on the current call.
const onSetSpokenLanguage = (language) => __awaiter(void 0, void 0, void 0, function* () {
    yield call?.feature(Features.Captions).captions?.setSpokenLanguage(language);
});
// Sets the caption (translation) language on the current call.
const onSetCaptionLanguage = (language) => __awaiter(void 0, void 0, void 0, function* () {
    const captionsFeature = call?.feature(Features.Captions).captions;
    // Fix: guard `captionsFeature` like the sibling caption handlers (onSetSpokenLanguage,
    // onStartCaptions, onStopCaptions). Previously this dereferenced `captionsFeature`
    // unconditionally and threw a TypeError when no call was active, instead of no-op'ing.
    yield captionsFeature?.setCaptionLanguage(language);
});
// Sends a real-time text update; `isFinalized` marks the message as complete.
const onSendRealTimeText = (text, isFinalized) => __awaiter(void 0, void 0, void 0, function* () {
    yield call?.feature(Features.RealTimeText)?.sendRealTimeText(text, isFinalized);
});
// Submits an end-of-call survey via the CallSurvey feature.
const onSubmitSurvey = (survey) => __awaiter(void 0, void 0, void 0, function* () {
    return yield call?.feature(Features.CallSurvey).submitSurvey(survey);
});
// Spotlights the given participants (or the local user when no ids are passed).
const onStartSpotlight = (userIds) => __awaiter(void 0, void 0, void 0, function* () {
    const participants = userIds?.map((userId) => _toCommunicationIdentifier(userId));
    yield call?.feature(Features.Spotlight).startSpotlight(participants);
});
// Removes spotlight from the given participants (or the local user when no ids are passed).
const onStopSpotlight = (userIds) => __awaiter(void 0, void 0, void 0, function* () {
    const participants = userIds?.map((userId) => _toCommunicationIdentifier(userId));
    yield call?.feature(Features.Spotlight).stopSpotlight(participants);
});
// Removes spotlight from every spotlighted participant on the call.
const onStopAllSpotlight = () => __awaiter(void 0, void 0, void 0, function* () {
    yield call?.feature(Features.Spotlight).stopAllSpotlight();
});
// Mutes the remote participant whose flattened identifier matches `userId`.
const onMuteParticipant = (userId) => __awaiter(void 0, void 0, void 0, function* () {
    if (!(call === null || call === void 0 ? void 0 : call.remoteParticipants)) {
        return;
    }
    // Fix: iterate with for...of instead of an async forEach callback. forEach ignores the
    // promises its callback returns, so mute() results/errors were dropped and the handler's
    // returned promise resolved before the mute actually completed.
    for (const participant of call.remoteParticipants) {
        // Using toFlatCommunicationIdentifier to convert the CommunicationIdentifier to string
        // as _toCommunicationIdentifier(userId) comparison to participant.identifier did not work for this case
        if (toFlatCommunicationIdentifier(participant.identifier) === userId) {
            yield participant.mute();
        }
    }
});
// Mutes every remote participant on the call.
const onMuteAllRemoteParticipants = () => __awaiter(void 0, void 0, void 0, function* () {
    // Fix: await the SDK call so failures propagate to the caller — previously the
    // returned promise was left floating and resolved immediately.
    yield (call === null || call === void 0 ? void 0 : call.muteAllRemoteParticipants());
});
// Capability checks deciding whether the spotlight handlers below are exposed at all.
const canStartSpotlight = call?.feature(Features.Capabilities).capabilities.spotlightParticipant.isPresent;
const canRemoveSpotlight = call?.feature(Features.Capabilities).capabilities.removeParticipantsSpotlight.isPresent;
// Spotlights the local user; undefined when the capability is absent.
const onStartLocalSpotlight = canStartSpotlight
    ? () => __awaiter(void 0, void 0, void 0, function* () {
        yield call?.feature(Features.Spotlight).startSpotlight();
    })
    : undefined;
// Removes spotlight from the local user (always available, unlike remote spotlight control).
const onStopLocalSpotlight = () => __awaiter(void 0, void 0, void 0, function* () {
    yield call?.feature(Features.Spotlight).stopSpotlight();
});
// Spotlights the given remote participants; undefined when the capability is absent.
const onStartRemoteSpotlight = canStartSpotlight
    ? (userIds) => __awaiter(void 0, void 0, void 0, function* () {
        const participants = userIds?.map((userId) => _toCommunicationIdentifier(userId));
        yield call?.feature(Features.Spotlight).startSpotlight(participants);
    })
    : undefined;
// Removes spotlight from the given remote participants; undefined when the capability is absent.
const onStopRemoteSpotlight = canRemoveSpotlight
    ? (userIds) => __awaiter(void 0, void 0, void 0, function* () {
        const participants = userIds?.map((userId) => _toCommunicationIdentifier(userId));
        yield call?.feature(Features.Spotlight).stopSpotlight(participants);
    })
    : undefined;
// Creates a renderer view for the Together Mode main video stream, when it is
// available and not yet rendered. Returns a result object keyed by view kind.
const onCreateTogetherModeStreamView = (options = {
    scalingMode: 'Fit',
    isMirrored: false,
    viewKind: 'main'
}) => __awaiter(void 0, void 0, void 0, function* () {
    if (!call) {
        return;
    }
    const callState = callClient.getState().calls[call.id];
    if (!callState) {
        return;
    }
    const { mainVideoStream } = callState.togetherMode.streams;
    const togetherModeCreateViewResult = {};
    if (mainVideoStream && mainVideoStream.isAvailable && !mainVideoStream.view) {
        // SDK currently only supports 1 Video media stream type
        const createViewResult = yield callClient.createView(call.id, undefined, mainVideoStream, options);
        togetherModeCreateViewResult.mainVideoView = createViewResult?.view
            ? { view: createViewResult.view }
            : undefined;
    }
    return togetherModeCreateViewResult;
});
// Disposes the Together Mode main video view when one is currently rendered.
const onDisposeTogetherModeStreamView = () => __awaiter(void 0, void 0, void 0, function* () {
    if (!call) {
        return;
    }
    const callState = callClient.getState().calls[call.id];
    if (!callState) {
        throw new Error(`Call Not Found: ${call.id}`);
    }
    const { mainVideoStream } = callState.togetherMode.streams;
    if (mainVideoStream?.view) {
        callClient.disposeView(call.id, undefined, mainVideoStream);
    }
});
// Updates the Together Mode scene dimensions used to lay out participant seats.
const onSetTogetherModeSceneSize = (width, height) => {
    const togetherModeFeature = call?.feature(Features.TogetherMode);
    if (togetherModeFeature) {
        togetherModeFeature.sceneSize = { width, height };
    }
};
// Revokes the audio (unmute) permission of the given participants.
const onForbidAudio = (userIds) => __awaiter(void 0, void 0, void 0, function* () {
    const participants = userIds?.map((userId) => _toCommunicationIdentifier(userId));
    yield call?.feature(Features.MediaAccess).forbidAudio(participants);
});
// Restores the audio (unmute) permission of the given participants.
const onPermitAudio = (userIds) => __awaiter(void 0, void 0, void 0, function* () {
    const participants = userIds?.map((userId) => _toCommunicationIdentifier(userId));
    yield call?.feature(Features.MediaAccess).permitAudio(participants);
});
// Revokes the audio permission of all other participants on the call.
const onForbidOthersAudio = () => __awaiter(void 0, void 0, void 0, function* () {
    yield call?.feature(Features.MediaAccess).forbidOthersAudio();
});
// Restores the audio permission of all other participants on the call.
const onPermitOthersAudio = () => __awaiter(void 0, void 0, void 0, function* () {
    yield call?.feature(Features.MediaAccess).permitOthersAudio();
});
// Revokes the video permission of the given participants.
const onForbidVideo = (userIds) => __awaiter(void 0, void 0, void 0, function* () {
    const participants = userIds?.map((userId) => _toCommunicationIdentifier(userId));
    yield call?.feature(Features.MediaAccess).forbidVideo(participants);
});
// Restores the video permission of the given participants.
const onPermitVideo = (userIds) => __awaiter(void 0, void 0, void 0, function* () {
    const participants = userIds?.map((userId) => _toCommunicationIdentifier(userId));
    yield call?.feature(Features.MediaAccess).permitVideo(participants);
});
// Revokes the video permission of all other participants on the call.
const onForbidOthersVideo = () => __awaiter(void 0, void 0, void 0, function* () {
    yield call?.feature(Features.MediaAccess).forbidOthersVideo();
});
// Restores the video permission of all other participants on the call.
const onPermitOthersVideo = () => __awaiter(void 0, void 0, void 0, function* () {
    yield call?.feature(Features.MediaAccess).permitOthersVideo();
});
// The handlers object returned to consumers. Handlers that only concrete call types
// (e.g. ACS call vs Teams call) can implement are stubbed with `notImplemented`.
return {
onHangUp,
onToggleHold,
onSelectCamera,
onSelectMicrophone,
onSelectSpeaker,
onStartScreenShare,
onStopScreenShare,
onToggleCamera,
onToggleMicrophone,
onToggleScreenShare,
onCreateLocalStreamView,
onCreateRemoteStreamView,
onStartLocalVideo,
onDisposeRemoteStreamView,
onDisposeLocalStreamView,
onDisposeRemoteScreenShareStreamView,
onDisposeLocalScreenShareStreamView,
onDisposeRemoteVideoStreamView,
onRaiseHand,
onLowerHand,
onToggleRaiseHand,
onReactionClick,
onAddParticipant: notImplemented,
onRemoveParticipant: notImplemented,
onStartCall: notImplemented,
onSendDtmfTone,
onRemoveVideoBackgroundEffects,
onBlurVideoBackground,
onReplaceVideoBackground,
onStartNoiseSuppressionEffect,
onStopNoiseSuppressionEffect,
onStartCaptions,
onStopCaptions,
onSetCaptionLanguage,
onSetSpokenLanguage,
onSubmitSurvey,
onStartSpotlight,
onStopSpotlight,
onStopAllSpotlight,
onStartLocalSpotlight,
onStopLocalSpotlight,
onStartRemoteSpotlight,
onStopRemoteSpotlight,
onMuteParticipant,
onMuteAllRemoteParticipants,
onAcceptCall: notImplemented,
onRejectCall: notImplemented,
onCreateTogetherModeStreamView,
onStartTogetherMode: notImplemented,
onSetTogetherModeSceneSize,
onDisposeTogetherModeStreamView,
onForbidAudio,
onPermitAudio,
onForbidOthersAudio,
onPermitOthersAudio,
onForbidVideo,
onPermitVideo,
onForbidOthersVideo,
onPermitOthersVideo,
onSendRealTimeText
};
});
//# sourceMappingURL=createCommonHandlers.js.map