rapida-react
An easy-to-use React client for building generative AI applications on the Rapida platform.
import {
useObservableState,
useSelectInputDeviceAgent
} from "./chunk-FHDCWUKE.mjs";
import {
AgentLiveTranscript
} from "./chunk-DWJ3MINH.mjs";
import {
AgentMultibandAudioVisualizer
} from "./chunk-3YJ3G4GZ.mjs";
import {
Conversation
} from "./chunk-GKZ4RFKG.mjs";
import {
Channel,
ConnectionState,
DEFAULT_DEVICE_ID,
Feedback,
MessageRole,
MessageStatus,
VoiceAgent,
VoiceAgentContext,
agentAudioOutputMuteObservable,
agentConnectionStateObservable,
agentInputObservable,
require_common_pb,
require_talk_api_pb,
useEnsureVoiceAgent,
useMaybeVoiceAgent
} from "./chunk-5UOY4SG5.mjs";
import {
__commonJS,
__require,
__toESM
} from "./chunk-32YFHJN5.mjs";
// src/clients/protos/talk-api_pb_service.js
var require_talk_api_pb_service = __commonJS({
"src/clients/protos/talk-api_pb_service.js"(exports) {
"use strict";
var talk_api_pb = require_talk_api_pb();
var common_pb = require_common_pb();
var grpc2 = __require("@improbable-eng/grpc-web").grpc;
var TalkService = function() {
function TalkService2() {
}
TalkService2.serviceName = "talk_api.TalkService";
return TalkService2;
}();
TalkService.AssistantMessaging = {
methodName: "AssistantMessaging",
service: TalkService,
requestStream: false,
responseStream: true,
requestType: talk_api_pb.AssistantMessagingRequest,
responseType: talk_api_pb.AssistantMessagingResponse
};
TalkService.AssistantTalk = {
methodName: "AssistantTalk",
service: TalkService,
requestStream: true,
responseStream: true,
requestType: talk_api_pb.AssistantMessagingRequest,
responseType: talk_api_pb.AssistantMessagingResponse
};
TalkService.GetAllAssistantConversation = {
methodName: "GetAllAssistantConversation",
service: TalkService,
requestStream: false,
responseStream: false,
requestType: common_pb.GetAllAssistantConversationRequest,
responseType: common_pb.GetAllAssistantConversationResponse
};
TalkService.GetAllConversationMessage = {
methodName: "GetAllConversationMessage",
service: TalkService,
requestStream: false,
responseStream: false,
requestType: common_pb.GetAllConversationMessageRequest,
responseType: common_pb.GetAllConversationMessageResponse
};
TalkService.CreateMessageMetric = {
methodName: "CreateMessageMetric",
service: TalkService,
requestStream: false,
responseStream: false,
requestType: talk_api_pb.CreateMessageMetricRequest,
responseType: talk_api_pb.CreateMessageMetricResponse
};
TalkService.CreateConversationMetric = {
methodName: "CreateConversationMetric",
service: TalkService,
requestStream: false,
responseStream: false,
requestType: talk_api_pb.CreateConversationMetricRequest,
responseType: talk_api_pb.CreateConversationMetricResponse
};
exports.TalkService = TalkService;
function TalkServiceClient2(serviceHost, options) {
this.serviceHost = serviceHost;
this.options = options || {};
}
TalkServiceClient2.prototype.assistantMessaging = function assistantMessaging(requestMessage, metadata) {
var listeners = {
data: [],
end: [],
status: []
};
var client = grpc2.invoke(TalkService.AssistantMessaging, {
request: requestMessage,
host: this.serviceHost,
metadata,
transport: this.options.transport,
debug: this.options.debug,
onMessage: function(responseMessage) {
listeners.data.forEach(function(handler) {
handler(responseMessage);
});
},
onEnd: function(status, statusMessage, trailers) {
listeners.status.forEach(function(handler) {
handler({ code: status, details: statusMessage, metadata: trailers });
});
listeners.end.forEach(function(handler) {
handler({ code: status, details: statusMessage, metadata: trailers });
});
listeners = null;
}
});
return {
on: function(type, handler) {
listeners[type].push(handler);
return this;
},
cancel: function() {
listeners = null;
client.close();
}
};
};
TalkServiceClient2.prototype.assistantTalk = function assistantTalk(metadata) {
var listeners = {
data: [],
end: [],
status: []
};
var client = grpc2.client(TalkService.AssistantTalk, {
host: this.serviceHost,
metadata,
transport: this.options.transport
});
client.onEnd(function(status, statusMessage, trailers) {
listeners.status.forEach(function(handler) {
handler({ code: status, details: statusMessage, metadata: trailers });
});
listeners.end.forEach(function(handler) {
handler({ code: status, details: statusMessage, metadata: trailers });
});
listeners = null;
});
client.onMessage(function(message) {
listeners.data.forEach(function(handler) {
handler(message);
});
});
client.start(metadata);
return {
on: function(type, handler) {
listeners[type].push(handler);
return this;
},
write: function(requestMessage) {
client.send(requestMessage);
return this;
},
end: function() {
client.finishSend();
},
cancel: function() {
listeners = null;
client.close();
}
};
};
TalkServiceClient2.prototype.getAllAssistantConversation = function getAllAssistantConversation(requestMessage, metadata, callback) {
if (arguments.length === 2) {
callback = arguments[1];
}
var client = grpc2.unary(TalkService.GetAllAssistantConversation, {
request: requestMessage,
host: this.serviceHost,
metadata,
transport: this.options.transport,
debug: this.options.debug,
onEnd: function(response) {
if (callback) {
if (response.status !== grpc2.Code.OK) {
var err = new Error(response.statusMessage);
err.code = response.status;
err.metadata = response.trailers;
callback(err, null);
} else {
callback(null, response.message);
}
}
}
});
return {
cancel: function() {
callback = null;
client.close();
}
};
};
TalkServiceClient2.prototype.getAllConversationMessage = function getAllConversationMessage(requestMessage, metadata, callback) {
if (arguments.length === 2) {
callback = arguments[1];
}
var client = grpc2.unary(TalkService.GetAllConversationMessage, {
request: requestMessage,
host: this.serviceHost,
metadata,
transport: this.options.transport,
debug: this.options.debug,
onEnd: function(response) {
if (callback) {
if (response.status !== grpc2.Code.OK) {
var err = new Error(response.statusMessage);
err.code = response.status;
err.metadata = response.trailers;
callback(err, null);
} else {
callback(null, response.message);
}
}
}
});
return {
cancel: function() {
callback = null;
client.close();
}
};
};
TalkServiceClient2.prototype.createMessageMetric = function createMessageMetric(requestMessage, metadata, callback) {
if (arguments.length === 2) {
callback = arguments[1];
}
var client = grpc2.unary(TalkService.CreateMessageMetric, {
request: requestMessage,
host: this.serviceHost,
metadata,
transport: this.options.transport,
debug: this.options.debug,
onEnd: function(response) {
if (callback) {
if (response.status !== grpc2.Code.OK) {
var err = new Error(response.statusMessage);
err.code = response.status;
err.metadata = response.trailers;
callback(err, null);
} else {
callback(null, response.message);
}
}
}
});
return {
cancel: function() {
callback = null;
client.close();
}
};
};
TalkServiceClient2.prototype.createConversationMetric = function createConversationMetric(requestMessage, metadata, callback) {
if (arguments.length === 2) {
callback = arguments[1];
}
var client = grpc2.unary(TalkService.CreateConversationMetric, {
request: requestMessage,
host: this.serviceHost,
metadata,
transport: this.options.transport,
debug: this.options.debug,
onEnd: function(response) {
if (callback) {
if (response.status !== grpc2.Code.OK) {
var err = new Error(response.statusMessage);
err.code = response.status;
err.metadata = response.trailers;
callback(err, null);
} else {
callback(null, response.message);
}
}
}
});
return {
cancel: function() {
callback = null;
client.close();
}
};
};
exports.TalkServiceClient = TalkServiceClient2;
}
});
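// Usage sketch (illustrative only): the generated client above is consumed internally by
// ConnectionConfig further below, but its bidirectional stream roughly works like this.
// The host, metadata, and request objects here are placeholders.
//
//   var client = new TalkServiceClient("https://assistant-01.rapida.ai", {
//     transport: grpc.WebsocketTransport()
//   });
//   var stream = client.assistantTalk(metadata)
//     .on("data", function(response) { /* handle AssistantMessagingResponse */ })
//     .on("end", function(status) { /* stream finished */ });
//   stream.write(requestMessage);
//   stream.end();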
// src/hooks/use-message-feedback.ts
import * as React from "react";
function useMessageFeedback() {
const agent = useEnsureVoiceAgent();
const { handleMessageFeedback, handleHelpfulnessFeedback } = React.useMemo(
() => setupMessageFeedback(agent),
[agent]
);
return { handleMessageFeedback, handleHelpfulnessFeedback };
}
function setupMessageFeedback(agent) {
const handleMessageFeedback = async (messageId, name, description, value) => {
await agent.createMessageMetric(messageId, [
{
name,
description,
value
}
]);
};
const handleHelpfulnessFeedback = async (messageId, value) => {
await agent.createMessageMetric(messageId, [
{
name: "feedback",
description: "feedback given by end-user",
value
}
]);
};
return {
handleHelpfulnessFeedback,
handleMessageFeedback
};
}
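// Usage sketch (illustrative): wiring per-message feedback from a chat UI. Assumes the
// component is rendered inside a VoiceAgentContext provider so useEnsureVoiceAgent() can
// resolve an agent; the message id, metric name, and values below are placeholders.
//
//   const { handleHelpfulnessFeedback, handleMessageFeedback } = useMessageFeedback();
//   await handleHelpfulnessFeedback(message.id, "positive");
//   await handleMessageFeedback(message.id, "accuracy", "end-user rating", "4");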
// src/hooks/use-conversation-feedback.ts
import * as React2 from "react";
function useConversationFeedback() {
const agent = useEnsureVoiceAgent();
const { handleHelpfulnessFeedback, handleConversationFeedback } = React2.useMemo(() => setupConversationFeedback(agent), [agent]);
return { handleHelpfulnessFeedback, handleConversationFeedback };
}
function setupConversationFeedback(agent) {
const handleConversationFeedback = async (name, description, value) => {
await agent.createConversationMetric([
{
name,
description,
value
}
]);
};
const handleHelpfulnessFeedback = async (value) => {
await agent.createConversationMetric([
{
name: "feedback",
description: "feedback given by end-user",
value
}
]);
};
return {
handleHelpfulnessFeedback,
handleConversationFeedback
};
}
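// Usage sketch (illustrative): conversation-level feedback mirrors the message-level hook but
// needs no message id; the metric name, description, and value below are placeholders.
//
//   const { handleConversationFeedback } = useConversationFeedback();
//   await handleConversationFeedback("csat", "post-call survey score", "5");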
// src/hooks/use-connect-agent.ts
import * as React3 from "react";
function useConnectAgent() {
const agent = useEnsureVoiceAgent();
const {
_agentConnectionStateObservable,
handleConnectAgent,
handleDisconnectAgent
} = React3.useMemo(() => setupConnectAgent(), []);
const observable = React3.useMemo(
() => _agentConnectionStateObservable(agent),
[agent, _agentConnectionStateObservable]
);
const { isConnected } = useObservableState(observable, {
isConnected: agent.isConnected
});
return { handleConnectAgent, handleDisconnectAgent, isConnected };
}
function setupConnectAgent() {
const handleConnectAgent = async (agent) => {
await agent.connect();
};
const handleDisconnectAgent = async (agent) => {
await agent.disconnect();
};
return {
_agentConnectionStateObservable: agentConnectionStateObservable,
handleConnectAgent,
handleDisconnectAgent
};
}
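// Usage sketch (illustrative): a connect/disconnect toggle. Both handlers take the agent
// instance explicitly, so pair the hook with useEnsureVoiceAgent() in the same component.
//
//   const agent = useEnsureVoiceAgent();
//   const { handleConnectAgent, handleDisconnectAgent, isConnected } = useConnectAgent();
//   const onToggle = () => (isConnected ? handleDisconnectAgent(agent) : handleConnectAgent(agent));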
// src/hooks/use-input-mode-toggle-agent.ts
import * as React4 from "react";
function useInputModeToggleAgent() {
const agent = useEnsureVoiceAgent();
const { _agentInputObservable, handleTextToggle, handleVoiceToggle } = React4.useMemo(() => toggleInputMode(), []);
const observable = React4.useMemo(
() => _agentInputObservable(agent),
[agent, _agentInputObservable]
);
const { channel } = useObservableState(observable, {
channel: agent.inputChannel
});
return { handleTextToggle, handleVoiceToggle, channel };
}
function toggleInputMode() {
const handleTextToggle = async (agent) => {
if (agent.isTextInput) {
return;
}
await agent.setInputChannel("text" /* Text */);
return;
};
const handleVoiceToggle = async (agent) => {
if (agent.isAudioInput) {
console.warn("already in voice mode, ignore in toggle");
return;
}
await agent.setInputChannel("audio" /* Audio */);
return;
};
return {
_agentInputObservable: agentInputObservable,
handleVoiceToggle,
handleTextToggle
};
}
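// Usage sketch (illustrative): switching between text and voice input. `channel` reflects the
// agent's current input channel ("text" or "audio"); the handlers again take the agent instance.
//
//   const agent = useEnsureVoiceAgent();
//   const { handleTextToggle, handleVoiceToggle, channel } = useInputModeToggleAgent();
//   const switchToVoice = () => handleVoiceToggle(agent);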
// src/hooks/use-speaker-output-toggle-agent.ts
import * as React5 from "react";
function useSpeakerOuputToggleAgent() {
const agent = useEnsureVoiceAgent();
const { _agentAudioInputMuteObservable, handleSpeakerOuputToggleAgent } = React5.useMemo(() => speakerOuputToggleAgent(), []);
const observable = React5.useMemo(
() => _agentAudioInputMuteObservable(agent),
[agent, _agentAudioInputMuteObservable]
);
const { isEnable } = useObservableState(observable, {
isEnable: agent.isAudioInputEnable
});
return { handleSpeakerOuputToggleAgent, isEnable };
}
function speakerOuputToggleAgent() {
const handleSpeakerOuputToggleAgent = async (agent) => {
await agent.toggelAudioOutput();
};
return {
_agentAudioInputMuteObservable: agentAudioOutputMuteObservable,
handleSpeakerOuputToggleAgent
};
}
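// Usage sketch (illustrative): muting/unmuting the agent's audio output from UI.
//
//   const agent = useEnsureVoiceAgent();
//   const { handleSpeakerOuputToggleAgent, isEnable } = useSpeakerOuputToggleAgent();
//   const onToggleSpeaker = () => handleSpeakerOuputToggleAgent(agent);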
// src/devices/device-failure.ts
var MediaDeviceFailure = /* @__PURE__ */ ((MediaDeviceFailure2) => {
MediaDeviceFailure2["PermissionDenied"] = "PermissionDenied";
MediaDeviceFailure2["NotFound"] = "NotFound";
MediaDeviceFailure2["DeviceInUse"] = "DeviceInUse";
MediaDeviceFailure2["Other"] = "Other";
return MediaDeviceFailure2;
})(MediaDeviceFailure || {});
// src/agents/agent-config.ts
var import_talk_api_pb = __toESM(require_talk_api_pb());
// src/utils/rapida_value.ts
import { Any } from "google-protobuf/google/protobuf/any_pb";
import {
StringValue,
Int32Value,
DoubleValue,
BoolValue,
BytesValue
} from "google-protobuf/google/protobuf/wrappers_pb";
function pack(serialized, typeUrlPrefix) {
const anyValue = new Any();
anyValue.pack(serialized, typeUrlPrefix);
return anyValue;
}
function StringArrayToAny(values) {
return values.map((x) => {
return StringToAny(x);
});
}
function StringToAny(value) {
const stringValue = new StringValue();
stringValue.setValue(value);
const serialized = stringValue.serializeBinary();
return pack(serialized, "type.googleapis.com/google.protobuf.StringValue");
}
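// Note: these helpers wrap plain values in google.protobuf.Any so they can travel inside the
// request's options/arguments maps. For example, StringToAny("value") yields a single Any and
// StringArrayToAny(["a", "b"]) an array of them, as used by AgentConfig.addKeywords below.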
// src/agents/agent-config.ts
var import_talk_api_pb2 = __toESM(require_talk_api_pb());
var InputOptions = class {
/**
* channels enabled for input
*/
channels = ["audio" /* Audio */, "text" /* Text */];
/**
* sample rate for the recorder
*/
recorderOptions = { sampleRate: 24e3 };
get recorderOption() {
return this.recorderOptions;
}
/**
* default channel for providing input
*/
channel = "audio" /* Audio */;
get defaultChannel() {
return this.channel;
}
/**
* device that will capture the input media
*/
deviceId;
get inputDeviceId() {
// fall back to the default device when none has been selected
return this.deviceId ?? DEFAULT_DEVICE_ID;
}
/**
*
* @param deviceId
*/
setDeviceId(deviceId) {
this.deviceId = deviceId;
}
/**
*
* @param channels
* @param channel
* @param deviceId
*/
constructor(channels, channel, deviceId) {
this.channels = channels;
if (channel) this.channel = channel;
this.deviceId = deviceId;
}
/**
*
* @param channel
*/
changeChannel(channel) {
this.channel = channel;
}
/**
*
* @param device
*/
changeDevice(device) {
this.deviceId = device;
}
};
var OutputOptions = class {
/**
* enable channels
*/
channels = ["audio" /* Audio */, "text" /* Text */];
/**
* sample rate for player
*/
playerOptions = { sampleRate: 24e3 };
get playerOption() {
return this.playerOptions;
}
/**
* channel for providing output
*/
channel = "audio" /* Audio */;
get defaultChannel() {
return this.channel;
}
/**
* device that will be ouput the media
*/
deviceId;
get outputDeviceId() {
// fall back to the default device when none has been selected
return this.deviceId ?? DEFAULT_DEVICE_ID;
}
/**
*
* @param channels
* @param channel
* @param deviceId
*/
constructor(channels, channel, deviceId) {
this.channels = channels;
if (channel) this.channel = channel;
this.deviceId = deviceId;
}
/**
*
* @param channel
*/
changeChannel(channel) {
this.channel = channel;
}
/**
*
* @param deviceId
*/
setDeviceId(deviceId) {
this.deviceId = deviceId;
}
/**
*
* @param device
*/
changeDevice(device) {
this.deviceId = device;
}
};
var AgentConfig = class {
/**
* Unique identifier for the agent.
*/
id;
/**
* (Optional) Version number of the agent.
*/
version;
/**
* arguments for the assistant
*/
arguments;
/**
* options for the assistant
*/
options;
/**
* metadata for the assistant request
*/
metadata;
/**
* all the agent callbacks
*/
callbacks;
/**
* input channel and device options
*/
inputOptions;
/**
* output channel and device options
*/
outputOptions;
/**
* Initializes a new instance of `AgentConfig`.
*
* @param id - Unique identifier for the agent.
* @param inputOptions - (Optional) Input channel/device options; defaults to audio and text.
* @param outputOptions - (Optional) Output channel/device options; defaults to audio and text.
* @param version - (Optional) Version number of the agent.
* @param argument - (Optional) Configuration arguments for the agent.
* @param options - (Optional) Additional options to override assistant defaults.
* @param metadata - (Optional) Metadata to attach to assistant requests.
*/
constructor(id, inputOptions = new InputOptions([
"audio" /* Audio */,
"text" /* Text */
]), outputOptions = new OutputOptions([
"audio" /* Audio */,
"text" /* Text */
]), version, argument, options, metadata) {
this.id = id;
this.version = version;
this.arguments = argument;
this.options = options;
this.metadata = metadata;
this.inputOptions = inputOptions;
this.outputOptions = outputOptions;
}
/**
* Retrieves the assistant definition for this agent.
*
* @returns {AssistantDefinition} A configured `AssistantDefinition` instance with the agent's details.
*/
get definition() {
const def = new import_talk_api_pb.AssistantDefinition();
def.setAssistantid(this.id);
if (this.version) {
def.setVersion(this.version);
}
return def;
}
/**
* Adds a custom dictionary to the agent.
* Keywords supplied here are used by the agent to perform correction on matching terms.
* @param keywords
*/
addKeywords(keywords) {
if (this.options == void 0) this.options = /* @__PURE__ */ new Map();
// use Map.set so the entry is stored as a map entry rather than a plain property
this.options.set("keywords", StringArrayToAny(keywords));
return this;
}
/**
* Adds a custom option, overriding the assistant default for the given key.
* @param k
* @param otp
* @returns The current instance for method chaining.
*/
addCustomOption(k, otp) {
if (this.options == void 0) this.options = /* @__PURE__ */ new Map();
this.options.set(k, otp);
return this;
}
/**
* Adds a metadata entry to be sent with assistant requests.
* @param k
* @param meta
* @returns The current instance for method chaining.
*/
addMetadata(k, meta) {
if (this.metadata == void 0) this.metadata = /* @__PURE__ */ new Map();
this.metadata.set(k, meta);
return this;
}
/**
* Adds a string argument, wrapped as a protobuf Any value.
* @param k
* @param value
* @returns The current instance for method chaining.
*/
addArgument(k, value) {
if (this.arguments == void 0) this.arguments = /* @__PURE__ */ new Map();
this.arguments?.set(k, StringToAny(value));
return this;
}
/**
* Sets up callback functions for various events in the agent's conversation lifecycle.
*
* @param cl - An object of event callbacks; the ones dispatched by `onCallback` below are
* `onStart`, `onStartConversation`, `onTranscript`, `onInterrupt`, `onGeneration`,
* `onCompleteGeneration`, `onComplete`, `onCompleteConversation`, and `onMessage`.
* @returns The current instance of the AgentConfig, allowing for method chaining.
*/
withAgentCallback(cl) {
this.callbacks = cl;
return this;
}
/**
* Dispatches a streaming AssistantMessagingResponse to the configured callbacks
* based on its payload (event or message).
* @param response
* @returns
*/
onCallback(response) {
switch (response.getDataCase()) {
case import_talk_api_pb2.AssistantMessagingResponse.DataCase.DATA_NOT_SET:
break;
case import_talk_api_pb2.AssistantMessagingResponse.DataCase.EVENT:
if (response.getEvent()) {
switch (response.getEvent()?.getName()) {
case "talk.onTranscript" /* Transcript */:
if (this.callbacks && this.callbacks?.onTranscript) {
this.callbacks.onTranscript(response.getEvent()?.getMeta());
}
break;
case "talk.onInterrupt" /* Interruption */:
if (this.callbacks && this.callbacks?.onInterrupt) {
this.callbacks.onInterrupt(response.getEvent()?.getMeta());
}
break;
case "talk.onGeneration" /* Generation */:
if (this.callbacks && this.callbacks?.onGeneration) {
this.callbacks.onGeneration(response.getEvent()?.getMeta());
}
break;
case "talk.onCompleteConversation" /* CompleteConversation */:
if (this.callbacks && this.callbacks?.onCompleteConversation) {
this.callbacks.onCompleteConversation(
response.getEvent()?.getMeta()
);
}
break;
case "talk.onComplete" /* Complete */:
if (this.callbacks && this.callbacks?.onComplete) {
this.callbacks.onComplete(response.getEvent()?.getMeta());
}
break;
case "talk.onCompleteGeneration" /* CompleteGeneration */:
if (this.callbacks && this.callbacks?.onCompleteGeneration) {
this.callbacks.onCompleteGeneration(
response.getEvent()?.getMeta()
);
}
break;
case "talk.onStart" /* Start */:
if (this.callbacks && this.callbacks?.onStart) {
this.callbacks.onStart(response.getEvent()?.getMeta());
}
break;
case "talk.onStartConversation" /* StartConversation */:
if (this.callbacks && this.callbacks?.onStartConversation) {
this.callbacks.onStartConversation(
response.getEvent()?.getMeta()
);
}
break;
}
}
return;
case import_talk_api_pb2.AssistantMessagingResponse.DataCase.MESSAGE:
if (this.callbacks && this.callbacks?.onMessage) {
this.callbacks.onMessage(response.getMessage());
}
break;
default:
break;
}
}
};
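// Usage sketch (illustrative): building an AgentConfig. The assistant id, keywords, argument,
// and callback bodies below are placeholders; when the input/output options are omitted they
// default to audio + text, as in the constructor above.
//
//   const config = new AgentConfig("your-assistant-id")
//     .addKeywords(["Rapida", "gRPC"])
//     .addArgument("user_name", "Ada")
//     .withAgentCallback({
//       onStartConversation: (meta) => console.log("conversation started", meta),
//       onTranscript: (meta) => console.log("transcript", meta),
//       onMessage: (message) => console.log("message", message)
//     });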
// src/hooks/use-multiband-track-volume.ts
import { useState, useEffect, useRef } from "react";
var useMultibandMicrophoneTrackVolume = (bands = 5, loPass = 0.1, hiPass = 1) => {
const [frequencyBands, setFrequencyBands] = useState(
Array(bands).fill([]).map(() => Array(32).fill(0))
);
const agentContext = useMaybeVoiceAgent();
useEffect(() => {
const updateVolume = () => {
if (!agentContext || agentContext.recorder.getStatus() !== "recording") {
return;
}
const frequencies = agentContext.recorder.getFrequencies("frequency")?.values;
if (!frequencies || frequencies.length === 0) return;
const startIndex = Math.floor(frequencies.length * loPass);
const endIndex = Math.floor(frequencies.length * hiPass);
const usableFrequencies = Array.from(
frequencies.slice(startIndex, endIndex)
);
const samplesPerBand = Math.floor(usableFrequencies.length / bands);
const bandArrays = [];
for (let bandIndex = 0; bandIndex < bands; bandIndex++) {
const bandStart = bandIndex * samplesPerBand;
const bandEnd = bandIndex === bands - 1 ? usableFrequencies.length : (bandIndex + 1) * samplesPerBand;
const bandFrequencies = usableFrequencies.slice(bandStart, bandEnd).map((amplitude) => {
const numericAmplitude = Number(amplitude);
return Math.min(1, Math.max(0, numericAmplitude * 4));
});
const resampledBand = resampleArray(bandFrequencies, 32);
bandArrays.push(resampledBand);
}
setFrequencyBands((prevBands) => {
if (prevBands.length !== bands) return bandArrays;
return bandArrays.map((bandFrequencies, bandIndex) => {
if (!prevBands[bandIndex]) return bandFrequencies;
const smoothingFactor = 0.7;
return bandFrequencies.map((freq, i) => {
const prevValue = prevBands[bandIndex][i] || 0;
return freq * smoothingFactor + prevValue * (1 - smoothingFactor);
});
});
});
};
const resampleArray = (arr, newLength) => {
const result = new Array(newLength);
const stepSize = arr.length / newLength;
for (let i = 0; i < newLength; i++) {
const start = Math.floor(i * stepSize);
const end = Math.floor((i + 1) * stepSize);
let sum = 0;
for (let j = start; j < end; j++) {
sum += arr[j] || 0;
}
result[i] = sum / (end - start);
}
return result;
};
const interval = setInterval(updateVolume, 100);
return () => clearInterval(interval);
}, [agentContext, loPass, hiPass, bands]);
return frequencyBands;
};
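// Usage sketch (illustrative): feeding microphone levels into a visualizer. The hook returns
// `bands` arrays of 32 smoothed amplitudes (0..1), refreshed every 100 ms while the recorder
// is active; rendering (e.g. via AgentMultibandAudioVisualizer) is up to the app.
//
//   const frequencies = useMultibandMicrophoneTrackVolume(5);
//   // frequencies[bandIndex][i] is an amplitude suitable for bar heights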
var useMultiband3DSpeakerTrackVolume = (bands = 5, loPass = 0.1, hiPass = 1) => {
const [frequencyBands, setFrequencyBands] = useState(
Array(bands).fill([]).map(() => Array(32).fill(0))
);
const [sphericalData, setSphericalData] = useState({ xNorm: 0, yNorm: 0, zNorm: 0, elapsedTimeSec: 0 });
const agentContext = useMaybeVoiceAgent();
const startTime = useRef(Date.now());
useEffect(() => {
const updateVolume = () => {
if (!agentContext?.player?.analyser) {
return;
}
const frequencies = agentContext.player.getFrequencies("frequency")?.values;
if (!frequencies || frequencies.length === 0) return;
const startIndex = Math.floor(frequencies.length * loPass);
const endIndex = Math.floor(frequencies.length * hiPass);
const usableFrequencies = Array.from(
frequencies.slice(startIndex, endIndex)
);
const samplesPerBand = Math.floor(usableFrequencies.length / bands);
const bandArrays = [];
for (let bandIndex = 0; bandIndex < bands; bandIndex++) {
const bandStart = bandIndex * samplesPerBand;
const bandEnd = bandIndex === bands - 1 ? usableFrequencies.length : (bandIndex + 1) * samplesPerBand;
const bandFrequencies = usableFrequencies.slice(bandStart, bandEnd).map((amplitude) => {
const numericAmplitude = Number(amplitude);
return Math.min(1, Math.max(0, numericAmplitude));
});
const resampledBand = resampleArray(bandFrequencies, 32);
bandArrays.push(resampledBand);
}
setFrequencyBands((prevBands) => {
if (prevBands.length !== bands) return bandArrays;
const smoothedBands = bandArrays.map((bandFrequencies, bandIndex) => {
if (!prevBands[bandIndex]) return bandFrequencies;
const smoothingFactor = 0.7;
return bandFrequencies.map((freq, i) => {
const prevValue = prevBands[bandIndex][i] || 0;
return freq * smoothingFactor + prevValue * (1 - smoothingFactor);
});
});
const xNorm = calculateAverageAmplitude(smoothedBands[0]);
const yNorm = bands > 1 ? calculateAverageAmplitude(smoothedBands[1]) : 0;
const zNorm = bands > 2 ? calculateAverageAmplitude(smoothedBands[2]) : 0;
const elapsedTimeSec = (Date.now() - startTime.current) / 1e3;
setSphericalData({ xNorm, yNorm, zNorm, elapsedTimeSec });
return smoothedBands;
});
};
const resampleArray = (arr, newLength) => {
const result = new Array(newLength);
const stepSize = arr.length / newLength;
for (let i = 0; i < newLength; i++) {
const start = Math.floor(i * stepSize);
const end = Math.floor((i + 1) * stepSize);
let sum = 0;
for (let j = start; j < end; j++) {
sum += arr[j] || 0;
}
result[i] = sum / (end - start);
}
return result;
};
const calculateAverageAmplitude = (band) => {
return band.reduce((sum, amplitude) => sum + amplitude, 0) / band.length;
};
const interval = setInterval(updateVolume, 100);
return () => clearInterval(interval);
}, [agentContext, loPass, hiPass, bands]);
return { frequencyBands, ...sphericalData };
};
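// Usage sketch (illustrative): the speaker-side variant additionally exposes normalized x/y/z
// values (averages of the first three bands) plus elapsed seconds, useful for driving a 3D or
// orb-style visualization.
//
//   const { frequencyBands, xNorm, yNorm, zNorm, elapsedTimeSec } = useMultiband3DSpeakerTrackVolume(5);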
// src/connections/connection-config.ts
var import_talk_api_pb_service = __toESM(require_talk_api_pb_service());
import { grpc } from "@improbable-eng/grpc-web";
// src/configs/index.ts
var ASSISTANT_API = "https://assistant-01.rapida.ai";
var LOCAL_ASSISTANT_API = "http://assistant.rapida.local";
// src/connections/connection-config.ts
var ConnectionConfig = class {
/**
* gRPC client for handling real-time streaming interactions.
*/
streamClient;
/**
* gRPC client for handling standard conversation requests.
*/
conversationClient;
/**
* Authentication information for the client, supporting both client and user authentication.
*/
auth;
/**
* Connection lifecycle callbacks (onConnect / onDisconnect).
*/
callbacks;
/**
* Creates a new Connection instance, initializing the conversation and streaming clients.
*
* @param auth - Authentication information for the connection.
* @param endpoint - (Optional) Custom API endpoint for connecting to the TalkService.
* If not provided, it defaults to `ASSISTANT_API`.
*/
constructor(auth, endpoint) {
this.auth = auth;
if (endpoint) {
this.conversationClient = new import_talk_api_pb_service.TalkServiceClient(endpoint);
this.streamClient = new import_talk_api_pb_service.TalkServiceClient(endpoint, {
transport: grpc.WebsocketTransport(),
// Enables WebSocket transport for real-time communication.
debug: true
// Enables debugging for troubleshooting.
});
return;
}
this.conversationClient = new import_talk_api_pb_service.TalkServiceClient(ASSISTANT_API);
this.streamClient = new import_talk_api_pb_service.TalkServiceClient(ASSISTANT_API, {
transport: grpc.WebsocketTransport(),
debug: true
});
}
/**
* Only for local testing against LOCAL_ASSISTANT_API.
* @returns The current instance for method chaining.
*/
withLocal() {
return this.withCustomEndpoint(LOCAL_ASSISTANT_API);
}
/**
* Points both clients at a custom endpoint (e.g. an on-premise deployment).
* @param endpoint
* @returns The current instance for method chaining.
*/
withCustomEndpoint(endpoint) {
this.conversationClient = new import_talk_api_pb_service.TalkServiceClient(endpoint);
this.streamClient = new import_talk_api_pb_service.TalkServiceClient(endpoint, {
transport: grpc.WebsocketTransport(),
debug: true
});
return this;
}
/**
* Configures connection callbacks for the TalkService clients.
*
* @param onConnect - A callback function to be executed when a connection is established.
* @param onDisconnect - A callback function to be executed when the connection is terminated.
* @param onError - A callback function to be executed when the connection is terminated.
*
* @returns - The current instance of `ConnectionConfig` for method chaining.
*
* @example
* ```typescript
* const connection = new ConnectionConfig(auth);
* connection.withConnectionCallback(
* () => { console.log("Connected to the TalkService"); },
* () => { console.log("Disconnected from the TalkService"); }
* );
* ```
*/
withConnectionCallback(cl) {
this.callbacks = cl;
return this;
}
// Invoked on connection state changes; forwards to the onConnect / onDisconnect callbacks.
onConnectionChange(connection) {
if (connection === "connected" /* Connected */) {
if (this.callbacks && this.callbacks?.onConnect) {
this.callbacks.onConnect();
}
return;
}
if (this.callbacks && this.callbacks?.onDisconnect) {
this.callbacks.onDisconnect();
}
return;
}
};
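// Usage sketch (illustrative): pointing the clients at a self-hosted deployment and reacting to
// connection changes. The endpoint URL and the `auth` object are placeholders; the shape of
// `auth` is defined elsewhere in the SDK.
//
//   const connection = new ConnectionConfig(auth)
//     .withCustomEndpoint("https://assistant.example.com")
//     .withConnectionCallback({
//       onConnect: () => console.log("connected"),
//       onDisconnect: () => console.log("disconnected")
//     });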
export {
AgentConfig,
AgentLiveTranscript,
AgentMultibandAudioVisualizer,
Channel,
ConnectionConfig,
ConnectionState,
Conversation,
Feedback,
InputOptions,
MediaDeviceFailure,
MessageRole,
MessageStatus,
OutputOptions,
VoiceAgent,
VoiceAgentContext,
useConnectAgent,
useConversationFeedback,
useEnsureVoiceAgent,
useInputModeToggleAgent,
useMaybeVoiceAgent,
useMessageFeedback,
useMultiband3DSpeakerTrackVolume,
useMultibandMicrophoneTrackVolume,
useSelectInputDeviceAgent,
useSpeakerOuputToggleAgent
};
//# sourceMappingURL=index.mjs.map