
@tixae-labs/web-sdk

JavaScript Web SDK for making WebRTC AI voice calls with Convocore.

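What follows is the package's bundled CommonJS build: shared Zod schemas and constants (model IDs, provider catalogs, call/LLM/node configuration) used across the SDK. As a minimal sketch of how these schemas are consumed (assuming the package root re-exports them; the exports assignments below suggest this but this excerpt does not confirm it), a caller could validate a call configuration like this:

// Hypothetical usage sketch; the import path and re-export are assumptions.
const { callConfigSchema } = require("@tixae-labs/web-sdk");

const parsed = callConfigSchema.safeParse({
  recordAudio: true,         // required boolean
  backgroundNoise: "office", // optional: "restaurant" | "office" | "street" | "none"
  enableWebCalling: true,    // optional boolean
});

if (!parsed.success) console.error(parsed.error.issues);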
"use strict"; var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); exports.rerouterConfigSchema = exports.LLMConfigSchema = exports.appDefaultTools = exports.toolSchema = exports.toolParamType = exports.allModelIdsSchema = exports.AlibabaModels = exports.DeepseekModels = exports.deepseekModelIds = exports.AnthropicModels = exports.anthropicModelIds = exports.GroqModelsOpenSource = exports.groqModelIds = exports.GoogleGeminiModels = exports.googleModelIds = exports.OpenAIModels = exports.azureOpenAiModelIds = exports.openAiModelIds = exports.speechGenProvidersInfo = exports.speechGenInfoSchema = exports.speechGenOnInterruptSchema = exports.speechGenOnAudioChunkSchema = exports.llmServiceToolCallIdSchema = exports.llmServiceChunkSchema = exports.interruptionSchema = exports.transcriptSchema = exports.transcriberProvidersInfo = exports.transcriberInfoSchema = exports.transcriberProvidersName = exports.transcriberEventName = exports.toolCallSchema = exports.speechProvidersName = exports.llmProvidersName = exports.SupportedProviders = exports.LLMSProvider = exports.VGSupportedModelZod = exports.LLMSProviderKeySchema = exports.ALIBABA_SUPPORTED_LLM_ZOD = exports.XAI_SUPPORTED_LLM_ZOD = exports.VG_SUPPORTED_LLM_ZOD = exports.llmsToolSchema = exports.toolFieldSchema = exports.toolFieldVariableTypeSchema = exports.CUSTOM_TOOL_EVENT_ZOD = exports.llmMessageSchema = exports.callConfigSchema = exports.defaultPunctuationBreaks = exports.backgroundNoises = exports.backgroundNoiseEnum = exports.eventsHooksUrls = void 0; exports.GladiaStreamingConfigSchema = exports.AppIceServers = exports.genericResponsesArray = exports.AgentSchema = exports.AgentRootSchema = exports.speechGenProviderOptionsSchema = exports.InternalSpeechGenOptionsSchema = exports.audioFormatConfigSchema = exports.APISpeechGenOptionsSchema = exports.PlayhtPlatformSpecificOptionsSchema = exports.ElevenLabsPlatformSpecificOptionsSchema = exports.buttonsLayoutSchema = exports.regionSchema = exports.agentToolSchemaAPI = exports.VGAgentToolSchema = exports.supportedMessageOriginsEnum = exports.transcriberProviderOptionsSchema = exports.GoogleCloudPlatformSpecificOptionsSchema = exports.AssemblyaiPlatformSpecificOptionsSchema = exports.DeepgramPlatformSpecificOptionsSchema = exports.allSTTProvidersType = exports.allSTTProviders = exports.googleCloudSpeechModelLanguages = exports.googleCloudSpeechModelIds = exports.GladiaModelLanguages = exports.GladiaModelIds = exports.allSTTModelIds = exports.openaiSTTModelLanguages = exports.openaiSTTModelIds = exports.assemblyAiModelLanguages = exports.assemblyAiModelIds = exports.deepgramModelLanguages = exports.deepgramModelIds = exports.speechmaticsModelLanguages = exports.speechmaticsModelIds = exports.supportedMessageOrigins = exports.speechProviderOptionsSchema = exports.llmProviderOptionsSchema = exports.llmServiceOptionsMetadataSchema = exports.globalOptionsSchema = exports.toolFieldSchemaOverride = exports.globalDefaultToolsEnums = exports.smartEndpointConfigSchema = exports.llmProviderBaseSchema = exports.LLMNodeSchema = exports.mcpServerSchema = exports.globalVariableCaptureSchema = exports.LLMNodeRequiredSchema = exports.SpecialNodeTypesEnums = exports.LLMRouterSchema = void 0; exports.featuresList = exports.featureModelSchema = exports.featuresKeysZod = exports.freeAccountLimits = exports.BRANDING = exports.RESELL_DOMAINS = exports.AgentTemplates = 
exports.VG_SUPPORTED_LLMS = exports.BASE_CREDIT_PRICE_USD = exports.GCLOUD_REGIONS = exports.FB_ALL_BUCKETS_ARRAY = exports.DEFAULT_FREE_LTS_NUM = exports.widgetConvoModelSchema = exports.leadsOnlyModelSchema = exports.clientModelSchemaPUBLIC = exports.ClientModelSchema = exports.CannedResponseModelSchema = exports.CLIENT_TABS = exports.GLOBAL_OPENAI_MODELS_ARRAY = exports.marketPlaceProfileSchema = exports.shouldIsPayAsYoGoWork = exports.EventsSchema = exports.BugReportEventsSchema = exports.ChatDelegationEventsSchema = exports.FormEventsSchema = exports.LeadEventsSchema = exports.ClientEventsSchema = exports.OrganisationEventsSchema = exports.MessageEventsSchema = exports.AgentEventsSchema = exports.SingleEventSchema = exports.AvailableEvents = exports.DEFAULT_REFRESH_RATES = exports.DEFAULT_VARIABLE_TYPE = exports.ToolExamples = exports.getToolsTemplate = exports.DEFAULT_VG_PROMPT_VARS = exports.vgPromptVarSchema = exports.DEFAULT_WIDGET_TABS = exports.V2GetStripeCustomPlan = exports.startTemplate = exports.createRfNode = exports.convertInputJsonToToolFinalBody = exports.createDefaultLlmNode = exports.defaultLlmNodePosition = exports.defaultRerouterConfig = exports.defaultLlmNodeConfig = exports.StartNodeExample = exports.createRfNodeProps = exports.NodesTemplateSchema = void 0; exports.campaignSchema = exports.analyticsMetricSchema = exports.analyticsMetricTypeEnum = exports.leadWithoutMetadataSchema = exports.leadWithMetadataSchema = exports.baseLeadSchema = exports.conditionSchema = exports.initWebRtcCallSchema = exports.lightLangGraphLLMConfigSchema = exports.countries = exports.whatsAppTemplateSchema = exports.languageCodesEnum = exports.whatsAppTemplateCategory = exports.postUtteranceEndSchema = exports.DEFAULT_LLM_CONFIG = exports.realtimeProvidersName = exports.DEFAULT_SPEECH_GEN_CONFIG = exports.DEFAULT_TRANSCRIBER_CONFIG = exports.DEFAULT_PG_VECTOR_DB_DIMENSIONS = exports.createVGBaseAgentData = exports.builtInTemplates = exports.builtInTemplateSchema = exports.leadDataSchema = exports.voiceProvidersCosts = exports.voiceProvidersCostsSchema = exports.heavyConvoDataSchema = exports.agentTemplateSchema = exports.defaultNewVariable = exports.WebRtcCallEventsKeysSchema = exports.speechGenProviderPricingConfigSchema = exports.onConversationUpdatedEventHookBodySchema = exports.onConversationUpdatedSchema = exports.baseAgentCallServiceHandlerConfigSchema = exports.initCallOptionsSchema = exports.supportedChannels = exports.defaultTwilioOutgoingFallbackEndpoint = exports.defaultTwilioOutgoingEndpoint = exports.defaultTwilioFallbackEndpoint = exports.defaultTwilioEndpoint = exports.baseDefaultCallServiceOptions = exports.CallHandlerSTSOptionsSchema = exports.CallHandlerOptionsSchema = exports.CallHandlerSpeechToSpeechOptionsSchema = exports.callEndedSchema = exports.twilioNumberSchema = exports.defaultStartNode = exports.DEFAULT_VG_AGENTS_TOOLS = exports.STRIPE_PUBLIC_KEY = exports.DEFAULT_QDRANT_DIMENSIONS = exports.DEFAULT_QDRANT_MAX_CHUNKS = void 0; exports.personalizedAgentStepSchema = exports.transactionSchema = exports.AddonType = exports.languageMap = exports.speechGenProviderModels = exports.googleCloudTTSModelsSchema = exports.rimeAIModelsSchema = exports.openaiModelsSchema = exports.playhtModelsSchema = exports.cartesiaModelsSchema = exports.AllDeepGramVoicesAura = exports.RecommendedVoices = exports.geminiLiveDefaultConfigs = exports.GeminiLiveOptionsSchema = exports.GeminiLiveAPIConfigSchema = exports.GeminiLiveConfigSchema = exports.SystemInstructionSchema = 
exports.GenerationConfigSchema = exports.AudioTranscriptionSchema = exports.SpeechConfigSchema = exports.VoiceConfigSchema = exports.ProactivitySchema = exports.RealtimeInputConfigSchema = exports.AutomaticActivityDetectionSchema = exports.LanguageCharacteristics = exports.VoiceCharacteristics = exports.LanguageCodeSchema = exports.EndSensitivitySchema = exports.StartSensitivitySchema = exports.ResponseModalitySchema = exports.AllGoogleLiveVoices = exports.GoogleLiveVoiceSchema = exports.GeminiLiveVoiceSchema = exports.GeminiLiveVoicesNative = exports.GeminiLiveVoicesNonNative = exports.googleLiveModelsSchema = exports.GeminiLiveModelSchema = exports.AllDeepGramVoicesAuraTwo = exports.deepgramVoiceSchema = exports.deepgramModelsSchema = exports.elevenlabsModelsSchema = exports.onAiSpeechStartedSchema = exports.onAiSpeechEndedSchema = exports.webhookEventSchema = exports.metricOutputsSchema = exports.metricOutputSchema = exports.DEFAULT_ANALYTICS_METRICS = exports.analyticDataSchema = exports.analyticsMetricsSchema = exports.analyticsMetricResultSchema = void 0; exports.defaultSipProviders = exports.sipPhoneNumberAsteriskSchema = exports.sipTrunkAsteriskSchema = exports.sipTrunkConvocoreSchema = exports.customProviderConfigSchema = exports.sipProviderKeys = exports.LLMReplacements = exports.agentSchemaForLLmToGenerate = exports.workspaceClassInitSchema = exports.ScrapeResponseSchema = exports.ScrapeErrorResponseSchema = exports.ScrapeSuccessResponseSchema = exports.ScrapeResultSchema = exports.ContentSchema = exports.BrandingSchema = exports.MetadataSchema = void 0; exports.GET_VG_CLOUDFLARE_API_URL = GET_VG_CLOUDFLARE_API_URL; exports.llMNodesToReactFlowNodes = llMNodesToReactFlowNodes; exports.createRfNodeOptions = createRfNodeOptions; exports.llMNodeToReactFlowNode = llMNodeToReactFlowNode; exports.reactFlowNodesToLLMNodes = reactFlowNodesToLLMNodes; exports.createLLMNode = createLLMNode; exports.llMNodeToReactFlowEdges = llMNodeToReactFlowEdges; exports.llmNodesToReactFlowEdges = llmNodesToReactFlowEdges; exports.generateRandomId = generateRandomId; exports.GET_VF_API_EP = GET_VF_API_EP; exports.GET_QDRANT_ENDPOINT = GET_QDRANT_ENDPOINT; exports.GET_BUN_SERVER_ENDPOINT = GET_BUN_SERVER_ENDPOINT; exports.GET_HEAVY_WS_URL = GET_HEAVY_WS_URL; exports.GET_VG_WS_URL = GET_VG_WS_URL; exports.GET_VG_ORIGIN_URL = GET_VG_ORIGIN_URL; exports.GET_NODE_WS_SERVER = GET_NODE_WS_SERVER; exports.GET_VG_API_URL = GET_VG_API_URL; exports.GET_NEW_CRAWLER_URL = GET_NEW_CRAWLER_URL; exports.GET_APP_CONNECTOR_URL = GET_APP_CONNECTOR_URL; exports.WITH_PROXY_URL = WITH_PROXY_URL; exports.GET_VG_EDGE_API_URL = GET_VG_EDGE_API_URL; exports.formatNumToMini = formatNumToMini; exports.turnsToLangchainMessages = turnsToLangchainMessages; exports.langchainMessagesToTurns = langchainMessagesToTurns; exports.oldSystemPromptAgentToNewNodes = oldSystemPromptAgentToNewNodes; exports.migrateCanvasMarkdownToNewAgent = migrateCanvasMarkdownToNewAgent; exports.roundTo3Decimals = roundTo3Decimals; exports.costUsdToCredits = costUsdToCredits; exports.agentConfigToPricingAnalysis = agentConfigToPricingAnalysis; exports.variablesIdsToJsonSchema = variablesIdsToJsonSchema; exports.appMarkdownToFinalText = appMarkdownToFinalText; exports.formatNodeTool = formatNodeTool; exports.GET_NEXT_APP_URL = GET_NEXT_APP_URL; exports.GET_CALL_SERVICE_API_URL = GET_CALL_SERVICE_API_URL; exports.baseDataCleaner = baseDataCleaner; exports.cleanWorkspaceData = cleanWorkspaceData; exports.convertToSafeUserData = convertToSafeUserData; 
exports.convertLightConvoDataToGlobalVariables = convertLightConvoDataToGlobalVariables; exports.finalAgentToolsGiver = finalAgentToolsGiver; exports.formatTurnsToTextMessagesHistory = formatTurnsToTextMessagesHistory; exports.properCustomPlanInput = properCustomPlanInput; exports.safeAgentData = safeAgentData; exports.shouldShowWhitelabelTrialBanner = shouldShowWhitelabelTrialBanner; exports.formatDate = formatDate; exports.formatSecondsToSecondsAndMinutes = formatSecondsToSecondsAndMinutes; exports.getModelWorkspaceApiKey = getModelWorkspaceApiKey; exports.canWorkspaceAccessCustomBranding = canWorkspaceAccessCustomBranding; exports.hasUnlimitedClientsAccess = hasUnlimitedClientsAccess; exports.isFinalPricingAndSubscribed = isFinalPricingAndSubscribed; exports.getMaxClientsLimit = getMaxClientsLimit; exports.toolsIdsFilterer = toolsIdsFilterer; exports.toCamelCase = toCamelCase; exports.csvStringToJson = csvStringToJson; exports.hasCompletedOnboarding = hasCompletedOnboarding; exports.hasCompletedFullOnboarding = hasCompletedFullOnboarding; exports.shouldShowSinglePromptTab = shouldShowSinglePromptTab; exports.shouldNeverShowSinglePromptTab = shouldNeverShowSinglePromptTab; exports.assembleAgentPrototypeUrl = assembleAgentPrototypeUrl; exports.assembleNewWorkspace = assembleNewWorkspace; exports.assembleNodesSettingsForGemini = assembleNodesSettingsForGemini; /** @format */ // NOTE // This file is huge because it contains all the schemas/types for the entire app // ANY SAFE CONSTANT, ESPECIALLY TYPES AND ZOD SCHEMAS, MUST BE DEFINED HERE // the reason it is only 1 file is to avoid circular dependencies; any type MUST be defined here // try to import types only whenever possible, as this file is imported across Node.js and browser envs. const zod_1 = require("zod"); // import { VGMessageItem } from "./vf_types"; const dayjs_1 = __importDefault(require("dayjs")); const builltInNodesTemplates_1 = require("./builltInNodesTemplates"); const defaults_1 = require("./defaults"); const client_1 = require("../../../vg-docker/src/+global_consts/client"); const project_v_merge_temp_url = ""; exports.eventsHooksUrls = { newAgent: "https://hook.us1.make.com/y7504ldptx3ivar1n5hnih4jcqlhpfha", // added newUser: "https://hook.us1.make.com/5hjsbn3nmkewkgj7jcmh2m9p3hgt97sw", // added newSubscriptionCompleted: "https://hook.us1.make.com/bxovghhivjnvk3igtor4c8hkp85u1x8s", newSubscriptionIncompleted: "https://hook.us1.make.com/s9w6q9q5sm5e1rk8esikvpugmh8m8et4", // added newAgencySubdomain: "https://hook.us1.make.com/ddtgx9luf13lqdc8hwhl5lism34nione", // added newVoiceAgent: "https://hook.us1.make.com/edgbuh1si4u7rn502iwbc7m2se398l9g", // added newCall: "", newPhoneNumberBought: "https://hook.us1.make.com/2r9g16m9eq145qdbq7934mzmgj7y95yv", // added newChannelConnection: "https://hook.us1.make.com/2oijiuim3nbbdb7duxb3jn8i9pucgofu", // added }; function GET_VG_CLOUDFLARE_API_URL({ region, forceLive, }) { if ((process.env.NODE_ENV === "development" || process.env.WORKER_ENV === "development") && !forceLive) { return `http://127.0.0.1:8787`; // local dev worker } try { if (window) { // look for the dom element with the vg_bundle.js script somewhere in the src const script = document.getElementById(`spjallmenni-web-runtime`); if (script) { return `https://eu-api.spjallmenni.workers.dev`; } } } catch (error) { console.error(`is nodejs ENV..`); } if (region === "eu") { return `https://${client_1.GLOBAL_ONPREM_CLIENT.EU.EDGE_ROOT}`; // eu } return `https://${client_1.GLOBAL_ONPREM_CLIENT.NA.EDGE_ROOT}`; // na }
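// Usage notes (illustrative sketch, derived from the function above):
// GET_VG_CLOUDFLARE_API_URL resolves the edge API base URL in three steps:
// 1. In development (NODE_ENV or WORKER_ENV === "development") without
//    forceLive, it returns the local worker at http://127.0.0.1:8787.
// 2. In a browser page embedding the "spjallmenni-web-runtime" script tag,
//    it returns https://eu-api.spjallmenni.workers.dev (accessing window
//    throws in Node.js, which the try/catch swallows).
// 3. Otherwise it falls back to the regional on-prem edge root:
//    GET_VG_CLOUDFLARE_API_URL({ region: "eu" }); // EU EDGE_ROOT
//    GET_VG_CLOUDFLARE_API_URL({ region: "na", forceLive: true }); // NA EDGE_ROOT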
exports.backgroundNoiseEnum = zod_1.z.enum([ "restaurant", "office", "street", "none", ]); exports.backgroundNoises = [ { iconUrl: "", key: "restaurant", name: "Restaurant", }, { iconUrl: "", key: "office", name: "Office", }, { iconUrl: "", key: "street", name: "Street", }, { iconUrl: "", key: "none", name: "None", }, ]; exports.defaultPunctuationBreaks = [".", "!", "?", ",", ";", ":", "|"]; exports.callConfigSchema = zod_1.z.object({ recordAudio: zod_1.z.boolean(), backgroundNoise: exports.backgroundNoiseEnum.optional(), enableWebCalling: zod_1.z.boolean().optional(), firstInputChunkUNIXMs: zod_1.z.number().optional(), firstOutputChunkUNIXMs: zod_1.z.number().optional(), }); exports.llmMessageSchema = zod_1.z.object({ role: zod_1.z.enum(["user", "assistant", "system", "tool"]), content: zod_1.z.string(), name: zod_1.z.string().optional(), tool_call_id: zod_1.z.string().optional(), tool_name: zod_1.z.string().optional(), tool_calls: zod_1.z.any(), }); exports.CUSTOM_TOOL_EVENT_ZOD = zod_1.z.enum(["END_CALL", "FORWARD_CALL"]); exports.toolFieldVariableTypeSchema = zod_1.z.enum([ "string", "number", "boolean", "system", ]); exports.toolFieldSchema = zod_1.z.object({ id: zod_1.z .string() .describe("A unique identifier for the field, ensuring it is distinct across the system."), in: zod_1.z .string() .describe("Defines the location or context in which the field is utilized. Common values include 'query', 'body', or 'header'.") .optional(), type: exports.toolFieldVariableTypeSchema .optional() .describe("Specifies the variable type of the field. Acceptable values include 'string', 'number', 'boolean', and 'system'.") .optional(), value: zod_1.z .any() .optional() .describe("The current assigned value of the field. This value is optional and supports any type depending on the field's context."), defaultValue: zod_1.z .any() .optional() .describe("A predefined value assigned to the field when no specific value is provided. Useful for ensuring consistent behavior."), key: zod_1.z .string() .optional() .describe("The unique key that identifies this tool field or variable. Often used for referencing the field programmatically."), description: zod_1.z .string() .optional() .describe("A detailed explanation of the field's purpose and usage. Helps users understand the field's role in the system."), required: zod_1.z .boolean() .optional() .describe("Indicates whether the presence of this field is mandatory for successful operation. Defaults to false if unspecified."), reusable: zod_1.z .boolean() .optional() .describe("Denotes whether this field can be reused across multiple contexts or tools. Useful for reducing redundancy."), isEnv: zod_1.z .boolean() .optional() .describe("Specifies if this field represents an environment variable, typically used for configuration or deployment."), isSystem: zod_1.z .boolean() .optional() .describe("Indicates whether this field is a system-level variable, reserved for core operations or internal use."), isGlobal: zod_1.z .boolean() .optional() .describe("If enabled, this field remains globally accessible to the agent across all operational contexts. Useful for global constants."), agentId: zod_1.z .string() .optional() .describe("References the unique identifier of the agent associated with this field. Helps in mapping fields to specific agents."), userId: zod_1.z .string() .optional() .describe("References the unique identifier of the user associated with this field. 
Useful for user-specific customizations."), }); exports.llmsToolSchema = zod_1.z.object({ isDefault: zod_1.z .boolean() .optional() .describe("Indicates if this tool is the default tool."), id: zod_1.z.string().describe("A unique identifier for the tool."), name: zod_1.z.string().describe("The name of the tool."), description: zod_1.z.string().describe("A description of what the tool does."), imageUrl: zod_1.z .string() .optional() .describe("URL of the tool's image, if available."), fields: exports.toolFieldSchema .array() .optional() .describe("An array of fields associated with the tool."), serverUrl: zod_1.z .string() .optional() .describe("The URL of the server that the tool connects to."), serverUrlSecret: zod_1.z .string() .optional() .describe("The secret or token used to authenticate with the server URL."), userId: zod_1.z .string() .optional() .describe("ID of the user associated with this tool."), createdAt: zod_1.z .string() .optional() .describe("Timestamp indicating when the tool was created."), updatedAt: zod_1.z .string() .optional() .describe("Timestamp indicating when the tool was last updated."), disabled: zod_1.z .boolean() .optional() .describe("Indicates if the tool is currently disabled."), }); exports.VG_SUPPORTED_LLM_ZOD = zod_1.z.enum([ "gpt-5-chat-latest", "gpt-5-2025-08-07", "gpt-5-mini-2025-08-07", "gpt-5-nano-2025-08-07", "gpt-3.5-turbo-0125", "gpt-4-1106-preview", "gpt-4.5-preview-2025-02-27", "gpt-4o", "gpt-4o-mini", "gpt-4.1-2025-04-14", "gpt-4.1-mini-2025-04-14", "ft:gpt-4o-mini-2024-07-18:personal:4o-with-tools-t11:A6mByttv", "llama3-8b-8192", "llama3-70b-8192", "llama-3.1-8b-instant", "llama-3.1-70b-versatile", "llama-3.2-90b-text-preview", "llama-3.2-11b-text-preview", "llama-3.3-70b-versatile", "meta-llama/llama-4-scout-17b-16e-instruct", "meta-llama/llama-4-maverick-17b-128e-instruct", "mixtral-8x7b-32768", "gemma-7b-it", "gemma2-9b-it", "claude-opus-4-20250514", "claude-sonnet-4-20250514", "claude-3-5-sonnet-20240620", "claude-3-5-sonnet-20241022", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307", "claude-3-5-haiku-20241022", "claude-3-7-sonnet-20250219", "gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.0-pro", "gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp-1219", "gemini-2.5-pro", "gemini-2.5-pro-exp-03-25", "gemini-2.5-pro-preview-03-25", "gemini-2.5-flash-preview-05-20", "gemini-2.5-flash", "gpt-4-32k", "gpt-4", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "deepseek-chat", "deepseek-r1-distill-llama-70b", "grok-2-latest", "grok-3", "grok-3-mini", "grok-3-fast", "grok-4-0709", "qwen-max-latest", "qwen-plus-latest", "qwen-turbo-latest", "qwen/qwen3-next-80b-a3b-instruct", "custom-llm", "models/gemini-2.5-pro-preview-03-25", "azure-eu-gpt-4o", "azure-na-gpt-4o", "glm-4.5-air" ]); exports.XAI_SUPPORTED_LLM_ZOD = zod_1.z.enum([ "grok-2-latest", "grok-4-0709", "grok-3", "grok-3-mini", "grok-3-fast", ]); exports.ALIBABA_SUPPORTED_LLM_ZOD = zod_1.z.enum([ "qwen-max-latest", "qwen-plus-latest", "qwen-turbo-latest", ]); exports.LLMSProviderKeySchema = zod_1.z.enum([ "azure", "openai", "groq", "anthropic", "google", "meta", "deepseek", "xai", "alibaba", "convocore", ]); exports.VGSupportedModelZod = zod_1.z.object({ label: zod_1.z.string().optional(), isBeta: zod_1.z.boolean().optional(), provider: exports.LLMSProviderKeySchema, model_id: exports.VG_SUPPORTED_LLM_ZOD, icon: zod_1.z.string().optional(), react_icon: zod_1.z.any().optional(), streamUrl: zod_1.z.string().optional(), envKey: zod_1.z.string().optional(), 
supportTools: zod_1.z.boolean().optional(), pricing: zod_1.z.object({ input: zod_1.z.object({ tokens: zod_1.z.number(), providerPriceUsd: zod_1.z.number(), priceCredits: zod_1.z.number(), }), output: zod_1.z.object({ tokens: zod_1.z.number(), providerPriceUsd: zod_1.z.number(), priceCredits: zod_1.z.number(), }), }), maxContextTokens: zod_1.z.number().optional(), isLegacy: zod_1.z.boolean().optional(), doesSupportFileUpload: zod_1.z.boolean().optional(), hidden: zod_1.z.boolean().optional(), }); exports.LLMSProvider = zod_1.z.object({ key: exports.LLMSProviderKeySchema, logoUrl: zod_1.z.string(), label: zod_1.z.string(), }); exports.SupportedProviders = [ { key: "convocore", logoUrl: "https://spjall-eu-assets.s3.amazonaws.com/icons/kvant_cloud_icon.png", label: "Convocore (Self-Hosted)", }, { key: "openai", logoUrl: "https://openai.com/favicon.ico", label: "OpenAI", }, { key: "groq", logoUrl: "https://groq.com/favicon.ico", label: "Groq", }, { key: "anthropic", logoUrl: "https://anthropic.com/favicon.ico", label: "Anthropic", }, { key: "google", logoUrl: "https://google.com/favicon.ico", label: "Google", }, { key: "meta", logoUrl: "https://meta.com/favicon.ico", label: "Meta", }, ]; exports.llmProvidersName = zod_1.z .enum([ "openai", "google", "meta", "anthropic", "groq", "deepseek", "alibaba", "xai", "convocore", "custom-llm", ]) .describe("LLM Providers Name"); exports.speechProvidersName = zod_1.z .enum([ "elevenlabs", "deepgram", "cartesia", "playht", "azure", "rime-ai", "openai", "playai-groq", "google-cloud", "google-live", ]) .describe("Speech providers supported by the app."); // { type: "tool-call"; toolCallId: string; toolName: string; args: any; } [] | undefined exports.toolCallSchema = zod_1.z.object({ type: zod_1.z.literal("tool-call"), toolCallId: zod_1.z.string(), toolName: zod_1.z.string(), args: zod_1.z.any(), }); exports.transcriberEventName = zod_1.z.enum([ "transcript", "final_transcript", "close", "error", ]); exports.transcriberProvidersName = zod_1.z.enum([ "deepgram", "gladia", "assemblyai", "speechmatics", "google-cloud-speech", "google-live-transcription", ]); exports.transcriberInfoSchema = zod_1.z.object({ name: exports.transcriberProvidersName, iconUrl: zod_1.z.string(), providerLandingUrl: zod_1.z.string().optional(), }); exports.transcriberProvidersInfo = [ { name: "deepgram", iconUrl: "https://avatars.githubusercontent.com/u/17422641?s=280&v=4", providerLandingUrl: "https://www.deepgram.com", }, { name: "gladia", iconUrl: "https://media.licdn.com/dms/image/v2/D4E0BAQGRbY6rhl2KAA/company-logo_200_200/company-logo_200_200/0/1687265381995/gladia_io_logo?e=2147483647&v=beta&t=QL6phcEAgTHfLb6Pt4Ito1kWc71V8Az9dYC01hxhnrI", providerLandingUrl: "https://gladia.ai", }, { name: "assemblyai", iconUrl: "https://pbs.twimg.com/profile_images/1618629111110418434/9N-fATVT_400x400.jpg", providerLandingUrl: "https://assemblyai.com", }, { name: "speechmatics", iconUrl: "https://avatars.githubusercontent.com/u/15156030?s=200&v=4", providerLandingUrl: "https://www.speechmatics.com", }, { name: "google-cloud-speech", iconUrl: "https://cdn.worldvectorlogo.com/logos/google-cloud-1.svg", providerLandingUrl: "https://cloud.google.com/speech-to-text", }, ]; exports.transcriptSchema = zod_1.z.object({ transcript: zod_1.z.string().describe("This is the transcript of the event."), isFinal: zod_1.z.boolean(), provider: zod_1.z.object({ name: exports.transcriberProvidersName, payload: zod_1.z.any(), }), }); exports.interruptionSchema = zod_1.z.object({ transcript: zod_1.z.string(),
provider: zod_1.z.object({ name: exports.transcriberProvidersName, payload: zod_1.z.any(), }), }); exports.llmServiceChunkSchema = zod_1.z.object({ content: zod_1.z.string(), tool_call_id: zod_1.z.string().optional(), tool_name: zod_1.z.string().optional(), error: zod_1.z.string().optional(), tool_input: zod_1.z.any().optional(), tool_output: zod_1.z.any().optional(), messagesHistory: zod_1.z.array(exports.llmMessageSchema).optional(), currentNodeId: zod_1.z.string().optional(), isPlaceholder: zod_1.z.boolean().optional(), creditsConsumed: zod_1.z.number().optional(), inputTokens: zod_1.z.number().optional(), outputTokens: zod_1.z.number().optional(), modelId: exports.VG_SUPPORTED_LLM_ZOD.optional(), forceSync: zod_1.z.boolean().optional(), }); exports.llmServiceToolCallIdSchema = zod_1.z.object({ tool_call_id: zod_1.z.string(), }); exports.speechGenOnAudioChunkSchema = zod_1.z.object({ chunk: zod_1.z.custom(), }); exports.speechGenOnInterruptSchema = zod_1.z.object({ content: zod_1.z.string(), }); exports.speechGenInfoSchema = zod_1.z.object({ name: exports.speechProvidersName, iconUrl: zod_1.z.string(), providerLandingUrl: zod_1.z.string().optional(), }); exports.speechGenProvidersInfo = [ { name: "elevenlabs", iconUrl: "https://play-lh.googleusercontent.com/Im726gXJYWu6Lw0M6BcPNV1BdlO7QVB1wQfMtEnsn1-J4e80WI7AEivVXDrK1ypsm0o=w240-h480-rw", providerLandingUrl: "https://elevenlabs.io", }, { name: "deepgram", iconUrl: "https://avatars.githubusercontent.com/u/17422641?s=280&v=4", providerLandingUrl: "https://www.deepgram.com", }, { name: "cartesia", iconUrl: "https://framerusercontent.com/images/4ge6ACVfLVdGypI4msfYc6KL60.svg", providerLandingUrl: "https://cartesia.ai", }, { name: "playht", iconUrl: "https://playhtassets.play.ht/img/logos/playCube-DarkMode.svg", providerLandingUrl: "https://play.ht", }, { name: "rime-ai", iconUrl: "https://rime.ai/favicon.ico", providerLandingUrl: "https://rime.ai", }, { name: "azure", iconUrl: "https://swimburger.net/media/0zcpmk1b/azure.jpg", providerLandingUrl: "https://azure.microsoft.com/en-us/products/cognitive-services/speech-to-text", }, { name: "openai", iconUrl: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSobT6Nq7W-FJnK5lLapZlwySLwB0W4sKCYDg&s", providerLandingUrl: "https://openai.com", }, { name: "playai-groq", iconUrl: "https://groq.com/wp-content/uploads/2024/02/android-icon-192x192-1.png", providerLandingUrl: "https://groq.com/build-fast-with-text-to-speech/", }, { name: "google-cloud", iconUrl: "https://cdn.worldvectorlogo.com/logos/google-cloud-1.svg", providerLandingUrl: "https://cloud.google.com/text-to-speech", }, { name: "google-live", iconUrl: "https://cdn.worldvectorlogo.com/logos/google-cloud-1.svg", providerLandingUrl: "https://cloud.google.com/livestream", }, ]; exports.openAiModelIds = zod_1.z .enum([ "gpt-5-chat-latest", "gpt-5-2025-08-07", "gpt-5-mini-2025-08-07", "gpt-5-nano-2025-08-07", "ft:gpt-4o-mini-2024-07-18:personal:4o-with-tools-t11:A6mByttv", "gpt-4o-mini", "gpt-3.5-turbo-0125", "gpt-4-1106-preview", "gpt-4o", "gpt-4-32k", "gpt-4", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "gpt-4.5-preview-2025-02-27", "gpt-4.1-2025-04-14", "gpt-4.1-mini-2025-04-14", ]) .describe("OpenAI Model Ids"); exports.azureOpenAiModelIds = zod_1.z .enum(["azure-eu-gpt-4o", "azure-na-gpt-4o"]) .describe("Azure OpenAI Model Ids"); exports.OpenAIModels = [...exports.openAiModelIds.options]; exports.googleModelIds = zod_1.z .enum([ "gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.0-pro", "gemini-2.0-flash-exp", 
"gemini-2.0-flash-thinking-exp-1219", "gemini-2.5-pro-exp-03-25", "gemini-2.5-flash-preview-05-20", "gemini-2.5-flash", "models/gemini-2.5-pro-preview-03-25", "gemini-2.5-pro-preview-03-25", "gemini-2.5-pro", ]) .describe("Google Model Ids"); exports.GoogleGeminiModels = [ ...exports.googleModelIds.options, ]; exports.groqModelIds = zod_1.z .enum([ "llama-3.1-8b-instant", "llama-3.1-70b-versatile", "llama3-8b-8192", "llama3-70b-8192", "mixtral-8x7b-32768", "gemma-7b-it", "gemma2-9b-it", "llama-3.3-70b-versatile", "deepseek-r1-distill-llama-70b", "meta-llama/llama-4-scout-17b-16e-instruct", "meta-llama/llama-4-maverick-17b-128e-instruct", ]) .describe("Groq Model Ids"); exports.GroqModelsOpenSource = [ ...exports.groqModelIds.options, ]; exports.anthropicModelIds = zod_1.z .enum([ "claude-3-5-sonnet-20240620", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-5-haiku-20241022", "claude-3-5-sonnet-20240620", "claude-3-5-sonnet-20241022", "claude-3-7-sonnet-20250219", "claude-opus-4-20250514", "claude-sonnet-4-20250514", ]) .describe("Anthropic Model Ids"); exports.AnthropicModels = [ ...exports.anthropicModelIds.options, ]; exports.deepseekModelIds = zod_1.z .enum(["deepseek-chat"]) .describe("Deepseek Model Ids"); exports.DeepseekModels = [...exports.deepseekModelIds.options]; exports.AlibabaModels = [ ...exports.ALIBABA_SUPPORTED_LLM_ZOD.options, ]; exports.allModelIdsSchema = zod_1.z .union([ exports.openAiModelIds, exports.googleModelIds, exports.groqModelIds, exports.anthropicModelIds, exports.deepseekModelIds, exports.ALIBABA_SUPPORTED_LLM_ZOD, ]) .describe("All Model Ids"); exports.toolParamType = zod_1.z.enum([ "string", "number", "boolean", "object", "array", "null", ]); exports.toolSchema = zod_1.z.object({ id: zod_1.z.string(), name: zod_1.z.string(), description: zod_1.z.string(), serverUrl: zod_1.z.string().optional(), serverUrlSecret: zod_1.z.string().optional(), variablesIds: zod_1.z.array(zod_1.z.string()).optional(), // parameters: z.record( // z.object({ // type: toolParamType, // description: z.string(), // required: z.boolean().optional(), // }) // ), }); exports.appDefaultTools = [ { id: "search", name: "search", description: "Use to surf the web, fetch current information, check the weather, and retrieve other information.", // parameters: { // query: { // type: "string", // description: "The query to use in your search.", // required: true, // }, // }, }, ]; exports.LLMConfigSchema = zod_1.z.object({ modelId: exports.VG_SUPPORTED_LLM_ZOD, temperature: zod_1.z.number().describe(`The temperature of the LLM`), maxTokens: zod_1.z.number().describe(`The max tokens of the LLM`), customModelId: zod_1.z .string() .optional() .describe(`The custom model id of the LLM`), serverUrl: zod_1.z.string().optional().describe(`The server url of the LLM`), apiKey: zod_1.z.string().optional().describe(`The api key of the LLM`), }); const STSOpenaiModelIdsSchema = zod_1.z .enum([ "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-12-17", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-mini-realtime-preview", "gpt-4o-mini-realtime-preview-2024-12-17", ]) .describe("Openai Speech to Speech Model Ids"); const STSProviderNames = zod_1.z.enum(["openai"]); const realtimeAudioFormatSchema = zod_1.z.enum(["pcm16", "g711_ulaw", "g711_alaw"]); const realtimeAudioTranscriptionSchema = zod_1.z.object({ model: zod_1.z.enum(["whisper-1"]), }); const realtimeTurnDetectionServerVadSchema = zod_1.z.object({ type: zod_1.z.enum(["server_vad"]), threshold: zod_1.z.number(), 
prefix_padding_ms: zod_1.z.number(), silence_duration_ms: zod_1.z.number(), }); const realtimeToolDefinitionSchema = zod_1.z.object({ type: zod_1.z.literal("function").default("function"), name: zod_1.z.string().optional(), description: zod_1.z.string().optional(), parameters: zod_1.z.record(zod_1.z.string(), zod_1.z.any()).optional(), handler: zod_1.z.any(), }); const realtimeSessionResourceSchema = zod_1.z.object({ model: zod_1.z.string().optional(), modalities: zod_1.z.array(zod_1.z.string()).optional(), instructions: zod_1.z.string().optional(), voice: zod_1.z .enum([ "alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", ]) .optional() .default("alloy"), input_audio_format: realtimeAudioFormatSchema.optional(), output_audio_format: realtimeAudioFormatSchema.optional(), input_audio_transcription: realtimeAudioTranscriptionSchema .or(zod_1.z.null()) .default(null), turn_detection: realtimeTurnDetectionServerVadSchema .or(zod_1.z.null()) .default(null), tools: zod_1.z.array(realtimeToolDefinitionSchema).optional(), tool_choice: zod_1.z .enum(["auto", "none", "required"]) .or(zod_1.z.object({ type: zod_1.z.literal("function"), name: zod_1.z.string(), })) .optional() .default("auto"), temperature: zod_1.z.number(), max_response_output_tokens: zod_1.z.number().or(zod_1.z.literal("inf")), }); // Define options for the SpeechToSpeech service class. const STSProviderOptionsSchema = zod_1.z.object({ agentId: zod_1.z.string().optional().describe("The agent id of the STS."), modelId: STSOpenaiModelIdsSchema.describe("The model id of the STS.").optional(), provider: STSProviderNames.optional(), apiKey: zod_1.z.string().optional(), defaultFunctions: zod_1.z .object({ kbSearchFunction: zod_1.z.custom().optional(), kbSearchFunctionRaw: zod_1.z.any().optional(), }) .optional(), metadata: zod_1.z .object({ channel: zod_1.z.string().optional(), phone_number: zod_1.z.string().optional(), }) .optional(), speechConfig: zod_1.z .object({ format: zod_1.z.string().optional(), sampleRate: zod_1.z.number().optional(), }) .optional(), language: zod_1.z.string().optional(), internal: zod_1.z .object({ inputAudioStream: zod_1.z.any(), debug: zod_1.z.boolean().optional(), }) .optional(), sessionConfig: realtimeSessionResourceSchema.optional(), }); exports.rerouterConfigSchema = zod_1.z.object({ enabled: zod_1.z .boolean() .describe(`If true, the LLM will try to detect if the user has changed their mind about something, enabling the AI to rewind back to a previous node.`), level: zod_1.z .number() .describe(`The rewind depth. If 1, the LLM can rewind only one level: given a path of node A > B > C > D, if it is at node D and detects it needs to be at node B again, it can rewind to node B only when the level is at least 2; it cannot reach node A unless the level is 3 or more, and so on.`), }); exports.LLMRouterSchema = zod_1.z.object({ nodeId: zod_1.z.string(), condition: zod_1.z .string() .describe(`The condition that will be checked to determine whether the conversation should advance to the selected node id or simply keep looping in the current node.`), }); exports.SpecialNodeTypesEnums = zod_1.z.enum([ "start", "end", "default", "condition", "note", ]); exports.LLMNodeRequiredSchema = zod_1.z.object({ id: zod_1.z.string(), name: zod_1.z.string(), description: zod_1.z .string() .describe(`A short description of what this node does and when it should be used, especially useful if the node is global or when the LLM detects automatic rerouting.`),
instructions: zod_1.z.string().describe(`What this LLM node should do.`), }); exports.globalVariableCaptureSchema = zod_1.z.any(); exports.mcpServerSchema = zod_1.z.object({ name: zod_1.z.string().optional(), url: zod_1.z.string().optional(), transport: zod_1.z.enum(["sse", "websocket", "stdio"]).optional(), useNodeEventSource: zod_1.z.boolean().optional(), reconnect: zod_1.z .object({ enabled: zod_1.z.boolean().optional(), maxAttempts: zod_1.z.number().optional(), delayMs: zod_1.z.number().optional(), }) .optional(), command: zod_1.z.string().optional(), args: zod_1.z.array(zod_1.z.string()).optional(), }); const StartConfigSchema = zod_1.z.object({ initialMessage: zod_1.z .string() .optional() .describe("The initial message to start the conversation with"), userStarts: zod_1.z .boolean() .optional() .describe("Whether the user or the AI agent gives the first message"), }); exports.LLMNodeSchema = exports.LLMNodeRequiredSchema.extend({ mcpServers: zod_1.z.array(exports.mcpServerSchema).optional(), voiceInstructions: zod_1.z .string() .describe(`The voice instructions for this node; if provided, they will be used instead of the instructions in voice channels.`) .optional(), isGlobal: zod_1.z .boolean() .describe(`If true, this node will always be present for the LLM`) .optional(), toolsIds: zod_1.z .array(zod_1.z.string()) .describe(`The tools that will be available for this node`) .optional(), toolsSettings: zod_1.z .object({ googleCalendar: zod_1.z .object({ calendarId: zod_1.z.string().optional(), methods: zod_1.z .array(zod_1.z.enum([ "create-event", "check-availability", "get-slots", "delete-event", ])) .optional(), // create events, check availability, get slots }) .optional(), }) .optional(), childrenNodes: zod_1.z .array(exports.LLMRouterSchema) .describe(`The outputs of this node; after finishing, the node will choose from these to either advance or to keep looping in this node until it captures or reaches one of the mentioned routers.`) .optional(), llmConfig: exports.LLMConfigSchema, routerLlmConfig: exports.LLMConfigSchema.optional().describe(`The LLM config that will be used for the router node. NOTE: This must be a fast LLM model or the latency between responses will be too high.`), toolUseBias: zod_1.z .number() .describe(`The bias of the LLM to use tools: if 0 it will never use tools, if 1 it will only use tools.`) .optional(), autoRerouter: zod_1.z .object({ enabled: zod_1.z .boolean() .describe(`If true, the LLM will try to detect if the user has changed their mind about something, enabling the AI to rewind back to a previous node.`), level: zod_1.z .number() .describe(`The rewind depth. If 1, the LLM can rewind only one level: given a path of node A > B > C > D, if it is at node D and detects it needs to be at node B again, it can rewind to node B only when the level is at least 2; it cannot reach node A unless the level is 3 or more, and so on.`), }) .describe(`The auto rerouter of this node.`) .optional(), type: exports.SpecialNodeTypesEnums.optional().describe(`The type of the node; if not provided it will be a normal LLM node.`), rf: zod_1.z .any() .optional() .describe(`The position of the node in the reactflow canvas.`), kb: zod_1.z .object({ enabled: zod_1.z.boolean(), maxChunks: zod_1.z.number().describe(`The max chunks to use from the KB.`), tags: zod_1.z .array(zod_1.z.string()) .optional() .describe(`The tags to use to filter the docs we search through`), smartSearch: zod_1.z .boolean() .optional() .describe(`If
true, the LLM will use smart search to search through the docs.`), }) .optional(), conditionData: zod_1.z .object({ condition: zod_1.z.string().describe(`The condition to check.`), }) .optional(), preStart: zod_1.z .object({ url: zod_1.z .string() .optional() .describe(`The URL to send the GET request to.`), enabled: zod_1.z .boolean() .optional() .describe(`If true, we will send a GET request to the URL before starting the node.`), testConvoIdValue: zod_1.z .string() .optional() .describe(`The value to test the URL with.`), }) .optional() .describe(`The pre-start tool of the node: we will send a GET request to this URL before starting the node and provide the results to the AI. GET {url}/{user_id}/{node_name}`), startConfig: StartConfigSchema.optional().describe("Configuration for start nodes"), }).describe(`A node in the LLM chain.`); exports.llmProviderBaseSchema = zod_1.z.object({ nodes: zod_1.z.array(exports.LLMNodeSchema).optional(), variables: zod_1.z.array(exports.toolFieldSchema).optional(), tools: zod_1.z.array(exports.toolSchema).optional(), }); exports.smartEndpointConfigSchema = zod_1.z.object({ /** Whether smart endpointing is enabled */ enabled: zod_1.z.boolean().optional(), /** Base wait time in seconds before speaking */ waitSeconds: zod_1.z.number().optional(), /** Minimum seconds to wait after a transcription ending with punctuation */ onPunctuationSeconds: zod_1.z.number().optional(), /** Minimum seconds to wait after a transcription ending without punctuation */ onNoPunctuationSeconds: zod_1.z.number().optional(), /** Minimum seconds to wait after a transcription ending with a number */ onNumberSeconds: zod_1.z.number().optional(), }); exports.globalDefaultToolsEnums = zod_1.z.enum(["handoffHumanDashboardTool"]); exports.toolFieldSchemaOverride = zod_1.z.object({ type: zod_1.z.enum(["string", "number", "boolean"]).optional(), description: zod_1.z.string().optional(), }); exports.globalOptionsSchema = zod_1.z.object({ enableUIEngineForms: zod_1.z .boolean() .optional() .describe("If true, the LLM will be able to show forms on the web channel only."), geminiLiveOptions: zod_1.z.custom().optional(), appendBeforePrompt: zod_1.z .string() .optional() .describe("The prompt to prepend before the nodes' prompt."), routerLLmOptions: exports.LLMConfigSchema.optional(), defaultLLmOptions: exports.LLMConfigSchema.optional(), backchannelOnChoosingNodes: zod_1.z .boolean() .optional() .describe("If true, the LLM will say 'uh-huh' / 'I see' before choosing a node."), enableUiEngine: zod_1.z .boolean() .optional() .describe("If true, the LLM will be able to show buttons, cards, and images on text channels only."), fallbackModelIds: zod_1.z.array(exports.VG_SUPPORTED_LLM_ZOD).optional(), silenceDetection: zod_1.z .object({ enabled: zod_1.z .boolean() .default(false) .describe("Whether to enable the silence detection feature, which will insert filler phrases during long periods of silence") .optional(), timeoutSeconds: zod_1.z .number() .default(60) .describe("Number of seconds of silence before triggering a filler phrase") .optional(), endCallAfterNPhrases: zod_1.z .number() .default(1) .describe("Number of filler phrase utterances to say before ending the call; if 0, the call ends instantly after reaching the timeout.") .optional(), }) .optional(), startCallPhrases: zod_1.z.array(zod_1.z.string()).optional(), fillerWordsOnToolUsage: zod_1.z .boolean() .optional() .describe("If true, the LLM will say filler words on tool usage, like '1 moment', 'be right back', 'you know', etc."), smartEndpointing:
exports.smartEndpointConfigSchema.optional(), stopSpeakPlan: zod_1.z .object({ minWords: zod_1.z .number() .optional() .describe("The minimum number of words the user must speak for the AI to be interrupted and stop talking."), }) .optional(), callTimeoutSeconds: zod_1.z .number() .optional() .describe("The timeout for the call in seconds."), enableEndcallTool: zod_1.z .boolean() .optional() .describe("Whether to enable the endcall tool."), voiceSpecific: zod_1.z .object({ minCharacters: zod_1.z .number() .optional() .describe("The minimum number of characters required before the speech gen starts generating audio; the higher this is, the higher the latency. Default is 5."), maxLengthWithoutPunctuation: zod_1.z .number() .optional() .describe("The maximum length of a string without punctuation before the speech gen starts generating audio; the higher this is, the higher the latency. Default is 100."), }) .optional(), enabledGlobalTools: zod_1.z .array(exports.globalDefaultToolsEnums) .optional() .describe("The global tools that will be enabled for the LLM."), defaultToolOverrides: zod_1.z .record(zod_1.z.object({ description: zod_1.z.string().nullable().optional(), fields: zod_1.z.record(exports.toolFieldSchemaOverride).nullable().optional(), })) .optional() .describe("Overrides for default system tools configuration."), }); exports.llmServiceOptionsMetadataSchema = zod_1.z.object({ channel: zod_1.z.string().optional(), phone_number: zod_1.z.string().optional(), agentId: zod_1.z.string().optional(), convoId: zod_1.z.string().optional(), sessionId: zod_1.z.string().optional(), workspaceId: zod_1.z.string().optional(), region: zod_1.z.enum(["eu", "na"]).optional(), enableVertex: zod_1.z.boolean().optional(), speechProvider: zod_1.z.any(), lightConvoData: zod_1.z.any(), speechModel: zod_1.z.string().optional(), // Model ID for the speech provider (e.g., "sonic-3" for Cartesia) }); // export type AppLLMServiceOptions = z.infer<typeof llmServiceOptionsSchema>; exports.llmProviderOptionsSchema = zod_1.z.object({ channel: zod_1.z.enum(["text", "voice"]).optional(), apiKeys: zod_1.z .object({ agentSecret: zod_1.z.string().optional(), openai: zod_1.z.string().optional(), anthropic