UNPKG

@langchain/openai

src/chat_models/index.d.ts
{"version":3,"file":"index.d.cts","names":["openai_resources0","__responses_js0","CallbackManagerForLLMRun","AIMessageChunk","BaseMessage","ChatGenerationChunk","ChatResult","BaseLanguageModelInput","Runnable","OpenAICallOptions","OpenAIChatInput","ChatOpenAICompletions","ChatOpenAICompletionsCallOptions","ChatOpenAIResponses","ChatOpenAIResponsesCallOptions","BaseChatOpenAI","BaseChatOpenAIFields","ChatOpenAICallOptions","ChatOpenAIFields","ChatOpenAI","CallOptions","_langchain_core_language_models_chat_models0","LangSmithParams","ChatModel","ChatCompletionAudioParam","ChatCompletionFunctionCallOption","ChatCompletionCreateParams","Function","Metadata","ChatCompletionPredictionContent","ReasoningEffort","ResponseFormatJSONObject","ResponseFormatJSONSchema","ResponseFormatText","ChatCompletionStreamOptions","ChatCompletionToolChoiceOption","ChatCompletionTool","WebSearchOptions","ChatResponsesInvocationParams","Promise","AsyncGenerator","Partial"],"sources":["../../src/chat_models/index.d.ts"],"sourcesContent":["import { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { AIMessageChunk, type BaseMessage } from \"@langchain/core/messages\";\nimport { ChatGenerationChunk, type ChatResult } from \"@langchain/core/outputs\";\nimport { type BaseLanguageModelInput } from \"@langchain/core/language_models/base\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { type OpenAICallOptions, type OpenAIChatInput } from \"../types.js\";\nimport { ChatOpenAICompletions, ChatOpenAICompletionsCallOptions } from \"./completions.js\";\nimport { ChatOpenAIResponses, ChatOpenAIResponsesCallOptions } from \"./responses.js\";\nimport { BaseChatOpenAI, BaseChatOpenAIFields } from \"./base.js\";\nexport type { OpenAICallOptions, OpenAIChatInput };\nexport type ChatOpenAICallOptions = ChatOpenAICompletionsCallOptions & ChatOpenAIResponsesCallOptions;\nexport interface ChatOpenAIFields extends BaseChatOpenAIFields {\n /**\n * Whether to use the responses API for all requests. If `false` the responses API will be used\n * only when required in order to fulfill the request.\n */\n useResponsesApi?: boolean;\n /**\n * The completions chat instance\n * @internal\n */\n completions?: ChatOpenAICompletions;\n /**\n * The responses chat instance\n * @internal\n */\n responses?: ChatOpenAIResponses;\n}\n/**\n * OpenAI chat model integration.\n *\n * To use with Azure, import the `AzureChatOpenAI` class.\n *\n * Setup:\n * Install `@langchain/openai` and set an environment variable named `OPENAI_API_KEY`.\n *\n * ```bash\n * npm install @langchain/openai\n * export OPENAI_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_openai.ChatOpenAICallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
/**
 * OpenAI chat model integration.
 *
 * To use with Azure, import the `AzureChatOpenAI` class.
 *
 * Setup:
 * Install `@langchain/openai` and set an environment variable named `OPENAI_API_KEY`.
 *
 * ```bash
 * npm install @langchain/openai
 * export OPENAI_API_KEY="your-api-key"
 * ```
 *
 * ## [Constructor args](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html#constructor)
 *
 * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_openai.ChatOpenAICallOptions.html)
 *
 * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
 * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, as shown in the examples below:
 *
 * ```typescript
 * // When calling `.withConfig`, call options should be passed via the first argument
 * const llmWithArgsBound = llm.withConfig({
 *   stop: ["\n"],
 *   tools: [...],
 * });
 *
 * // When calling `.bindTools`, call options should be passed via the second argument
 * const llmWithTools = llm.bindTools(
 *   [...],
 *   {
 *     tool_choice: "auto",
 *   }
 * );
 * ```
 *
 * ## Examples
 *
 * <details open>
 * <summary><strong>Instantiate</strong></summary>
 *
 * ```typescript
 * import { ChatOpenAI } from '@langchain/openai';
 *
 * const llm = new ChatOpenAI({
 *   model: "gpt-4o-mini",
 *   temperature: 0,
 *   maxTokens: undefined,
 *   timeout: undefined,
 *   maxRetries: 2,
 *   // apiKey: "...",
 *   // configuration: {
 *   //   baseURL: "...",
 *   // }
 *   // organization: "...",
 *   // other params...
 * });
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Invoking</strong></summary>
 *
 * ```typescript
 * const input = `Translate "I love programming" into French.`;
 *
 * // Models also accept a list of chat messages or a formatted prompt
 * const result = await llm.invoke(input);
 * console.log(result);
 * ```
 *
 * ```txt
 * AIMessage {
 *   "id": "chatcmpl-9u4Mpu44CbPjwYFkTbeoZgvzB00Tz",
 *   "content": "J'adore la programmation.",
 *   "response_metadata": {
 *     "tokenUsage": {
 *       "completionTokens": 5,
 *       "promptTokens": 28,
 *       "totalTokens": 33
 *     },
 *     "finish_reason": "stop",
 *     "system_fingerprint": "fp_3aa7262c27"
 *   },
 *   "usage_metadata": {
 *     "input_tokens": 28,
 *     "output_tokens": 5,
 *     "total_tokens": 33
 *   }
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Streaming Chunks</strong></summary>
 *
 * ```typescript
 * for await (const chunk of await llm.stream(input)) {
 *   console.log(chunk);
 * }
 * ```
 *
 * ```txt
 * AIMessageChunk {
 *   "id": "chatcmpl-9u4NWB7yUeHCKdLr6jP3HpaOYHTqs",
 *   "content": ""
 * }
 * AIMessageChunk {
 *   "content": "J"
 * }
 * AIMessageChunk {
 *   "content": "'adore"
 * }
 * AIMessageChunk {
 *   "content": " la"
 * }
 * AIMessageChunk {
 *   "content": " programmation"
 * }
 * AIMessageChunk {
 *   "content": "."
 * }
 * AIMessageChunk {
 *   "content": "",
 *   "response_metadata": {
 *     "finish_reason": "stop",
 *     "system_fingerprint": "fp_c9aa9c0491"
 *   }
 * }
 * AIMessageChunk {
 *   "content": "",
 *   "usage_metadata": {
 *     "input_tokens": 28,
 *     "output_tokens": 5,
 *     "total_tokens": 33
 *   }
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Aggregate Streamed Chunks</strong></summary>
 *
 * ```typescript
 * import { AIMessageChunk } from '@langchain/core/messages';
 * import { concat } from '@langchain/core/utils/stream';
 *
 * const stream = await llm.stream(input);
 * let full: AIMessageChunk | undefined;
 * for await (const chunk of stream) {
 *   full = !full ? chunk : concat(full, chunk);
 * }
 * console.log(full);
 * ```
 *
 * ```txt
 * AIMessageChunk {
 *   "id": "chatcmpl-9u4PnX6Fy7OmK46DASy0bH6cxn5Xu",
 *   "content": "J'adore la programmation.",
 *   "response_metadata": {
 *     "prompt": 0,
 *     "completion": 0,
 *     "finish_reason": "stop"
 *   },
 *   "usage_metadata": {
 *     "input_tokens": 28,
 *     "output_tokens": 5,
 *     "total_tokens": 33
 *   }
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Bind tools</strong></summary>
 *
 * ```typescript
 * import { z } from 'zod';
 *
 * const GetWeather = {
 *   name: "GetWeather",
 *   description: "Get the current weather in a given location",
 *   schema: z.object({
 *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
 *   }),
 * }
 *
 * const GetPopulation = {
 *   name: "GetPopulation",
 *   description: "Get the current population in a given location",
 *   schema: z.object({
 *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
 *   }),
 * }
 *
 * const llmWithTools = llm.bindTools(
 *   [GetWeather, GetPopulation],
 *   {
 *     // strict: true // enforce that tool args respect the schema
 *   }
 * );
 * const aiMsg = await llmWithTools.invoke(
 *   "Which city is hotter today and which is bigger: LA or NY?"
 * );
 * console.log(aiMsg.tool_calls);
 * ```
 *
 * ```txt
 * [
 *   {
 *     name: 'GetWeather',
 *     args: { location: 'Los Angeles, CA' },
 *     type: 'tool_call',
 *     id: 'call_uPU4FiFzoKAtMxfmPnfQL6UK'
 *   },
 *   {
 *     name: 'GetWeather',
 *     args: { location: 'New York, NY' },
 *     type: 'tool_call',
 *     id: 'call_UNkEwuQsHrGYqgDQuH9nPAtX'
 *   },
 *   {
 *     name: 'GetPopulation',
 *     args: { location: 'Los Angeles, CA' },
 *     type: 'tool_call',
 *     id: 'call_kL3OXxaq9OjIKqRTpvjaCH14'
 *   },
 *   {
 *     name: 'GetPopulation',
 *     args: { location: 'New York, NY' },
 *     type: 'tool_call',
 *     id: 'call_s9KQB1UWj45LLGaEnjz0179q'
 *   }
 * ]
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Structured Output</strong></summary>
 *
 * ```typescript
 * import { z } from 'zod';
 *
 * const Joke = z.object({
 *   setup: z.string().describe("The setup of the joke"),
 *   punchline: z.string().describe("The punchline to the joke"),
 *   rating: z.number().nullable().describe("How funny the joke is, from 1 to 10")
 * }).describe('Joke to tell user.');
 *
 * const structuredLlm = llm.withStructuredOutput(Joke, {
 *   name: "Joke",
 *   strict: true, // Optionally enable OpenAI structured outputs
 * });
 * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
 * console.log(jokeResult);
 * ```
 *
 * ```txt
 * {
 *   setup: 'Why was the cat sitting on the computer?',
 *   punchline: 'Because it wanted to keep an eye on the mouse!',
 *   rating: 7
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>JSON Object Response Format</strong></summary>
 *
 * ```typescript
 * const jsonLlm = llm.withConfig({ response_format: { type: "json_object" } });
 * const jsonLlmAiMsg = await jsonLlm.invoke(
 *   "Return a JSON object with key 'randomInts' and a value of 10 random ints in [0-99]"
 * );
 * console.log(jsonLlmAiMsg.content);
 * ```
 *
 * ```txt
 * {
 *   "randomInts": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Multimodal</strong></summary>
 *
 * ```typescript
 * import { HumanMessage } from '@langchain/core/messages';
 *
 * const imageUrl = "https://example.com/image.jpg";
 * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());
 * const base64Image = Buffer.from(imageData).toString('base64');
 *
 * const message = new HumanMessage({
 *   content: [
 *     { type: "text", text: "describe the weather in this image" },
 *     {
 *       type: "image_url",
 *       image_url: { url: `data:image/jpeg;base64,${base64Image}` },
 *     },
 *   ]
 * });
 *
 * const imageDescriptionAiMsg = await llm.invoke([message]);
 * console.log(imageDescriptionAiMsg.content);
 * ```
 *
 * ```txt
 * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions.
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Usage Metadata</strong></summary>
 *
 * ```typescript
 * const aiMsgForMetadata = await llm.invoke(input);
 * console.log(aiMsgForMetadata.usage_metadata);
 * ```
 *
 * ```txt
 * { input_tokens: 28, output_tokens: 5, total_tokens: 33 }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Logprobs</strong></summary>
 *
 * ```typescript
 * const logprobsLlm = new ChatOpenAI({ model: "gpt-4o-mini", logprobs: true });
 * const aiMsgForLogprobs = await logprobsLlm.invoke(input);
 * console.log(aiMsgForLogprobs.response_metadata.logprobs);
 * ```
 *
 * ```txt
 * {
 *   content: [
 *     { token: 'J', logprob: -0.000050616763, bytes: [Array], top_logprobs: [] },
 *     { token: "'", logprob: -0.01868736, bytes: [Array], top_logprobs: [] },
 *     { token: 'ad', logprob: -0.0000030545007, bytes: [Array], top_logprobs: [] },
 *     { token: 'ore', logprob: 0, bytes: [Array], top_logprobs: [] },
 *     { token: ' la', logprob: -0.515404, bytes: [Array], top_logprobs: [] },
 *     { token: ' programm', logprob: -0.0000118755715, bytes: [Array], top_logprobs: [] },
 *     { token: 'ation', logprob: 0, bytes: [Array], top_logprobs: [] },
 *     { token: '.', logprob: -0.0000037697225, bytes: [Array], top_logprobs: [] }
 *   ],
 *   refusal: null
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Response Metadata</strong></summary>
 *
 * ```typescript
 * const aiMsgForResponseMetadata = await llm.invoke(input);
 * console.log(aiMsgForResponseMetadata.response_metadata);
 * ```
 *
 * ```txt
 * {
 *   tokenUsage: { completionTokens: 5, promptTokens: 28, totalTokens: 33 },
 *   finish_reason: 'stop',
 *   system_fingerprint: 'fp_3aa7262c27'
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>JSON Schema Structured Output</strong></summary>
 *
 * ```typescript
 * const llmForJsonSchema = new ChatOpenAI({
 *   model: "gpt-4o-2024-08-06",
 * }).withStructuredOutput(
 *   z.object({
 *     command: z.string().describe("The command to execute"),
 *     expectedOutput: z.string().describe("The expected output of the command"),
 *     options: z
 *       .array(z.string())
 *       .describe("The options you can pass to the command"),
 *   }),
 *   {
 *     method: "jsonSchema",
 *     strict: true, // Optional when using the `jsonSchema` method
 *   }
 * );
 *
 * const jsonSchemaRes = await llmForJsonSchema.invoke(
 *   "What is the command to list files in a directory?"
 * );
 * console.log(jsonSchemaRes);
 * ```
 *
 * ```txt
 * {
 *   command: 'ls',
 *   expectedOutput: 'A list of files and subdirectories within the specified directory.',
 *   options: [
 *     '-a: include directory entries whose names begin with a dot (.).',
 *     '-l: use a long listing format.',
 *     '-h: with -l, print sizes in human readable format (e.g., 1K, 234M, 2G).',
 *     '-t: sort by time, newest first.',
 *     '-r: reverse order while sorting.',
 *     '-S: sort by file size, largest first.',
 *     '-R: list subdirectories recursively.'
 *   ]
 * }
 * ```
 * </details>
 *
 * <br />
 *
 * <details>
 * <summary><strong>Audio Outputs</strong></summary>
 *
 * ```typescript
 * import { ChatOpenAI } from "@langchain/openai";
 *
 * const modelWithAudioOutput = new ChatOpenAI({
 *   model: "gpt-4o-audio-preview",
 *   // You may also pass these fields to `.withConfig` as a call argument.
 *   modalities: ["text", "audio"], // Specifies that the model should output audio.
 *   audio: {
 *     voice: "alloy",
 *     format: "wav",
 *   },
 * });
 *
 * const audioOutputResult = await modelWithAudioOutput.invoke("Tell me a joke about cats.");
 * const castAudioContent = audioOutputResult.additional_kwargs.audio as Record<string, any>;
 *
 * console.log({
 *   ...castAudioContent,
 *   data: castAudioContent.data.slice(0, 100) // Sliced for brevity
 * })
 * ```
 *
 * ```txt
 * {
 *   id: 'audio_67117718c6008190a3afad3e3054b9b6',
 *   data: 'UklGRqYwBgBXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAATElTVBoAAABJTkZPSVNGVA4AAABMYXZmNTguMjkuMTAwAGRhdGFg',
 *   expires_at: 1729201448,
 *   transcript: 'Sure! Why did the cat sit on the computer? Because it wanted to keep an eye on the mouse!'
 * }
 * ```
 * </details>
 *
 * <br />
 */
export declare class ChatOpenAI<CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions> extends BaseChatOpenAI<CallOptions> {
  protected fields?: ChatOpenAIFields | undefined;
  /**
   * Whether to use the responses API for all requests. If `false` the responses API will be used
   * only when required in order to fulfill the request.
   */
  useResponsesApi: boolean;
  protected responses: ChatOpenAIResponses;
  protected completions: ChatOpenAICompletions;
  get lc_serializable_keys(): string[];
  get callKeys(): string[];
  constructor(fields?: ChatOpenAIFields | undefined);
  protected _useResponsesApi(options: this["ParsedCallOptions"] | undefined): boolean;
  getLsParams(options: this["ParsedCallOptions"]): import("@langchain/core/language_models/chat_models").LangSmithParams;
  invocationParams(options?: this["ParsedCallOptions"]): {
    model: import("openai/resources").ChatModel | (string & {});
    audio?: import("openai/resources").ChatCompletionAudioParam | null | undefined;
    frequency_penalty?: number | null | undefined;
    function_call?: "auto" | "none" | import("openai/resources").ChatCompletionFunctionCallOption | undefined;
    functions?: import("openai/resources").ChatCompletionCreateParams.Function[] | undefined;
    logit_bias?: {
      [key: string]: number;
    } | null | undefined;
    logprobs?: boolean | null | undefined;
    max_completion_tokens?: number | null | undefined;
    max_tokens?: number | null | undefined;
    metadata?: import("openai/resources").Metadata | null | undefined;
    modalities?: ("audio" | "text")[] | null | undefined;
    n?: number | null | undefined;
    parallel_tool_calls?: boolean | undefined;
    prediction?: import("openai/resources").ChatCompletionPredictionContent | null | undefined;
    presence_penalty?: number | null | undefined;
    prompt_cache_key?: string | undefined;
    prompt_cache_retention?: "24h" | "in-memory" | null | undefined;
    reasoning_effort?: import("openai/resources").ReasoningEffort | undefined;
    response_format?: import("openai/resources").ResponseFormatJSONObject | import("openai/resources").ResponseFormatJSONSchema | import("openai/resources").ResponseFormatText | undefined;
    safety_identifier?: string | undefined;
    seed?: number | null | undefined;
    service_tier?: "auto" | "default" | "flex" | "priority" | "scale" | null | undefined;
    stop?: string | string[] | null | undefined;
    store?: boolean | null | undefined;
    stream_options?: import("openai/resources").ChatCompletionStreamOptions | null | undefined;
    temperature?: number | null | undefined;
    tool_choice?: import("openai/resources").ChatCompletionToolChoiceOption | undefined;
    tools?: import("openai/resources").ChatCompletionTool[] | undefined;
    top_logprobs?: number | null | undefined;
    top_p?: number | null | undefined;
    user?: string | undefined;
    verbosity?: "high" | "low" | "medium" | null | undefined;
    web_search_options?: import("openai/resources").ChatCompletionCreateParams.WebSearchOptions | undefined;
    stream?: boolean | null | undefined;
  } | import("./responses.js").ChatResponsesInvocationParams;
  /** @ignore */
  _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
  _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
  withConfig(config: Partial<CallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions>;
}
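/*
 * A small introspection sketch for `invocationParams` above (the model and
 * options here are assumed for illustration): the method builds the
 * wire-level request payload — the Chat Completions shape by default, or
 * `ChatResponsesInvocationParams` when the call is routed to the Responses
 * API — without making a network call.
 *
 * ```typescript
 * import { ChatOpenAI } from "@langchain/openai";
 *
 * const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });
 * // Logs the request parameters (model, temperature, tools, ...) that an
 * // `.invoke` with these settings would send.
 * console.log(llm.invocationParams());
 * ```
 */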
//# sourceMappingURL=index.d.ts.map