UNPKG

@langchain/openai

Version:
1 lines 22.3 kB
{"version":3,"file":"index.cjs","names":["BaseChatOpenAI","fields?: ChatOpenAIFields","ChatOpenAIResponses","ChatOpenAICompletions","options: this[\"ParsedCallOptions\"] | undefined","isBuiltInTool","isOpenAICustomTool","isCustomTool","_modelPrefersResponsesAPI","options: this[\"ParsedCallOptions\"]","options?: this[\"ParsedCallOptions\"]","messages: BaseMessage[]","runManager?: CallbackManagerForLLMRun","config: Partial<CallOptions>"],"sources":["../../src/chat_models/index.ts"],"sourcesContent":["import { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { AIMessageChunk, type BaseMessage } from \"@langchain/core/messages\";\nimport { ChatGenerationChunk, type ChatResult } from \"@langchain/core/outputs\";\nimport { type BaseLanguageModelInput } from \"@langchain/core/language_models/base\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { type OpenAICallOptions, type OpenAIChatInput } from \"../types.js\";\nimport {\n _convertToOpenAITool,\n isBuiltInTool,\n isCustomTool,\n isOpenAICustomTool,\n} from \"../utils/tools.js\";\nimport { _modelPrefersResponsesAPI } from \"../utils/misc.js\";\nimport { _convertOpenAIResponsesUsageToLangChainUsage } from \"../utils/output.js\";\nimport {\n ChatOpenAICompletions,\n ChatOpenAICompletionsCallOptions,\n} from \"./completions.js\";\nimport {\n ChatOpenAIResponses,\n ChatOpenAIResponsesCallOptions,\n} from \"./responses.js\";\nimport { BaseChatOpenAI, BaseChatOpenAIFields } from \"./base.js\";\n\nexport type { OpenAICallOptions, OpenAIChatInput };\n\nexport type ChatOpenAICallOptions = ChatOpenAICompletionsCallOptions &\n ChatOpenAIResponsesCallOptions;\n\nexport interface ChatOpenAIFields extends BaseChatOpenAIFields {\n /**\n * Whether to use the responses API for all requests. If `false` the responses API will be used\n * only when required in order to fulfill the request.\n */\n useResponsesApi?: boolean;\n /**\n * The completions chat instance\n * @internal\n */\n completions?: ChatOpenAICompletions;\n /**\n * The responses chat instance\n * @internal\n */\n responses?: ChatOpenAIResponses;\n}\n\n/**\n * OpenAI chat model integration.\n *\n * To use with Azure, import the `AzureChatOpenAI` class.\n *\n * Setup:\n * Install `@langchain/openai` and set an environment variable named `OPENAI_API_KEY`.\n *\n * ```bash\n * npm install @langchain/openai\n * export OPENAI_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_openai.ChatOpenAICallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatOpenAI } from '@langchain/openai';\n *\n * const llm = new ChatOpenAI({\n * model: \"gpt-4o-mini\",\n * temperature: 0,\n * maxTokens: undefined,\n * timeout: undefined,\n * maxRetries: 2,\n * // apiKey: \"...\",\n * // configuration: {\n * // baseURL: \"...\",\n * // }\n * // organization: \"...\",\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"id\": \"chatcmpl-9u4Mpu44CbPjwYFkTbeoZgvzB00Tz\",\n * \"content\": \"J'adore la programmation.\",\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 5,\n * \"promptTokens\": 28,\n * \"totalTokens\": 33\n * },\n * \"finish_reason\": \"stop\",\n * \"system_fingerprint\": \"fp_3aa7262c27\"\n * },\n * \"usage_metadata\": {\n * \"input_tokens\": 28,\n * \"output_tokens\": 5,\n * \"total_tokens\": 33\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"id\": \"chatcmpl-9u4NWB7yUeHCKdLr6jP3HpaOYHTqs\",\n * \"content\": \"\"\n * }\n * AIMessageChunk {\n * \"content\": \"J\"\n * }\n * AIMessageChunk {\n * \"content\": \"'adore\"\n * }\n * AIMessageChunk {\n * \"content\": \" la\"\n * }\n * AIMessageChunk {\n * \"content\": \" programmation\",,\n * }\n * AIMessageChunk {\n * \"content\": \".\",,\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"response_metadata\": {\n * \"finish_reason\": \"stop\",\n * \"system_fingerprint\": \"fp_c9aa9c0491\"\n * },\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"usage_metadata\": {\n * \"input_tokens\": 28,\n * \"output_tokens\": 5,\n * \"total_tokens\": 33\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"id\": \"chatcmpl-9u4PnX6Fy7OmK46DASy0bH6cxn5Xu\",\n * \"content\": \"J'adore la programmation.\",\n * \"response_metadata\": {\n * \"prompt\": 0,\n * \"completion\": 0,\n * \"finish_reason\": \"stop\",\n * },\n * \"usage_metadata\": {\n * \"input_tokens\": 28,\n * \"output_tokens\": 5,\n * \"total_tokens\": 33\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llm.bindTools(\n * [GetWeather, GetPopulation],\n * {\n * // strict: true // enforce tool args schema is respected\n * }\n * );\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_uPU4FiFzoKAtMxfmPnfQL6UK'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_UNkEwuQsHrGYqgDQuH9nPAtX'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_kL3OXxaq9OjIKqRTpvjaCH14'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_s9KQB1UWj45LLGaEnjz0179q'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().nullable().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llm.withStructuredOutput(Joke, {\n * name: \"Joke\",\n * strict: true, // Optionally enable OpenAI structured outputs\n * });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: 'Why was the cat sitting on the computer?',\n * punchline: 'Because it wanted to keep an eye on the mouse!',\n * rating: 7\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>JSON Object Response Format</strong></summary>\n *\n * ```typescript\n * const jsonLlm = llm.withConfig({ response_format: { type: \"json_object\" } });\n * const jsonLlmAiMsg = await jsonLlm.invoke(\n * \"Return a JSON object with key 'randomInts' and a value of 10 random ints in [0-99]\"\n * );\n * console.log(jsonLlmAiMsg.content);\n * ```\n *\n * ```txt\n * {\n * \"randomInts\": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Multimodal</strong></summary>\n *\n * ```typescript\n * import { HumanMessage } from '@langchain/core/messages';\n *\n * const imageUrl = \"https://example.com/image.jpg\";\n * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());\n * const base64Image = Buffer.from(imageData).toString('base64');\n *\n * const message = new HumanMessage({\n * content: [\n * { type: \"text\", text: \"describe the weather in this image\" },\n * {\n * type: \"image_url\",\n * image_url: { url: `data:image/jpeg;base64,${base64Image}` },\n * },\n * ]\n * });\n *\n * const imageDescriptionAiMsg = await llm.invoke([message]);\n * console.log(imageDescriptionAiMsg.content);\n * ```\n *\n * ```txt\n * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions.\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Usage Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForMetadata = await llm.invoke(input);\n * console.log(aiMsgForMetadata.usage_metadata);\n * ```\n *\n * ```txt\n * { input_tokens: 28, output_tokens: 5, total_tokens: 33 }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Logprobs</strong></summary>\n *\n * ```typescript\n * const logprobsLlm = new ChatOpenAI({ model: \"gpt-4o-mini\", logprobs: true });\n * const aiMsgForLogprobs = await logprobsLlm.invoke(input);\n * console.log(aiMsgForLogprobs.response_metadata.logprobs);\n * ```\n *\n * ```txt\n * {\n * content: [\n * {\n * token: 'J',\n * logprob: -0.000050616763,\n * bytes: [Array],\n * top_logprobs: []\n * },\n * {\n * token: \"'\",\n * logprob: -0.01868736,\n * bytes: [Array],\n * top_logprobs: []\n * },\n * {\n * token: 'ad',\n * logprob: -0.0000030545007,\n * bytes: [Array],\n * top_logprobs: []\n * },\n * { token: 'ore', logprob: 0, bytes: [Array], top_logprobs: [] },\n * {\n * token: ' la',\n * logprob: -0.515404,\n * bytes: [Array],\n * top_logprobs: []\n * },\n * {\n * token: ' programm',\n * logprob: -0.0000118755715,\n * bytes: [Array],\n * top_logprobs: []\n * },\n * { token: 'ation', logprob: 0, bytes: [Array], top_logprobs: [] },\n * {\n * token: '.',\n * logprob: -0.0000037697225,\n * bytes: [Array],\n * top_logprobs: []\n * }\n * ],\n * refusal: null\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Response Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForResponseMetadata = await llm.invoke(input);\n * console.log(aiMsgForResponseMetadata.response_metadata);\n * ```\n *\n * ```txt\n * {\n * tokenUsage: { completionTokens: 5, promptTokens: 28, totalTokens: 33 },\n * finish_reason: 'stop',\n * system_fingerprint: 'fp_3aa7262c27'\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>JSON Schema Structured Output</strong></summary>\n *\n * ```typescript\n * const llmForJsonSchema = new ChatOpenAI({\n * model: \"gpt-4o-2024-08-06\",\n * }).withStructuredOutput(\n * z.object({\n * command: z.string().describe(\"The command to execute\"),\n * expectedOutput: z.string().describe(\"The expected output of the command\"),\n * options: z\n * .array(z.string())\n * .describe(\"The options you can pass to the command\"),\n * }),\n * {\n * method: \"jsonSchema\",\n * strict: true, // Optional when using the `jsonSchema` method\n * }\n * );\n *\n * const jsonSchemaRes = await llmForJsonSchema.invoke(\n * \"What is the command to list files in a directory?\"\n * );\n * console.log(jsonSchemaRes);\n * ```\n *\n * ```txt\n * {\n * command: 'ls',\n * expectedOutput: 'A list of files and subdirectories within the specified directory.',\n * options: [\n * '-a: include directory entries whose names begin with a dot (.).',\n * '-l: use a long listing format.',\n * '-h: with -l, print sizes in human readable format (e.g., 1K, 234M, 2G).',\n * '-t: sort by time, newest first.',\n * '-r: reverse order while sorting.',\n * '-S: sort by file size, largest first.',\n * '-R: list subdirectories recursively.'\n * ]\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Audio Outputs</strong></summary>\n *\n * ```typescript\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * const modelWithAudioOutput = new ChatOpenAI({\n * model: \"gpt-4o-audio-preview\",\n * // You may also pass these fields to `.withConfig` as a call argument.\n * modalities: [\"text\", \"audio\"], // Specifies that the model should output audio.\n * audio: {\n * voice: \"alloy\",\n * format: \"wav\",\n * },\n * });\n *\n * const audioOutputResult = await modelWithAudioOutput.invoke(\"Tell me a joke about cats.\");\n * const castMessageContent = audioOutputResult.content[0] as Record<string, any>;\n *\n * console.log({\n * ...castMessageContent,\n * data: castMessageContent.data.slice(0, 100) // Sliced for brevity\n * })\n * ```\n *\n * ```txt\n * {\n * id: 'audio_67117718c6008190a3afad3e3054b9b6',\n * data: 'UklGRqYwBgBXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAATElTVBoAAABJTkZPSVNGVA4AAABMYXZmNTguMjkuMTAwAGRhdGFg',\n * expires_at: 1729201448,\n * transcript: 'Sure! Why did the cat sit on the computer? Because it wanted to keep an eye on the mouse!'\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Audio Outputs</strong></summary>\n *\n * ```typescript\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * const modelWithAudioOutput = new ChatOpenAI({\n * model: \"gpt-4o-audio-preview\",\n * // You may also pass these fields to `.withConfig` as a call argument.\n * modalities: [\"text\", \"audio\"], // Specifies that the model should output audio.\n * audio: {\n * voice: \"alloy\",\n * format: \"wav\",\n * },\n * });\n *\n * const audioOutputResult = await modelWithAudioOutput.invoke(\"Tell me a joke about cats.\");\n * const castAudioContent = audioOutputResult.additional_kwargs.audio as Record<string, any>;\n *\n * console.log({\n * ...castAudioContent,\n * data: castAudioContent.data.slice(0, 100) // Sliced for brevity\n * })\n * ```\n *\n * ```txt\n * {\n * id: 'audio_67117718c6008190a3afad3e3054b9b6',\n * data: 'UklGRqYwBgBXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAATElTVBoAAABJTkZPSVNGVA4AAABMYXZmNTguMjkuMTAwAGRhdGFg',\n * expires_at: 1729201448,\n * transcript: 'Sure! Why did the cat sit on the computer? Because it wanted to keep an eye on the mouse!'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport class ChatOpenAI<\n CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions\n> extends BaseChatOpenAI<CallOptions> {\n /**\n * Whether to use the responses API for all requests. If `false` the responses API will be used\n * only when required in order to fulfill the request.\n */\n useResponsesApi = false;\n\n protected responses: ChatOpenAIResponses;\n\n protected completions: ChatOpenAICompletions;\n\n get lc_serializable_keys(): string[] {\n return [...super.lc_serializable_keys, \"useResponsesApi\"];\n }\n\n get callKeys(): string[] {\n return [...super.callKeys, \"useResponsesApi\"];\n }\n\n constructor(protected fields?: ChatOpenAIFields) {\n super(fields);\n this.useResponsesApi = fields?.useResponsesApi ?? false;\n this.responses = fields?.responses ?? new ChatOpenAIResponses(fields);\n this.completions = fields?.completions ?? new ChatOpenAICompletions(fields);\n }\n\n protected _useResponsesApi(options: this[\"ParsedCallOptions\"] | undefined) {\n const usesBuiltInTools = options?.tools?.some(isBuiltInTool);\n const hasResponsesOnlyKwargs =\n options?.previous_response_id != null ||\n options?.text != null ||\n options?.truncation != null ||\n options?.include != null ||\n options?.reasoning?.summary != null ||\n this.reasoning?.summary != null;\n const hasCustomTools =\n options?.tools?.some(isOpenAICustomTool) ||\n options?.tools?.some(isCustomTool);\n\n return (\n this.useResponsesApi ||\n usesBuiltInTools ||\n hasResponsesOnlyKwargs ||\n hasCustomTools ||\n _modelPrefersResponsesAPI(this.model)\n );\n }\n\n override getLsParams(options: this[\"ParsedCallOptions\"]) {\n const optionsWithDefaults = this._combineCallOptions(options);\n if (this._useResponsesApi(options)) {\n return this.responses.getLsParams(optionsWithDefaults);\n }\n return this.completions.getLsParams(optionsWithDefaults);\n }\n\n override invocationParams(options?: this[\"ParsedCallOptions\"]) {\n const optionsWithDefaults = this._combineCallOptions(options);\n if (this._useResponsesApi(options)) {\n return this.responses.invocationParams(optionsWithDefaults);\n }\n return this.completions.invocationParams(optionsWithDefaults);\n }\n\n /** @ignore */\n override async _generate(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n if (this._useResponsesApi(options)) {\n return this.responses._generate(messages, options);\n }\n return this.completions._generate(messages, options, runManager);\n }\n\n override async *_streamResponseChunks(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): AsyncGenerator<ChatGenerationChunk> {\n if (this._useResponsesApi(options)) {\n yield* this.responses._streamResponseChunks(\n messages,\n this._combineCallOptions(options),\n runManager\n );\n return;\n }\n yield* this.completions._streamResponseChunks(\n messages,\n this._combineCallOptions(options),\n runManager\n );\n }\n\n override withConfig(\n config: Partial<CallOptions>\n ): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions> {\n const newModel = new ChatOpenAI<CallOptions>(this.fields);\n newModel.defaultOptions = { ...this.defaultOptions, ...config };\n return newModel;\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2kBA,IAAa,aAAb,MAAa,mBAEHA,4BAA4B;;;;;CAKpC,kBAAkB;CAElB,AAAU;CAEV,AAAU;CAEV,IAAI,uBAAiC;AACnC,SAAO,CAAC,GAAG,MAAM,sBAAsB,iBAAkB;CAC1D;CAED,IAAI,WAAqB;AACvB,SAAO,CAAC,GAAG,MAAM,UAAU,iBAAkB;CAC9C;CAED,YAAsBC,QAA2B;EAC/C,MAAM,OAAO;EADO;EAEpB,KAAK,kBAAkB,QAAQ,mBAAmB;EAClD,KAAK,YAAY,QAAQ,aAAa,IAAIC,sCAAoB;EAC9D,KAAK,cAAc,QAAQ,eAAe,IAAIC,0CAAsB;CACrE;CAED,AAAU,iBAAiBC,SAAgD;EACzE,MAAM,mBAAmB,SAAS,OAAO,KAAKC,4BAAc;EAC5D,MAAM,yBACJ,SAAS,wBAAwB,QACjC,SAAS,QAAQ,QACjB,SAAS,cAAc,QACvB,SAAS,WAAW,QACpB,SAAS,WAAW,WAAW,QAC/B,KAAK,WAAW,WAAW;EAC7B,MAAM,iBACJ,SAAS,OAAO,KAAKC,iCAAmB,IACxC,SAAS,OAAO,KAAKC,2BAAa;AAEpC,SACE,KAAK,mBACL,oBACA,0BACA,kBACAC,uCAA0B,KAAK,MAAM;CAExC;CAED,AAAS,YAAYC,SAAoC;EACvD,MAAM,sBAAsB,KAAK,oBAAoB,QAAQ;AAC7D,MAAI,KAAK,iBAAiB,QAAQ,CAChC,QAAO,KAAK,UAAU,YAAY,oBAAoB;AAExD,SAAO,KAAK,YAAY,YAAY,oBAAoB;CACzD;CAED,AAAS,iBAAiBC,SAAqC;EAC7D,MAAM,sBAAsB,KAAK,oBAAoB,QAAQ;AAC7D,MAAI,KAAK,iBAAiB,QAAQ,CAChC,QAAO,KAAK,UAAU,iBAAiB,oBAAoB;AAE7D,SAAO,KAAK,YAAY,iBAAiB,oBAAoB;CAC9D;;CAGD,MAAe,UACbC,UACAF,SACAG,YACqB;AACrB,MAAI,KAAK,iBAAiB,QAAQ,CAChC,QAAO,KAAK,UAAU,UAAU,UAAU,QAAQ;AAEpD,SAAO,KAAK,YAAY,UAAU,UAAU,SAAS,WAAW;CACjE;CAED,OAAgB,sBACdD,UACAF,SACAG,YACqC;AACrC,MAAI,KAAK,iBAAiB,QAAQ,EAAE;GAClC,OAAO,KAAK,UAAU,sBACpB,UACA,KAAK,oBAAoB,QAAQ,EACjC,WACD;AACD;EACD;EACD,OAAO,KAAK,YAAY,sBACtB,UACA,KAAK,oBAAoB,QAAQ,EACjC,WACD;CACF;CAED,AAAS,WACPC,QAC+D;EAC/D,MAAM,WAAW,IAAI,WAAwB,KAAK;EAClD,SAAS,iBAAiB;GAAE,GAAG,KAAK;GAAgB,GAAG;EAAQ;AAC/D,SAAO;CACR;AACF"}