UNPKG

@langchain/openai

Source map for llms.cjs (original source: ../src/llms.ts)
{"version":3,"file":"llms.cjs","names":["BaseLLM","fields?: Partial<OpenAIInput> &\n BaseLLMParams & {\n configuration?: ClientOptions;\n }","options?: this[\"ParsedCallOptions\"]","prompts: string[]","options: this[\"ParsedCallOptions\"]","runManager?: CallbackManagerForLLMRun","choices: OpenAIClient.CompletionChoice[]","tokenUsage: TokenUsage","response: Omit<OpenAIClient.Completion, \"choices\"> | undefined","choices","input: string","GenerationChunk","request:\n | OpenAIClient.CompletionCreateParamsStreaming\n | OpenAIClient.CompletionCreateParamsNonStreaming","options?: OpenAICoreRequestOptions","wrapOpenAIClientError","options: OpenAICoreRequestOptions | undefined","openAIEndpointConfig: OpenAIEndpointConfig","getEndpoint","getHeadersWithUserAgent","OpenAIClient"],"sources":["../src/llms.ts"],"sourcesContent":["import type { TiktokenModel } from \"js-tiktoken/lite\";\nimport { type ClientOptions, OpenAI as OpenAIClient } from \"openai\";\nimport { calculateMaxTokens } from \"@langchain/core/language_models/base\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { GenerationChunk, type LLMResult } from \"@langchain/core/outputs\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport {\n BaseLLM,\n type BaseLLMParams,\n} from \"@langchain/core/language_models/llms\";\nimport { chunkArray } from \"@langchain/core/utils/chunk_array\";\nimport type {\n OpenAIApiKey,\n OpenAICallOptions,\n OpenAICoreRequestOptions,\n OpenAIInput,\n} from \"./types.js\";\nimport {\n OpenAIEndpointConfig,\n getEndpoint,\n getHeadersWithUserAgent,\n} from \"./utils/azure.js\";\nimport { wrapOpenAIClientError } from \"./utils/client.js\";\n\nexport type { OpenAICallOptions, OpenAIInput };\n\n/**\n * Interface for tracking token usage in OpenAI calls.\n */\ninterface TokenUsage {\n completionTokens?: number;\n promptTokens?: number;\n totalTokens?: number;\n}\n\n/**\n * Wrapper around OpenAI large language models.\n *\n * To use you should have the `openai` package installed, with the\n * `OPENAI_API_KEY` environment variable set.\n *\n * To use with Azure, import the `AzureOpenAI` class.\n *\n * @remarks\n * Any parameters that are valid to be passed to {@link\n * https://platform.openai.com/docs/api-reference/completions/create |\n * `openai.createCompletion`} can be passed through {@link modelKwargs}, even\n * if not explicitly available on this class.\n * @example\n * ```typescript\n * const model = new OpenAI({\n * modelName: \"gpt-4\",\n * temperature: 0.7,\n * maxTokens: 1000,\n * maxRetries: 5,\n * });\n *\n * const res = await model.invoke(\n * \"Question: What would be a good company name for a company that makes colorful socks?\\nAnswer:\"\n * );\n * console.log({ res });\n * ```\n */\nexport class OpenAI<CallOptions extends OpenAICallOptions = OpenAICallOptions>\n extends BaseLLM<CallOptions>\n implements Partial<OpenAIInput>\n{\n static lc_name() {\n return \"OpenAI\";\n }\n\n get callKeys() {\n return [...super.callKeys, \"options\"];\n }\n\n lc_serializable = true;\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n openAIApiKey: \"OPENAI_API_KEY\",\n apiKey: \"OPENAI_API_KEY\",\n organization: \"OPENAI_ORGANIZATION\",\n };\n }\n\n get lc_aliases(): Record<string, string> {\n return {\n modelName: \"model\",\n openAIApiKey: \"openai_api_key\",\n apiKey: \"openai_api_key\",\n };\n }\n\n temperature?: number;\n\n maxTokens?: number;\n\n topP?: number;\n\n frequencyPenalty?: number;\n\n presencePenalty?: 
number;\n\n n = 1;\n\n bestOf?: number;\n\n logitBias?: Record<string, number>;\n\n model = \"gpt-3.5-turbo-instruct\";\n\n /** @deprecated Use \"model\" instead */\n modelName: string;\n\n modelKwargs?: OpenAIInput[\"modelKwargs\"];\n\n batchSize = 20;\n\n timeout?: number;\n\n stop?: string[];\n\n stopSequences?: string[];\n\n user?: string;\n\n streaming = false;\n\n openAIApiKey?: OpenAIApiKey;\n\n apiKey?: OpenAIApiKey;\n\n organization?: string;\n\n protected client: OpenAIClient;\n\n protected clientConfig: ClientOptions;\n\n constructor(\n fields?: Partial<OpenAIInput> &\n BaseLLMParams & {\n configuration?: ClientOptions;\n }\n ) {\n super(fields ?? {});\n\n this.openAIApiKey =\n fields?.apiKey ??\n fields?.openAIApiKey ??\n getEnvironmentVariable(\"OPENAI_API_KEY\");\n this.apiKey = this.openAIApiKey;\n\n this.organization =\n fields?.configuration?.organization ??\n getEnvironmentVariable(\"OPENAI_ORGANIZATION\");\n\n this.model = fields?.model ?? fields?.modelName ?? this.model;\n if (\n (this.model?.startsWith(\"gpt-3.5-turbo\") ||\n this.model?.startsWith(\"gpt-4\") ||\n this.model?.startsWith(\"o1\")) &&\n !this.model?.includes(\"-instruct\")\n ) {\n throw new Error(\n [\n `Your chosen OpenAI model, \"${this.model}\", is a chat model and not a text-in/text-out LLM.`,\n `Passing it into the \"OpenAI\" class is no longer supported.`,\n `Please use the \"ChatOpenAI\" class instead.`,\n \"\",\n `See this page for more information:`,\n \"|\",\n `└> https://js.langchain.com/docs/integrations/chat/openai`,\n ].join(\"\\n\")\n );\n }\n this.modelName = this.model;\n this.modelKwargs = fields?.modelKwargs ?? {};\n this.batchSize = fields?.batchSize ?? this.batchSize;\n this.timeout = fields?.timeout;\n\n this.temperature = fields?.temperature ?? this.temperature;\n this.maxTokens = fields?.maxTokens ?? this.maxTokens;\n this.topP = fields?.topP ?? this.topP;\n this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty;\n this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty;\n this.n = fields?.n ?? this.n;\n this.bestOf = fields?.bestOf ?? this.bestOf;\n this.logitBias = fields?.logitBias;\n this.stop = fields?.stopSequences ?? fields?.stop;\n this.stopSequences = this.stop;\n this.user = fields?.user;\n\n this.streaming = fields?.streaming ?? false;\n\n if (this.streaming && this.bestOf && this.bestOf > 1) {\n throw new Error(\"Cannot stream results when bestOf > 1\");\n }\n\n this.clientConfig = {\n apiKey: this.apiKey,\n organization: this.organization,\n dangerouslyAllowBrowser: true,\n ...fields?.configuration,\n };\n }\n\n /**\n * Get the parameters used to invoke the model\n */\n invocationParams(\n options?: this[\"ParsedCallOptions\"]\n ): Omit<OpenAIClient.CompletionCreateParams, \"prompt\"> {\n return {\n model: this.model,\n temperature: this.temperature,\n max_tokens: this.maxTokens,\n top_p: this.topP,\n frequency_penalty: this.frequencyPenalty,\n presence_penalty: this.presencePenalty,\n n: this.n,\n best_of: this.bestOf,\n logit_bias: this.logitBias,\n stop: options?.stop ?? 
this.stopSequences,\n user: this.user,\n stream: this.streaming,\n ...this.modelKwargs,\n };\n }\n\n /** @ignore */\n _identifyingParams(): Omit<OpenAIClient.CompletionCreateParams, \"prompt\"> & {\n model_name: string;\n } & ClientOptions {\n return {\n model_name: this.model,\n ...this.invocationParams(),\n ...this.clientConfig,\n };\n }\n\n /**\n * Get the identifying parameters for the model\n */\n identifyingParams(): Omit<OpenAIClient.CompletionCreateParams, \"prompt\"> & {\n model_name: string;\n } & ClientOptions {\n return this._identifyingParams();\n }\n\n /**\n * Call out to OpenAI's endpoint with k unique prompts\n *\n * @param [prompts] - The prompts to pass into the model.\n * @param [options] - Optional list of stop words to use when generating.\n * @param [runManager] - Optional callback manager to use when generating.\n *\n * @returns The full LLM output.\n *\n * @example\n * ```ts\n * import { OpenAI } from \"langchain/llms/openai\";\n * const openai = new OpenAI();\n * const response = await openai.generate([\"Tell me a joke.\"]);\n * ```\n */\n async _generate(\n prompts: string[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<LLMResult> {\n const subPrompts = chunkArray(prompts, this.batchSize);\n const choices: OpenAIClient.CompletionChoice[] = [];\n const tokenUsage: TokenUsage = {};\n\n const params = this.invocationParams(options);\n\n if (params.max_tokens === -1) {\n if (prompts.length !== 1) {\n throw new Error(\n \"max_tokens set to -1 not supported for multiple inputs\"\n );\n }\n params.max_tokens = await calculateMaxTokens({\n prompt: prompts[0],\n // Cast here to allow for other models that may not fit the union\n modelName: this.model as TiktokenModel,\n });\n }\n\n for (let i = 0; i < subPrompts.length; i += 1) {\n const data = params.stream\n ? await (async () => {\n const choices: OpenAIClient.CompletionChoice[] = [];\n let response: Omit<OpenAIClient.Completion, \"choices\"> | undefined;\n const stream = await this.completionWithRetry(\n {\n ...params,\n stream: true,\n prompt: subPrompts[i],\n },\n options\n );\n for await (const message of stream) {\n // on the first message set the response properties\n if (!response) {\n response = {\n id: message.id,\n object: message.object,\n created: message.created,\n model: message.model,\n };\n }\n\n // on all messages, update choice\n for (const part of message.choices) {\n if (!choices[part.index]) {\n choices[part.index] = part;\n } else {\n const choice = choices[part.index];\n choice.text += part.text;\n choice.finish_reason = part.finish_reason;\n choice.logprobs = part.logprobs;\n }\n // eslint-disable-next-line no-void\n void runManager?.handleLLMNewToken(part.text, {\n prompt: Math.floor(part.index / this.n),\n completion: part.index % this.n,\n });\n }\n }\n if (options.signal?.aborted) {\n throw new Error(\"AbortError\");\n }\n return { ...response, choices };\n })()\n : await this.completionWithRetry(\n {\n ...params,\n stream: false,\n prompt: subPrompts[i],\n },\n {\n signal: options.signal,\n ...options.options,\n }\n );\n\n choices.push(...data.choices);\n const {\n completion_tokens: completionTokens,\n prompt_tokens: promptTokens,\n total_tokens: totalTokens,\n } = data.usage\n ? data.usage\n : {\n completion_tokens: undefined,\n prompt_tokens: undefined,\n total_tokens: undefined,\n };\n\n if (completionTokens) {\n tokenUsage.completionTokens =\n (tokenUsage.completionTokens ?? 
0) + completionTokens;\n }\n\n if (promptTokens) {\n tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens;\n }\n\n if (totalTokens) {\n tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens;\n }\n }\n\n const generations = chunkArray(choices, this.n).map((promptChoices) =>\n promptChoices.map((choice) => ({\n text: choice.text ?? \"\",\n generationInfo: {\n finishReason: choice.finish_reason,\n logprobs: choice.logprobs,\n },\n }))\n );\n return {\n generations,\n llmOutput: { tokenUsage },\n };\n }\n\n // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation?\n async *_streamResponseChunks(\n input: string,\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): AsyncGenerator<GenerationChunk> {\n const params = {\n ...this.invocationParams(options),\n prompt: input,\n stream: true as const,\n };\n const stream = await this.completionWithRetry(params, options);\n for await (const data of stream) {\n const choice = data?.choices[0];\n if (!choice) {\n continue;\n }\n const chunk = new GenerationChunk({\n text: choice.text,\n generationInfo: {\n finishReason: choice.finish_reason,\n },\n });\n yield chunk;\n // eslint-disable-next-line no-void\n void runManager?.handleLLMNewToken(chunk.text ?? \"\");\n }\n if (options.signal?.aborted) {\n throw new Error(\"AbortError\");\n }\n }\n\n /**\n * Calls the OpenAI API with retry logic in case of failures.\n * @param request The request to send to the OpenAI API.\n * @param options Optional configuration for the API call.\n * @returns The response from the OpenAI API.\n */\n async completionWithRetry(\n request: OpenAIClient.CompletionCreateParamsStreaming,\n options?: OpenAICoreRequestOptions\n ): Promise<AsyncIterable<OpenAIClient.Completion>>;\n\n async completionWithRetry(\n request: OpenAIClient.CompletionCreateParamsNonStreaming,\n options?: OpenAICoreRequestOptions\n ): Promise<OpenAIClient.Completions.Completion>;\n\n async completionWithRetry(\n request:\n | OpenAIClient.CompletionCreateParamsStreaming\n | OpenAIClient.CompletionCreateParamsNonStreaming,\n options?: OpenAICoreRequestOptions\n ): Promise<\n AsyncIterable<OpenAIClient.Completion> | OpenAIClient.Completions.Completion\n > {\n const requestOptions = this._getClientOptions(options);\n return this.caller.call(async () => {\n try {\n const res = await this.client.completions.create(\n request,\n requestOptions\n );\n return res;\n } catch (e) {\n const error = wrapOpenAIClientError(e);\n throw error;\n }\n });\n }\n\n /**\n * Calls the OpenAI API with retry logic in case of failures.\n * @param request The request to send to the OpenAI API.\n * @param options Optional configuration for the API call.\n * @returns The response from the OpenAI API.\n */\n protected _getClientOptions(\n options: OpenAICoreRequestOptions | undefined\n ): OpenAICoreRequestOptions {\n if (!this.client) {\n const openAIEndpointConfig: OpenAIEndpointConfig = {\n baseURL: this.clientConfig.baseURL,\n };\n\n const endpoint = getEndpoint(openAIEndpointConfig);\n\n const params = {\n ...this.clientConfig,\n baseURL: endpoint,\n timeout: this.timeout,\n maxRetries: 0,\n };\n\n if (!params.baseURL) {\n delete params.baseURL;\n }\n\n params.defaultHeaders = getHeadersWithUserAgent(params.defaultHeaders);\n\n this.client = new OpenAIClient(params);\n }\n const requestOptions = {\n ...this.clientConfig,\n ...options,\n } as OpenAICoreRequestOptions;\n return requestOptions;\n }\n\n _llmType() {\n return \"openai\";\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+DA,IAAa,SAAb,cACUA,8CAEV;CACE,OAAO,UAAU;AACf,SAAO;CACR;CAED,IAAI,WAAW;AACb,SAAO,CAAC,GAAG,MAAM,UAAU,SAAU;CACtC;CAED,kBAAkB;CAElB,IAAI,aAAoD;AACtD,SAAO;GACL,cAAc;GACd,QAAQ;GACR,cAAc;EACf;CACF;CAED,IAAI,aAAqC;AACvC,SAAO;GACL,WAAW;GACX,cAAc;GACd,QAAQ;EACT;CACF;CAED;CAEA;CAEA;CAEA;CAEA;CAEA,IAAI;CAEJ;CAEA;CAEA,QAAQ;;CAGR;CAEA;CAEA,YAAY;CAEZ;CAEA;CAEA;CAEA;CAEA,YAAY;CAEZ;CAEA;CAEA;CAEA,AAAU;CAEV,AAAU;CAEV,YACEC,QAIA;EACA,MAAM,UAAU,CAAE,EAAC;EAEnB,KAAK,eACH,QAAQ,UACR,QAAQ,uEACe,iBAAiB;EAC1C,KAAK,SAAS,KAAK;EAEnB,KAAK,eACH,QAAQ,eAAe,uEACA,sBAAsB;EAE/C,KAAK,QAAQ,QAAQ,SAAS,QAAQ,aAAa,KAAK;AACxD,OACG,KAAK,OAAO,WAAW,gBAAgB,IACtC,KAAK,OAAO,WAAW,QAAQ,IAC/B,KAAK,OAAO,WAAW,KAAK,KAC9B,CAAC,KAAK,OAAO,SAAS,YAAY,CAElC,OAAM,IAAI,MACR;GACE,CAAC,2BAA2B,EAAE,KAAK,MAAM,kDAAkD,CAAC;GAC5F,CAAC,0DAA0D,CAAC;GAC5D,CAAC,0CAA0C,CAAC;GAC5C;GACA,CAAC,mCAAmC,CAAC;GACrC;GACA,CAAC,yDAAyD,CAAC;EAC5D,EAAC,KAAK,KAAK;EAGhB,KAAK,YAAY,KAAK;EACtB,KAAK,cAAc,QAAQ,eAAe,CAAE;EAC5C,KAAK,YAAY,QAAQ,aAAa,KAAK;EAC3C,KAAK,UAAU,QAAQ;EAEvB,KAAK,cAAc,QAAQ,eAAe,KAAK;EAC/C,KAAK,YAAY,QAAQ,aAAa,KAAK;EAC3C,KAAK,OAAO,QAAQ,QAAQ,KAAK;EACjC,KAAK,mBAAmB,QAAQ,oBAAoB,KAAK;EACzD,KAAK,kBAAkB,QAAQ,mBAAmB,KAAK;EACvD,KAAK,IAAI,QAAQ,KAAK,KAAK;EAC3B,KAAK,SAAS,QAAQ,UAAU,KAAK;EACrC,KAAK,YAAY,QAAQ;EACzB,KAAK,OAAO,QAAQ,iBAAiB,QAAQ;EAC7C,KAAK,gBAAgB,KAAK;EAC1B,KAAK,OAAO,QAAQ;EAEpB,KAAK,YAAY,QAAQ,aAAa;AAEtC,MAAI,KAAK,aAAa,KAAK,UAAU,KAAK,SAAS,EACjD,OAAM,IAAI,MAAM;EAGlB,KAAK,eAAe;GAClB,QAAQ,KAAK;GACb,cAAc,KAAK;GACnB,yBAAyB;GACzB,GAAG,QAAQ;EACZ;CACF;;;;CAKD,iBACEC,SACqD;AACrD,SAAO;GACL,OAAO,KAAK;GACZ,aAAa,KAAK;GAClB,YAAY,KAAK;GACjB,OAAO,KAAK;GACZ,mBAAmB,KAAK;GACxB,kBAAkB,KAAK;GACvB,GAAG,KAAK;GACR,SAAS,KAAK;GACd,YAAY,KAAK;GACjB,MAAM,SAAS,QAAQ,KAAK;GAC5B,MAAM,KAAK;GACX,QAAQ,KAAK;GACb,GAAG,KAAK;EACT;CACF;;CAGD,qBAEkB;AAChB,SAAO;GACL,YAAY,KAAK;GACjB,GAAG,KAAK,kBAAkB;GAC1B,GAAG,KAAK;EACT;CACF;;;;CAKD,oBAEkB;AAChB,SAAO,KAAK,oBAAoB;CACjC;;;;;;;;;;;;;;;;;CAkBD,MAAM,UACJC,SACAC,SACAC,YACoB;EACpB,MAAM,gEAAwB,SAAS,KAAK,UAAU;EACtD,MAAMC,UAA2C,CAAE;EACnD,MAAMC,aAAyB,CAAE;EAEjC,MAAM,SAAS,KAAK,iBAAiB,QAAQ;AAE7C,MAAI,OAAO,eAAe,IAAI;AAC5B,OAAI,QAAQ,WAAW,EACrB,OAAM,IAAI,MACR;GAGJ,OAAO,aAAa,oEAAyB;IAC3C,QAAQ,QAAQ;IAEhB,WAAW,KAAK;GACjB,EAAC;EACH;AAED,OAAK,IAAI,IAAI,GAAG,IAAI,WAAW,QAAQ,KAAK,GAAG;GAC7C,MAAM,OAAO,OAAO,SAChB,OAAO,YAAY;IACjB,MAAMD,YAA2C,CAAE;IACnD,IAAIE;IACJ,MAAM,SAAS,MAAM,KAAK,oBACxB;KACE,GAAG;KACH,QAAQ;KACR,QAAQ,WAAW;IACpB,GACD,QACD;AACD,eAAW,MAAM,WAAW,QAAQ;AAElC,SAAI,CAAC,UACH,WAAW;MACT,IAAI,QAAQ;MACZ,QAAQ,QAAQ;MAChB,SAAS,QAAQ;MACjB,OAAO,QAAQ;KAChB;AAIH,UAAK,MAAM,QAAQ,QAAQ,SAAS;AAClC,UAAI,CAACC,UAAQ,KAAK,QAChBA,UAAQ,KAAK,SAAS;WACjB;OACL,MAAM,SAASA,UAAQ,KAAK;OAC5B,OAAO,QAAQ,KAAK;OACpB,OAAO,gBAAgB,KAAK;OAC5B,OAAO,WAAW,KAAK;MACxB;MAEI,YAAY,kBAAkB,KAAK,MAAM;OAC5C,QAAQ,KAAK,MAAM,KAAK,QAAQ,KAAK,EAAE;OACvC,YAAY,KAAK,QAAQ,KAAK;MAC/B,EAAC;KACH;IACF;AACD,QAAI,QAAQ,QAAQ,QAClB,OAAM,IAAI,MAAM;AAElB,WAAO;KAAE,GAAG;KAAU;IAAS;GAChC,IAAG,GACJ,MAAM,KAAK,oBACT;IACE,GAAG;IACH,QAAQ;IACR,QAAQ,WAAW;GACpB,GACD;IACE,QAAQ,QAAQ;IAChB,GAAG,QAAQ;GACZ,EACF;GAEL,QAAQ,KAAK,GAAG,KAAK,QAAQ;GAC7B,MAAM,EACJ,mBAAmB,kBACnB,eAAe,cACf,cAAc,aACf,GAAG,KAAK,QACL,KAAK,QACL;IACE,mBAAmB;IACnB,eAAe;IACf,cAAc;GACf;AAEL,OAAI,kBACF,WAAW,oBACR,WAAW,oBAAoB,KAAK;AAGzC,OAAI,cACF,WAAW,gBAAgB,WAAW,gBAAgB,KAAK;AAG7D,OAAI,aACF,WAAW,eAAe,WAAW,eAAe,KAAK;EAE5D;EAED,MAAM,iEAAyB,SAAS,KAAK,EAAE,CAAC,IAAI,CAAC,kBACnD,cAAc,IAAI,CAAC,YAAY;GAC7B,MAAM,OAAO,QAAQ;GACrB,gBAAgB;IACd,cAAc,OAAO;IACrB,UAAU,OAAO;GAClB;EACF,GAAE,CACJ;AACD,SAAO;GACL;GACA,WAAW,EAAE,WAAY;EAC1B;CACF;CAGD,OAAO,sBACL
C,OACAN,SACAC,YACiC;EACjC,MAAM,SAAS;GACb,GAAG,KAAK,iBAAiB,QAAQ;GACjC,QAAQ;GACR,QAAQ;EACT;EACD,MAAM,SAAS,MAAM,KAAK,oBAAoB,QAAQ,QAAQ;AAC9D,aAAW,MAAM,QAAQ,QAAQ;GAC/B,MAAM,SAAS,MAAM,QAAQ;AAC7B,OAAI,CAAC,OACH;GAEF,MAAM,QAAQ,IAAIM,yCAAgB;IAChC,MAAM,OAAO;IACb,gBAAgB,EACd,cAAc,OAAO,cACtB;GACF;GACD,MAAM;GAED,YAAY,kBAAkB,MAAM,QAAQ,GAAG;EACrD;AACD,MAAI,QAAQ,QAAQ,QAClB,OAAM,IAAI,MAAM;CAEnB;CAkBD,MAAM,oBACJC,SAGAC,SAGA;EACA,MAAM,iBAAiB,KAAK,kBAAkB,QAAQ;AACtD,SAAO,KAAK,OAAO,KAAK,YAAY;AAClC,OAAI;IACF,MAAM,MAAM,MAAM,KAAK,OAAO,YAAY,OACxC,SACA,eACD;AACD,WAAO;GACR,SAAQ,GAAG;IACV,MAAM,QAAQC,qCAAsB,EAAE;AACtC,UAAM;GACP;EACF,EAAC;CACH;;;;;;;CAQD,AAAU,kBACRC,SAC0B;AAC1B,MAAI,CAAC,KAAK,QAAQ;GAChB,MAAMC,uBAA6C,EACjD,SAAS,KAAK,aAAa,QAC5B;GAED,MAAM,WAAWC,0BAAY,qBAAqB;GAElD,MAAM,SAAS;IACb,GAAG,KAAK;IACR,SAAS;IACT,SAAS,KAAK;IACd,YAAY;GACb;AAED,OAAI,CAAC,OAAO,SACV,OAAO,OAAO;GAGhB,OAAO,iBAAiBC,sCAAwB,OAAO,eAAe;GAEtE,KAAK,SAAS,IAAIC,cAAa;EAChC;EACD,MAAM,iBAAiB;GACrB,GAAG,KAAK;GACR,GAAG;EACJ;AACD,SAAO;CACR;CAED,WAAW;AACT,SAAO;CACR;AACF"}
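
The TypeScript embedded in sourcesContent above is src/llms.ts, the completions-based OpenAI wrapper: it reads OPENAI_API_KEY from the environment, defaults to the gpt-3.5-turbo-instruct model, rejects chat-only models (gpt-3.5-turbo, gpt-4, o1 without "-instruct"), batches prompts by batchSize, and forwards any extra openai.createCompletion parameters via modelKwargs. A minimal usage sketch against that public surface, assuming a Node/ESM context with top-level await and OPENAI_API_KEY set; the logprobs passthrough is only an illustration of the modelKwargs behavior and none of this snippet appears in the file itself:

import { OpenAI } from "@langchain/openai";

// Reads OPENAI_API_KEY from the environment; "gpt-3.5-turbo-instruct"
// is the class default and is spelled out here for clarity.
const llm = new OpenAI({
  model: "gpt-3.5-turbo-instruct",
  temperature: 0,
  maxTokens: 256,
  // Extra createCompletion parameters can be passed through modelKwargs
  // (logprobs is shown as an illustrative example, not taken from this file).
  modelKwargs: { logprobs: 1 },
});

// One prompt in, one completion string out.
const answer = await llm.invoke("Q: What does a source map do?\nA:");
console.log(answer);

// Streaming yields string chunks produced by _streamResponseChunks.
const stream = await llm.stream("Write one sentence about TypeScript.");
for await (const chunk of stream) {
  process.stdout.write(chunk);
}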