@langchain/core
Core LangChain.js abstractions and schemas
1 line • 25 kB
Source Map (JSON) for base.cjs, mapping back to src/language_models/base.ts. The embedded sourcesContent is shown decoded below; the machine-generated VLQ mappings and names tables are omitted as they are not human-readable:
{"version":3,"file":"base.cjs","names":["modelName: string","modelName?: string","tool: unknown","encodingForModel","Runnable","params: BaseLangChainParams","InMemoryCache","AsyncCaller","content: MessageContent","textContent: string","input: BaseLanguageModelInput","StringPromptValue","ChatPromptValue","coerceMessageLikeToMessage","params: Record<string, any>","_data: SerializedLLM"],"sources":["../../src/language_models/base.ts"],"sourcesContent":["import type { Tiktoken, TiktokenModel } from \"js-tiktoken/lite\";\nimport type { ZodType as ZodTypeV3 } from \"zod/v3\";\nimport type { $ZodType as ZodTypeV4 } from \"zod/v4/core\";\n\nimport { type BaseCache, InMemoryCache } from \"../caches/index.js\";\nimport {\n type BasePromptValueInterface,\n StringPromptValue,\n ChatPromptValue,\n} from \"../prompt_values.js\";\nimport {\n type BaseMessage,\n type BaseMessageLike,\n type MessageContent,\n} from \"../messages/base.js\";\nimport { coerceMessageLikeToMessage } from \"../messages/utils.js\";\nimport { type LLMResult } from \"../outputs.js\";\nimport { CallbackManager, Callbacks } from \"../callbacks/manager.js\";\nimport { AsyncCaller, AsyncCallerParams } from \"../utils/async_caller.js\";\nimport { encodingForModel } from \"../utils/tiktoken.js\";\nimport { Runnable, type RunnableInterface } from \"../runnables/base.js\";\nimport { RunnableConfig } from \"../runnables/config.js\";\nimport { JSONSchema } from \"../utils/json_schema.js\";\nimport {\n InferInteropZodOutput,\n InteropZodObject,\n InteropZodType,\n} from \"../utils/types/zod.js\";\nimport { ModelProfile } from \"./profile.js\";\n\n// https://www.npmjs.com/package/js-tiktoken\n\nexport const getModelNameForTiktoken = (modelName: string): TiktokenModel => {\n if (modelName.startsWith(\"gpt-5\")) {\n return \"gpt-5\" as TiktokenModel;\n }\n\n if (modelName.startsWith(\"gpt-3.5-turbo-16k\")) {\n return \"gpt-3.5-turbo-16k\";\n }\n\n if (modelName.startsWith(\"gpt-3.5-turbo-\")) {\n return \"gpt-3.5-turbo\";\n }\n\n if (modelName.startsWith(\"gpt-4-32k\")) {\n return \"gpt-4-32k\";\n }\n\n if (modelName.startsWith(\"gpt-4-\")) {\n return \"gpt-4\";\n }\n\n if (modelName.startsWith(\"gpt-4o\")) {\n return \"gpt-4o\";\n }\n\n return modelName as TiktokenModel;\n};\n\nexport const getEmbeddingContextSize = (modelName?: string): number => {\n switch (modelName) {\n case \"text-embedding-ada-002\":\n return 8191;\n default:\n return 2046;\n }\n};\n\n/**\n * Get the context window size (max input tokens) for a given model.\n *\n * Context window sizes are sourced from official model documentation:\n * - OpenAI: https://platform.openai.com/docs/models\n * - Anthropic: https://docs.anthropic.com/claude/docs/models-overview\n * - Google: https://ai.google.dev/gemini/docs/models/gemini\n *\n * @param modelName - The name of the model\n * @returns The context window size in tokens\n */\nexport const getModelContextSize = (modelName: string): number => {\n const normalizedName = getModelNameForTiktoken(modelName) as string;\n\n switch (normalizedName) {\n // GPT-5 series\n case \"gpt-5\":\n case \"gpt-5-turbo\":\n case \"gpt-5-turbo-preview\":\n return 400000;\n\n // GPT-4o series\n case \"gpt-4o\":\n case \"gpt-4o-mini\":\n case \"gpt-4o-2024-05-13\":\n case \"gpt-4o-2024-08-06\":\n return 128000;\n\n // GPT-4 Turbo series\n case \"gpt-4-turbo\":\n case \"gpt-4-turbo-preview\":\n case \"gpt-4-turbo-2024-04-09\":\n case \"gpt-4-0125-preview\":\n case \"gpt-4-1106-preview\":\n return 128000;\n\n // GPT-4 series\n case \"gpt-4-32k\":\n case 
\"gpt-4-32k-0314\":\n case \"gpt-4-32k-0613\":\n return 32768;\n case \"gpt-4\":\n case \"gpt-4-0314\":\n case \"gpt-4-0613\":\n return 8192;\n\n // GPT-3.5 Turbo series\n case \"gpt-3.5-turbo-16k\":\n case \"gpt-3.5-turbo-16k-0613\":\n return 16384;\n case \"gpt-3.5-turbo\":\n case \"gpt-3.5-turbo-0301\":\n case \"gpt-3.5-turbo-0613\":\n case \"gpt-3.5-turbo-1106\":\n case \"gpt-3.5-turbo-0125\":\n return 4096;\n\n // Legacy GPT-3 models\n case \"text-davinci-003\":\n case \"text-davinci-002\":\n return 4097;\n case \"text-davinci-001\":\n return 2049;\n case \"text-curie-001\":\n case \"text-babbage-001\":\n case \"text-ada-001\":\n return 2048;\n\n // Code models\n case \"code-davinci-002\":\n case \"code-davinci-001\":\n return 8000;\n case \"code-cushman-001\":\n return 2048;\n\n // Claude models (Anthropic)\n case \"claude-3-5-sonnet-20241022\":\n case \"claude-3-5-sonnet-20240620\":\n case \"claude-3-opus-20240229\":\n case \"claude-3-sonnet-20240229\":\n case \"claude-3-haiku-20240307\":\n case \"claude-2.1\":\n return 200000;\n case \"claude-2.0\":\n case \"claude-instant-1.2\":\n return 100000;\n\n // Gemini models (Google)\n case \"gemini-1.5-pro\":\n case \"gemini-1.5-pro-latest\":\n case \"gemini-1.5-flash\":\n case \"gemini-1.5-flash-latest\":\n return 1000000; // 1M tokens\n case \"gemini-pro\":\n case \"gemini-pro-vision\":\n return 32768;\n\n default:\n return 4097;\n }\n};\n\n/**\n * Whether or not the input matches the OpenAI tool definition.\n * @param {unknown} tool The input to check.\n * @returns {boolean} Whether the input is an OpenAI tool definition.\n */\nexport function isOpenAITool(tool: unknown): tool is ToolDefinition {\n if (typeof tool !== \"object\" || !tool) return false;\n if (\n \"type\" in tool &&\n tool.type === \"function\" &&\n \"function\" in tool &&\n typeof tool.function === \"object\" &&\n tool.function &&\n \"name\" in tool.function &&\n \"parameters\" in tool.function\n ) {\n return true;\n }\n return false;\n}\n\ninterface CalculateMaxTokenProps {\n prompt: string;\n modelName: TiktokenModel;\n}\n\nexport const calculateMaxTokens = async ({\n prompt,\n modelName,\n}: CalculateMaxTokenProps) => {\n let numTokens;\n\n try {\n numTokens = (\n await encodingForModel(getModelNameForTiktoken(modelName))\n ).encode(prompt).length;\n } catch {\n console.warn(\n \"Failed to calculate number of tokens, falling back to approximate count\"\n );\n\n // fallback to approximate calculation if tiktoken is not available\n // each token is ~4 characters: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them#\n numTokens = Math.ceil(prompt.length / 4);\n }\n\n const maxTokens = getModelContextSize(modelName);\n return maxTokens - numTokens;\n};\n\nconst getVerbosity = () => false;\n\nexport type SerializedLLM = {\n _model: string;\n _type: string;\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n} & Record<string, any>;\n\nexport interface BaseLangChainParams {\n verbose?: boolean;\n callbacks?: Callbacks;\n tags?: string[];\n metadata?: Record<string, unknown>;\n}\n\n/**\n * Base class for language models, chains, tools.\n */\nexport abstract class BaseLangChain<\n RunInput,\n RunOutput,\n CallOptions extends RunnableConfig = RunnableConfig\n >\n extends Runnable<RunInput, RunOutput, CallOptions>\n implements BaseLangChainParams\n{\n /**\n * Whether to print out response text.\n */\n verbose: boolean;\n\n callbacks?: Callbacks;\n\n tags?: string[];\n\n metadata?: Record<string, unknown>;\n\n get lc_attributes(): { 
/**
 * Base class for language models, chains, tools.
 */
export abstract class BaseLangChain<
    RunInput,
    RunOutput,
    CallOptions extends RunnableConfig = RunnableConfig
  >
  extends Runnable<RunInput, RunOutput, CallOptions>
  implements BaseLangChainParams
{
  /**
   * Whether to print out response text.
   */
  verbose: boolean;

  callbacks?: Callbacks;

  tags?: string[];

  metadata?: Record<string, unknown>;

  get lc_attributes(): { [key: string]: undefined } | undefined {
    return {
      callbacks: undefined,
      verbose: undefined,
    };
  }

  constructor(params: BaseLangChainParams) {
    super(params);
    this.verbose = params.verbose ?? getVerbosity();
    this.callbacks = params.callbacks;
    this.tags = params.tags ?? [];
    this.metadata = params.metadata ?? {};
  }
}

/**
 * Base interface for language model parameters.
 * A subclass of {@link BaseLanguageModel} should have a constructor that
 * takes in a parameter that extends this interface.
 */
export interface BaseLanguageModelParams
  extends AsyncCallerParams,
    BaseLangChainParams {
  /**
   * @deprecated Use `callbacks` instead
   */
  callbackManager?: CallbackManager;

  cache?: BaseCache | boolean;
}

export interface BaseLanguageModelTracingCallOptions {
  /**
   * Describes the format of structured outputs.
   * This should be provided if an output is considered to be structured.
   */
  ls_structured_output_format?: {
    /**
     * An object containing the method used for structured output (e.g., "jsonMode").
     */
    kwargs: { method: string };
    /**
     * The JSON schema describing the expected output structure.
     */
    schema?: JSONSchema;
  };
}

export interface BaseLanguageModelCallOptions
  extends RunnableConfig,
    BaseLanguageModelTracingCallOptions {
  /**
   * Stop tokens to use for this call.
   * If not provided, the default stop tokens for the model will be used.
   */
  stop?: string[];
}

export interface FunctionDefinition {
  /**
   * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
   * underscores and dashes, with a maximum length of 64.
   */
  name: string;

  /**
   * The parameters the function accepts, described as a JSON Schema object. See the
   * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
   * examples, and the
   * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
   * documentation about the format.
   *
   * To describe a function that accepts no parameters, provide the value
   * `{"type": "object", "properties": {}}`.
   */
  parameters: Record<string, unknown> | JSONSchema;

  /**
   * A description of what the function does, used by the model to choose when and
   * how to call the function.
   */
  description?: string;
}

export interface ToolDefinition {
  type: "function";
  function: FunctionDefinition;
}

export type FunctionCallOption = {
  name: string;
};

export interface BaseFunctionCallOptions extends BaseLanguageModelCallOptions {
  function_call?: FunctionCallOption;
  functions?: FunctionDefinition[];
}

export type BaseLanguageModelInput =
  | BasePromptValueInterface
  | string
  | BaseMessageLike[];

export type StructuredOutputType = InferInteropZodOutput<InteropZodObject>;

export type StructuredOutputMethodOptions<IncludeRaw extends boolean = false> = {
  name?: string;
  method?: "functionCalling" | "jsonMode" | "jsonSchema" | string;
  includeRaw?: IncludeRaw;
  /** Whether to use strict mode. Currently only supported by OpenAI models. */
  strict?: boolean;
};
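Putting FunctionDefinition and ToolDefinition together: a literal in the OpenAI function-tool shape satisfies the isOpenAITool guard defined earlier. A sketch with a hypothetical weather tool:

import {
  isOpenAITool,
  type ToolDefinition,
} from "@langchain/core/language_models/base";

// Hypothetical tool: a name plus JSON Schema parameters, wrapped in type: "function".
const getWeather: ToolDefinition = {
  type: "function",
  function: {
    name: "get_weather",
    description: "Look up the current weather for a city.",
    parameters: {
      type: "object",
      properties: { city: { type: "string" } },
      required: ["city"],
    },
  },
};

isOpenAITool(getWeather); // => true
isOpenAITool({ type: "function" }); // => false (missing the nested function object)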
/** @deprecated Use StructuredOutputMethodOptions instead */
export type StructuredOutputMethodParams<
  RunOutput,
  IncludeRaw extends boolean = false
> = {
  /** @deprecated Pass schema in as the first argument */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  schema: InteropZodType<RunOutput> | Record<string, any>;
  name?: string;
  method?: "functionCalling" | "jsonMode";
  includeRaw?: IncludeRaw;
};

export interface BaseLanguageModelInterface<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunOutput = any,
  CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions
> extends RunnableInterface<BaseLanguageModelInput, RunOutput, CallOptions> {
  get callKeys(): string[];

  generatePrompt(
    promptValues: BasePromptValueInterface[],
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<LLMResult>;

  _modelType(): string;

  _llmType(): string;

  getNumTokens(content: MessageContent): Promise<number>;

  /**
   * Get the identifying parameters of the LLM.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _identifyingParams(): Record<string, any>;

  serialize(): SerializedLLM;
}

export type LanguageModelOutput = BaseMessage | string;

export type LanguageModelLike = Runnable<
  BaseLanguageModelInput,
  LanguageModelOutput
>;

/**
 * Base class for language models.
 */
export abstract class BaseLanguageModel<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput = any,
    CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions
  >
  extends BaseLangChain<BaseLanguageModelInput, RunOutput, CallOptions>
  implements
    BaseLanguageModelParams,
    BaseLanguageModelInterface<RunOutput, CallOptions>
{
  /**
   * Keys that the language model accepts as call options.
   */
  get callKeys(): string[] {
    return ["stop", "timeout", "signal", "tags", "metadata", "callbacks"];
  }

  /**
   * The async caller should be used by subclasses to make any async calls,
   * which will thus benefit from the concurrency and retry logic.
   */
  caller: AsyncCaller;

  cache?: BaseCache;

  constructor({
    callbacks,
    callbackManager,
    ...params
  }: BaseLanguageModelParams) {
    const { cache, ...rest } = params;
    super({
      callbacks: callbacks ?? callbackManager,
      ...rest,
    });
    if (typeof cache === "object") {
      this.cache = cache;
    } else if (cache) {
      this.cache = InMemoryCache.global();
    } else {
      this.cache = undefined;
    }
    this.caller = new AsyncCaller(params ?? {});
  }
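  // The cache option handled above accepts either a BaseCache instance or a
  // boolean. Illustrated with a hypothetical concrete subclass (this base
  // class itself is abstract and cannot be instantiated directly):
  //
  //   new SomeChatModel({ cache: myRedisCache }); // BaseCache instance, used as-is
  //   new SomeChatModel({ cache: true });         // shared InMemoryCache.global() singleton
  //   new SomeChatModel({ cache: false });        // this.cache stays undefined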
  abstract generatePrompt(
    promptValues: BasePromptValueInterface[],
    options?: string[] | CallOptions,
    callbacks?: Callbacks
  ): Promise<LLMResult>;

  abstract _modelType(): string;

  abstract _llmType(): string;

  private _encoding?: Tiktoken;

  /**
   * Get the number of tokens in the content.
   * @param content The content to get the number of tokens for.
   * @returns The number of tokens in the content.
   */
  async getNumTokens(content: MessageContent) {
    // Extract text content from MessageContent
    let textContent: string;
    if (typeof content === "string") {
      textContent = content;
    } else {
      /**
       * Content is an array of ContentBlock
       *
       * ToDo(@christian-bromann): This is a temporary fix to get the number of tokens for the content.
       * We need to find a better way to do this.
       * @see https://github.com/langchain-ai/langchainjs/pull/8341#pullrequestreview-2933713116
       */
      textContent = content
        .map((item) => {
          if (typeof item === "string") return item;
          if (item.type === "text" && "text" in item) return item.text;
          return "";
        })
        .join("");
    }

    // fallback to approximate calculation if tiktoken is not available
    let numTokens = Math.ceil(textContent.length / 4);

    if (!this._encoding) {
      try {
        this._encoding = await encodingForModel(
          "modelName" in this
            ? getModelNameForTiktoken(this.modelName as string)
            : "gpt2"
        );
      } catch (error) {
        console.warn(
          "Failed to calculate number of tokens, falling back to approximate count",
          error
        );
      }
    }

    if (this._encoding) {
      try {
        numTokens = this._encoding.encode(textContent).length;
      } catch (error) {
        console.warn(
          "Failed to calculate number of tokens, falling back to approximate count",
          error
        );
      }
    }

    return numTokens;
  }
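  // getNumTokens in practice: the heuristic runs first, then an exact count
  // replaces it when an encoding is available.
  //
  //   await model.getNumTokens("hello world");
  //   // 1. heuristic: Math.ceil(11 / 4) = 3
  //   // 2. if encodingForModel(...) resolves (model-specific when the subclass
  //   //    exposes modelName, otherwise "gpt2"), the encoded length wins.
  //
  //   await model.getNumTokens([{ type: "text", text: "hi" }]); // text blocks are concatenated
  //   await model.getNumTokens([{ type: "image_url", image_url: "" }]); // non-text blocks count as ""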
  protected static _convertInputToPromptValue(
    input: BaseLanguageModelInput
  ): BasePromptValueInterface {
    if (typeof input === "string") {
      return new StringPromptValue(input);
    } else if (Array.isArray(input)) {
      return new ChatPromptValue(input.map(coerceMessageLikeToMessage));
    } else {
      return input;
    }
  }

  /**
   * Get the identifying parameters of the LLM.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _identifyingParams(): Record<string, any> {
    return {};
  }

  /**
   * Create a unique cache key for a specific call to a specific language model.
   * @param callOptions Call options for the model
   * @returns A unique cache key.
   */
  _getSerializedCacheKeyParametersForCall(
    // TODO: Fix when we remove the RunnableLambda backwards compatibility shim.
    { config, ...callOptions }: CallOptions & { config?: RunnableConfig }
  ): string {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const params: Record<string, any> = {
      ...this._identifyingParams(),
      ...callOptions,
      _type: this._llmType(),
      _model: this._modelType(),
    };
    const filteredEntries = Object.entries(params).filter(
      ([_, value]) => value !== undefined
    );
    const serializedEntries = filteredEntries
      .map(([key, value]) => `${key}:${JSON.stringify(value)}`)
      .sort()
      .join(",");
    return serializedEntries;
  }
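  // The serialized cache key is deterministic: undefined-valued options are
  // filtered out, each entry becomes `key:JSON.stringify(value)`, and the
  // entries are sorted before joining, so option order never changes the key.
  // For a hypothetical subclass whose _llmType() is "openai" and _modelType()
  // is "base_chat_model", a call with stop: ["\n"] would serialize to:
  //
  //   _model:"base_chat_model",_type:"openai",stop:["\n"]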
  /**
   * @deprecated
   * Return a json-like object representing this LLM.
   */
  serialize(): SerializedLLM {
    return {
      ...this._identifyingParams(),
      _type: this._llmType(),
      _model: this._modelType(),
    };
  }

  /**
   * @deprecated
   * Load an LLM from a json-like object describing it.
   */
  static async deserialize(_data: SerializedLLM): Promise<BaseLanguageModel> {
    throw new Error("Use .toJSON() instead");
  }

  /**
   * Return profiling information for the model.
   *
   * @returns {ModelProfile} An object describing the model's capabilities and constraints
   */
  get profile(): ModelProfile {
    return {};
  }

  withStructuredOutput?<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    schema:
      | ZodTypeV3<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<false>
  ): Runnable<BaseLanguageModelInput, RunOutput>;

  withStructuredOutput?<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    schema:
      | ZodTypeV3<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<true>
  ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;

  withStructuredOutput?<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    schema:
      | ZodTypeV4<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<false>
  ): Runnable<BaseLanguageModelInput, RunOutput>;

  withStructuredOutput?<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    schema:
      | ZodTypeV4<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<true>
  ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;

  /**
   * Model wrapper that returns outputs formatted to match the given schema.
   *
   * @template {BaseLanguageModelInput} RunInput The input type for the Runnable, expected to be the same input for the LLM.
   * @template {Record<string, any>} RunOutput The output type for the Runnable, expected to be a Zod schema object for structured output validation.
   *
   * @param {InteropZodType<RunOutput>} schema The schema for the structured output, either as a Zod schema or a valid JSON schema object.
   * If a Zod schema is passed, the returned attributes will be validated, whereas with JSON schema they will not be.
   * @param {string} name The name of the function to call.
   * @param {"functionCalling" | "jsonMode"} [method=functionCalling] The method to use for getting the structured output. Defaults to "functionCalling".
   * @param {boolean | undefined} [includeRaw=false] Whether to include the raw output in the result. Defaults to false.
   * @returns {Runnable<RunInput, RunOutput> | Runnable<RunInput, { raw: BaseMessage; parsed: RunOutput }>} A new runnable that calls the LLM with structured output.
   */
  withStructuredOutput?<
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    RunOutput extends Record<string, any> = Record<string, any>
  >(
    schema:
      | InteropZodType<RunOutput>
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      | Record<string, any>,
    config?: StructuredOutputMethodOptions<boolean>
  ):
    | Runnable<BaseLanguageModelInput, RunOutput>
    | Runnable<
        BaseLanguageModelInput,
        {
          raw: BaseMessage;
          parsed: RunOutput;
        }
      >;
}

/**
 * Shared interface for token usage
 * return type from LLM calls.
 */
export interface TokenUsage {
  completionTokens?: number;
  promptTokens?: number;
  totalTokens?: number;
}
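withStructuredOutput is declared optional here and implemented by concrete chat-model integrations. A hedged usage sketch; ChatSomeProvider and its package are hypothetical stand-ins for any real integration that implements the method:

import { z } from "zod";
// Hypothetical integration package; real ones expose the same
// withStructuredOutput surface described by the overloads above.
import { ChatSomeProvider } from "@langchain/some-provider";

const Joke = z.object({
  setup: z.string().describe("The setup of the joke"),
  punchline: z.string().describe("The punchline"),
});

const model = new ChatSomeProvider({ model: "some-model" });

// Default form: the runnable resolves to the parsed, schema-validated object.
const structured = model.withStructuredOutput!(Joke, { name: "joke" });
const joke = await structured.invoke("Tell me a joke about tokenizers");
// joke: { setup: string; punchline: string }

// With includeRaw: true, both the raw BaseMessage and the parsed value come back.
const withRaw = model.withStructuredOutput!(Joke, { includeRaw: true });
const { raw, parsed } = await withRaw.invoke("Another one, please");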