@samchon/openapi
Version:
OpenAPI definitions and converters for 'typia' and 'nestia'.
1 lines • 11.5 kB
Source Map (JSON)
{"version":3,"file":"HttpLlm.mjs","sources":["../src/HttpLlm.ts"],"sourcesContent":["import { HttpMigration } from \"./HttpMigration\";\nimport { OpenApi } from \"./OpenApi\";\nimport { OpenApiV3 } from \"./OpenApiV3\";\nimport { OpenApiV3_1 } from \"./OpenApiV3_1\";\nimport { SwaggerV2 } from \"./SwaggerV2\";\nimport { HttpLlmComposer } from \"./composers/HttpLlmApplicationComposer\";\nimport { LlmSchemaComposer } from \"./composers/LlmSchemaComposer\";\nimport { HttpLlmFunctionFetcher } from \"./http/HttpLlmFunctionFetcher\";\nimport { IHttpConnection } from \"./structures/IHttpConnection\";\nimport { IHttpLlmApplication } from \"./structures/IHttpLlmApplication\";\nimport { IHttpLlmFunction } from \"./structures/IHttpLlmFunction\";\nimport { IHttpMigrateApplication } from \"./structures/IHttpMigrateApplication\";\nimport { IHttpResponse } from \"./structures/IHttpResponse\";\nimport { ILlmFunction } from \"./structures/ILlmFunction\";\nimport { ILlmSchema } from \"./structures/ILlmSchema\";\nimport { LlmDataMerger } from \"./utils/LlmDataMerger\";\n\n/**\n * LLM function calling application composer from OpenAPI document.\n *\n * `HttpLlm` is a module for composing LLM (Large Language Model) function calling\n * application from the {@link OpenApi.IDocument OpenAPI document}, and also for\n * LLM function call execution and parameter merging.\n *\n * At first, you can construct the LLM function calling application by the\n * {@link HttpLlm.application HttpLlm.application()} function. And then the LLM\n * has selected a {@link IHttpLlmFunction function} to call and composes its\n * arguments, you can execute the function by\n * {@link HttpLlm.execute HttpLlm.execute()} or\n * {@link HttpLlm.propagate HttpLlm.propagate()}.\n *\n * By the way, if you have configured the {@link IHttpLlmApplication.IOptions.separate}\n * option to separate the parameters into human and LLM sides, you can merge these\n * human and LLM sides' parameters into one through\n * {@link HttpLlm.mergeParameters HttpLlm.mergeParameters()} before the actual LLM\n * function call execution.\n *\n * @author Jeongho Nam - https://github.com/samchon\n */\nexport namespace HttpLlm {\n /* -----------------------------------------------------------\n COMPOSERS\n ----------------------------------------------------------- */\n /**\n * Properties for the LLM function calling application composer.\n *\n * @template Model Target LLM model\n */\n export interface IApplicationProps<Model extends ILlmSchema.Model> {\n /**\n * Target LLM model.\n */\n model: Model;\n\n /**\n * OpenAPI document to convert.\n */\n document:\n | OpenApi.IDocument\n | SwaggerV2.IDocument\n | OpenApiV3.IDocument\n | OpenApiV3_1.IDocument;\n\n /**\n * Options for the LLM function calling schema conversion.\n */\n options?: Partial<IHttpLlmApplication.IOptions<Model>>;\n }\n\n /**\n * Convert OpenAPI document to LLM function calling application.\n *\n * Converts {@link OpenApi.IDocument OpenAPI document} or\n * {@link IHttpMigrateApplication migrated application} to the\n * {@link IHttpLlmApplication LLM function calling application}. Every\n * {@link OpenApi.IOperation API operations} in the OpenAPI document are converted\n * to the {@link IHttpLlmFunction LLM function} type, and they would be used for\n * the LLM function calling.\n *\n * If you have configured the {@link IHttpLlmApplication.IOptions.separate} option,\n * every parameters in the {@link IHttpLlmFunction} would be separated into both\n * human and LLM sides. In that case, you can merge these human and LLM sides'\n * parameters into one through {@link HttpLlm.mergeParameters} before the actual\n * LLM function call execution.\n *\n * Additionally, if you have configured the {@link IHttpLlmApplication.IOptions.keyword}\n * as `true`, the number of {@link IHttpLlmFunction.parameters} are always 1 and the\n * first parameter type is always {@link ILlmSchemaV3.IObject}. I recommend this option\n * because LLM can understand the keyword arguments more easily.\n *\n * @param document Target OpenAPI document to convert (or migrate application)\n * @param options Options for the LLM function calling application conversion\n * @returns LLM function calling application\n */\n export const application = <Model extends ILlmSchema.Model>(\n props: IApplicationProps<Model>,\n ): IHttpLlmApplication<Model> => {\n // MIGRATE\n const migrate: IHttpMigrateApplication = HttpMigration.application(\n props.document,\n );\n const defaultConfig: ILlmSchema.IConfig<Model> =\n LlmSchemaComposer.defaultConfig(props.model);\n return HttpLlmComposer.application<Model>({\n migrate,\n model: props.model,\n options: {\n ...Object.fromEntries(\n Object.entries(defaultConfig).map(\n ([key, value]) =>\n [key, (props.options as any)?.[key] ?? value] as const,\n ),\n ),\n separate: props.options?.separate ?? null,\n maxLength: props.options?.maxLength ?? null,\n } as any as IHttpLlmApplication.IOptions<Model>,\n });\n };\n\n /* -----------------------------------------------------------\n FETCHERS\n ----------------------------------------------------------- */\n /**\n * Properties for the LLM function call.\n */\n export interface IFetchProps<Model extends ILlmSchema.Model> {\n /**\n * Application of the LLM function calling.\n */\n application: IHttpLlmApplication<Model>;\n\n /**\n * LLM function schema to call.\n */\n function: IHttpLlmFunction<ILlmSchema.Model>;\n\n /**\n * Connection info to the HTTP server.\n */\n connection: IHttpConnection;\n\n /**\n * Input arguments for the function call.\n */\n input: object;\n }\n\n /**\n * Execute the LLM function call.\n *\n * `HttmLlm.execute()` is a function executing the target\n * {@link OpenApi.IOperation API endpoint} with with the connection information\n * and arguments composed by Large Language Model like OpenAI (+human sometimes).\n *\n * By the way, if you've configured the {@link IHttpLlmApplication.IOptions.separate},\n * so that the parameters are separated to human and LLM sides, you have to merge\n * these humand and LLM sides' parameters into one through\n * {@link HttpLlm.mergeParameters} function.\n *\n * About the {@link IHttpLlmApplication.IOptions.keyword} option, don't worry anything.\n * This `HttmLlm.execute()` function will automatically recognize the keyword arguments\n * and convert them to the proper sequence.\n *\n * For reference, if the target API endpoinnt responds none 200/201 status, this\n * would be considered as an error and the {@link HttpError} would be thrown.\n * Otherwise you don't want such rule, you can use the {@link HttpLlm.propagate}\n * function instead.\n *\n * @param props Properties for the LLM function call\n * @returns Return value (response body) from the API endpoint\n * @throws HttpError when the API endpoint responds none 200/201 status\n */\n export const execute = <Model extends ILlmSchema.Model>(\n props: IFetchProps<Model>,\n ): Promise<unknown> => HttpLlmFunctionFetcher.execute<Model>(props);\n\n /**\n * Propagate the LLM function call.\n *\n * `HttmLlm.propagate()` is a function propagating the target\n * {@link OpenApi.IOperation API endpoint} with with the connection information\n * and arguments composed by Large Language Model like OpenAI (+human sometimes).\n *\n * By the way, if you've configured the {@link IHttpLlmApplication.IOptions.separate},\n * so that the parameters are separated to human and LLM sides, you have to merge\n * these humand and LLM sides' parameters into one through\n * {@link HttpLlm.mergeParameters} function.\n *\n * About the {@link IHttpLlmApplication.IOptions.keyword} option, don't worry anything.\n * This `HttmLlm.propagate()` function will automatically recognize the keyword arguments\n * and convert them to the proper sequence.\n *\n * For reference, the propagation means always returning the response from the API\n * endpoint, even if the status is not 200/201. This is useful when you want to\n * handle the response by yourself.\n *\n * @param props Properties for the LLM function call\n * @returns Response from the API endpoint\n * @throws Error only when the connection is failed\n */\n export const propagate = <Model extends ILlmSchema.Model>(\n props: IFetchProps<Model>,\n ): Promise<IHttpResponse> => HttpLlmFunctionFetcher.propagate<Model>(props);\n\n /* -----------------------------------------------------------\n MERGERS\n ----------------------------------------------------------- */\n /**\n * Properties for the parameters' merging.\n */\n export interface IMergeProps<Model extends ILlmSchema.Model> {\n /**\n * Metadata of the target function.\n */\n function: ILlmFunction<Model>;\n\n /**\n * Arguments composed by the LLM.\n */\n llm: object | null;\n\n /**\n * Arguments composed by the human.\n */\n human: object | null;\n }\n\n /**\n * Merge the parameters.\n *\n * If you've configured the {@link IHttpLlmApplication.IOptions.separate} option,\n * so that the parameters are separated to human and LLM sides, you can merge these\n * humand and LLM sides' parameters into one through this `HttpLlm.mergeParameters()`\n * function before the actual LLM function call wexecution.\n *\n * On contrary, if you've not configured the\n * {@link IHttpLlmApplication.IOptions.separate} option, this function would throw\n * an error.\n *\n * @param props Properties for the parameters' merging\n * @returns Merged parameter values\n */\n export const mergeParameters = <Model extends ILlmSchema.Model>(\n props: IMergeProps<Model>,\n ): object => LlmDataMerger.parameters(props);\n\n /**\n * Merge two values.\n *\n * If both values are objects, then combines them in the properties level.\n *\n * Otherwise, returns the latter value if it's not null, otherwise the former value.\n *\n * - `return (y ?? x)`\n *\n * @param x Value X to merge\n * @param y Value Y to merge\n * @returns Merged value\n */\n export const mergeValue = (x: unknown, y: unknown): unknown =>\n LlmDataMerger.value(x, y);\n}\n"],"names":["HttpLlm","application","props","migrate","HttpMigration","document","defaultConfig","LlmSchemaComposer","model","HttpLlmComposer","options","Object","fromEntries","entries","map","key","value","separate","maxLength","execute","HttpLlmFunctionFetcher","propagate","mergeParameters","LlmDataMerger","parameters","mergeValue","x","y"],"mappings":";;;;;;;;;;AAuCM,IAAWA;;CAAjB,SAAiBA;IAuDFA,QAAAC,cACXC;QAGA,MAAMC,UAAmCC,cAAcH,YACrDC,MAAMG;QAER,MAAMC,gBACJC,kBAAkBD,cAAcJ,MAAMM;QACxC,OAAOC,gBAAgBR,YAAmB;YACxCE;YACAK,OAAON,MAAMM;YACbE,SAAS;mBACJC,OAAOC,YACRD,OAAOE,QAAQP,eAAeQ,KAC5B,EAAEC,KAAKC,WACL,EAACD,KAAMb,MAAMQ,UAAkBK,QAAQC;gBAG7CC,UAAUf,MAAMQ,SAASO,YAAY;gBACrCC,WAAWhB,MAAMQ,SAASQ,aAAa;;;AAEzC;IAwDSlB,QAAAmB,UACXjB,SACqBkB,uBAAuBD,QAAejB;IA0BhDF,QAAAqB,YACXnB,SAC2BkB,uBAAuBC,UAAiBnB;IAwCxDF,QAAAsB,kBACXpB,SACWqB,cAAcC,WAAWtB;IAezBF,QAAAyB,aAAa,CAACC,GAAYC,MACrCJ,cAAcP,MAAMU,GAAGC;AAC1B,EA9ND,CAAiB3B,YAAAA,UA8NhB,CAAA;;"}