@samchon/openapi
Version:
OpenAPI definitions and converters for 'typia' and 'nestia'.
84 lines (83 loc) • 3.82 kB
text/typescript
import { ILlmSchema } from "./ILlmSchema";
import { IValidation } from "./IValidation";
/**
 * MCP LLM function.
 *
 * `IMcpLlmFunction` is an interface representing a function metadata of MCP
 * (Model Context Protocol), which has been designed for the LLM (Large
 * Language Model) function calling, instead of providing
 * [`mcp_servers`](https://openai.github.io/openai-agents-python/mcp/#using-mcp-servers)
 * property to the LLM vendor API.
 *
 * It contains the target function's metadata {@link name}, {@link parameters} and
 * {@link description}. You can put these properties to the LLM vendor API to let
 * the LLM know the function's purpose and how to call it.
 *
 * Also, it contains {@link validate} function, which is used to validate the
 * function arguments generated by AI agent. It is very important to the LLM
 * function calling, because LLM like OpenAI makes a lot of mistakes when
 * composing arguments in function calling. Even though `number` like simple
 * type is defined in the {@link parameters} schema, LLM often fills it just by a
 * `string` typed value.
 *
 * > This is the reason why `@samchon/openapi` recommends not to use the
 * > [`mcp_servers`](https://openai.github.io/openai-agents-python/mcp/#using-mcp-servers)
 * > property of LLM API directly, but to use the function calling feature
 * > instead. You have to make the AI agent correct the type level mistakes by
 * > using the `validate` function.
 *
 * @author Jeongho Nam - https://github.com/samchon
 * @author Byeongjin Oh - https://github.com/sunrabbit123
 */
export interface IMcpLlmFunction<Model extends ILlmSchema.Model> {
  /**
   * Representative name of the function.
   *
   * @maxLength 64
   */
  name: string;
  /**
   * Description of the function.
   *
   * For reference, the `description` is a very important property to teach the
   * purpose of the function to the LLM (Large Language Model), and LLM actually
   * determines which function to call by the description.
   *
   * Also, when the LLM converses with the user, the `description` is used to
   * explain the function to the user. Therefore, the `description` property has
   * the highest priority, and you have to consider it.
   */
  description?: string | undefined;
  /** Parameters of the function. */
  parameters: ILlmSchema.IParameters<Model>;
  /**
   * Validate function of the arguments.
   *
   * You know what? LLM (Large Language Model) like OpenAI makes a lot of
   * mistakes when composing arguments in function calling. Even though `number`
   * like simple type is defined in the {@link parameters} schema, LLM often
   * fills it just by a `string` typed value.
   *
   * In that case, you have to give a validation feedback to the LLM by using
   * this `validate` function. The `validate` function will return detailed
   * information about every type error in the arguments.
   *
   * And in my experience, OpenAI's `gpt-4o-mini` model tends to construct an
   * invalid function calling arguments at the first trial about 50% of the
   * time. However, if you correct them through this `validate` function, the
   * success rate soars to 99% at the second trial, and I've never failed at the
   * third trial.
   *
   * > NOTE(review): the `{@link separated}` reference below targets no member
   * > of this interface — it appears carried over from a sibling interface
   * > (e.g. `IHttpLlmFunction`); confirm the intended link target.
   *
   * > If you've {@link separated} parameters, use the
   * > {@link IMcpLlmFunction.ISeparated.validate} function instead when validating
   * > the LLM composed arguments.
   *
   * > In that case, this `validate` function would be meaningful only when you've
   * > merged the LLM and human composed arguments by
   * > {@link McpLlm.mergeParameters} function.
   *
   * @param args Arguments to validate
   * @returns Validation result
   */
  validate: (args: unknown) => IValidation<unknown>;
}