llm-exe
Version:
Simplify building LLM-powered apps with easy-to-use base components, supporting text and chat-based prompts with handlebars template engine, output parsers, and flexible function calling capabilities.
1,302 lines (1,245 loc) • 48.8 kB
text/typescript
import { JSONSchema, FromSchema } from 'json-schema-to-ts';
import { Narrow } from 'json-schema-to-ts/lib/types/type-utils';
// Primitive values allowed inside generic executor inputs.
type PrimitiveValue = bigint | boolean | null | number | string | symbol | undefined;
// Any value storable in a PlainObject: a primitive, a nested object, or an array.
type ObjectValue = PrimitiveValue | PlainObject | ObjectArray;
// A string-keyed tree of ObjectValues — the generic executor input shape.
interface PlainObject {
[key: string]: ObjectValue;
}
// An array whose elements are themselves ObjectValues (permits nesting).
interface ObjectArray extends Array<ObjectValue> {
}
// Optional contract for objects that can round-trip through a plain-object form.
interface Serializable {
serialize?(): Record<string, any>;
deserialize?(): void;
}
// All roles a chat message may carry. "function" / "function_call" support
// OpenAI-style function calling; NOTE(review): "model" is presumably the
// Gemini-style assistant-equivalent role — confirm against the provider layer.
type IChatMessageRole = "system" | "model" | "assistant" | "user" | "function" | "function_call";
// One part of a multi-part message body (a text part and/or an image-URL part).
interface IChatMessageContentDetailed {
type: string;
text?: string;
image_url?: {
url: string;
};
}
// Common shape of every chat message: a role plus string / parts / null content.
interface IChatMessageBase {
role: IChatMessageRole;
content: string | null | IChatMessageContentDetailed[];
}
// A message authored by the end user; may be multi-part and carry a user name.
interface IChatUserMessage extends IChatMessageBase {
role: Extract<IChatMessageRole, "user">;
content: string | IChatMessageContentDetailed[];
name?: string;
}
// The result of a function/tool invocation, keyed by the function's name.
interface IChatFunctionMessage extends IChatMessageBase {
role: Extract<IChatMessageRole, "function">;
content: string;
name: string;
}
// A plain assistant reply. `function_call?: undefined` is the discriminant
// separating this from IChatAssistantFunctionCallMessage.
interface IChatAssistantMessage extends IChatMessageBase {
role: Extract<IChatMessageRole, "assistant">;
content: string;
function_call?: undefined;
}
// An assistant turn requesting a function call; content is null and
// `arguments` holds the raw JSON-encoded argument string.
interface IChatAssistantFunctionCallMessage extends IChatMessageBase {
role: Extract<IChatMessageRole, "assistant">;
content: null;
function_call?: {
name: string;
arguments: string;
};
}
// A system instruction message.
interface IChatSystemMessage extends IChatMessageBase {
role: Extract<IChatMessageRole, "system">;
content: string;
}
// Template-time placeholder; resolved during format(), never sent to an LLM.
interface IChatMessagesPlaceholder {
role: "placeholder";
content: string;
}
// Messages allowed in a text (non-chat) prompt: system messages + placeholders.
type IPromptMessages = (IChatSystemMessage | IChatMessagesPlaceholder)[];
// Messages allowed while authoring a chat prompt (placeholders included).
type IPromptChatMessages = (IChatUserMessage | IChatAssistantMessage | IChatAssistantFunctionCallMessage | IChatSystemMessage | IChatMessagesPlaceholder | IChatFunctionMessage)[];
// A fully-resolved message as sent to an LLM (no placeholders).
type IChatMessage = IChatUserMessage | IChatAssistantMessage | IChatAssistantFunctionCallMessage | IChatSystemMessage | IChatFunctionMessage;
type IChatMessages = IChatMessage[];
// Known OpenAI chat-completion model names. The template-literal members
// (`gpt-4${string}`, `gpt-3.5-turbo-${string}`) accept future dated/variant
// snapshots, while the explicit literals keep editor autocomplete useful.
// Fix: the original union listed "gpt-4-0613" twice; the duplicate is removed.
type OpenAIChatModelName = "gpt-3.5-turbo" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-16k" | "gpt-4-0613" | "gpt-4" | "gpt-4o" | "gpt-4o-mini" | "gpt-4-32k-0613" | `gpt-4${string}` | `gpt-3.5-turbo-${string}`;
// Legacy completion (non-chat) model names.
type OpenAIConversationModelName = "davinci" | "text-curie-001" | "text-babbage-001" | "text-ada-001";
// Embedding model names.
type OpenAIEmbeddingModelName = "text-embedding-ada-002";
// Any OpenAI model name accepted by this library.
type OpenAIModelName = OpenAIChatModelName | OpenAIConversationModelName | OpenAIEmbeddingModelName;
// Per-call options accepted by LlmExecutor.execute().
interface LlmExecutorExecuteOptions {
functions?: CallableExecutorCore[];
functionCall?: any;
jsonSchema?: Record<string, any>;
}
// How the model should choose a function: auto / none / any, or force one by name.
type GenericFunctionCall = "auto" | "none" | "any" | {
name: string;
};
// OpenAI-specific execute() options; narrows functionCall to T.
interface OpenAiLlmExecutorOptions<T extends GenericFunctionCall = "auto"> extends LlmExecutorExecuteOptions {
functions?: CallableExecutorCore[];
functionCall?: T;
functionCallStrictInput?: boolean;
jsonSchema?: Record<string, any>;
}
/**
 * BaseParser is an abstract class for parsing text and enforcing JSON schema on the parsed data.
 */
declare abstract class BaseParser<T = any> {
name: string;
options: BaseParserOptions;
// Which part of the LLM output this parser consumes.
target: "text" | "function_call";
/**
 * Create a new BaseParser.
 * @param name - The name of the parser.
 * @param options - options
 * @param target - Output channel to parse; defaults to "text".
 */
constructor(name: string, options?: BaseParserOptions, target?: "text" | "function_call");
/**
 * Parse the given text and return the parsed data.
 * @abstract
 * @param text - The text to parse.
 * @param [attributes] - Optional attributes to use during parsing.
 * @returns The parsed data.
 */
abstract parse(text: string | OutputResultContent[], attributes?: Record<string, any>): T;
}
/**
 * Base class for parsers whose output is validated against a JSON schema.
 * @template S - JSON schema literal type (optional).
 * @template T - Output type; derived from S via FromSchema when S is supplied.
 */
declare abstract class BaseParserWithJson<S extends JSONSchema | undefined = undefined, T = S extends JSONSchema ? FromSchema<S> : Record<string, any>> extends BaseParser<T> {
schema: S;
validateSchema: boolean;
/**
 * Create a new BaseParser.
 * @param name - The name of the parser.
 * @param options - options (carries the schema and validation flag).
 */
constructor(name: string, options: BaseParserOptionsWithSchema<S>);
}
// Options for OpenAiFunctionParser: the inner parser used for plain-text output.
interface OpenAiFunctionParserOptions<T extends BaseParser<any>> extends BaseParserOptions {
parser: T;
}
// Routes LLM output: function_call results are returned as {name, arguments};
// plain text is delegated to the wrapped parser T.
declare class OpenAiFunctionParser<T extends BaseParser<any>> extends BaseParser<ParserOutput<T> | {
name: any;
arguments: any;
}> {
parser: T;
constructor(options: OpenAiFunctionParserOptions<T>);
parse(text: OutputResultContent[], _options?: Record<string, any>): ParserOutput<T> | {
name: string;
arguments: any;
};
}
interface StringParserOptions extends BaseParserOptions {
}
// Returns the LLM output as a plain string.
declare class StringParser extends BaseParser<string> {
constructor(options?: StringParserOptions);
parse(text: string | OutputResultContent[], _options?: Record<string, any>): string;
}
interface BooleanParserOptions extends BaseParserOptions {
}
// Coerces the LLM output to a boolean.
declare class BooleanParser extends BaseParser<boolean> {
constructor(options?: BooleanParserOptions);
parse(text: string): boolean;
}
interface NumberParserOptions extends BaseParserOptions {
}
// Coerces the LLM output to a number.
declare class NumberParser extends BaseParser<number> {
constructor(options?: NumberParserOptions);
parse(text: string): number;
}
// Parses a JSON string; output typed (and optionally validated) against schema S.
declare class JsonParser<S extends JSONSchema | undefined = undefined> extends BaseParserWithJson<S> {
constructor(options?: BaseParserOptionsWithSchema<S>);
parse(text: string, _attributes?: Record<string, any>): ParserOutput<BaseParserWithJson<S>>;
}
// Converts a text list into a JSON object shaped by schema S.
declare class ListToJsonParser<S extends JSONSchema | undefined = undefined> extends BaseParserWithJson<S> {
constructor(options?: BaseParserOptionsWithSchema<S>);
parse(text: string): ParserOutput<BaseParserWithJson<S>>;
}
interface ListToKeyValueParserOptions extends BaseParserOptions {
}
// Parses a text list into an array of {key, value} string pairs.
declare class ListToKeyValueParser extends BaseParser<Array<{
key: string;
value: string;
}>> {
constructor(options?: ListToKeyValueParserOptions);
parse(text: string): {
key: string;
value: string;
}[];
}
/**
 * CustomParser class, extending the BaseParser class.
 * Wraps a user-supplied parsing function.
 * @template O The type of the parsed value (output)
 * @extends {BaseParser<O>}
 */
declare class CustomParser<O = any> extends BaseParser<O> {
/**
 * Custom parsing function supplied by the caller; receives the raw text
 * and the executor's execution context.
 */
parserFn: (text: string, inputValues: ExecutorContext<any, any>) => O;
/**
 * Creates a new CustomParser instance.
 * @param name The name of the parser.
 * @param parserFn The custom parsing function.
 */
constructor(name: string, parserFn: (text: string, inputValues: ExecutorContext<any, any>) => O);
/**
 * Parses the text using the custom parsing function.
 * @param text The text to be parsed.
 * @param inputValues Execution context passed through to the parser function.
 * @returns The parsed value.
 */
parse(text: string, inputValues: ExecutorContext<any, O>): O;
}
// Splits a text list into an array of strings (one entry per item).
declare class ListToArrayParser extends BaseParser<string[]> {
constructor();
parse(text: string): string[];
}
interface ReplaceStringTemplateParserOptions extends BaseParserOptions {
}
// Runs template replacement over the output text using the given attributes.
declare class ReplaceStringTemplateParser extends BaseParser<string> {
constructor(options?: ReplaceStringTemplateParserOptions);
parse(text: string, attributes?: Record<string, any>): string;
}
interface MarkdownCodeBlockParserOptions extends BaseParserOptions {
}
// Extracts a single fenced markdown code block as {language, code}
// (singular counterpart of MarkdownCodeBlocksParser).
declare class MarkdownCodeBlockParser extends BaseParser<{
language: string;
code: string;
}> {
constructor(options?: MarkdownCodeBlockParserOptions);
parse(input: string): {
code: string;
language: string;
};
}
interface MarkdownCodeBlocksParserOptions extends BaseParserOptions {
}
// Extracts all fenced markdown code blocks as {language, code} entries.
declare class MarkdownCodeBlocksParser extends BaseParser<{
language: string;
code: string;
}[]> {
constructor(options?: MarkdownCodeBlocksParserOptions);
parse(input: string): {
code: string;
language: string;
}[];
}
// Options for StringExtractParser: the allowed values and case sensitivity.
interface StringExtractParserOptions extends BaseParserOptions {
enum: string[];
ignoreCase?: boolean;
}
// Extracts one of the configured enum strings from the output text.
declare class StringExtractParser extends BaseParser<string> {
private enum;
private ignoreCase;
constructor(options?: StringExtractParserOptions);
parse(text: string): string;
}
/**
 * Creates a parser based on the given type.
 * @param type - The type of parser to create.
 * @returns An instance of MarkdownCodeBlocksParser.
 */
declare function createParser<T extends Extract<CreateParserType, "markdownCodeBlocks">>(type: T, options?: BaseParserOptions): MarkdownCodeBlocksParser;
/**
 * Creates a parser based on the given type.
 * @param type - The type of parser to create.
 * @returns An instance of MarkdownCodeBlockParser.
 */
declare function createParser<T extends Extract<CreateParserType, "markdownCodeBlock">>(type: T, options?: BaseParserOptions): MarkdownCodeBlockParser;
/**
 * Creates a parser based on the given type.
 * @param type - The type of parser to create.
 * @returns An instance of ListToKeyValueParser.
 */
declare function createParser<T extends Extract<CreateParserType, "listToKeyValue">>(type: T, options?: BaseParserOptions): ListToKeyValueParser;
/**
 * Creates a parser based on the given type.
 * @template S - JSON schema type (unused by this overload).
 * @param type - The type of parser to create.
 * @returns An instance of ListToArrayParser.
 */
declare function createParser<T extends Extract<CreateParserType, "listToArray">, S extends JSONSchema | undefined = undefined>(type: T, options?: BaseParserOptions): ListToArrayParser;
/**
 * Creates a parser based on the given type.
 * @template S - JSON schema type (unused by this overload).
 * @param type - The type of parser to create.
 * @returns An instance of NumberParser.
 */
declare function createParser<T extends Extract<CreateParserType, "number">, S extends JSONSchema | undefined = undefined>(type: T, options?: BaseParserOptions): NumberParser;
/**
 * Creates a parser based on the given type.
 * @template S - JSON schema type (unused by this overload).
 * @param type - The type of parser to create.
 * @returns An instance of BooleanParser.
 */
declare function createParser<T extends Extract<CreateParserType, "boolean">, S extends JSONSchema | undefined = undefined>(type: T, options?: BaseParserOptions): BooleanParser;
/**
 * Creates a parser based on the given type.
 * @template S - JSON schema type (unused by this overload).
 * @param type - The type of parser to create.
 * @returns An instance of ReplaceStringTemplateParser.
 */
declare function createParser<T extends Extract<CreateParserType, "replaceStringTemplate">, S extends JSONSchema | undefined = undefined>(type: T, options?: BaseParserOptions): ReplaceStringTemplateParser;
/**
 * Creates a parser based on the given type.
 * @template S - JSON schema type (unused by this overload).
 * @param type - The type of parser to create.
 * @returns An instance of StringParser.
 */
declare function createParser<T extends Extract<CreateParserType, "string">, S extends JSONSchema | undefined = undefined>(type: T, options?: BaseParserOptions): StringParser;
/**
 * Creates a parser based on the given type.
 * @template S - JSON schema type (unused by this overload).
 * @param type - The type of parser to create.
 * @returns An instance of StringExtractParser.
 */
declare function createParser<T extends Extract<CreateParserType, "stringExtract">, S extends JSONSchema | undefined = undefined>(type: T, options?: StringExtractParserOptions): StringExtractParser;
/**
 * Creates a parser based on the given type and schema.
 * @template S - JSON schema type.
 * @param type - The type of parser to create.
 * @param options - Options carrying the JSON schema.
 * @returns An instance of ListToJsonParser.
 */
declare function createParser<T extends Extract<CreateParserType, "listToJson">, S extends JSONSchema | undefined = undefined>(type: T, options?: BaseParserOptionsWithSchema<S>): ListToJsonParser<S>;
/**
 * Creates a parser based on the given type and schema.
 * @template S - JSON schema type.
 * @param type - The type of parser to create.
 * @param [options] - Options carrying the JSON schema.
 * @returns An instance of JsonParser.
 */
declare function createParser<T extends Extract<CreateParserType, "json">, S extends JSONSchema | undefined = undefined>(type: T, options?: BaseParserOptionsWithSchema<S>): JsonParser<S>;
// Catch-all signature covering every CreateParserType value.
declare function createParser<T extends CreateParserType, S extends JSONSchema | undefined = undefined>(type: T, options?: BaseParserOptionsWithSchema<S> | BaseParserOptions): JsonParser<S> | ListToJsonParser<S> | StringParser | NumberParser | BooleanParser | ListToArrayParser | ListToKeyValueParser | ReplaceStringTemplateParser | MarkdownCodeBlockParser | MarkdownCodeBlocksParser | StringExtractParser;
// Wraps a user-supplied function in a CustomParser; output type is inferred from parserFn.
declare function createCustomParser<O>(name: string, parserFn: (text: string, inputValues: ExecutorContext<any, any>) => O): CustomParser<ReturnType<typeof parserFn>>;
// Renders a handlebars-style template with the given substitutions (sync).
declare function replaceTemplateString(templateString?: string, substitutions?: Record<string, any>, configuration?: PromptTemplateOptions): string;
// Async variant of replaceTemplateString (supports async helpers/partials).
declare function replaceTemplateStringAsync(templateString?: string, substitutions?: Record<string, any>, configuration?: PromptTemplateOptions): Promise<string>;
/**
 * BasePrompt should be extended.
 * Holds the message list, template partials/helpers, and pre/post filters
 * applied when the prompt is formatted.
 */
declare abstract class BasePrompt<I extends Record<string, any>> {
readonly type: PromptType;
messages: IPromptMessages | IPromptChatMessages;
partials: PromptPartial[];
helpers: PromptHelper[];
replaceTemplateString: typeof replaceTemplateString;
replaceTemplateStringAsync: typeof replaceTemplateStringAsync;
// Filters run over the rendered prompt text: pre before, post after templating.
filters: {
pre: ((prompt: string) => string)[];
post: ((prompt: string) => string)[];
};
/**
 * Create a new prompt.
 * @param initialPromptMessage An initial message to add to the prompt.
 * @param options Partials, helpers, and filter configuration.
 */
constructor(initialPromptMessage?: string, options?: PromptOptions);
/**
 * Append a message to the prompt.
 * @param content The message content
 * @param role The role of the user. Defaults to system for base text prompt.
 * @return instance of BasePrompt.
 */
addToPrompt(content: string, role?: string): BasePrompt<I>;
/**
 * Append a system message.
 * @param content The message content
 * @return returns BasePrompt so it can be chained.
 */
addSystemMessage(content: string): this;
/**
 * Register template partial(s).
 * @param partialOrPartials Additional partials that can be made available to the template parser.
 * @return BasePrompt so it can be chained.
 */
registerPartial(partialOrPartials: PromptPartial | PromptPartial[]): this;
/**
 * Register template helper(s).
 * @param helperOrHelpers Additional helper functions that can be made available to the template parser.
 * @return BasePrompt so it can be chained.
 */
registerHelpers(helperOrHelpers: PromptHelper | PromptHelper[]): this;
/**
 * Render the prompt with template replacement (sync).
 * @param values The message content
 * @param separator The separator between messages. defaults to "\n\n"
 * @return returns messages formatted with template replacement
 */
format(values: I, separator?: string): string | IChatMessages;
/**
 * Render the prompt with template replacement (async).
 * @param values The message content
 * @param separator The separator between messages. defaults to "\n\n"
 * @return returns messages formatted with template replacement
 */
formatAsync(values: I, separator?: string): Promise<string | IChatMessages>;
// Applies the given filter functions to the rendered prompt in order.
runPromptFilter(prompt: string, filters: ((prompt: string, values: I) => string)[], values: I): string;
// Builds the substitution map used for templating; exposes the raw input as `_input`.
getReplacements(values: I): Omit<I, "input"> & {
input: any;
_input: any;
};
/**
 * validate description
 * @return {boolean} Returns false if the template is not valid.
 */
validate(): boolean;
}
/**
 * `TextPrompt` provides a standard text-based prompt.
 * The text prompt can be used with models such as davinci.
 * @extends BasePrompt
 */
declare class TextPrompt<I extends Record<string, any>> extends BasePrompt<I> {
constructor(base?: string, options?: PromptOptions);
}
// Declaration-merged with the ChatPrompt class below so that `messages`
// is narrowed to chat-style messages on ChatPrompt instances.
interface ChatPrompt<I extends Record<string, any>> extends BasePrompt<I> {
messages: IPromptChatMessages;
}
/**
 * `ChatPrompt` provides a conversation-style prompt enabling various roles.
 * The chat prompt can be used with models such as gpt-3.5.turbo and gpt-4+.
 * @extends BasePrompt
 */
declare class ChatPrompt<I extends Record<string, any>> extends BasePrompt<I> {
/**
 * @property type - Prompt type (chat)
 */
readonly type: ChatPromptType;
/**
 * @property parseUserTemplates - Whether or not to allow parsing
 * user messages with the template engine. This could be a risk,
 * so we only parse user messages if explicitly set.
 */
private parseUserTemplates;
/**
 * new `ChatPrompt`
 * @param initialSystemPromptMessage (optional) An initial system message to add to the new prompt.
 * @param options (optional) Options to pass in when creating the prompt.
 */
constructor(initialSystemPromptMessage?: string, options?: ChatPromptOptions);
/**
 * addToPrompt Adds a message to the prompt based on role.
 * Overloaded: `name` is required for function roles, optional for user,
 * and disallowed for assistant/system.
 * @param content The message content.
 * @param role The role of the chat user. Must be one of: assistant, system, user.
 * @param name (optional) The name of the user. Only accepted if role is `user`.
 * @return instance of ChatPrompt.
 */
addToPrompt(content: string, role: Extract<IChatMessageRole, "assistant">, name?: undefined): ChatPrompt<I>;
addToPrompt(content: string, role: Extract<IChatMessageRole, "system">, name?: undefined): ChatPrompt<I>;
addToPrompt(content: string, role: Extract<IChatMessageRole, "user">, name?: string): ChatPrompt<I>;
addToPrompt(content: string, role: Extract<IChatMessageRole, "function">, name: string): ChatPrompt<I>;
addToPrompt(content: string, role: Extract<IChatMessageRole, "function_call">, name: string): ChatPrompt<I>;
/**
 * addUserMessage Helper to add a user message to the prompt.
 * @param content The message content (string or multi-part content).
 * @param name (optional) The name of the user.
 * @return instance of ChatPrompt.
 */
addUserMessage(content: string | IChatMessageContentDetailed[], name?: string): ChatPrompt<I>;
/**
 * addAssistantMessage Helper to add an assistant message to the prompt.
 * @param content The message content.
 * @return ChatPrompt so it can be chained.
 */
addAssistantMessage(content: string): ChatPrompt<I>;
/**
 * addFunctionMessage Helper to add a function-result message to the prompt.
 * @param content The message content.
 * @param name The name of the function that produced the content.
 * @return ChatPrompt so it can be chained.
 */
addFunctionMessage(content: string, name: string): ChatPrompt<I>;
/**
 * addFunctionCallMessage Helper to add an assistant function-call message to the prompt.
 * @param function_call The function name and JSON-encoded arguments.
 * @return ChatPrompt so it can be chained.
 */
addFunctionCallMessage(function_call?: {
name: string;
arguments: string;
}): this;
/**
 * addFromHistory Adds multiple messages at one time.
 * @param history History of chat messages.
 * @return ChatPrompt so it can be chained.
 */
addFromHistory(history: IChatMessages): ChatPrompt<I>;
/**
 * addChatHistoryPlaceholder Adds a placeholder that expands into dialogue
 * history from the input value under `key` at format time.
 * @param key Input key whose value supplies the history.
 * @param options Optional role-name overrides for assistant/user turns.
 * @return returns ChatPrompt so it can be chained.
 */
addChatHistoryPlaceholder(key: keyof I, options?: {
assistant?: string;
user?: string;
}): ChatPrompt<I>;
/**
 * addMessagePlaceholder Adds a single-message placeholder resolved at format time.
 * @param content The message content
 * @param role Role for the resolved message.
 * @param name Optional name for the resolved message.
 * @return returns ChatPrompt so it can be chained.
 */
addMessagePlaceholder(content: string, role?: IChatMessageRole, name?: string): this;
// Internal: expands dialogue-history placeholders during format().
private _format_placeholderDialogueHistory;
/**
 * format formats the stored prompt based on input values.
 * Uses template engine.
 * Output is intended for LLM.
 * @param values input values.
 * @return formatted prompt.
 */
format(values: I): IChatMessages;
/**
 * formatAsync formats the stored prompt based on input values (async).
 * Uses template engine.
 * Output is intended for LLM.
 * @param values input values.
 * @return formatted prompt.
 */
formatAsync(values: I): Promise<IChatMessages>;
/**
 * validate Ensures there are not unresolved tokens in prompt.
 * @TODO Make this work!
 * @return Returns false if the template is not valid.
 */
validate(): boolean;
}
type TextPromptType = "text";
type ChatPromptType = "chat";
// The two prompt flavors supported by createPrompt().
type PromptType = TextPromptType | ChatPromptType;
// A named helper function made available inside templates.
type PromptHelper = {
name: string;
handler: (args: any) => any;
};
// A named reusable template fragment (partial).
type PromptPartial = {
name: string;
template: string;
};
interface PromptTemplateOptions {
partials?: PromptPartial[];
helpers?: PromptHelper[];
}
// Prompt construction options: rendered-text filters and an optional custom renderer.
interface PromptOptions extends PromptTemplateOptions {
preFilters?: ((prompt: string) => string)[];
postFilters?: ((prompt: string) => string)[];
replaceTemplateString?: (...args: any[]) => string;
}
interface ChatPromptOptions extends PromptOptions {
// Opt-in: user messages are only template-parsed when this is set (see
// ChatPrompt.parseUserTemplates).
allowUnsafeUserTemplate?: boolean;
}
/**
 * `createPrompt` Creates a new instance of a prompt.
 * Overloaded so "text" yields a TextPrompt and "chat" yields a ChatPrompt.
 *
 * @param type Define whether using chat or string prompt
 * @param initialPromptMessage (optional) A message to use for an initial system message.
 */
declare function createPrompt<I extends Record<string, any>>(type: Extract<PromptType, "text">, initialPromptMessage?: string, options?: PromptOptions): TextPrompt<I>;
declare function createPrompt<I extends Record<string, any>>(type: Extract<PromptType, "chat">, initialPromptMessage?: string, options?: ChatPromptOptions): ChatPrompt<I>;
declare function createPrompt<I extends Record<string, any>>(type?: PromptType, initialPromptMessage?: string, options?: PromptOptions | ChatPromptOptions): TextPrompt<I>;
/**
 * `createChatPrompt` Creates a new instance of a chat prompt.
 *
 * @param initialSystemPromptMessage (optional) A message to use for an initial system message.
 */
declare function createChatPrompt<I extends Record<string, any>>(initialSystemPromptMessage?: string, options?: ChatPromptOptions): ChatPrompt<I>;
/**
 * BaseExecutor
 * Abstract execution unit with lifecycle hooks and trace support.
 * @template I - Input type.
 * @template O - Output type.
 * @template H - Hooks type.
 */
declare abstract class BaseExecutor<I extends PlainObject, O = any, H extends BaseExecutorHooks = BaseExecutorHooks> {
/**
 * @property id - internal id of the executor
 */
readonly id: string;
/**
 * @property type - type of executor
 */
type: string;
/**
 * @property created - timestamp date created
 */
readonly created: number;
/**
 * @property name - name of executor
 */
name: string;
/**
 * @property executions - running count of execute() invocations
 */
executions: number;
// Trace id propagated through hooks/metadata; null when tracing is unset.
traceId: string | null;
/**
 * @property hooks - hooks to be ran during execution
 */
hooks: any;
readonly allowedHooks: any[];
constructor(name: string, type: string, options?: CoreExecutorExecuteOptions<H>);
// Core work of the executor; implemented by subclasses.
abstract handler(input: I, _options?: any): Promise<any>;
/**
 *
 * Used to filter the input of the handler
 * @param _input
 * @returns original input formatted for handler
 */
getHandlerInput(_input: I, _metadata: ExecutorExecutionMetadata<I, any>, _options?: any): Promise<any>;
/**
 *
 * Used to filter the output of the handler
 * @param out raw handler result
 * @returns output O
 */
getHandlerOutput(out: any, _metadata: ExecutorExecutionMetadata<any, O>, _options?: any): O;
/**
 *
 * execute - Runs the executor
 * @param _input
 * @returns handler output
 */
execute(_input: I, _options?: any): Promise<O>;
// Subclass-specific metadata merged into getMetadata().
metadata(): Record<string, any>;
getMetadata(metadata?: Record<string, any>): ExecutorMetadata;
// Invokes all listeners registered for the given hook.
runHook(hook: keyof H, _metadata: ExecutorExecutionMetadata): void;
setHooks(hooks?: CoreExecutorHookInput<H>): this;
removeHook(eventName: keyof H, fn: ListenerFunction): this;
on(eventName: keyof H, fn: ListenerFunction): this;
off(eventName: keyof H, fn: ListenerFunction): this;
once(eventName: keyof H, fn: ListenerFunction): this;
// Sets the trace id; chainable.
withTraceId(traceId: string): this;
getTraceId(): string | null;
}
/**
 * Core Function Executor
 * Wraps a plain function (sync or async) in the BaseExecutor lifecycle.
 */
declare class CoreExecutor<I extends PlainObject, O> extends BaseExecutor<I, O> {
// The wrapped user function supplied at construction.
_handler: (input: I) => Promise<any> | any;
constructor(fn: CoreExecutorInput<I, O>, options?: CoreExecutorExecuteOptions);
handler(_input: I): Promise<O>;
}
// A keyed, resettable piece of state with serialization support.
declare abstract class BaseStateItem<T> implements Serializable {
protected key: string;
protected value: T;
// Kept so resetValue() can restore the construction-time value.
protected initialValue: T;
constructor(key: string, initialValue: T);
setValue(value: T): void;
getKey(): string;
getValue(): T;
// Restores value to initialValue.
resetValue(): void;
// Returns { [key]: value } for embedding in a larger snapshot.
serializeValue(): {
[x: string]: T;
};
serialize(): {
class: string;
name: string;
value: any;
};
}
// Concrete state item with no extra behavior.
declare class DefaultStateItem extends BaseStateItem<any> {
constructor(name: string, defaultValue: any);
}
// A named chat-message history stored as a state item.
declare class Dialogue extends BaseStateItem<IChatMessages> {
name: string;
constructor(name: string);
setUserMessage(content: string | IChatMessageContentDetailed[], name?: string): this;
setAssistantMessage(content: string): this;
setSystemMessage(content: string): this;
setFunctionMessage(content: string, name: string): this;
setFunctionCallMessage(input: {
function_call: {
name: string;
arguments: string;
};
}): this;
// Adds a full user/assistant (and optionally system) exchange in one call.
setMessageTurn(userMessage: string, assistantMessage: string, systemMessage?: string): this;
setHistory(messages: IChatMessages): this;
getHistory(): IChatMessages;
serialize(): {
class: string;
name: string;
value: IChatMessage[];
};
}
// Container for dialogues, arbitrary attributes, and typed context items.
declare abstract class BaseState {
dialogues: {
[key in string]: Dialogue;
};
attributes: Record<string, any>;
context: Record<string, BaseStateItem<any>>;
constructor();
createDialogue(name?: string): Dialogue;
// NOTE(review): both useDialogue and getDialogue take an optional name;
// the get-or-create vs get-only distinction is implemented elsewhere — confirm.
useDialogue(name?: string): Dialogue;
getDialogue(name?: string): Dialogue;
createContextItem<T extends BaseStateItem<any>>(item: T): T;
getContext<T>(key: string): BaseStateItem<T>;
getContextValue<T>(key: string): T;
setAttribute(key: string, value: any): void;
deleteAttribute(key: string): void;
clearAttributes(): void;
// Snapshot of all dialogues, context items, and attributes.
serialize(): {
dialogues: any;
context: any;
attributes: any;
};
// Persistence hook; implemented by subclasses.
abstract saveState(): Promise<void>;
}
// In-memory state with a no-op-style saveState (implementation elsewhere).
declare class DefaultState extends BaseState {
constructor();
saveState(): Promise<void>;
}
// Factory helpers for state primitives.
declare function createState(): DefaultState;
declare function createDialogue(name: string): Dialogue;
declare function createStateItem<T>(name: string, defaultValue: T): DefaultStateItem;
/**
 * Core Executor With LLM
 * Pipeline: prompt.format(input) -> llm call -> parser.parse(output).
 */
declare class LlmExecutor<Llm extends BaseLlm<any>, Prompt extends BasePrompt<Record<string, any>>, Parser extends BaseParser, State extends BaseState> extends BaseExecutor<PromptInput<Prompt>, ParserOutput<Parser>, LlmExecutorHooks> {
llm: Llm;
prompt: Prompt | undefined;
// Set when the prompt was supplied as a factory function instead of an instance.
promptFn: any;
// Falls back to StringParser when no parser is configured.
parser: StringParser | Parser;
constructor(llmConfiguration: ExecutorWithLlmOptions<Llm, Prompt, Parser, State>, options?: CoreExecutorExecuteOptions<LlmExecutorHooks>);
execute(_input: PromptInput<Prompt>, _options?: LlmExecutorExecuteOptions): Promise<ParserOutput<Parser>>;
handler(_input: PromptInput<Prompt>, ..._args: any[]): Promise<any>;
getHandlerInput(_input: PromptInput<Prompt>): Promise<any>;
// NOTE(review): "BaseLlCall" looks like a typo of "BaseLlmCall"; the type is
// declared elsewhere in this package — confirm before renaming.
getHandlerOutput(out: BaseLlCall, _metadata: ExecutorExecutionMetadata<PromptInput<Prompt>, ParserOutput<Parser>>): ParserOutput<Parser>;
metadata(): {
llm: Record<string, any>;
};
getTraceId(): string | null;
}
/**
 * LLM executor specialized for OpenAI function calling; execute() accepts
 * OpenAI-specific options (functions, functionCall, strict input).
 */
declare class LlmExecutorOpenAiFunctions<Llm extends BaseLlm, Prompt extends BasePrompt<Record<string, any>>, Parser extends BaseParser, State extends BaseState> extends LlmExecutor<Llm, Prompt, Parser, State> {
constructor(llmConfiguration: ExecutorWithLlmOptions<Llm, Prompt, Parser, State>, options?: CoreExecutorExecuteOptions<LlmExecutorHooks>);
execute<T extends GenericFunctionCall>(_input: PromptInput<Prompt>, _options: OpenAiLlmExecutorOptions<T>): Promise<ParserOutput<Parser>>;
}
/**
 * Function to create a core executor.
 * @template I - Input type.
 * @template O - Output type.
 * @param handler - The handler function.
 * @returns - A new CoreExecutor instance.
 */
declare function createCoreExecutor<I extends PlainObject, O>(handler: (input: I) => Promise<O> | O, options?: CoreExecutorExecuteOptions): CoreExecutor<I, O>;
/**
 * Function to create a core executor with Llm.
 * @template Llm - Llm type.
 * @template Prompt - Prompt type.
 * @template Parser - Parser type.
 * @template State - State type.
 * @param llmConfiguration - The llm, prompt, parser, and state configuration.
 * @returns - A new LlmExecutor instance.
 */
declare function createLlmExecutor<Llm extends BaseLlm<any>, Prompt extends BasePrompt<any>, Parser extends BaseParser, State extends BaseState>(llmConfiguration: ExecutorWithLlmOptions<Llm, Prompt, Parser, State>, options?: CoreExecutorExecuteOptions<LlmExecutorHooks>): LlmExecutor<Llm, Prompt, Parser, State>;
// Hook-name constants used as computed keys in BaseExecutorHooks.
declare const hookOnComplete = "onComplete";
declare const hookOnError = "onError";
declare const hookOnSuccess = "onSuccess";
type ListenerFunction = (...args: any[]) => void;
// Extracts the output type T from a BaseParser<T>.
type ParserOutput<P> = P extends BaseParser<infer T> ? T : never;
// Extracts the input type T from a BasePrompt<T>.
type PromptInput<P> = P extends BasePrompt<infer T> ? T : never;
// Configuration object accepted by createLlmExecutor / LlmExecutor.
interface ExecutorWithLlmOptions<Llm, Prompt, Parser, State> {
name?: string;
llm: Llm;
// Either a prompt instance or a factory producing one from the input values.
prompt: Prompt | ((values: PromptInput<Prompt>) => Prompt);
parser?: Parser;
state?: State;
// Test-only escape hatch for mocked responses.
__mock_response_key__?: string;
}
// Configuration object accepted by CoreExecutor.
interface CoreExecutorInput<I, O> {
name?: string;
handler: (input: I) => Promise<O> | O;
getHandlerInput?(input: I): Promise<any>;
getHandlerOutput?(out: any): O;
}
// Either a bare function or a full executor with matching input/output types.
type FunctionOrExecutor<I extends PlainObject | {
input: string;
}, O> = ((input: I) => Promise<O> | O) | BaseExecutor<I, O>;
// Static identity/metadata of an executor instance.
interface ExecutorMetadata {
id: string;
type: string;
name: string;
created: number;
executions: number;
metadata?: Record<string, any>;
}
// Per-run record: timings, raw and filtered I/O, and any error captured.
interface ExecutorExecutionMetadata<I = any, O = any> {
start: null | number;
end: null | number;
input: I;
handlerInput?: any;
handlerOutput?: any;
output?: O;
errorMessage?: string;
error?: Error;
metadata?: null | ExecutorMetadata;
}
// Execution metadata enriched with guaranteed executor metadata and attributes;
// this is what CustomParser.parserFn receives.
interface ExecutorContext<I = any, O = any, A = Record<string, any>> extends ExecutorExecutionMetadata<I, O> {
metadata: ExecutorMetadata;
attributes: A;
}
// Listener lists per lifecycle event.
interface BaseExecutorHooks {
[hookOnError]: ListenerFunction[];
[hookOnSuccess]: ListenerFunction[];
[hookOnComplete]: ListenerFunction[];
}
interface LlmExecutorHooks extends BaseExecutorHooks {
}
// Hook input: each event accepts a single listener or a list.
type CoreExecutorHookInput<H = BaseExecutorHooks> = {
[key in keyof H]?: ListenerFunction | ListenerFunction[];
};
interface CoreExecutorExecuteOptions<T = BaseExecutorHooks> {
hooks?: CoreExecutorHookInput<T>;
}
// Minimal description of a callable function exposed to the LLM.
interface CallableExecutorCore {
name: string;
description: string;
parameters?: Record<string, any>;
}
// Every string accepted by createParser()'s `type` argument.
type CreateParserType = "json" | "string" | "boolean" | "number" | "stringExtract" | "listToArray" | "listToJson" | "listToKeyValue" | "replaceStringTemplate" | "markdownCodeBlocks" | "markdownCodeBlock";
// Intentionally empty base; per-parser option interfaces extend it.
interface BaseParserOptions {
}
interface BaseParserOptionsWithSchema<S extends JSONSchema | undefined = undefined> extends BaseParserOptions {
schema?: S;
validateSchema?: boolean;
}
// One content part of a normalized LLM response.
interface OutputResultsBase {
type: "text" | "function_use";
text?: string;
}
interface OutputResultsText extends OutputResultsBase {
type: "text";
text: string;
}
// A tool/function invocation requested by the model.
interface OutputResultsFunction extends OutputResultsBase {
type: "function_use";
name: string;
input: Record<string, any>;
}
// Discriminated union over the `type` tag.
type OutputResultContent = OutputResultsText | OutputResultsFunction;
// Normalized chat-completion response shared across providers.
interface OutputResult {
id: string;
name?: string;
created: number;
stopReason: string;
content: OutputResultContent[];
// Alternative completions, when the provider returns more than one choice.
options?: OutputResultContent[][];
usage: {
input_tokens: number;
output_tokens: number;
total_tokens: number;
};
}
// Normalized embedding response shared across providers.
interface EmbeddingOutputResult {
id: string;
model?: string;
created: number;
embedding: number[][];
usage: {
input_tokens: number;
output_tokens: number;
total_tokens: number;
};
}
// Transport-level options shared by all LLM and embedding clients
// (timeout plus retry/backoff tuning).
interface BaseLlmOptions {
traceId?: null | string;
timeout?: number;
maxDelay?: number;
numOfAttempts?: number;
jitter?: "none" | "full";
promptType?: PromptType;
}
interface GenericEmbeddingOptions extends BaseLlmOptions {
model?: string;
dimensions?: number;
}
interface OpenAiEmbeddingOptions extends GenericEmbeddingOptions {
model?: string;
openAiApiKey?: string;
}
// Amazon Bedrock embeddings; model is required, AWS credentials optional
// (may come from the environment).
interface AmazonEmbeddingOptions extends GenericEmbeddingOptions {
model: string;
awsRegion?: string;
awsSecretKey?: string;
awsAccessKey?: string;
}
// Provider-agnostic request shape; provider interfaces below narrow/extend it.
interface GenericLLm extends BaseLlmOptions {
model?: string;
system?: string;
prompt?: string | {
role: string;
content: string;
}[];
temperature?: number;
topP?: number;
stream?: boolean;
streamOptions?: Record<string, any>;
maxTokens?: number;
stopSequences?: string[];
}
interface OpenAiRequest extends GenericLLm {
model: string;
frequencyPenalty?: number;
logitBias?: Record<string, any> | null;
responseFormat?: Record<string, any>;
openAiApiKey?: string;
useJson?: boolean;
}
interface AmazonBedrockRequest extends GenericLLm {
model: string;
awsRegion?: string;
awsSecretKey?: string;
awsAccessKey?: string;
}
interface AnthropicRequest extends GenericLLm {
model: string;
anthropicApiKey?: string;
}
interface GeminiRequest extends GenericLLm {
model: string;
geminiApiKey?: string;
}
interface DeepseekRequest extends GenericLLm {
model: string;
responseFormat?: Record<string, any>;
deepseekApiKey?: string;
useJson?: boolean;
}
/**
 * Registry mapping each embedding-provider key to its input option type.
 * `EmbeddingProviderKey` is derived from these keys.
 */
type AllEmbedding = {
"openai.embedding.v1": {
input: OpenAiEmbeddingOptions;
};
"amazon.embedding.v1": {
input: AmazonEmbeddingOptions;
};
};
/**
 * Registry mapping each versioned chat-provider key to its input option type.
 * Keys of the form `vendor.chat.v1` require an explicit `model`; `amazon:` prefixed
 * keys route through Bedrock.
 */
type AllLlm = {
"openai.chat.v1": {
input: OpenAiRequest;
};
"openai.chat-mock.v1": {
input: OpenAiRequest;
};
"anthropic.chat.v1": {
input: AnthropicRequest;
};
"amazon:anthropic.chat.v1": {
input: AnthropicRequest & AmazonBedrockRequest;
};
"amazon:meta.chat.v1": {
input: AmazonBedrockRequest;
};
"xai.chat.v1": {
input: GenericLLm;
};
"ollama.chat.v1": {
input: GenericLLm;
};
"google.chat.v1": {
input: GeminiRequest;
};
"deepseek.chat.v1": {
input: DeepseekRequest;
};
};
/**
 * Extends `AllLlm` with model-pinned shortcut keys (e.g. "openai.gpt-4o") where the
 * `model` field is pre-filled and therefore removed from the input via `Omit`.
 *
 * NOTE(review): "openai.gpt-4" and "xai.grok-2" use the full `OpenAiRequest` (with
 * `model` still required) while every sibling shortcut uses `Omit<…, "model">` —
 * looks inconsistent; confirm whether that is intentional.
 */
type AllUseLlmOptions = AllLlm & {
"openai.gpt-4": {
input: OpenAiRequest;
};
"openai.gpt-4o": {
input: Omit<OpenAiRequest, "model">;
};
"openai.gpt-4o-mini": {
input: Omit<OpenAiRequest, "model">;
};
"anthropic.claude-3-7-sonnet": {
input: Omit<AnthropicRequest, "model">;
};
"anthropic.claude-3-5-sonnet": {
input: Omit<AnthropicRequest, "model">;
};
"anthropic.claude-3-opus": {
input: Omit<AnthropicRequest, "model">;
};
"anthropic.claude-3-sonnet": {
input: Omit<AnthropicRequest, "model">;
};
"anthropic.claude-3-5-haiku": {
input: Omit<AnthropicRequest, "model">;
};
"google.gemini-2.5-pro-exp-03-25": {
input: Omit<GeminiRequest, "model">;
};
"google.gemini-2.0-flash": {
input: Omit<GeminiRequest, "model">;
};
"google.gemini-2.0-flash-lite": {
input: Omit<GeminiRequest, "model">;
};
"google.gemini-1.5-pro": {
input: Omit<GeminiRequest, "model">;
};
"xai.grok-2": {
input: OpenAiRequest;
};
"ollama.deepseek-r1": {
input: GenericLLm;
};
"ollama.llama3.3": {
input: GenericLLm;
};
"ollama.llama3.2": {
input: GenericLLm;
};
"ollama.llama3.1": {
input: GenericLLm;
};
"ollama.qwq": {
input: GenericLLm;
};
"deepseek.chat": {
input: DeepseekRequest;
};
};
/** Versioned chat-provider keys (e.g. "openai.chat.v1"). */
type LlmProviderKey = keyof AllLlm;
/** Embedding-provider keys (e.g. "openai.embedding.v1"). */
type EmbeddingProviderKey = keyof AllEmbedding;
/** All keys accepted by `useLlm`, including model-pinned shortcuts. */
type UseLlmKey = keyof AllUseLlmOptions;
/**
 * Result handle returned by an LLM call.
 * NOTE(review): name looks like a typo for "BaseLlmCall", but it is part of the
 * declared surface (referenced by `BaseLlm` below) — renaming would be breaking.
 */
interface BaseLlCall {
getResultContent: () => OutputResultContent[];
getResultText: () => string;
getResult: () => OutputResult;
}
/**
 * Minimal client contract shared by LLM and embedding clients.
 * @typeParam _T - The resolved call-result type.
 */
interface BaseRequest<_T extends Record<string, any>> {
call: (...args: any[]) => Promise<_T>;
getTraceId: () => string | null;
withTraceId: (traceId: string) => void;
getMetadata: () => Record<string, any>;
}
/** An LLM client: a `BaseRequest` whose `call` resolves to a `BaseLlCall` handle. */
interface BaseLlm<_T extends BaseLlCall = BaseLlCall> extends BaseRequest<_T> {
}
/**
 * Backend provider identifiers (unversioned).
 * NOTE(review): includes entries with no matching key in `AllLlm`/`AllEmbedding`
 * (e.g. "amazon:nova.chat", "google.embedding") — confirm these are intentionally
 * declared ahead of their registry entries.
 */
type LlmProvider = "openai.chat" | "openai.embedding" | "google.embedding" | "openai.chat-mock" | "anthropic.chat" | "amazon:anthropic.chat" | "amazon:meta.chat" | "amazon:nova.chat" | "amazon.embedding" | "xai.chat" | "google.chat" | "ollama.chat" | "deepseek.chat";
/**
 * Static configuration describing how to call one provider endpoint:
 * option defaults/requirements, request-body field mapping, and prompt formatting.
 *
 * @typeParam Pk - The provider key this config is registered under.
 */
interface Config<Pk = LlmProviderKey> {
key: Pk;
provider: LlmProvider;
// HTTP method for the endpoint (presumably "POST" for chat APIs — confirm).
method: string;
endpoint: string;
// Per-option metadata: a default value and/or a requirement flag
// ([required, errorMessage?]).
options: {
[key in string]: {
default?: number | string;
required?: [boolean, string] | [boolean];
};
};
// Maps input option names onto request-body keys, with optional defaults and
// per-field sanitizers.
mapBody: {
[key in string]: {
key: string;
default?: number | string;
sanitize?: (i: any, arg: Record<string, any>, arg2: Record<string, any>) => any;
};
};
// NOTE(review): typed as a raw string — presumably a headers template rendered
// per-request rather than a key/value record; confirm against the implementation.
headers: string;
// Optional transform from normalized chat messages to the provider's prompt format.
prompt?: (messages: IChatMessages) => any;
}
/**
 * Normalizes an arbitrary value into a `{ result, attributes }` pair.
 * @typeParam O - The expected type of `result`.
 */
declare function enforceResultAttributes<O>(input: any): {
result: O;
attributes: Record<string, any>;
};
/**
 * Represents the input for a CallableExecutor.
 * @interface CallableExecutorInput
 * @property name - The name of the callable function.
 * @property key - The key for the callable function. Defaults to the name if not provided.
 * @property description - A description of the callable function.
 * @property parameters - Parameter schema for the function (presumably JSON-Schema-like, as used for LLM function calling — confirm).
 * @property input - The input for the callable function.
 * @property attributes - Arbitrary metadata attached to the function.
 * @property visibilityHandler - An optional visibility handler for the callable function.
 * @property handler - An optional handler for the callable function.
 * @property validateInput - Optional input validator; returns (or resolves to) a `{ result, attributes }` pair where `result` indicates validity.
 */
interface CallableExecutorInput<I extends PlainObject | {
input: string;
}, O> extends CallableExecutorCore {
name: string;
description: string;
key?: string;
parameters?: Record<string, any>;
input: string;
attributes?: Record<string, any>;
visibilityHandler?(input: I, context: any, attributes?: Record<string, any>): boolean;
handler?: FunctionOrExecutor<I, O>;
validateInput?(input: I, ...args: any[]): ReturnType<typeof enforceResultAttributes<boolean>> | Promise<ReturnType<typeof enforceResultAttributes<boolean>>>;
}
/**
 * Represents a CallableExecutor.
 *
 * Note: this interface deliberately merges with the `CallableExecutor` class
 * declared below (TypeScript declaration merging), adding the internal
 * underscore-prefixed members to instances.
 *
 * @interface CallableExecutor
 * @property key - The key for the callable core function.
 * @property attributes - Arbitrary metadata attached to the function.
 * @property parameters - Parameter schema for the function.
 * @property input - The input for the callable core function.
 * @property _handler - The wrapped executor that performs the actual call.
 * @property visibilityHandler - Public visibility check for the function.
 * @property _visibilityHandler - Optional user-supplied visibility check (with context) wrapped by `visibilityHandler`.
 * @property validateInput - Public input validation entry point.
 * @property _validateInput - Optional user-supplied validator (with context) wrapped by `validateInput`.
 */
interface CallableExecutor<I, O> extends CallableExecutorCore {
key: string;
attributes: Record<string, any>;
parameters: Record<string, any>;
input: string;
_handler: BaseExecutor<I, O>;
visibilityHandler(input: any, attributes?: Record<string, any>): boolean;
_visibilityHandler?(input: any, context: any, attributes?: Record<string, any>): boolean;
validateInput(input: I): Promise<ReturnType<typeof enforceResultAttributes<boolean>>>;
_validateInput?(input: I, context: any): ReturnType<typeof enforceResultAttributes<boolean>> | Promise<ReturnType<typeof enforceResultAttributes<boolean>>>;
}
/**
 * A class representing a CallableExecutor.
 * Wraps a user-provided handler as a named, describable, validatable function
 * that an LLM executor can invoke.
 * @class CallableExecutor
 */
declare class CallableExecutor<I extends PlainObject | {
input: string;
}, O> {
name: string;
key: string;
description: string;
input: string;
attributes: Record<string, any>;
parameters: Record<string, any>;
_handler: BaseExecutor<I, O>;
_validateInput?(input: I, context: any): ReturnType<typeof enforceResultAttributes<boolean>> | Promise<ReturnType<typeof enforceResultAttributes<boolean>>>;
_visibilityHandler?(input: any, context: any, attributes?: Record<string, any>): boolean;
constructor(options: CallableExecutorInput<I, O>);
// Runs the wrapped handler and resolves with its result plus attributes.
execute(input: I): Promise<{
result: O;
attributes: any;
}>;
}
/**
 * Base class managing a collection of `CallableExecutor`s, providing lookup,
 * visibility filtering, invocation, and input validation by function name.
 */
declare abstract class UseExecutorsBase<I extends PlainObject | {
input: string;
}, O> {
handlers: CallableExecutor<I, O>[];
constructor(handlers: CallableExecutor<I, O>[]);
// True if a function with the given name is registered.
hasFunction(name: string): boolean;
// Returns the named function, or undefined if not registered.
getFunction(name: string): CallableExecutor<I, O> | undefined;
getFunctions(): CallableExecutor<I, O>[];
// Subset of functions whose visibility handler passes for the given input/attributes.
getVisibleFunctions(_input: any, _attributes?: any): CallableExecutor<I, O>[];
// Invokes the named function; note `input` is a string here (presumably the raw
// LLM-provided arguments, parsed downstream — confirm).
callFunction(name: string, input: string): Promise<{
result: any;
attributes: any;
}>;
// Validates raw input for the named function without invoking it.
validateFunctionInput(name: string, input: string): Promise<{
result: boolean;
attributes: any;
}>;
}
/**
 * Creates a new CallableExecutor instance.
 * @function createCallableExecutor
 * @param options - The input options for the callable core function.
 * @returns A new CallableExecutor instance.
 */
declare function createCallableExecutor<I extends PlainObject | {
input: string;
}, O>(options: CallableExecutorInput<I, O>): CallableExecutor<I, O>;
/** Concrete executor collection; see `UseExecutorsBase` for the API. */
declare class UseExecutors<I extends PlainObject | {
input: string;
}, O extends any> extends UseExecutorsBase<I, O> {
constructor(handlers: CallableExecutor<I, O>[]);
}
/**
 * Builds a `UseExecutors` collection from a mixed list of ready-made
 * `CallableExecutor`s and/or raw `CallableExecutorInput` option objects
 * (presumably wrapped via `createCallableExecutor` — confirm).
 */
declare function useExecutors<I extends PlainObject | {
input: string;
}, O extends any>(executors: [
...(CallableExecutor<any, any> | CallableExecutorInput<any, any>)[]
]): UseExecutors<I, O>;
/** Throws (with the given message/Error) when `condition` is falsy; narrows via `asserts`. */
declare function assert(condition: any, message?: string | Error | undefined): asserts condition;
/** Identity helper that preserves the literal type of a schema object (via `Narrow`). */
declare function defineSchema<T>(obj: Narrow<T>): Narrow<T>;
/** Converts a name→template map into `PromptPartial` objects. */
declare function importPartials(_partials: {
[key in string]: string;
}): PromptPartial[];
/** Converts a name→function map into `PromptHelper` objects. */
declare function importHelpers(_helpers: {
[key in string]: (...args: any[]) => any;
}): PromptHelper[];
/** Filters `doc` down to the properties described by `schema` (JSON-Schema-shaped — confirm exact semantics of `detach`/`property` against the implementation). */
declare function filterObjectOnSchema(schema: any, doc: any, detach?: any, property?: string): any;
/** Resolves with the promise's value, or rejects if `timeLimit` elapses first. */
declare const asyncCallWithTimeout: <T = any>(asyncPromise: Promise<T>, timeLimit?: number) => Promise<T>;
/** Infers the provider from a model name string. */
declare function guessProviderFromModel(payload: {
model: string;
}): "openai" | "xai" | "bedrock:anthropic" | "anthropic";
/** JSON-stringifies objects; passes strings through unchanged (presumably — confirm). */
declare const maybeStringifyJSON: (objOrMaybeString: any) => string;
/** Parses JSON strings; passes non-JSON values through unchanged (presumably — confirm). */
declare const maybeParseJSON: <Expected = any>(objOrMaybeJSON: any) => Expected;
/** True if the string looks like a serialized JSON object. */
declare function isObjectStringified(maybeObject: string): boolean;
/** Registers template helpers globally (with the handlebars engine — confirm). */
declare function registerHelpers(helpers: any[]): void;
/** Registers template partials globally (with the handlebars engine — confirm). */
declare function registerPartials(partials: any[]): void;
// Generated alias pattern: each utility is re-declared under an `index_` name so it
// can be re-exported below inside the `index` namespace (exposed as `utils`).
declare const index_assert: typeof assert;
declare const index_asyncCallWithTimeout: typeof asyncCallWithTimeout;
declare const index_defineSchema: typeof defineSchema;
declare const index_filterObjectOnSchema: typeof filterObjectOnSchema;
declare const index_guessProviderFromModel: typeof guessProviderFromModel;
declare const index_importHelpers: typeof importHelpers;
declare const index_importPartials: typeof importPartials;
declare const index_isObjectStringified: typeof isObjectStringified;
declare const index_maybeParseJSON: typeof maybeParseJSON;
declare const index_maybeStringifyJSON: typeof maybeStringifyJSON;
declare const index_registerHelpers: typeof registerHelpers;
declare const index_registerPartials: typeof registerPartials;
declare const index_replaceTemplateString: typeof replaceTemplateString;
declare const index_replaceTemplateStringAsync: typeof replaceTemplateStringAsync;
/** Bag of utility functions, exported from the package as `utils`. */
declare namespace index {
export { index_assert as assert, index_asyncCallWithTimeout as asyncCallWithTimeout, index_defineSchema as defineSchema, index_filterObjectOnSchema as filterObjectOnSchema, index_guessProviderFromModel as guessProviderFromModel, index_importHelpers as importHelpers, index_importPartials as importPartials, index_isObjectStringified as isObjectStringified, index_maybeParseJSON as maybeParseJSON, index_maybeStringifyJSON as maybeStringifyJSON, index_registerHelpers as registerHelpers, index_registerPartials as registerPartials, index_replaceTemplateString as replaceTemplateString, index_replaceTemplateStringAsync as replaceTemplateStringAsync };
}
/**
 * Registry of provider configs; `useLlm` accepts exactly these keys.
 *
 * NOTE(review): several keys declared in `AllUseLlmOptions` have no entry here
 * (e.g. "openai.gpt-4", "anthropic.claude-3-sonnet", "google.gemini-2.5-pro-exp-03-25"),
 * so `useLlm` cannot be called with them despite their option types existing — confirm
 * whether these configs are missing or the option keys are stale.
 */
declare const configs: {
"deepseek.chat.v1": Config<keyof AllLlm>;
"deepseek.chat": Config<keyof AllLlm>;
"google.chat.v1": Config<keyof AllLlm>;
"google.gemini-2.0-flash": Config<keyof AllLlm>;
"google.gemini-2.0-flash-lite": Config<keyof AllLlm>;
"google.gemini-1.5-pro": Config<keyof AllLlm>;
"ollama.chat.v1": Config<keyof AllLlm>;
"ollama.deepseek-r1": Config<keyof AllLlm>;
"ollama.llama3.3": Config<keyof AllLlm>;
"ollama.llama3.2": Config<keyof AllLlm>;
"ollama.llama3.1": Config<keyof AllLlm>;
"ollama.qwq": Config<keyof AllLlm>;
"xai.chat.v1": Config<keyof AllLlm>;
"xai.grok-2": Config<keyof AllLlm>;
"amazon:anthropic.chat.v1": Config<keyof AllLlm>;
"amazon:meta.chat.v1": Config<keyof AllLlm>;
"anthropic.chat.v1": Config<keyof AllLlm>;
"anthropic.claude-3-7-sonnet": Config<keyof AllLlm>;
"anthropic.claude-3-5-sonnet": Config<keyof AllLlm>;
"anthropic.claude-3-5-haiku": Config<keyof AllLlm>;
"anthropic.claude-3-opus": Config<keyof AllLlm>;
"openai.chat.v1": Config<keyof AllLlm>;
"openai.chat-mock.v1": Config<keyof AllLlm>;
"openai.gpt-4o": Config<keyof AllLlm>;
"openai.gpt-4o-mini": Config<keyof AllLlm>;
};
/**
 * Creates an LLM client for the given provider key.
 *
 * @typeParam T - A key of `configs`; the matching entry in `AllUseLlmOptions` types `options`.
 * @param provider - Provider key, e.g. "openai.chat.v1" or a model-pinned shortcut like "openai.gpt-4o".
 * @param options - Provider-specific request options (credentials, model, sampling, retry settings).
 * @returns A client exposing `call` (send messages, get a result handle), trace-id accessors, and `getMetadata`.
 */
declare function useLlm<T extends keyof typeof configs>(provider: T, options?: AllUseLlmOptions[T]["input"]): {
call: (messages: string | IChatMessages, options?: OpenAiLlmExecutorOptions) => Promise<{
getResultContent: (index?: number) => OutputResultContent[];
getResultText: () => string;
getResult: () => OutputResult;
}>;
getTraceId: () => string | null;
withTraceId: (id: string) => void;
getMetadata: () => {
traceId: string | null;
timeout: number;
jitter: "none" | "full";
maxDelay: number;
numOfAttempts: number;
metrics: any;
} & {
[x: string]: any;
};
};
/**
 * Creates an embedding client for the given provider key.
 *
 * @typeParam T - An `AllEmbedding` key; its entry types `options`.
 * @param provider - Embedding provider key, e.g. "openai.embedding.v1".
 * @param options - Provider-specific embedding options (note: required here, unlike `useLlm`).
 * @returns A client exposing `call` (embed one or more strings), trace-id accessors, and `getMetadata`.
 */
declare function createEmbedding<T extends EmbeddingProviderKey>(provider: T, options: AllEmbedding[T]["input"]): {
call: (messages: string | string[], options?: OpenAiLlmExecutorOptions) => Promise<{
getEmbedding: (index?: number) => number[];
getResult: () => EmbeddingOutputResult;
}>;
getTraceId: () => string | null;
withTraceId: (id: string) => void;
getMetadata: () => {
traceId: string | null;
timeout: number;
jitter: "none" | "full";
maxDelay: number;
numOfAttempts: number;
metrics: any;
} & {
[x: string]: any;
};
};
// Public package surface: runtime values and (type-only) declarations; `index` is exposed as `utils`.
export { BaseExecutor, type BaseLlm, BaseParser, BasePrompt, BaseStateItem, ChatPrompt, CustomParser, DefaultState, DefaultStateItem, type EmbeddingProviderKey, type ExecutorContext, type IChatMessages, LlmExecutorOpenAiFunctions, type LlmProvider, type LlmProviderKey, type OpenAIModelName, OpenAiFunctionParser, TextPrompt, type UseLlmKey, createCallableExecutor, createChatPrompt, createCoreExecutor, createCustomParser, createDialogue, createEmbedding, createLlmExecutor, createParser, createPrompt, createState, createStateItem, defineSchema, registerHelpers, registerPartials, useExecutors, useLlm, index as utils };