UNPKG

@langchain/core

src/runnables/base.d.ts
{"version":3,"file":"base.d.ts","names":["TraceableFunction","RunnableInterface","RunnableBatchOptions","RunnableConfig","CallbackManagerForChainRun","LogStreamCallbackHandler","LogStreamCallbackHandlerInput","RunLogPatch","EventStreamCallbackHandlerInput","StreamEvent","Serializable","IterableReadableStream","Run","Graph","ToolCall","InferInteropZodOutput","InteropZodType","RunnableFunc","RunInput","RunOutput","CallOptions","Record","Promise","RunnableMapLike","K","RunnableLike","RunnableRetryFailedAttemptHandler","_coerceToDict","Runnable","O","T","I","NewRunOutput","Partial","RunnableRetry","RunnableWithFallbacks","Error","AsyncGenerator","Omit","Exclude","Uint8Array","onStart","onEnd","onError","RunnableToolLike","RunnableBindingArgs","Array","RunnableBinding","Awaited","RunnableEach","RunInputItem","RunOutputItem","ReturnExceptions","RunnableSequenceFields","RunnableSequence","RunnableMap","AnyTraceableFunction","RunnableTraceable","RunnableLambda","RunnableParallel","Generator","_coerceToRunnable","RunnableAssignFields","RunnableAssign","RunnablePickFields","RunnablePick","RunnableToolLikeArgs","convertRunnableToTool"],"sources":["../../src/runnables/base.d.ts"],"sourcesContent":["import { type TraceableFunction } from \"langsmith/singletons/traceable\";\nimport type { RunnableInterface, RunnableBatchOptions, RunnableConfig } from \"./types.js\";\nimport { CallbackManagerForChainRun } from \"../callbacks/manager.js\";\nimport { LogStreamCallbackHandler, LogStreamCallbackHandlerInput, RunLogPatch } from \"../tracers/log_stream.js\";\nimport { EventStreamCallbackHandlerInput, StreamEvent } from \"../tracers/event_stream.js\";\nimport { Serializable } from \"../load/serializable.js\";\nimport { IterableReadableStream } from \"../utils/stream.js\";\nimport { Run } from \"../tracers/base.js\";\nimport { Graph } from \"./graph.js\";\nimport { ToolCall } from \"../messages/tool.js\";\nimport { InferInteropZodOutput, InteropZodType } from \"../utils/types/zod.js\";\nexport { type RunnableInterface, RunnableBatchOptions };\nexport type RunnableFunc<RunInput, RunOutput, CallOptions extends RunnableConfig = RunnableConfig> = (input: RunInput, options: CallOptions | Record<string, any> | (Record<string, any> & CallOptions)) => RunOutput | Promise<RunOutput>;\nexport type RunnableMapLike<RunInput, RunOutput> = {\n [K in keyof RunOutput]: RunnableLike<RunInput, RunOutput[K]>;\n};\nexport type RunnableLike<RunInput = any, RunOutput = any, CallOptions extends RunnableConfig = RunnableConfig> = RunnableInterface<RunInput, RunOutput, CallOptions> | RunnableFunc<RunInput, RunOutput, CallOptions> | RunnableMapLike<RunInput, RunOutput>;\nexport type RunnableRetryFailedAttemptHandler = (error: any, input: any) => any;\nexport declare function _coerceToDict(value: any, defaultKey: string): any;\n/**\n * A Runnable is a generic unit of work that can be invoked, batched, streamed, and/or\n * transformed.\n */\nexport declare abstract class Runnable<RunInput = any, RunOutput = any, CallOptions extends RunnableConfig = RunnableConfig> extends Serializable implements RunnableInterface<RunInput, RunOutput, CallOptions> {\n protected lc_runnable: boolean;\n name?: string;\n getName(suffix?: string): string;\n abstract invoke(input: RunInput, options?: Partial<CallOptions>): Promise<RunOutput>;\n /**\n * Add retry logic to an existing runnable.\n * @param fields.stopAfterAttempt The number of attempts to retry.\n * @param fields.onFailedAttempt A function that is called when a retry fails.\n * @returns A new 
/**
 * A Runnable is a generic unit of work that can be invoked, batched, streamed, and/or
 * transformed.
 */
export declare abstract class Runnable<RunInput = any, RunOutput = any, CallOptions extends RunnableConfig = RunnableConfig> extends Serializable implements RunnableInterface<RunInput, RunOutput, CallOptions> {
    protected lc_runnable: boolean;
    name?: string;
    getName(suffix?: string): string;
    abstract invoke(input: RunInput, options?: Partial<CallOptions>): Promise<RunOutput>;
    /**
     * Add retry logic to an existing runnable.
     * @param fields.stopAfterAttempt The number of attempts to retry.
     * @param fields.onFailedAttempt A function that is called when a retry fails.
     * @returns A new RunnableRetry that, when invoked, will retry according to the parameters.
     */
    withRetry(fields?: {
        stopAfterAttempt?: number;
        onFailedAttempt?: RunnableRetryFailedAttemptHandler;
    }): RunnableRetry<RunInput, RunOutput, CallOptions>;
    /**
     * Bind config to a Runnable, returning a new Runnable.
     * @param config New configuration parameters to attach to the new runnable.
     * @returns A new RunnableBinding with a config matching what's passed.
     */
    withConfig(config: Partial<CallOptions>): Runnable<RunInput, RunOutput, CallOptions>;
    /**
     * Create a new runnable from the current one that will try invoking
     * other passed fallback runnables if the initial invocation fails.
     * @param fields.fallbacks Other runnables to call if the runnable errors.
     * @returns A new RunnableWithFallbacks.
     */
    withFallbacks(fields: {
        fallbacks: Runnable<RunInput, RunOutput>[];
    } | Runnable<RunInput, RunOutput>[]): RunnableWithFallbacks<RunInput, RunOutput>;
    protected _getOptionsList<O extends CallOptions & {
        runType?: string;
    }>(options: Partial<O> | Partial<O>[], length?: number): Partial<O>[];
    /**
     * Default implementation of batch, which calls invoke N times.
     * Subclasses should override this method if they can batch more efficiently.
     * @param inputs Array of inputs to each batch call.
     * @param options Either a single call options object to apply to each batch call or an array for each call.
     * @param batchOptions.returnExceptions Whether to return errors rather than throwing on the first one
     * @returns An array of RunOutputs, or mixed RunOutputs and errors if batchOptions.returnExceptions is set
     */
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions?: false;
    }): Promise<RunOutput[]>;
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions: true;
    }): Promise<(RunOutput | Error)[]>;
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>;
    /**
     * Default streaming implementation.
     * Subclasses should override this method if they support streaming output.
     * @param input
     * @param options
     */
    _streamIterator(input: RunInput, options?: Partial<CallOptions>): AsyncGenerator<RunOutput>;
    /**
     * Stream output in chunks.
     * @param input
     * @param options
     * @returns A readable stream that is also an iterable.
     */
    stream(input: RunInput, options?: Partial<CallOptions>): Promise<IterableReadableStream<RunOutput>>;
    protected _separateRunnableConfigFromCallOptions(options?: Partial<CallOptions>): [RunnableConfig, Omit<Partial<CallOptions>, keyof RunnableConfig>];
    protected _callWithConfig<T extends RunInput>(func: ((input: T) => Promise<RunOutput>) | ((input: T, config?: Partial<CallOptions>, runManager?: CallbackManagerForChainRun) => Promise<RunOutput>), input: T, options?: Partial<CallOptions> & {
        runType?: string;
    }): Promise<RunOutput>;
    /**
     * Internal method that handles batching and configuration for a runnable.
     * It takes a function, input values, and optional configuration, and
     * returns a promise that resolves to the output values.
     * @param func The function to be executed for each input value.
     * @param input The input values to be processed.
     * @param config Optional configuration for the function execution.
     * @returns A promise that resolves to the output values.
     */
    _batchWithConfig<T extends RunInput>(func: (inputs: T[], options?: Partial<CallOptions>[], runManagers?: (CallbackManagerForChainRun | undefined)[], batchOptions?: RunnableBatchOptions) => Promise<(RunOutput | Error)[]>, inputs: T[], options?: Partial<CallOptions & {
        runType?: string;
    }> | Partial<CallOptions & {
        runType?: string;
    }>[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>;
    /** @internal */
    _concatOutputChunks<O>(first: O, second: O): O;
    /**
     * Helper method to transform an Iterator of Input values into an Iterator of
     * Output values, with callbacks.
     * Use this to implement `stream()` or `transform()` in Runnable subclasses.
     */
    protected _transformStreamWithConfig<I extends RunInput, O extends RunOutput>(inputGenerator: AsyncGenerator<I>, transformer: (generator: AsyncGenerator<I>, runManager?: CallbackManagerForChainRun, options?: Partial<CallOptions>) => AsyncGenerator<O>, options?: Partial<CallOptions> & {
        runType?: string;
    }): AsyncGenerator<O>;
    getGraph(_?: RunnableConfig): Graph;
    /**
     * Create a new runnable sequence that runs each individual runnable in series,
     * piping the output of one runnable into another runnable or runnable-like.
     * @param coerceable A runnable, function, or object whose values are functions or runnables.
     * @returns A new runnable sequence.
     */
    pipe<NewRunOutput>(coerceable: RunnableLike<RunOutput, NewRunOutput>): Runnable<RunInput, Exclude<NewRunOutput, Error>>;
    /**
     * Pick keys from the dict output of this runnable. Returns a new runnable.
     */
    pick(keys: string | string[]): Runnable;
    /**
     * Assigns new fields to the dict output of this runnable. Returns a new runnable.
     */
    assign(mapping: RunnableMapLike<Record<string, unknown>, Record<string, unknown>>): Runnable;
    /**
     * Default implementation of transform, which buffers input and then calls stream.
     * Subclasses should override this method if they can start producing output while
     * input is still being generated.
     * @param generator
     * @param options
     */
    transform(generator: AsyncGenerator<RunInput>, options: Partial<CallOptions>): AsyncGenerator<RunOutput>;
    /**
     * Stream all output from a runnable, as reported to the callback system.
     * This includes all inner runs of LLMs, Retrievers, Tools, etc.
     * Output is streamed as Log objects, which include a list of
     * jsonpatch ops that describe how the state of the run has changed in each
     * step, and the final state of the run.
     * The jsonpatch ops can be applied in order to construct state.
     * @param input
     * @param options
     * @param streamOptions
     */
    streamLog(input: RunInput, options?: Partial<CallOptions>, streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">): AsyncGenerator<RunLogPatch>;
    protected _streamLog(input: RunInput, logStreamCallbackHandler: LogStreamCallbackHandler, config: Partial<CallOptions>): AsyncGenerator<RunLogPatch>;
    /**
     * Generate a stream of events emitted by the internal steps of the runnable.
     *
     * Use to create an iterator over StreamEvents that provide real-time information
     * about the progress of the runnable, including StreamEvents from intermediate
     * results.
     *
     * A StreamEvent is a dictionary with the following schema:
     *
     * - `event`: string - Event names are of the format: on_[runnable_type]_(start|stream|end).
     * - `name`: string - The name of the runnable that generated the event.
     * - `run_id`: string - Randomly generated ID associated with the given execution of
     *   the runnable that emitted the event. A child runnable that gets invoked as part of the execution of a
     *   parent runnable is assigned its own unique ID.
     * - `tags`: string[] - The tags of the runnable that generated the event.
     * - `metadata`: Record<string, any> - The metadata of the runnable that generated the event.
     * - `data`: Record<string, any>
     *
     * Below is a table that illustrates some events that might be emitted by various
     * chains. Metadata fields have been omitted from the table for brevity.
     * Chain definitions have been included after the table.
     *
     * **ATTENTION** This reference table is for the V2 version of the schema.
     *
     * ```md
     * +----------------------+-----------------------------+------------------------------------------+
     * | event                | input                       | output/chunk                             |
     * +======================+=============================+==========================================+
     * | on_chat_model_start  | {"messages": BaseMessage[]} |                                          |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_chat_model_stream |                             | AIMessageChunk("hello")                  |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_chat_model_end    | {"messages": BaseMessage[]} | AIMessageChunk("hello world")            |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_llm_start         | {'input': 'hello'}          |                                          |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_llm_stream        |                             | 'Hello'                                  |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_llm_end           | 'Hello human!'              |                                          |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_chain_start       |                             |                                          |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_chain_stream      |                             | "hello world!"                           |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_chain_end         | [Document(...)]             | "hello world!, goodbye world!"           |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_tool_start        | {"x": 1, "y": "2"}          |                                          |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_tool_end          |                             | {"x": 1, "y": "2"}                       |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_retriever_start   | {"query": "hello"}          |                                          |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_retriever_end     | {"query": "hello"}          | [Document(...), ..]                      |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_prompt_start      | {"question": "hello"}       |                                          |
     * +----------------------+-----------------------------+------------------------------------------+
     * | on_prompt_end        | {"question": "hello"}       | ChatPromptValue(messages: BaseMessage[]) |
     * +----------------------+-----------------------------+------------------------------------------+
     * ```
     *
     * The "on_chain_*" events are the default for Runnables that don't fit one of the above categories.
     *
     * In addition to the standard events above, users can also dispatch custom events.
     *
     * Custom events will only be surfaced in the `v2` version of the API!
     *
     * A custom event has the following format:
     *
     * ```md
     * +-----------+------+------------------------------------------------------------+
     * | Attribute | Type | Description                                                |
     * +===========+======+============================================================+
     * | name      | str  | A user defined name for the event.                         |
     * +-----------+------+------------------------------------------------------------+
     * | data      | Any  | The data associated with the event. This can be anything.  |
     * +-----------+------+------------------------------------------------------------+
     * ```
     *
     * Here's an example:
     *
     * ```ts
     * import { RunnableLambda } from "@langchain/core/runnables";
     * import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch";
     * // Use this import for web environments that don't support "async_hooks"
     * // and manually pass config to child runs.
     * // import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch/web";
     *
     * const slowThing = RunnableLambda.from(async (someInput: string) => {
     *   // Placeholder for some slow operation
     *   await new Promise((resolve) => setTimeout(resolve, 100));
     *   await dispatchCustomEvent("progress_event", {
     *     message: "Finished step 1 of 2",
     *   });
     *   await new Promise((resolve) => setTimeout(resolve, 100));
     *   return "Done";
     * });
     *
     * const eventStream = await slowThing.streamEvents("hello world", {
     *   version: "v2",
     * });
     *
     * for await (const event of eventStream) {
     *   if (event.event === "on_custom_event") {
     *     console.log(event);
     *   }
     * }
     * ```
     */
    streamEvents(input: RunInput, options: Partial<CallOptions> & {
        version: "v1" | "v2";
    }, streamOptions?: Omit<EventStreamCallbackHandlerInput, "autoClose">): IterableReadableStream<StreamEvent>;
    streamEvents(input: RunInput, options: Partial<CallOptions> & {
        version: "v1" | "v2";
        encoding: "text/event-stream";
    }, streamOptions?: Omit<EventStreamCallbackHandlerInput, "autoClose">): IterableReadableStream<Uint8Array>;
    private _streamEventsV2;
    private _streamEventsV1;
    static isRunnable(thing: any): thing is Runnable;
    /**
     * Bind lifecycle listeners to a Runnable, returning a new Runnable.
     * The Run object contains information about the run, including its id,
     * type, input, output, error, startTime, endTime, and any tags or metadata
     * added to the run.
     *
     * @param {Object} params - The object containing the callback functions.
     * @param {(run: Run) => void} params.onStart - Called before the runnable starts running, with the Run object.
     * @param {(run: Run) => void} params.onEnd - Called after the runnable finishes running, with the Run object.
     * @param {(run: Run) => void} params.onError - Called if the runnable throws an error, with the Run object.
     */
    withListeners({ onStart, onEnd, onError }: {
        onStart?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
        onEnd?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
        onError?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
    }): Runnable<RunInput, RunOutput, CallOptions>;
    /**
     * Convert a runnable to a tool. Return a new instance of `RunnableToolLike`
     * which contains the runnable, name, description and schema.
     *
     * @template {T extends RunInput = RunInput} RunInput - The input type of the runnable. Should be the same as the `RunInput` type of the runnable.
     *
     * @param fields
     * @param {string | undefined} [fields.name] The name of the tool. If not provided, it will default to the name of the runnable.
     * @param {string | undefined} [fields.description] The description of the tool. Falls back to the description on the Zod schema if not provided, or undefined if neither are provided.
     * @param {z.ZodType<T>} [fields.schema] The Zod schema for the input of the tool. Infers the Zod type from the input type of the runnable.
     * @returns {RunnableToolLike<z.ZodType<T>, RunOutput>} An instance of `RunnableToolLike` which is a runnable that can be used as a tool.
     */
    asTool<T extends RunInput = RunInput>(fields: {
        name?: string;
        description?: string;
        schema: InteropZodType<T>;
    }): RunnableToolLike<InteropZodType<T | ToolCall>, RunOutput>;
}
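Because every Runnable shares the interface above, the same calling conventions apply to all of the concrete classes that follow. A minimal sketch of `invoke`, `batch` (including `returnExceptions`), `stream`, and `streamEvents` (v2) on a `RunnableLambda`; the lambdas themselves are illustrative:

```typescript
import { RunnableLambda } from "@langchain/core/runnables";

const shout = RunnableLambda.from(async (text: string) => text.toUpperCase());

// invoke: one input, one output
await shout.invoke("hi"); // "HI"

// batch: the default implementation calls invoke once per input
await shout.batch(["hi", "there"]); // ["HI", "THERE"]

// returnExceptions: collect errors instead of throwing on the first failure
const picky = RunnableLambda.from(async (text: string) => {
  if (text.length < 3) throw new Error("too short");
  return text;
});
await picky.batch(["hi", "there"], undefined, { returnExceptions: true });
// [Error: too short, "there"]

// stream: a readable stream that is also an async iterable
const stream = await shout.stream("hi");
for await (const chunk of stream) {
  console.log(chunk);
}

// streamEvents (v2): real-time events from this runnable and any child runs
for await (const event of shout.streamEvents("hi", { version: "v2" })) {
  if (event.event.endsWith("_end")) {
    console.log(event.event, event.data);
  }
}
```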
export type RunnableBindingArgs<RunInput, RunOutput, CallOptions extends RunnableConfig = RunnableConfig> = {
    bound: Runnable<RunInput, RunOutput, CallOptions>;
    /** @deprecated Use {@link config} instead. */
    kwargs?: Partial<CallOptions>;
    config: RunnableConfig;
    configFactories?: Array<(config: RunnableConfig) => RunnableConfig | Promise<RunnableConfig>>;
};
/**
 * Wraps a runnable and applies partial config upon invocation.
 *
 * @example
 * ```typescript
 * import {
 *   type RunnableConfig,
 *   RunnableLambda,
 * } from "@langchain/core/runnables";
 *
 * const enhanceProfile = (
 *   profile: Record<string, any>,
 *   config?: RunnableConfig
 * ) => {
 *   if (config?.configurable?.role) {
 *     return { ...profile, role: config.configurable.role };
 *   }
 *   return profile;
 * };
 *
 * const runnable = RunnableLambda.from(enhanceProfile);
 *
 * // Bind configuration to the runnable to set the user's role dynamically
 * const adminRunnable = runnable.withConfig({ configurable: { role: "Admin" } });
 * const userRunnable = runnable.withConfig({ configurable: { role: "User" } });
 *
 * const result1 = await adminRunnable.invoke({
 *   name: "Alice",
 *   email: "alice@example.com"
 * });
 *
 * // { name: "Alice", email: "alice@example.com", role: "Admin" }
 *
 * const result2 = await userRunnable.invoke({
 *   name: "Bob",
 *   email: "bob@example.com"
 * });
 *
 * // { name: "Bob", email: "bob@example.com", role: "User" }
 * ```
 */
export declare class RunnableBinding<RunInput, RunOutput, CallOptions extends RunnableConfig = RunnableConfig> extends Runnable<RunInput, RunOutput, CallOptions> {
    static lc_name(): string;
    lc_namespace: string[];
    lc_serializable: boolean;
    bound: Runnable<RunInput, RunOutput, CallOptions>;
    config: RunnableConfig;
    kwargs?: Partial<CallOptions>;
    configFactories?: Array<(config: RunnableConfig) => RunnableConfig | Promise<RunnableConfig>>;
    constructor(fields: RunnableBindingArgs<RunInput, RunOutput, CallOptions>);
    getName(suffix?: string | undefined): string;
    _mergeConfig(...options: (Partial<CallOptions> | RunnableConfig | undefined)[]): Promise<Partial<CallOptions>>;
    withConfig(config: Partial<CallOptions>): Runnable<RunInput, RunOutput, CallOptions>;
    withRetry(fields?: {
        stopAfterAttempt?: number;
        onFailedAttempt?: RunnableRetryFailedAttemptHandler;
    }): RunnableRetry<RunInput, RunOutput, CallOptions>;
    invoke(input: RunInput, options?: Partial<CallOptions>): Promise<RunOutput>;
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions?: false;
    }): Promise<RunOutput[]>;
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions: true;
    }): Promise<(RunOutput | Error)[]>;
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>;
    /** @internal */
    _concatOutputChunks<O>(first: O, second: O): O;
    _streamIterator(input: RunInput, options?: Partial<CallOptions> | undefined): AsyncGenerator<Awaited<RunOutput>, void, any>;
    stream(input: RunInput, options?: Partial<CallOptions> | undefined): Promise<IterableReadableStream<RunOutput>>;
    transform(generator: AsyncGenerator<RunInput>, options?: Partial<CallOptions>): AsyncGenerator<RunOutput>;
    streamEvents(input: RunInput, options: Partial<CallOptions> & {
        version: "v1" | "v2";
    }, streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">): IterableReadableStream<StreamEvent>;
    streamEvents(input: RunInput, options: Partial<CallOptions> & {
        version: "v1" | "v2";
        encoding: "text/event-stream";
    }, streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">): IterableReadableStream<Uint8Array>;
    static isRunnableBinding(thing: any): thing is RunnableBinding<any, any, any>;
    /**
     * Bind lifecycle listeners to a Runnable, returning a new Runnable.
     * The Run object contains information about the run, including its id,
     * type, input, output, error, startTime, endTime, and any tags or metadata
     * added to the run.
     *
     * @param {Object} params - The object containing the callback functions.
     * @param {(run: Run) => void} params.onStart - Called before the runnable starts running, with the Run object.
     * @param {(run: Run) => void} params.onEnd - Called after the runnable finishes running, with the Run object.
     * @param {(run: Run) => void} params.onError - Called if the runnable throws an error, with the Run object.
     */
    withListeners({ onStart, onEnd, onError }: {
        onStart?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
        onEnd?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
        onError?: (run: Run, config?: RunnableConfig) => void | Promise<void>;
    }): Runnable<RunInput, RunOutput, CallOptions>;
}
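Neither `Runnable.withListeners` nor `RunnableBinding.withListeners` has an example in this file. A minimal sketch; only `run.id` from the Run object is read, to stay within what the JSDoc above guarantees, and the lambda is illustrative:

```typescript
import { RunnableLambda } from "@langchain/core/runnables";

const step = RunnableLambda.from(async (x: number) => x + 1);

// Attach lifecycle callbacks; each receives the Run object for this execution.
const observed = step.withListeners({
  onStart: (run) => {
    console.log(`run ${run.id} started`);
  },
  onEnd: async (run) => {
    console.log(`run ${run.id} finished`);
  },
  onError: (run) => {
    console.error(`run ${run.id} failed`);
  },
});

await observed.invoke(41); // 42, with the listener logs around it
```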
\"carol\"])\n *\n * // [\"Hello, ALICE!\", \"Hello, BOB!\", \"Hello, CAROL!\"]\n * ```\n */\nexport declare class RunnableEach<RunInputItem, RunOutputItem, CallOptions extends RunnableConfig> extends Runnable<RunInputItem[], RunOutputItem[], CallOptions> {\n static lc_name(): string;\n lc_serializable: boolean;\n lc_namespace: string[];\n bound: Runnable<RunInputItem, RunOutputItem, CallOptions>;\n constructor(fields: {\n bound: Runnable<RunInputItem, RunOutputItem, CallOptions>;\n });\n /**\n * Invokes the runnable with the specified input and configuration.\n * @param input The input to invoke the runnable with.\n * @param config The configuration to invoke the runnable with.\n * @returns A promise that resolves to the output of the runnable.\n */\n invoke(inputs: RunInputItem[], config?: Partial<CallOptions>): Promise<RunOutputItem[]>;\n /**\n * A helper method that is used to invoke the runnable with the specified input and configuration.\n * @param input The input to invoke the runnable with.\n * @param config The configuration to invoke the runnable with.\n * @returns A promise that resolves to the output of the runnable.\n */\n protected _invoke(inputs: RunInputItem[], config?: Partial<CallOptions>, runManager?: CallbackManagerForChainRun): Promise<RunOutputItem[]>;\n /**\n * Bind lifecycle listeners to a Runnable, returning a new Runnable.\n * The Run object contains information about the run, including its id,\n * type, input, output, error, startTime, endTime, and any tags or metadata\n * added to the run.\n *\n * @param {Object} params - The object containing the callback functions.\n * @param {(run: Run) => void} params.onStart - Called before the runnable starts running, with the Run object.\n * @param {(run: Run) => void} params.onEnd - Called after the runnable finishes running, with the Run object.\n * @param {(run: Run) => void} params.onError - Called if the runnable throws an error, with the Run object.\n */\n withListeners({ onStart, onEnd, onError }: {\n onStart?: (run: Run, config?: RunnableConfig) => void | Promise<void>;\n onEnd?: (run: Run, config?: RunnableConfig) => void | Promise<void>;\n onError?: (run: Run, config?: RunnableConfig) => void | Promise<void>;\n }): Runnable<any, any, CallOptions>;\n}\n/**\n * Base class for runnables that can be retried a\n * specified number of times.\n * @example\n * ```typescript\n * import {\n * RunnableLambda,\n * RunnableRetry,\n * } from \"@langchain/core/runnables\";\n *\n * // Simulate an API call that fails\n * const simulateApiCall = (input: string): string => {\n * console.log(`Attempting API call with input: ${input}`);\n * throw new Error(\"API call failed due to network issue\");\n * };\n *\n * const apiCallLambda = RunnableLambda.from(simulateApiCall);\n *\n * // Apply retry logic using the .withRetry() method\n * const apiCallWithRetry = apiCallLambda.withRetry({ stopAfterAttempt: 3 });\n *\n * // Alternatively, create a RunnableRetry instance manually\n * const manualRetry = new RunnableRetry({\n * bound: apiCallLambda,\n * maxAttemptNumber: 3,\n * config: {},\n * });\n *\n * // Example invocation using the .withRetry() method\n * const res = await apiCallWithRetry\n * .invoke(\"Request 1\")\n * .catch((error) => {\n * console.error(\"Failed after multiple retries:\", error.message);\n * });\n *\n * // Example invocation using the manual retry instance\n * const res2 = await manualRetry\n * .invoke(\"Request 2\")\n * .catch((error) => {\n * console.error(\"Failed after multiple retries:\", error.message);\n * 
 *   });
 * ```
 */
export declare class RunnableRetry<RunInput = any, RunOutput = any, CallOptions extends RunnableConfig = RunnableConfig> extends RunnableBinding<RunInput, RunOutput, CallOptions> {
    static lc_name(): string;
    lc_namespace: string[];
    protected maxAttemptNumber: number;
    onFailedAttempt: RunnableRetryFailedAttemptHandler;
    constructor(fields: RunnableBindingArgs<RunInput, RunOutput, CallOptions> & {
        maxAttemptNumber?: number;
        onFailedAttempt?: RunnableRetryFailedAttemptHandler;
    });
    _patchConfigForRetry(attempt: number, config?: Partial<CallOptions>, runManager?: CallbackManagerForChainRun): Partial<CallOptions>;
    protected _invoke(input: RunInput, config?: CallOptions, runManager?: CallbackManagerForChainRun): Promise<RunOutput>;
    /**
     * Method that invokes the runnable with the specified input, run manager,
     * and config. It handles the retry logic by catching any errors and
     * recursively invoking itself with the updated config for the next retry
     * attempt.
     * @param input The input for the runnable.
     * @param runManager The run manager for the runnable.
     * @param config The config for the runnable.
     * @returns A promise that resolves to the output of the runnable.
     */
    invoke(input: RunInput, config?: CallOptions): Promise<RunOutput>;
    _batch<ReturnExceptions extends boolean = false>(inputs: RunInput[], configs?: RunnableConfig[], runManagers?: (CallbackManagerForChainRun | undefined)[], batchOptions?: RunnableBatchOptions): Promise<ReturnExceptions extends false ? RunOutput[] : (RunOutput | Error)[]>;
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions?: false;
    }): Promise<RunOutput[]>;
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions: true;
    }): Promise<(RunOutput | Error)[]>;
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>;
}
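The `RunnableRetry` example above only sets `stopAfterAttempt`. A sketch that also uses the `onFailedAttempt` handler, whose `(error, input)` signature comes from `RunnableRetryFailedAttemptHandler`; the flaky counter is illustrative:

```typescript
import { RunnableLambda } from "@langchain/core/runnables";

let attempts = 0;

// Fails twice, then succeeds.
const flaky = RunnableLambda.from(async (input: string) => {
  attempts += 1;
  if (attempts < 3) {
    throw new Error(`transient failure on "${input}"`);
  }
  return `ok: ${input}`;
});

const withRetries = flaky.withRetry({
  stopAfterAttempt: 3,
  // Called after each failed attempt with the error and the original input.
  onFailedAttempt: (error, input) => {
    console.warn(`retrying ${JSON.stringify(input)} after: ${error.message}`);
  },
});

await withRetries.invoke("job-1"); // "ok: job-1" on the third attempt
```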
export type RunnableSequenceFields<RunInput, RunOutput> = {
    first: Runnable<RunInput>;
    middle?: Runnable[];
    last: Runnable<any, RunOutput>;
    name?: string;
    omitSequenceTags?: boolean;
};
/**
 * A sequence of runnables, where the output of each is the input of the next.
 * @example
 * ```typescript
 * const promptTemplate = PromptTemplate.fromTemplate(
 *   "Tell me a joke about {topic}",
 * );
 * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({ model: "gpt-4o-mini" })]);
 * const result = await chain.invoke({ topic: "bears" });
 * ```
 */
export declare class RunnableSequence<RunInput = any, RunOutput = any> extends Runnable<RunInput, RunOutput> {
    static lc_name(): string;
    protected first: Runnable<RunInput>;
    protected middle: Runnable[];
    protected last: Runnable<any, RunOutput>;
    omitSequenceTags: boolean;
    lc_serializable: boolean;
    lc_namespace: string[];
    constructor(fields: RunnableSequenceFields<RunInput, RunOutput>);
    get steps(): Runnable<any, any, RunnableConfig<Record<string, any>>>[];
    invoke(input: RunInput, options?: RunnableConfig): Promise<RunOutput>;
    batch(inputs: RunInput[], options?: Partial<RunnableConfig> | Partial<RunnableConfig>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions?: false;
    }): Promise<RunOutput[]>;
    batch(inputs: RunInput[], options?: Partial<RunnableConfig> | Partial<RunnableConfig>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions: true;
    }): Promise<(RunOutput | Error)[]>;
    batch(inputs: RunInput[], options?: Partial<RunnableConfig> | Partial<RunnableConfig>[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>;
    /** @internal */
    _concatOutputChunks<O>(first: O, second: O): O;
    _streamIterator(input: RunInput, options?: RunnableConfig): AsyncGenerator<RunOutput>;
    getGraph(config?: RunnableConfig): Graph;
    pipe<NewRunOutput>(coerceable: RunnableLike<RunOutput, NewRunOutput>): RunnableSequence<RunInput, Exclude<NewRunOutput, Error>>;
    static isRunnableSequence(thing: any): thing is RunnableSequence;
    static from<RunInput = any, RunOutput = any>([first, ...runnables]: [
        RunnableLike<RunInput>,
        ...RunnableLike[],
        RunnableLike<any, RunOutput>
    ], nameOrFields?: string | Omit<RunnableSequenceFields<RunInput, RunOutput>, "first" | "middle" | "last">): RunnableSequence<RunInput, Exclude<RunOutput, Error>>;
}
/**
 * A runnable that runs a mapping of runnables in parallel,
 * and returns a mapping of their outputs.
 * @example
 * ```typescript
 * const mapChain = RunnableMap.from({
 *   joke: PromptTemplate.fromTemplate("Tell me a joke about {topic}").pipe(
 *     new ChatAnthropic({}),
 *   ),
 *   poem: PromptTemplate.fromTemplate("write a 2-line poem about {topic}").pipe(
 *     new ChatAnthropic({}),
 *   ),
 * });
 * const result = await mapChain.invoke({ topic: "bear" });
 * ```
 */
export declare class RunnableMap<RunInput = any, RunOutput extends Record<string, any> = Record<string, any>> extends Runnable<RunInput, RunOutput> {
    static lc_name(): string;
    lc_namespace: string[];
    lc_serializable: boolean;
    protected steps: Record<string, Runnable<RunInput>>;
    getStepsKeys(): string[];
    constructor(fields: {
        steps: RunnableMapLike<RunInput, RunOutput>;
    });
    static from<RunInput, RunOutput extends Record<string, any> = Record<string, any>>(steps: RunnableMapLike<RunInput, RunOutput>): RunnableMap<RunInput, RunOutput>;
    invoke(input: RunInput, options?: Partial<RunnableConfig>): Promise<RunOutput>;
    _transform(generator: AsyncGenerator<RunInput>, runManager?: CallbackManagerForChainRun, options?: Partial<RunnableConfig>): AsyncGenerator<RunOutput>;
    transform(generator: AsyncGenerator<RunInput>, options?: Partial<RunnableConfig>): AsyncGenerator<RunOutput>;
    stream(input: RunInput, options?: Partial<RunnableConfig>): Promise<IterableReadableStream<RunOutput>>;
}
type AnyTraceableFunction = TraceableFunction<(...any: any[]) => any>;
/**
 * A runnable that wraps a traced LangSmith function.
 */
export declare class RunnableTraceable<RunInput, RunOutput> extends Runnable<RunInput, RunOutput> {
    lc_serializable: boolean;
    lc_namespace: string[];
    protected func: AnyTraceableFunction;
    constructor(fields: {
        func: AnyTraceableFunction;
    });
    invoke(input: RunInput, options?: Partial<RunnableConfig>): Promise<RunOutput>;
    _streamIterator(input: RunInput, options?: Partial<RunnableConfig>): AsyncGenerator<RunOutput>;
    static from(func: AnyTraceableFunction): RunnableTraceable<unknown, unknown>;
}
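`RunnableTraceable` has no example in this file. A sketch, assuming the `traceable` wrapper exported by the `langsmith` package (the same package this file imports `TraceableFunction` from); the function name and behavior are illustrative:

```typescript
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "@langchain/core/runnables";

// A LangSmith-traced function...
const tracedSummarize = traceable(
  async (text: string) => text.slice(0, 80),
  { name: "summarize" }
);

// ...wrapped so it can participate in a runnable chain.
const summarizeRunnable = RunnableTraceable.from(tracedSummarize);

await summarizeRunnable.invoke("A very long piece of text...");
```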
/**
 * A runnable that wraps an arbitrary function that takes a single argument.
 * @example
 * ```typescript
 * import { RunnableLambda } from "@langchain/core/runnables";
 *
 * const add = (input: { x: number; y: number }) => input.x + input.y;
 *
 * const multiply = (input: { value: number; multiplier: number }) =>
 *   input.value * input.multiplier;
 *
 * // Create runnables for the functions
 * const addLambda = RunnableLambda.from(add);
 * const multiplyLambda = RunnableLambda.from(multiply);
 *
 * // Chain the lambdas for a mathematical operation
 * const chainedLambda = addLambda.pipe((result) =>
 *   multiplyLambda.invoke({ value: result, multiplier: 2 })
 * );
 *
 * // Example invocation of the chainedLambda
 * const result = await chainedLambda.invoke({ x: 2, y: 3 });
 *
 * // Will log "10" (since (2 + 3) * 2 = 10)
 * ```
 */
export declare class RunnableLambda<RunInput, RunOutput, CallOptions extends RunnableConfig = RunnableConfig> extends Runnable<RunInput, RunOutput, CallOptions> {
    static lc_name(): string;
    lc_namespace: string[];
    protected func: RunnableFunc<RunInput, RunOutput | Runnable<RunInput, RunOutput, CallOptions>, CallOptions>;
    constructor(fields: {
        func: RunnableFunc<RunInput, RunOutput | Runnable<RunInput, RunOutput, CallOptions>, CallOptions> | TraceableFunction<RunnableFunc<RunInput, RunOutput | Runnable<RunInput, RunOutput, CallOptions>, CallOptions>>;
    });
    static from<RunInput, RunOutput, CallOptions extends RunnableConfig = RunnableConfig>(func: RunnableFunc<RunInput, RunOutput | Runnable<RunInput, RunOutput, CallOptions>, CallOptions>): RunnableLambda<RunInput, RunOutput, CallOptions>;
    static from<RunInput, RunOutput, CallOptions extends RunnableConfig = RunnableConfig>(func: TraceableFunction<RunnableFunc<RunInput, RunOutput | Runnable<RunInput, RunOutput, CallOptions>, CallOptions>>): RunnableLambda<RunInput, RunOutput, CallOptions>;
    _invoke(input: RunInput, config?: Partial<CallOptions>, runManager?: CallbackManagerForChainRun): Promise<RunOutput>;
    invoke(input: RunInput, options?: Partial<CallOptions>): Promise<RunOutput>;
    _transform(generator: AsyncGenerator<RunInput>, runManager?: CallbackManagerForChainRun, config?: Partial<CallOptions>): AsyncGenerator<RunOutput>;
    transform(generator: AsyncGenerator<RunInput>, options?: Partial<CallOptions>): AsyncGenerator<RunOutput>;
    stream(input: RunInput, options?: Partial<CallOptions>): Promise<IterableReadableStream<RunOutput>>;
}
/**
 * A runnable that runs a mapping of runnables in parallel,
 * and returns a mapping of their outputs.
 * @example
 * ```typescript
 * import {
 *   RunnableLambda,
 *   RunnableParallel,
 * } from "@langchain/core/runnables";
 *
 * const addYears = (age: number): number => age + 5;
 * const yearsToFifty = (age: number): number => 50 - age;
 * const yearsToHundred = (age: number): number => 100 - age;
 *
 * const addYearsLambda = RunnableLambda.from(addYears);
 * const milestoneFiftyLambda = RunnableLambda.from(yearsToFifty);
 * const milestoneHundredLambda = RunnableLambda.from(yearsToHundred);
 *
 * // Pipe will coerce objects into RunnableParallel by default, but we
 * // explicitly instantiate one here to demonstrate
 * const sequence = addYearsLambda.pipe(
 *   RunnableParallel.from({
 *     years_to_fifty: milestoneFiftyLambda,
 *     years_to_hundred: milestoneHundredLambda,
 *   })
 * );
 *
 * // Invoke the sequence with a single age input
 * const res = await sequence.invoke(25);
 *
 * // { years_to_fifty: 20, years_to_hundred: 70 }
 * ```
 */
export declare class RunnableParallel<RunInput> extends RunnableMap<RunInput> {
}
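Note that `RunnableLambda`'s `func` is typed to return either a value or another `Runnable`; when a runnable is returned, it is invoked in place of a plain result, which allows simple dynamic routing. A sketch with illustrative branch names:

```typescript
import { RunnableLambda } from "@langchain/core/runnables";

const casual = RunnableLambda.from((q: string) => `Sure! ${q}`);
const formal = RunnableLambda.from((q: string) => `Certainly. Regarding "${q}":`);

// The lambda returns a Runnable, which is then run on the original input.
const route = RunnableLambda.from((q: string) =>
  q.endsWith("?") ? formal : casual
);

await route.invoke("What is a runnable?"); // handled by `formal`
await route.invoke("Tell me a joke");      // handled by `casual`
```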
/**
 * A Runnable that can fall back to other Runnables if it fails.
 * External APIs (e.g., APIs for a language model) may at times experience
 * degraded performance or even downtime.
 *
 * In these cases, it can be useful to have a fallback Runnable that can be
 * used in place of the original Runnable (e.g., fallback to another LLM provider).
 *
 * Fallbacks can be defined at the level of a single Runnable, or at the level
 * of a chain of Runnables. Fallbacks are tried in order until one succeeds or
 * all fail.
 *
 * While you can instantiate a `RunnableWithFallbacks` directly, it is usually
 * more convenient to use the `withFallbacks` method on an existing Runnable.
 *
 * When streaming, fallbacks will only be called on failures during the initial
 * stream creation. Errors that occur after a stream starts will not fall back
 * to the next Runnable.
 *
 * @example
 * ```typescript
 * import {
 *   RunnableLambda,
 *   RunnableWithFallbacks,
 * } from "@langchain/core/runnables";
 *
 * const primaryOperation = (input: string): string => {
 *   if (input !== "safe") {
 *     throw new Error("Primary operation failed due to unsafe input");
 *   }
 *   return `Processed: ${input}`;
 * };
 *
 * // Define a fallback operation that processes the input differently
 * const fallbackOperation = (input: string): string =>
 *   `Fallback processed: ${input}`;
 *
 * const primaryRunnable = RunnableLambda.from(primaryOperation);
 * const fallbackRunnable = RunnableLambda.from(fallbackOperation);
 *
 * // Apply the fallback logic using the .withFallbacks() method
 * const runnableWithFallback = primaryRunnable.withFallbacks([fallbackRunnable]);
 *
 * // Alternatively, create a RunnableWithFallbacks instance manually
 * const manualFallbackChain = new RunnableWithFallbacks({
 *   runnable: primaryRunnable,
 *   fallbacks: [fallbackRunnable],
 * });
 *
 * // Example invocation using .withFallbacks()
 * const res = await runnableWithFallback
 *   .invoke("unsafe input")
 *   .catch((error) => {
 *     console.error("Failed after all attempts:", error.message);
 *   });
 *
 * // "Fallback processed: unsafe input"
 *
 * // Example invocation using manual instantiation
 * const res2 = await manualFallbackChain
 *   .invoke("safe")
 *   .catch((error) => {
 *     console.error("Failed after all attempts:", error.message);
 *   });
 *
 * // "Processed: safe"
 * ```
 */
export declare class RunnableWithFallbacks<RunInput, RunOutput> extends Runnable<RunInput, RunOutput> {
    static lc_name(): string;
    lc_namespace: string[];
    lc_serializable: boolean;
    runnable: Runnable<RunInput, RunOutput>;
    fallbacks: Runnable<RunInput, RunOutput>[];
    constructor(fields: {
        runnable: Runnable<RunInput, RunOutput>;
        fallbacks: Runnable<RunInput, RunOutput>[];
    });
    runnables(): Generator<Runnable<RunInput, RunOutput, RunnableConfig<Record<string, any>>>, void, unknown>;
    invoke(input: RunInput, options?: Partial<RunnableConfig>): Promise<RunOutput>;
    _streamIterator(input: RunInput, options?: Partial<RunnableConfig> | undefined): AsyncGenerator<RunOutput>;
    batch(inputs: RunInput[], options?: Partial<RunnableConfig> | Partial<RunnableConfig>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions?: false;
    }): Promise<RunOutput[]>;
    batch(inputs: RunInput[], options?: Partial<RunnableConfig> | Partial<RunnableConfig>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions: true;
    }): Promise<(RunOutput | Error)[]>;
    batch(inputs: RunInput[], options?: Partial<RunnableConfig> | Partial<RunnableConfig>[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>;
}
export declare function _coerceToRunnable<RunInput, RunOutput, CallOptions extends RunnableConfig = RunnableConfig>(coerceable: RunnableLike<RunInput, RunOutput, CallOptions>): Runnable<RunInput, Exclude<RunOutput, Error>, CallOptions>;
export interface RunnableAssignFields<RunInput> {
    mapper: RunnableMap<RunInput>;
}
/**
 * A runnable that assigns key-value pairs to inputs of type `Record<string, unknown>`.
 * @example
 * ```typescript
 * import {
 *   RunnableAssign,
 *   RunnableLambda,
 *   RunnableParallel,
 * } from "@langchain/core/runnables";
 *
 * const calculateAge = (x: { birthYear: number }): { age: number } => {
 *   const currentYear = new Date().getFullYear();
 *   return { age: currentYear - x.birthYear };
 * };
 *
 * const createGreeting = (x: { name: string }): { greeting: string } => {
 *   return { greeting: `Hello, ${x.name}!` };
 * };
 *
 * const mapper = RunnableParallel.from({
 *   age_step: RunnableLambda.from(calculateAge),
 *   greeting_step: RunnableLambda.from(createGreeting),
 * });
 *
 * const runnableAssign = new RunnableAssign({ mapper });
 *
 * const res = await runnableAssign.invoke({ name: "Alice", birthYear: 1990 });
 *
 * // { name: "Alice", birthYear: 1990, age_step: { age: 34 }, greeting_step: { greeting: "Hello, Alice!" } }
 * ```
 */
export declare class RunnableAssign<RunInput extends Record<string, any> = Record<string, any>, RunOutput extends Record<string, any> = Record<string, any>, CallOptions extends RunnableConfig = RunnableConfig> extends Runnable<RunInput, RunOutput> implements RunnableAssignFields<RunInput> {
    static lc_name(): string;
    lc_namespace: string[];
    lc_serializable: boolean;
    mapper: RunnableMap<RunInput>;
    constructor(fields: RunnableMap<RunInput> | RunnableAssignFields<RunInput>);
    invoke(input: RunInput, options?: Partial<CallOptions>): Promise<RunOutput>;
    _transform(generator: AsyncGenerator<RunInput>, runManager?: CallbackManagerForChainRun, options?: Partial<RunnableConfig>): AsyncGenerator<RunOutput>;
    transform(generator: AsyncGenerator<RunInput>, options?: Partial<RunnableConfig>): AsyncGenerator<RunOutput>;
    stream(input: RunInput, options?: Partial<RunnableConfig>): Promise<IterableReadableStream<RunOutput>>;
}
export interface RunnablePickFields {
    keys: string | string[];
}
/**
 * A runnable that picks keys from inputs of type `Record<string, unknown>`.
 * Useful for streaming; one can be created and chained automatically by calling `runnable.pick()`.
 * @example
 * ```typescript
 * import { RunnablePick } from "@langchain/core/runnables";
 *
 * const inputData = {
 *   name: "John",
 *   age: 30,
 *   city: "New York",
 *   country: "USA",
 *   email: "john.doe@example.com",
 *   phone: "+1234567890",
 * };
 *
 * const basicInfoRunnable = new RunnablePick(["name", "city"]);
 *
 * // Example invocation
 * const res = await basicInfoRunnable.invoke(inputData);
 *
 * // { name: 'John', city: 'New York' }
 * ```
 */
export declare class RunnablePick<RunInput extends Record<string, any> = Record<string, any>, RunOutput extends Record<string, any> | any = Record<string, any> | any, CallOptions extends RunnableConfig = RunnableConfig> extends Runnable<RunInput, RunOutput> implements RunnablePickFields {
    static lc_name(): string;
    lc_namespace: string[];
    lc_serializable: boolean;
    keys: string | string[];
    constructor(fields: string | string[] | RunnablePickFields);
    _pick(input: RunInput): Promise<RunOutput>;
    invoke(input: RunInput, options?: Partial<CallOptions>): Promise<RunOutput>;
    _transform(generator: AsyncGenerator<RunInput>): AsyncGenerator<RunOutput>;
    transform(generator: AsyncGenerator<RunInput>, options?: Partial<RunnableConfig>): AsyncGenerator<RunOutput>;
    stream(input: RunInput, options?: Partial<RunnableConfig>): Promise<IterableReadableStream<RunOutput>>;
}
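The tool-conversion surface below (`RunnableToolLikeArgs`, `RunnableToolLike`, and the `Runnable.asTool` method declared earlier) has no example in this file. A minimal sketch using a `zod` schema, which satisfies the `InteropZodType` constraint; the tool name and lambda are illustrative:

```typescript
import { z } from "zod";
import { RunnableLambda } from "@langchain/core/runnables";

const adder = RunnableLambda.from(
  async (input: { a: number; b: number }) => input.a + input.b
);

// asTool returns a RunnableToolLike carrying the name, description, and schema.
const addTool = adder.asTool({
  name: "add",
  description: "Add two numbers together",
  schema: z.object({ a: z.number(), b: z.number() }),
});

console.log(addTool.name);            // "add"
await addTool.invoke({ a: 2, b: 3 }); // 5
```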
export interface RunnableToolLikeArgs<RunInput extends InteropZodType = InteropZodType, RunOutput = unknown> extends Omit<RunnableBindingArgs<InferInteropZodOutput<RunInput>, RunOutput>, "config"> {
    name: string;
    description?: string;
    schema: RunInput;
    config?: RunnableConfig;
}
export declare class RunnableToolLike<RunInput extends InteropZodType = InteropZodType, RunOutput = unknown> extends RunnableBinding<InferInteropZodOutput<RunInput>, RunOutput> {
    name: string;
    description?: string;
    schema: RunInput;
    constructor(fields: RunnableToolLikeArgs<RunInput, RunOutput>);
    static lc_name(): string;
}
/**
 * Given a runnable and a Zod schema, convert the runnable to a tool.
 *
 * @template RunInput The input type for the runnable.
 * @template RunOutput The output type for the runnable.
 *
 * @param {Runnable<RunInput, RunOutput>} runnable The runnable to convert to a tool.
 * @param fields
 * @param {string | undefined} [fields.name] The name of the tool. If not provided, it will default to the name of the runnable.
 * @param {string | undefined} [fields.description] The description of the tool. Falls back to the description on the Zod schema if not provided, or undefined if neither are provided.
 * @param {InteropZodType<RunInput>} [fields.schema] The Zod s