const require_rolldown_runtime = require('../_virtual/rolldown_runtime.cjs');
const require_annotation = require('./annotation.cjs');
const require_utils = require('./utils.cjs');
const require_utils$1 = require('./nodes/utils.cjs');
const require_AgentNode = require('./nodes/AgentNode.cjs');
const require_ToolNode = require('./nodes/ToolNode.cjs');
const require_utils$2 = require('./middleware/utils.cjs');
const require_BeforeAgentNode = require('./nodes/BeforeAgentNode.cjs');
const require_BeforeModelNode = require('./nodes/BeforeModelNode.cjs');
const require_AfterModelNode = require('./nodes/AfterModelNode.cjs');
const require_AfterAgentNode = require('./nodes/AfterAgentNode.cjs');
const require_state = require('./state.cjs');
const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));
const __langchain_langgraph = require_rolldown_runtime.__toESM(require("@langchain/langgraph"));

//#region src/agents/ReactAgent.ts
/**
 * ReactAgent is a production-ready ReAct (Reasoning + Acting) agent that combines
 * language models with tools and middleware.
 *
 * The agent is parameterized by a single type bag `Types` that encapsulates all
 * type information:
 *
 * @typeParam Types - An {@link AgentTypeConfig} that bundles:
 * - `Response`: The structured response type
 * - `State`: The custom state schema type
 * - `Context`: The context schema type
 * - `Middleware`: The middleware array type
 * - `Tools`: The combined tools type from agent and middleware
 *
 * @example
 * ```typescript
 * // Using the type bag pattern
 * type MyTypes = AgentTypeConfig<
 *   { name: string },   // Response
 *   typeof myState,     // State
 *   typeof myContext,   // Context
 *   typeof middleware,  // Middleware
 *   typeof tools        // Tools
 * >;
 *
 * const agent: ReactAgent<MyTypes> = createAgent({ ... });
 * ```
 */
var ReactAgent = class {
  #graph;
  #toolBehaviorVersion = "v2";
  #agentNode;
  #stateManager = new require_state.StateManager();
  constructor(options) {
    this.options = options;
    this.#toolBehaviorVersion = options.version ?? this.#toolBehaviorVersion;
    /**
     * validate that model option is provided
     */
    if (!options.model) throw new Error("`model` option is required to create an agent.");
    /**
     * Check if the LLM already has bound tools and throw if it does.
     */
    if (typeof options.model !== "string") require_utils.validateLLMHasNoBoundTools(options.model);
    /**
     * define complete list of tools based on options and middleware
     */
    const middlewareTools = this.options.middleware?.filter((m) => m.tools).flatMap((m) => m.tools) ?? [];
    const toolClasses = [...options.tools ??
      [], ...middlewareTools];
    /**
     * If any of the tools are configured to return_directly after running,
     * our graph needs to check if these were called
     */
    const shouldReturnDirect = new Set(toolClasses.filter(require_utils.isClientTool).filter((tool) => "returnDirect" in tool && tool.returnDirect).map((tool) => tool.name));
    /**
     * Create a schema that merges agent base schema with middleware state schemas
     * Using Zod with withLangGraph ensures LangGraph Studio gets proper metadata
     */
    const { state, input, output } = require_annotation.createAgentAnnotationConditional(this.options.responseFormat !== void 0, this.options.stateSchema, this.options.middleware);
    const workflow = new __langchain_langgraph.StateGraph({ state, input, output }, this.options.contextSchema);
    const allNodeWorkflows = workflow;
    const beforeAgentNodes = [];
    const beforeModelNodes = [];
    const afterModelNodes = [];
    const afterAgentNodes = [];
    const wrapModelCallHookMiddleware = [];
    this.#agentNode = new require_AgentNode.AgentNode({
      model: this.options.model,
      systemMessage: require_utils.normalizeSystemPrompt(this.options.systemPrompt),
      includeAgentName: this.options.includeAgentName,
      name: this.options.name,
      responseFormat: this.options.responseFormat,
      middleware: this.options.middleware,
      toolClasses,
      shouldReturnDirect,
      signal: this.options.signal,
      wrapModelCallHookMiddleware
    });
    const middlewareNames = /* @__PURE__ */ new Set();
    const middleware = this.options.middleware ?? [];
    for (let i = 0; i < middleware.length; i++) {
      let beforeAgentNode;
      let beforeModelNode;
      let afterModelNode;
      let afterAgentNode;
      const m = middleware[i];
      if (middlewareNames.has(m.name)) throw new Error(`Middleware ${m.name} is defined multiple times`);
      middlewareNames.add(m.name);
      if (m.beforeAgent) {
        beforeAgentNode = new require_BeforeAgentNode.BeforeAgentNode(m, { getState: () => this.#stateManager.getState(m.name) });
        this.#stateManager.addNode(m, beforeAgentNode);
        const name = `${m.name}.before_agent`;
        beforeAgentNodes.push({ index: i, name, allowed: require_utils$2.getHookConstraint(m.beforeAgent) });
        allNodeWorkflows.addNode(name, beforeAgentNode, beforeAgentNode.nodeOptions);
      }
      if (m.beforeModel) {
        beforeModelNode = new require_BeforeModelNode.BeforeModelNode(m, { getState: () => this.#stateManager.getState(m.name) });
        this.#stateManager.addNode(m, beforeModelNode);
        const name = `${m.name}.before_model`;
        beforeModelNodes.push({ index: i, name, allowed: require_utils$2.getHookConstraint(m.beforeModel) });
        allNodeWorkflows.addNode(name, beforeModelNode, beforeModelNode.nodeOptions);
      }
      if (m.afterModel) {
        afterModelNode = new require_AfterModelNode.AfterModelNode(m, { getState: () => this.#stateManager.getState(m.name) });
        this.#stateManager.addNode(m, afterModelNode);
        const name = `${m.name}.after_model`;
        afterModelNodes.push({ index: i, name, allowed: require_utils$2.getHookConstraint(m.afterModel) });
        allNodeWorkflows.addNode(name, afterModelNode, afterModelNode.nodeOptions);
      }
      if (m.afterAgent) {
        afterAgentNode = new require_AfterAgentNode.AfterAgentNode(m, { getState: () => this.#stateManager.getState(m.name) });
        this.#stateManager.addNode(m, afterAgentNode);
        const name = `${m.name}.after_agent`;
        afterAgentNodes.push({ index: i, name, allowed: require_utils$2.getHookConstraint(m.afterAgent) });
        allNodeWorkflows.addNode(name, afterAgentNode, afterAgentNode.nodeOptions);
      }
      if (m.wrapModelCall) wrapModelCallHookMiddleware.push([m, () => this.#stateManager.getState(m.name)]);
    }
    /**
     * Add Nodes
     */
    allNodeWorkflows.addNode(require_AgentNode.AGENT_NODE_NAME, this.#agentNode);
    /**
     * add single tool node for all tools
     */
    if (toolClasses.filter(require_utils.isClientTool).length > 0) {
      const toolNode = new require_ToolNode.ToolNode(toolClasses.filter(require_utils.isClientTool), {
        signal: this.options.signal,
        wrapToolCall: require_utils.wrapToolCall(middleware)
      });
      allNodeWorkflows.addNode(require_ToolNode.TOOLS_NODE_NAME, toolNode);
    }
    /**
     * Add Edges
     */
    let entryNode;
    if (beforeAgentNodes.length > 0) entryNode = beforeAgentNodes[0].name;
    else if (beforeModelNodes.length > 0) entryNode = beforeModelNodes[0].name;
    else entryNode = require_AgentNode.AGENT_NODE_NAME;
    const loopEntryNode = beforeModelNodes.length > 0 ? beforeModelNodes[0].name : require_AgentNode.AGENT_NODE_NAME;
    const exitNode = afterAgentNodes.length > 0 ? afterAgentNodes[afterAgentNodes.length - 1].name : __langchain_langgraph.END;
    allNodeWorkflows.addEdge(__langchain_langgraph.START, entryNode);
    const clientTools = toolClasses.filter(require_utils.isClientTool);
    for (let i = 0; i < beforeAgentNodes.length; i++) {
      const node = beforeAgentNodes[i];
      const current = node.name;
      const isLast = i === beforeAgentNodes.length - 1;
      const nextDefault = isLast ? loopEntryNode : beforeAgentNodes[i + 1].name;
      if (node.allowed && node.allowed.length > 0) {
        const hasTools = clientTools.length > 0;
        const allowedMapped = node.allowed.map((t) => require_utils$1.parseJumpToTarget(t)).filter((dest) => dest !== require_ToolNode.TOOLS_NODE_NAME || hasTools);
        const destinations = Array.from(new Set([nextDefault, ...allowedMapped.map((dest) => dest === __langchain_langgraph.END ? exitNode : dest)]));
        allNodeWorkflows.addConditionalEdges(current, this.#createBeforeAgentRouter(clientTools, nextDefault, exitNode), destinations);
      } else allNodeWorkflows.addEdge(current, nextDefault);
    }
    for (let i = 0; i < beforeModelNodes.length; i++) {
      const node = beforeModelNodes[i];
      const current = node.name;
      const isLast = i === beforeModelNodes.length - 1;
      const nextDefault = isLast ? require_AgentNode.AGENT_NODE_NAME : beforeModelNodes[i + 1].name;
      if (node.allowed && node.allowed.length > 0) {
        const hasTools = clientTools.length > 0;
        const allowedMapped = node.allowed.map((t) => require_utils$1.parseJumpToTarget(t)).filter((dest) => dest !== require_ToolNode.TOOLS_NODE_NAME || hasTools);
        const destinations = Array.from(new Set([nextDefault, ...allowedMapped]));
        allNodeWorkflows.addConditionalEdges(current, this.#createBeforeModelRouter(clientTools, nextDefault), destinations);
      } else allNodeWorkflows.addEdge(current, nextDefault);
    }
    const lastAfterModelNode = afterModelNodes.at(-1);
    if (afterModelNodes.length > 0 && lastAfterModelNode) allNodeWorkflows.addEdge(require_AgentNode.AGENT_NODE_NAME, lastAfterModelNode.name);
    else {
      const modelPaths = this.#getModelPaths(clientTools);
      const destinations = modelPaths.map((p) => p === __langchain_langgraph.END ?
        exitNode : p);
      if (destinations.length === 1) allNodeWorkflows.addEdge(require_AgentNode.AGENT_NODE_NAME, destinations[0]);
      else allNodeWorkflows.addConditionalEdges(require_AgentNode.AGENT_NODE_NAME, this.#createModelRouter(exitNode), destinations);
    }
    for (let i = afterModelNodes.length - 1; i > 0; i--) {
      const node = afterModelNodes[i];
      const current = node.name;
      const nextDefault = afterModelNodes[i - 1].name;
      if (node.allowed && node.allowed.length > 0) {
        const hasTools = clientTools.length > 0;
        const allowedMapped = node.allowed.map((t) => require_utils$1.parseJumpToTarget(t)).filter((dest) => dest !== require_ToolNode.TOOLS_NODE_NAME || hasTools);
        const destinations = Array.from(new Set([nextDefault, ...allowedMapped]));
        allNodeWorkflows.addConditionalEdges(current, this.#createAfterModelSequenceRouter(clientTools, node.allowed, nextDefault), destinations);
      } else allNodeWorkflows.addEdge(current, nextDefault);
    }
    if (afterModelNodes.length > 0) {
      const firstAfterModel = afterModelNodes[0];
      const firstAfterModelNode = firstAfterModel.name;
      const modelPaths = this.#getModelPaths(clientTools, true).filter((p) => p !== require_ToolNode.TOOLS_NODE_NAME || toolClasses.filter(require_utils.isClientTool).length > 0);
      const allowJump = Boolean(firstAfterModel.allowed && firstAfterModel.allowed.length > 0);
      const destinations = modelPaths.map((p) => p === __langchain_langgraph.END ? exitNode : p);
      allNodeWorkflows.addConditionalEdges(firstAfterModelNode, this.#createAfterModelRouter(clientTools, allowJump, exitNode), destinations);
    }
    for (let i = afterAgentNodes.length - 1; i > 0; i--) {
      const node = afterAgentNodes[i];
      const current = node.name;
      const nextDefault = afterAgentNodes[i - 1].name;
      if (node.allowed && node.allowed.length > 0) {
        const hasTools = clientTools.length > 0;
        const allowedMapped = node.allowed.map((t) => require_utils$1.parseJumpToTarget(t)).filter((dest) => dest !== require_ToolNode.TOOLS_NODE_NAME || hasTools);
        const destinations = Array.from(new Set([nextDefault, ...allowedMapped]));
        allNodeWorkflows.addConditionalEdges(current, this.#createAfterModelSequenceRouter(clientTools, node.allowed, nextDefault), destinations);
      } else allNodeWorkflows.addEdge(current, nextDefault);
    }
    if (afterAgentNodes.length > 0) {
      const firstAfterAgent = afterAgentNodes[0];
      const firstAfterAgentNode = firstAfterAgent.name;
      if (firstAfterAgent.allowed && firstAfterAgent.allowed.length > 0) {
        const hasTools = clientTools.length > 0;
        const allowedMapped = firstAfterAgent.allowed.map((t) => require_utils$1.parseJumpToTarget(t)).filter((dest) => dest !== require_ToolNode.TOOLS_NODE_NAME || hasTools);
        /**
         * For after_agent, only use explicitly allowed destinations (don't add loopEntryNode)
         * The default destination (when no jump occurs) should be END
         */
        const destinations = Array.from(new Set([__langchain_langgraph.END, ...allowedMapped]));
        allNodeWorkflows.addConditionalEdges(firstAfterAgentNode, this.#createAfterModelSequenceRouter(clientTools, firstAfterAgent.allowed, __langchain_langgraph.END), destinations);
      } else allNodeWorkflows.addEdge(firstAfterAgentNode, __langchain_langgraph.END);
    }
    /**
     * add edges for tools node
     */
    if (clientTools.length > 0) {
      const toolReturnTarget = loopEntryNode;
      if (shouldReturnDirect.size > 0) allNodeWorkflows.addConditionalEdges(require_ToolNode.TOOLS_NODE_NAME, this.#createToolsRouter(shouldReturnDirect, exitNode), [toolReturnTarget, exitNode]);
      else allNodeWorkflows.addEdge(require_ToolNode.TOOLS_NODE_NAME, toolReturnTarget);
    }
    /**
     * compile the graph
     */
    this.#graph = allNodeWorkflows.compile({
      checkpointer: this.options.checkpointer,
      store: this.options.store,
      name: this.options.name,
      description: this.options.description
    });
  }
  /**
   * Get the compiled {@link https://docs.langchain.com/oss/javascript/langgraph/use-graph-api | StateGraph}.
   */
  get graph() {
    return this.#graph;
  }
  /**
   * Get possible edge destinations from model node.
   * @param toolClasses names of tools to call
   * @param includeModelRequest whether to include "model_request" as a valid path (for jumpTo routing)
   * @returns list of possible edge destinations
   */
  #getModelPaths(toolClasses, includeModelRequest = false) {
    const paths = [];
    if (toolClasses.length > 0) paths.push(require_ToolNode.TOOLS_NODE_NAME);
    if (includeModelRequest) paths.push(require_AgentNode.AGENT_NODE_NAME);
    paths.push(__langchain_langgraph.END);
    return paths;
  }
  /**
   * Create routing function for tools node conditional edges.
   */
  #createToolsRouter(shouldReturnDirect, exitNode) {
    return (state) => {
      const builtInState = state;
      const messages = builtInState.messages;
      const lastMessage = messages[messages.length - 1];
      if (__langchain_core_messages.ToolMessage.isInstance(lastMessage) && lastMessage.name && shouldReturnDirect.has(lastMessage.name)) return this.options.responseFormat ? require_AgentNode.AGENT_NODE_NAME : exitNode;
      return require_AgentNode.AGENT_NODE_NAME;
    };
  }
  /**
   * Create routing function for model node conditional edges.
   * @param exitNode - The exit node to route to (could be after_agent or END)
   */
  #createModelRouter(exitNode = __langchain_langgraph.END) {
    /**
     * determine if the agent should continue or not
     */
    return (state) => {
      const builtInState = state;
      const messages = builtInState.messages;
      const lastMessage = messages.at(-1);
      if (!__langchain_core_messages.AIMessage.isInstance(lastMessage) || !lastMessage.tool_calls || lastMessage.tool_calls.length === 0) return exitNode;
      const hasOnlyStructuredResponseCalls = lastMessage.tool_calls.every((toolCall) => toolCall.name.startsWith("extract-"));
      if (hasOnlyStructuredResponseCalls) return exitNode;
      /**
       * The tool node processes a single message.
       */
      if (this.#toolBehaviorVersion === "v1") return require_ToolNode.TOOLS_NODE_NAME;
      /**
       * Route to tools node (filter out any structured response tool calls)
       */
      const regularToolCalls = lastMessage.tool_calls.filter((toolCall) => !toolCall.name.startsWith("extract-"));
      if (regularToolCalls.length === 0) return exitNode;
      return regularToolCalls.map((toolCall) => new __langchain_langgraph.Send(require_ToolNode.TOOLS_NODE_NAME, { ...state, lg_tool_call: toolCall }));
    };
  }
  /**
   * Create routing function for jumpTo functionality after afterModel hooks.
   *
   * This router checks if the `jumpTo` property is set in the state after afterModel middleware
   * execution. If set, it routes to the specified target ("model_request" or "tools").
   * If not set, it falls back to the normal model routing logic for afterModel context.
   *
   * The jumpTo property is automatically cleared after use to prevent infinite loops.
   *
   * @param toolClasses - Available tool classes for validation
   * @param allowJump - Whether jumping is allowed
   * @param exitNode - The exit node to route to (could be after_agent or END)
   * @returns Router function that handles jumpTo logic and normal routing
   */
  #createAfterModelRouter(toolClasses, allowJump, exitNode) {
    const hasStructuredResponse = Boolean(this.options.responseFormat);
    return (state) => {
      const builtInState = state;
      const messages = builtInState.messages;
      const lastMessage = messages.at(-1);
      if (__langchain_core_messages.AIMessage.isInstance(lastMessage) && (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0)) return exitNode;
      if (allowJump && builtInState.jumpTo) {
        const destination = require_utils$1.parseJumpToTarget(builtInState.jumpTo);
        if (destination === __langchain_langgraph.END) return exitNode;
        if (destination === require_ToolNode.TOOLS_NODE_NAME) {
          if (toolClasses.length === 0) return exitNode;
          return new __langchain_langgraph.Send(require_ToolNode.TOOLS_NODE_NAME, { ...state, jumpTo: void 0 });
        }
        return new __langchain_langgraph.Send(require_AgentNode.AGENT_NODE_NAME, { ...state, jumpTo: void 0 });
      }
      const toolMessages = messages.filter(__langchain_core_messages.ToolMessage.isInstance);
      const lastAiMessage = messages.filter(__langchain_core_messages.AIMessage.isInstance).at(-1);
      const pendingToolCalls = lastAiMessage?.tool_calls?.filter((call) => !toolMessages.some((m) => m.tool_call_id === call.id));
      if (pendingToolCalls && pendingToolCalls.length > 0) return pendingToolCalls.map((toolCall) => new __langchain_langgraph.Send(require_ToolNode.TOOLS_NODE_NAME, { ...state, lg_tool_call: toolCall }));
      const hasStructuredResponseCalls = lastAiMessage?.tool_calls?.some((toolCall) => toolCall.name.startsWith("extract-"));
      if (pendingToolCalls && pendingToolCalls.length === 0 && !hasStructuredResponseCalls && hasStructuredResponse) return require_AgentNode.AGENT_NODE_NAME;
      if (!__langchain_core_messages.AIMessage.isInstance(lastMessage) || !lastMessage.tool_calls || lastMessage.tool_calls.length === 0) return exitNode;
      const hasOnlyStructuredResponseCalls = lastMessage.tool_calls.every((toolCall) => toolCall.name.startsWith("extract-"));
      const hasRegularToolCalls = lastMessage.tool_calls.some((toolCall) => !toolCall.name.startsWith("extract-"));
      if (hasOnlyStructuredResponseCalls || !hasRegularToolCalls) return exitNode;
      /**
       * For routing from afterModel nodes, always use simple string paths
       * The Send API is handled at the model_request node level
       */
      return require_ToolNode.TOOLS_NODE_NAME;
    };
  }
  /**
   * Router for afterModel sequence nodes (connecting later middlewares to earlier ones),
   * honoring allowed jump targets and defaulting to the next node.
   */
  #createAfterModelSequenceRouter(toolClasses, allowed, nextDefault) {
    const allowedSet = new Set(allowed.map((t) => require_utils$1.parseJumpToTarget(t)));
    return (state) => {
      const builtInState = state;
      if (builtInState.jumpTo) {
        const dest = require_utils$1.parseJumpToTarget(builtInState.jumpTo);
        if (dest === __langchain_langgraph.END && allowedSet.has(__langchain_langgraph.END)) return __langchain_langgraph.END;
        if (dest === require_ToolNode.TOOLS_NODE_NAME && allowedSet.has(require_ToolNode.TOOLS_NODE_NAME)) {
          if (toolClasses.length === 0) return __langchain_langgraph.END;
          return new __langchain_langgraph.Send(require_ToolNode.TOOLS_NODE_NAME, { ...state, jumpTo: void 0 });
        }
        if (dest === require_AgentNode.AGENT_NODE_NAME && allowedSet.has(require_AgentNode.AGENT_NODE_NAME)) return new __langchain_langgraph.Send(require_AgentNode.AGENT_NODE_NAME, { ...state, jumpTo: void 0 });
      }
      return nextDefault;
    };
  }
  /**
   * Create routing function for jumpTo functionality after beforeAgent hooks.
   * Falls back to the default next node if no jumpTo is present.
   * When jumping to END, routes to exitNode (which could be an afterAgent node).
   */
  #createBeforeAgentRouter(toolClasses, nextDefault, exitNode) {
    return (state) => {
      const builtInState = state;
      if (!builtInState.jumpTo) return nextDefault;
      const destination = require_utils$1.parseJumpToTarget(builtInState.jumpTo);
      if (destination === __langchain_langgraph.END)
        /**
         * When beforeAgent jumps to END, route to exitNode (first afterAgent node)
         */
        return exitNode;
      if (destination === require_ToolNode.TOOLS_NODE_NAME) {
        if (toolClasses.length === 0) return exitNode;
        return new __langchain_langgraph.Send(require_ToolNode.TOOLS_NODE_NAME, { ...state, jumpTo: void 0 });
      }
      return new __langchain_langgraph.Send(require_AgentNode.AGENT_NODE_NAME, { ...state, jumpTo: void 0 });
    };
  }
  /**
   * Create routing function for jumpTo functionality after beforeModel hooks.
   * Falls back to the default next node if no jumpTo is present.
   */
  #createBeforeModelRouter(toolClasses, nextDefault) {
    return (state) => {
      const builtInState = state;
      if (!builtInState.jumpTo) return nextDefault;
      const destination = require_utils$1.parseJumpToTarget(builtInState.jumpTo);
      if (destination === __langchain_langgraph.END) return __langchain_langgraph.END;
      if (destination === require_ToolNode.TOOLS_NODE_NAME) {
        if (toolClasses.length === 0) return __langchain_langgraph.END;
        return new __langchain_langgraph.Send(require_ToolNode.TOOLS_NODE_NAME, { ...state, jumpTo: void 0 });
      }
      return new __langchain_langgraph.Send(require_AgentNode.AGENT_NODE_NAME, { ...state, jumpTo: void 0 });
    };
  }
  /**
   * Initialize middleware states if not already present in the input state.
   */
  async #initializeMiddlewareStates(state, config) {
    if (!this.options.middleware || this.options.middleware.length === 0 || state instanceof __langchain_langgraph.Command || !state) return state;
    const defaultStates = await require_utils$1.initializeMiddlewareStates(this.options.middleware, state);
    const threadState = await this.#graph.getState(config).catch(() => ({ values: {} }));
    const updatedState = { ...threadState.values, ...state };
    if (!updatedState) return updatedState;
    for (const [key, value] of Object.entries(defaultStates)) if (!(key in updatedState)) updatedState[key] = value;
    return updatedState;
  }
  /**
   * Executes the agent with the given state and returns the final state after all processing.
   *
   * This method runs the agent's entire workflow to completion, including:
   * - Processing the input messages through any configured middleware
   * - Calling the language model to generate responses
   * - Executing any tool calls made by the model
   * - Running all middleware hooks (beforeModel, afterModel, etc.)
   *
   * @param state - The initial state for the agent execution. Can be:
   * - An object containing `messages` array and any middleware-specific state properties
   * - A Command object for more advanced control flow
   *
   * @param config - Optional runtime configuration including:
   * @param config.context - The context for the agent execution.
   * @param config.configurable - LangGraph configuration options like `thread_id`, `run_id`, etc.
   * @param config.store - The store for the agent execution for persisting state, see more in {@link https://docs.langchain.com/oss/javascript/langgraph/memory#memory-storage | Memory storage}.
   * @param config.signal - An optional {@link https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal | `AbortSignal`} for the agent execution.
   * @param config.recursionLimit - The recursion limit for the agent execution.
   *
   * @returns A Promise that resolves to the final agent state after execution completes.
   * The returned state includes:
   * - a `messages` property containing an array with all messages (input, AI responses, tool calls/results)
   * - a `structuredResponse` property containing the structured response (if configured)
   * - all state values defined in the middleware
   *
   * @example
   * ```typescript
   * const agent = new ReactAgent({
   *   model: myModel,
   *   tools: [calculator, webSearch],
   *   responseFormat: z.object({
   *     weather: z.string(),
   *   }),
   * });
   *
   * const result = await agent.invoke({
   *   messages: [{ role: "human", content: "What's the weather in Paris?" }]
   * });
   *
   * console.log(result.structuredResponse.weather); // outputs: "It's sunny and 75°F."
   * ```
   */
  async invoke(state, config) {
    const initializedState = await this.#initializeMiddlewareStates(state, config);
    return this.#graph.invoke(initializedState, config);
  }
  /**
   * Executes the agent with streaming, returning an async iterable of state updates as they occur.
   *
   * This method runs the agent's workflow similarly to `invoke`, but instead of waiting for
   * completion, it streams high-level state updates in real-time. This allows you to:
   * - Display intermediate results to users as they're generated
   * - Monitor the agent's progress through each step
   * - React to state changes as nodes complete
   *
   * For more granular event-level streaming (like individual LLM tokens), use `streamEvents` instead.
   *
   * @param state - The initial state for the agent execution. Can be:
   * - An object containing `messages` array and any middleware-specific state properties
   * - A Command object for more advanced control flow
   *
   * @param config - Optional runtime configuration including:
   * @param config.context - The context for the agent execution.
   * @param config.configurable - LangGraph configuration options like `thread_id`, `run_id`, etc.
   * @param config.store - The store for the agent execution for persisting state, see more in {@link https://docs.langchain.com/oss/javascript/langgraph/memory#memory-storage | Memory storage}.
   * @param config.signal - An optional {@link https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal | `AbortSignal`} for the agent execution.
   * @param config.streamMode - The streaming mode for the agent execution, see more in {@link https://docs.langchain.com/oss/javascript/langgraph/streaming#supported-stream-modes | Supported stream modes}.
   * @param config.recursionLimit - The recursion limit for the agent execution.
   *
   * @returns A Promise that resolves to an IterableReadableStream of state updates.
   * Each update contains the current state after a node completes.
   *
   * @example
   * ```typescript
   * const agent = new ReactAgent({
   *   model: myModel,
   *   tools: [calculator, webSearch]
   * });
   *
   * const stream = await agent.stream({
   *   messages: [{ role: "human", content: "What's 2+2 and the weather in NYC?" }]
   * });
   *
   * for await (const chunk of stream) {
   *   console.log(chunk); // State update from each node
   * }
   * ```
   */
  async stream(state, config) {
    const initializedState = await this.#initializeMiddlewareStates(state, config);
    return this.#graph.stream(initializedState, config);
  }
  /**
   * Visualize the graph as a PNG image.
   * @param params - Parameters for the drawMermaidPng method.
   * @param params.withStyles - Whether to include styles in the graph.
   * @param params.curveStyle - The style of the graph's curves.
   * @param params.nodeColors - The colors of the graph's nodes.
   * @param params.wrapLabelNWords - The maximum number of words to wrap in a node's label.
   * @param params.backgroundColor - The background color of the graph.
   * @returns PNG image as a buffer
   */
  async drawMermaidPng(params) {
    const representation = await this.#graph.getGraphAsync();
    const image = await representation.drawMermaidPng(params);
    const arrayBuffer = await image.arrayBuffer();
    const buffer = new Uint8Array(arrayBuffer);
    return buffer;
  }
  /**
   * Draw the graph as a Mermaid string.
   * @param params - Parameters for the drawMermaid method.
   * @param params.withStyles - Whether to include styles in the graph.
   * @param params.curveStyle - The style of the graph's curves.
   * @param params.nodeColors - The colors of the graph's nodes.
   * @param params.wrapLabelNWords - The maximum number of words to wrap in a node's label.
   * @param params.backgroundColor - The background color of the graph.
   * @returns Mermaid string
   */
  async drawMermaid(params) {
    const representation = await this.#graph.getGraphAsync();
    return representation.drawMermaid(params);
  }
  /**
   * The following are internal methods to enable support for LangGraph Platform.
   * They are not part of the createAgent public API.
   *
   * Note: we intentionally return as `never` to avoid type errors due to type inference.
   */
  /**
   * @internal
   */
  streamEvents(state, config, streamOptions) {
    return this.#graph.streamEvents(state, { ...config, version: config?.version ?? "v2" }, streamOptions);
  }
  /**
   * @internal
   */
  getGraphAsync(config) {
    return this.#graph.getGraphAsync(config);
  }
  /**
   * @internal
   */
  getState(config, options) {
    return this.#graph.getState(config, options);
  }
  /**
   * @internal
   */
  getStateHistory(config, options) {
    return this.#graph.getStateHistory(config, options);
  }
  /**
   * @internal
   */
  getSubgraphs(namespace, recurse) {
    return this.#graph.getSubgraphs(namespace, recurse);
  }
  /**
   * @internal
   */
  getSubgraphAsync(namespace, recurse) {
    return this.#graph.getSubgraphsAsync(namespace, recurse);
  }
  /**
   * @internal
   */
  updateState(inputConfig, values, asNode) {
    return this.#graph.updateState(inputConfig, values, asNode);
  }
  /**
   * @internal
   */
  get builder() {
    return this.#graph.builder;
  }
};

//#endregion
exports.ReactAgent = ReactAgent;
//# sourceMappingURL=ReactAgent.cjs.map
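/**
 * Usage sketch (illustrative only, not part of this module): a minimal example of
 * how the ReactAgent compiled above is typically obtained and driven through the
 * public `createAgent` API, as shown in the class-level example. The model string
 * "openai:gpt-4o-mini" and the `add` tool below are assumptions made purely for
 * illustration; substitute your own model and tools.
 *
 * ```typescript
 * import { createAgent } from "langchain";
 * import { tool } from "@langchain/core/tools";
 * import { z } from "zod";
 *
 * // Hypothetical tool, defined only for this sketch.
 * const add = tool(({ a, b }) => `${a + b}`, {
 *   name: "add",
 *   description: "Add two numbers",
 *   schema: z.object({ a: z.number(), b: z.number() }),
 * });
 *
 * // createAgent wires up the graph exactly as the constructor above does.
 * const agent = createAgent({ model: "openai:gpt-4o-mini", tools: [add] });
 *
 * // invoke() runs the graph to completion; stream() yields per-node updates.
 * const result = await agent.invoke({
 *   messages: [{ role: "human", content: "What is 2 + 3?" }],
 * });
 * console.log(result.messages.at(-1)?.content);
 *
 * // drawMermaid() renders the compiled graph topology as a Mermaid string.
 * console.log(await agent.drawMermaid());
 * ```
 */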