/*
 * langchain
 * Version: (not captured)
 * TypeScript bindings for langchain
 * 641 lines (639 loc) • 28.8 kB
 * JavaScript
 */
const require_rolldown_runtime = require('../_virtual/rolldown_runtime.cjs');
const require_annotation = require('./annotation.cjs');
const require_utils = require('./utils.cjs');
const require_utils$1 = require('./nodes/utils.cjs');
const require_AgentNode = require('./nodes/AgentNode.cjs');
const require_ToolNode = require('./nodes/ToolNode.cjs');
const require_utils$2 = require('./middleware/utils.cjs');
const require_BeforeAgentNode = require('./nodes/BeforeAgentNode.cjs');
const require_BeforeModelNode = require('./nodes/BeforeModelNode.cjs');
const require_AfterModelNode = require('./nodes/AfterModelNode.cjs');
const require_AfterAgentNode = require('./nodes/AfterAgentNode.cjs');
const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));
const __langchain_langgraph = require_rolldown_runtime.__toESM(require("@langchain/langgraph"));
//#region src/agents/ReactAgent.ts
var ReactAgent = class {
#graph;
#toolBehaviorVersion = "v2";
#agentNode;
constructor(options) {
this.options = options;
this.#toolBehaviorVersion = options.version ?? this.#toolBehaviorVersion;
/**
* validate that model option is provided
*/
if (!options.model) throw new Error("`model` option is required to create an agent.");
/**
* Check if the LLM already has bound tools and throw if it does.
*/
if (typeof options.model !== "string") require_utils.validateLLMHasNoBoundTools(options.model);
/**
* define complete list of tools based on options and middleware
*/
const middlewareTools = this.options.middleware?.filter((m) => m.tools).flatMap((m) => m.tools) ?? [];
const toolClasses = [...options.tools ?? [], ...middlewareTools];
/**
* If any of the tools are configured to return_directly after running,
* our graph needs to check if these were called
*/
const shouldReturnDirect = new Set(toolClasses.filter(require_utils.isClientTool).filter((tool) => "returnDirect" in tool && tool.returnDirect).map((tool) => tool.name));
/**
* Create a schema that merges agent base schema with middleware state schemas
* Using Zod with withLangGraph ensures LangGraph Studio gets proper metadata
*/
const schema = require_annotation.createAgentAnnotationConditional(this.options.responseFormat !== void 0, this.options.stateSchema, this.options.middleware);
const workflow = new __langchain_langgraph.StateGraph(schema, this.options.contextSchema);
const allNodeWorkflows = workflow;
const beforeAgentNodes = [];
const beforeModelNodes = [];
const afterModelNodes = [];
const afterAgentNodes = [];
const wrapModelCallHookMiddleware = [];
this.#agentNode = new require_AgentNode.AgentNode({
model: this.options.model,
systemPrompt: this.options.systemPrompt,
includeAgentName: this.options.includeAgentName,
name: this.options.name,
responseFormat: this.options.responseFormat,
middleware: this.options.middleware,
toolClasses,
shouldReturnDirect,
signal: this.options.signal,
wrapModelCallHookMiddleware
});
const middlewareNames = /* @__PURE__ */ new Set();
const middleware = this.options.middleware ?? [];
for (let i = 0; i < middleware.length; i++) {
let beforeAgentNode;
let beforeModelNode;
let afterModelNode;
let afterAgentNode;
const m = middleware[i];
if (middlewareNames.has(m.name)) throw new Error(`Middleware ${m.name} is defined multiple times`);
middlewareNames.add(m.name);
if (m.beforeAgent) {
beforeAgentNode = new require_BeforeAgentNode.BeforeAgentNode(m, { getPrivateState: () => this.#agentNode.getState()._privateState });
const name = `${m.name}.before_agent`;
beforeAgentNodes.push({
index: i,
name,
allowed: require_utils$2.getHookConstraint(m.beforeAgent)
});
allNodeWorkflows.addNode(name, beforeAgentNode, beforeAgentNode.nodeOptions);
}
if (m.beforeModel) {
beforeModelNode = new require_BeforeModelNode.BeforeModelNode(m, { getPrivateState: () => this.#agentNode.getState()._privateState });
const name = `${m.name}.before_model`;
beforeModelNodes.push({
index: i,
name,
allowed: require_utils$2.getHookConstraint(m.beforeModel)
});
allNodeWorkflows.addNode(name, beforeModelNode, beforeModelNode.nodeOptions);
}
if (m.afterModel) {
afterModelNode = new require_AfterModelNode.AfterModelNode(m, { getPrivateState: () => this.#agentNode.getState()._privateState });
const name = `${m.name}.after_model`;
afterModelNodes.push({
index: i,
name,
allowed: require_utils$2.getHookConstraint(m.afterModel)
});
allNodeWorkflows.addNode(name, afterModelNode, afterModelNode.nodeOptions);
}
if (m.afterAgent) {
afterAgentNode = new require_AfterAgentNode.AfterAgentNode(m, { getPrivateState: () => this.#agentNode.getState()._privateState });
const name = `${m.name}.after_agent`;
afterAgentNodes.push({
index: i,
name,
allowed: require_utils$2.getHookConstraint(m.afterAgent)
});
allNodeWorkflows.addNode(name, afterAgentNode, afterAgentNode.nodeOptions);
}
if (m.wrapModelCall) wrapModelCallHookMiddleware.push([m, () => ({
...beforeAgentNode?.getState(),
...beforeModelNode?.getState(),
...afterModelNode?.getState(),
...afterAgentNode?.getState()
})]);
}
/**
* Add Nodes
*/
allNodeWorkflows.addNode("model_request", this.#agentNode, require_AgentNode.AgentNode.nodeOptions);
/**
* Collect and compose wrapToolCall handlers from middleware
* Wrap each handler with error handling and validation
*/
const wrapToolCallHandler = require_utils.wrapToolCall(middleware);
/**
* add single tool node for all tools
*/
if (toolClasses.filter(require_utils.isClientTool).length > 0) {
const toolNode = new require_ToolNode.ToolNode(toolClasses.filter(require_utils.isClientTool), {
signal: this.options.signal,
wrapToolCall: wrapToolCallHandler,
getPrivateState: () => this.#agentNode.getState()._privateState
});
allNodeWorkflows.addNode("tools", toolNode);
}
/**
* Add Edges
*/
let entryNode;
if (beforeAgentNodes.length > 0) entryNode = beforeAgentNodes[0].name;
else if (beforeModelNodes.length > 0) entryNode = beforeModelNodes[0].name;
else entryNode = "model_request";
const loopEntryNode = beforeModelNodes.length > 0 ? beforeModelNodes[0].name : "model_request";
const exitNode = afterAgentNodes.length > 0 ? afterAgentNodes[afterAgentNodes.length - 1].name : __langchain_langgraph.END;
allNodeWorkflows.addEdge(__langchain_langgraph.START, entryNode);
for (let i = 0; i < beforeAgentNodes.length; i++) {
const node = beforeAgentNodes[i];
const current = node.name;
const isLast = i === beforeAgentNodes.length - 1;
const nextDefault = isLast ? loopEntryNode : beforeAgentNodes[i + 1].name;
if (node.allowed && node.allowed.length > 0) {
const hasTools = toolClasses.filter(require_utils.isClientTool).length > 0;
const allowedMapped = node.allowed.map((t) => require_utils$1.parseJumpToTarget(t)).filter((dest) => dest !== "tools" || hasTools);
const destinations = Array.from(new Set([nextDefault, ...allowedMapped.map((dest) => dest === __langchain_langgraph.END ? exitNode : dest)]));
allNodeWorkflows.addConditionalEdges(current, this.#createBeforeAgentRouter(toolClasses.filter(require_utils.isClientTool), nextDefault, exitNode), destinations);
} else allNodeWorkflows.addEdge(current, nextDefault);
}
for (let i = 0; i < beforeModelNodes.length; i++) {
const node = beforeModelNodes[i];
const current = node.name;
const isLast = i === beforeModelNodes.length - 1;
const nextDefault = isLast ? "model_request" : beforeModelNodes[i + 1].name;
if (node.allowed && node.allowed.length > 0) {
const hasTools = toolClasses.filter(require_utils.isClientTool).length > 0;
const allowedMapped = node.allowed.map((t) => require_utils$1.parseJumpToTarget(t)).filter((dest) => dest !== "tools" || hasTools);
const destinations = Array.from(new Set([nextDefault, ...allowedMapped]));
allNodeWorkflows.addConditionalEdges(current, this.#createBeforeModelRouter(toolClasses.filter(require_utils.isClientTool), nextDefault), destinations);
} else allNodeWorkflows.addEdge(current, nextDefault);
}
const lastAfterModelNode = afterModelNodes.at(-1);
if (afterModelNodes.length > 0 && lastAfterModelNode) allNodeWorkflows.addEdge("model_request", lastAfterModelNode.name);
else {
const modelPaths = this.#getModelPaths(toolClasses.filter(require_utils.isClientTool));
const destinations = modelPaths.map((p) => p === __langchain_langgraph.END ? exitNode : p);
if (destinations.length === 1) allNodeWorkflows.addEdge("model_request", destinations[0]);
else allNodeWorkflows.addConditionalEdges("model_request", this.#createModelRouter(exitNode), destinations);
}
for (let i = afterModelNodes.length - 1; i > 0; i--) {
const node = afterModelNodes[i];
const current = node.name;
const nextDefault = afterModelNodes[i - 1].name;
if (node.allowed && node.allowed.length > 0) {
const hasTools = toolClasses.filter(require_utils.isClientTool).length > 0;
const allowedMapped = node.allowed.map((t) => require_utils$1.parseJumpToTarget(t)).filter((dest) => dest !== "tools" || hasTools);
const destinations = Array.from(new Set([nextDefault, ...allowedMapped]));
allNodeWorkflows.addConditionalEdges(current, this.#createAfterModelSequenceRouter(toolClasses.filter(require_utils.isClientTool), node.allowed, nextDefault), destinations);
} else allNodeWorkflows.addEdge(current, nextDefault);
}
if (afterModelNodes.length > 0) {
const firstAfterModel = afterModelNodes[0];
const firstAfterModelNode = firstAfterModel.name;
const modelPaths = this.#getModelPaths(toolClasses.filter(require_utils.isClientTool), true).filter((p) => p !== "tools" || toolClasses.filter(require_utils.isClientTool).length > 0);
const allowJump = Boolean(firstAfterModel.allowed && firstAfterModel.allowed.length > 0);
const destinations = modelPaths.map((p) => p === __langchain_langgraph.END ? exitNode : p);
allNodeWorkflows.addConditionalEdges(firstAfterModelNode, this.#createAfterModelRouter(toolClasses.filter(require_utils.isClientTool), allowJump, exitNode), destinations);
}
for (let i = afterAgentNodes.length - 1; i > 0; i--) {
const node = afterAgentNodes[i];
const current = node.name;
const nextDefault = afterAgentNodes[i - 1].name;
if (node.allowed && node.allowed.length > 0) {
const hasTools = toolClasses.filter(require_utils.isClientTool).length > 0;
const allowedMapped = node.allowed.map((t) => require_utils$1.parseJumpToTarget(t)).filter((dest) => dest !== "tools" || hasTools);
const destinations = Array.from(new Set([nextDefault, ...allowedMapped]));
allNodeWorkflows.addConditionalEdges(current, this.#createAfterModelSequenceRouter(toolClasses.filter(require_utils.isClientTool), node.allowed, nextDefault), destinations);
} else allNodeWorkflows.addEdge(current, nextDefault);
}
if (afterAgentNodes.length > 0) {
const firstAfterAgent = afterAgentNodes[0];
const firstAfterAgentNode = firstAfterAgent.name;
if (firstAfterAgent.allowed && firstAfterAgent.allowed.length > 0) {
const hasTools = toolClasses.filter(require_utils.isClientTool).length > 0;
const allowedMapped = firstAfterAgent.allowed.map((t) => require_utils$1.parseJumpToTarget(t)).filter((dest) => dest !== "tools" || hasTools);
/**
* For after_agent, only use explicitly allowed destinations (don't add loopEntryNode)
* The default destination (when no jump occurs) should be END
*/
const destinations = Array.from(new Set([__langchain_langgraph.END, ...allowedMapped]));
allNodeWorkflows.addConditionalEdges(firstAfterAgentNode, this.#createAfterModelSequenceRouter(toolClasses.filter(require_utils.isClientTool), firstAfterAgent.allowed, __langchain_langgraph.END), destinations);
} else allNodeWorkflows.addEdge(firstAfterAgentNode, __langchain_langgraph.END);
}
/**
* add edges for tools node
*/
if (toolClasses.filter(require_utils.isClientTool).length > 0) {
const toolReturnTarget = loopEntryNode;
if (shouldReturnDirect.size > 0) allNodeWorkflows.addConditionalEdges("tools", this.#createToolsRouter(shouldReturnDirect, exitNode), [toolReturnTarget, exitNode]);
else allNodeWorkflows.addEdge("tools", toolReturnTarget);
}
/**
* compile the graph
*/
this.#graph = allNodeWorkflows.compile({
checkpointer: this.options.checkpointer,
store: this.options.store,
name: this.options.name,
description: this.options.description
});
}
/**
* Get the compiled {@link https://docs.langchain.com/oss/javascript/langgraph/use-graph-api | StateGraph}.
*/
get graph() {
return this.#graph;
}
/**
* Get possible edge destinations from model node.
* @param toolClasses names of tools to call
* @param includeModelRequest whether to include "model_request" as a valid path (for jumpTo routing)
* @returns list of possible edge destinations
*/
#getModelPaths(toolClasses, includeModelRequest = false) {
const paths = [];
if (toolClasses.length > 0) paths.push("tools");
if (includeModelRequest) paths.push("model_request");
paths.push(__langchain_langgraph.END);
return paths;
}
/**
* Create routing function for tools node conditional edges.
*/
#createToolsRouter(shouldReturnDirect, exitNode) {
/**
* ToDo: fix type
*/
return (state) => {
const messages = state.messages;
const lastMessage = messages[messages.length - 1];
if (__langchain_core_messages.ToolMessage.isInstance(lastMessage) && lastMessage.name && shouldReturnDirect.has(lastMessage.name)) return this.options.responseFormat ? "model_request" : exitNode;
return "model_request";
};
}
/**
* Create routing function for model node conditional edges.
* @param exitNode - The exit node to route to (could be after_agent or END)
*/
#createModelRouter(exitNode = __langchain_langgraph.END) {
/**
* determine if the agent should continue or not
*/
return (state) => {
const messages = state.messages;
const lastMessage = messages.at(-1);
if (!__langchain_core_messages.AIMessage.isInstance(lastMessage) || !lastMessage.tool_calls || lastMessage.tool_calls.length === 0) return exitNode;
const hasOnlyStructuredResponseCalls = lastMessage.tool_calls.every((toolCall) => toolCall.name.startsWith("extract-"));
if (hasOnlyStructuredResponseCalls) return exitNode;
/**
* The tool node processes a single message.
*/
if (this.#toolBehaviorVersion === "v1") return "tools";
/**
* Route to tools node (filter out any structured response tool calls)
*/
const regularToolCalls = lastMessage.tool_calls.filter((toolCall) => !toolCall.name.startsWith("extract-"));
if (regularToolCalls.length === 0) return exitNode;
return regularToolCalls.map((toolCall) => new __langchain_langgraph.Send("tools", {
...state,
lg_tool_call: toolCall
}));
};
}
/**
* Create routing function for jumpTo functionality after afterModel hooks.
*
* This router checks if the `jumpTo` property is set in the state after afterModel middleware
* execution. If set, it routes to the specified target ("model_request" or "tools").
* If not set, it falls back to the normal model routing logic for afterModel context.
*
* The jumpTo property is automatically cleared after use to prevent infinite loops.
*
* @param toolClasses - Available tool classes for validation
* @param allowJump - Whether jumping is allowed
* @param exitNode - The exit node to route to (could be after_agent or END)
* @returns Router function that handles jumpTo logic and normal routing
*/
#createAfterModelRouter(toolClasses, allowJump, exitNode) {
const hasStructuredResponse = Boolean(this.options.responseFormat);
return (state) => {
const messages = state.messages;
const lastMessage = messages.at(-1);
if (__langchain_core_messages.AIMessage.isInstance(lastMessage) && (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0)) return exitNode;
if (allowJump && state.jumpTo) {
if (state.jumpTo === __langchain_langgraph.END) return exitNode;
if (state.jumpTo === "tools") {
if (toolClasses.length === 0) return exitNode;
return new __langchain_langgraph.Send("tools", {
...state,
jumpTo: void 0
});
}
return new __langchain_langgraph.Send("model_request", {
...state,
jumpTo: void 0
});
}
const toolMessages = messages.filter(__langchain_core_messages.ToolMessage.isInstance);
const lastAiMessage = messages.filter(__langchain_core_messages.AIMessage.isInstance).at(-1);
const pendingToolCalls = lastAiMessage?.tool_calls?.filter((call) => !toolMessages.some((m) => m.tool_call_id === call.id));
if (pendingToolCalls && pendingToolCalls.length > 0) return pendingToolCalls.map((toolCall) => new __langchain_langgraph.Send("tools", {
...state,
lg_tool_call: toolCall
}));
const hasStructuredResponseCalls = lastAiMessage?.tool_calls?.some((toolCall) => toolCall.name.startsWith("extract-"));
if (pendingToolCalls && pendingToolCalls.length === 0 && !hasStructuredResponseCalls && hasStructuredResponse) return "model_request";
if (!__langchain_core_messages.AIMessage.isInstance(lastMessage) || !lastMessage.tool_calls || lastMessage.tool_calls.length === 0) return exitNode;
const hasOnlyStructuredResponseCalls = lastMessage.tool_calls.every((toolCall) => toolCall.name.startsWith("extract-"));
const hasRegularToolCalls = lastMessage.tool_calls.some((toolCall) => !toolCall.name.startsWith("extract-"));
if (hasOnlyStructuredResponseCalls || !hasRegularToolCalls) return exitNode;
/**
* For routing from afterModel nodes, always use simple string paths
* The Send API is handled at the model_request node level
*/
return "tools";
};
}
/**
* Router for afterModel sequence nodes (connecting later middlewares to earlier ones),
* honoring allowed jump targets and defaulting to the next node.
*/
#createAfterModelSequenceRouter(toolClasses, allowed, nextDefault) {
const allowedSet = new Set(allowed.map((t) => require_utils$1.parseJumpToTarget(t)));
return (state) => {
if (state.jumpTo) {
const dest = require_utils$1.parseJumpToTarget(state.jumpTo);
if (dest === __langchain_langgraph.END && allowedSet.has(__langchain_langgraph.END)) return __langchain_langgraph.END;
if (dest === "tools" && allowedSet.has("tools")) {
if (toolClasses.length === 0) return __langchain_langgraph.END;
return new __langchain_langgraph.Send("tools", {
...state,
jumpTo: void 0
});
}
if (dest === "model_request" && allowedSet.has("model_request")) return new __langchain_langgraph.Send("model_request", {
...state,
jumpTo: void 0
});
}
return nextDefault;
};
}
/**
* Create routing function for jumpTo functionality after beforeAgent hooks.
* Falls back to the default next node if no jumpTo is present.
* When jumping to END, routes to exitNode (which could be an afterAgent node).
*/
#createBeforeAgentRouter(toolClasses, nextDefault, exitNode) {
return (state) => {
if (!state.jumpTo) return nextDefault;
const destination = require_utils$1.parseJumpToTarget(state.jumpTo);
if (destination === __langchain_langgraph.END) return exitNode;
if (destination === "tools") {
if (toolClasses.length === 0) return exitNode;
return new __langchain_langgraph.Send("tools", {
...state,
jumpTo: void 0
});
}
return new __langchain_langgraph.Send("model_request", {
...state,
jumpTo: void 0
});
};
}
/**
* Create routing function for jumpTo functionality after beforeModel hooks.
* Falls back to the default next node if no jumpTo is present.
*/
#createBeforeModelRouter(toolClasses, nextDefault) {
return (state) => {
if (!state.jumpTo) return nextDefault;
const destination = require_utils$1.parseJumpToTarget(state.jumpTo);
if (destination === __langchain_langgraph.END) return __langchain_langgraph.END;
if (destination === "tools") {
if (toolClasses.length === 0) return __langchain_langgraph.END;
return new __langchain_langgraph.Send("tools", {
...state,
jumpTo: void 0
});
}
return new __langchain_langgraph.Send("model_request", {
...state,
jumpTo: void 0
});
};
}
/**
* Initialize middleware states if not already present in the input state.
*/
async #initializeMiddlewareStates(state) {
if (!this.options.middleware || this.options.middleware.length === 0 || state instanceof __langchain_langgraph.Command || !state) return state;
const defaultStates = await require_utils$1.initializeMiddlewareStates(this.options.middleware, state);
const updatedState = { ...state };
if (!updatedState) return updatedState;
for (const [key, value] of Object.entries(defaultStates)) if (!(key in updatedState)) updatedState[key] = value;
return updatedState;
}
/**
* Populate the private state of the agent node from the previous state.
*/
async #populatePrivateState(config) {
/**
* not needed if thread_id is not provided
*/
if (!config?.configurable?.thread_id) return;
const prevState = await this.#graph.getState(config);
/**
* not need if state is empty
*/
if (!prevState.values._privateState) return;
this.#agentNode.setState({
structuredResponse: void 0,
_privateState: prevState.values._privateState
});
}
/**
* Executes the agent with the given state and returns the final state after all processing.
*
* This method runs the agent's entire workflow synchronously, including:
* - Processing the input messages through any configured middleware
* - Calling the language model to generate responses
* - Executing any tool calls made by the model
* - Running all middleware hooks (beforeModel, afterModel, etc.)
*
* @param state - The initial state for the agent execution. Can be:
* - An object containing `messages` array and any middleware-specific state properties
* - A Command object for more advanced control flow
*
* @param config - Optional runtime configuration including:
* @param config.context - The context for the agent execution.
* @param config.configurable - LangGraph configuration options like `thread_id`, `run_id`, etc.
* @param config.store - The store for the agent execution for persisting state, see more in {@link https://docs.langchain.com/oss/javascript/langgraph/memory#memory-storage | Memory storage}.
* @param config.signal - An optional {@link https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal | `AbortSignal`} for the agent execution.
* @param config.recursionLimit - The recursion limit for the agent execution.
*
* @returns A Promise that resolves to the final agent state after execution completes.
* The returned state includes:
* - a `messages` property containing an array with all messages (input, AI responses, tool calls/results)
* - a `structuredResponse` property containing the structured response (if configured)
* - all state values defined in the middleware
*
* @example
* ```typescript
* const agent = new ReactAgent({
* llm: myModel,
* tools: [calculator, webSearch],
* responseFormat: z.object({
* weather: z.string(),
* }),
* });
*
* const result = await agent.invoke({
* messages: [{ role: "human", content: "What's the weather in Paris?" }]
* });
*
* console.log(result.structuredResponse.weather); // outputs: "It's sunny and 75°F."
* ```
*/
async invoke(state, config) {
const initializedState = await this.#initializeMiddlewareStates(state);
await this.#populatePrivateState(config);
return this.#graph.invoke(initializedState, config);
}
/**
* Executes the agent with streaming, returning an async iterable of state updates as they occur.
*
* This method runs the agent's workflow similar to `invoke`, but instead of waiting for
* completion, it streams high-level state updates in real-time. This allows you to:
* - Display intermediate results to users as they're generated
* - Monitor the agent's progress through each step
* - React to state changes as nodes complete
*
* For more granular event-level streaming (like individual LLM tokens), use `streamEvents` instead.
*
* @param state - The initial state for the agent execution. Can be:
* - An object containing `messages` array and any middleware-specific state properties
* - A Command object for more advanced control flow
*
* @param config - Optional runtime configuration including:
* @param config.context - The context for the agent execution.
* @param config.configurable - LangGraph configuration options like `thread_id`, `run_id`, etc.
* @param config.store - The store for the agent execution for persisting state, see more in {@link https://docs.langchain.com/oss/javascript/langgraph/memory#memory-storage | Memory storage}.
* @param config.signal - An optional {@link https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal | `AbortSignal`} for the agent execution.
* @param config.streamMode - The streaming mode for the agent execution, see more in {@link https://docs.langchain.com/oss/javascript/langgraph/streaming#supported-stream-modes | Supported stream modes}.
* @param config.recursionLimit - The recursion limit for the agent execution.
*
* @returns A Promise that resolves to an IterableReadableStream of state updates.
* Each update contains the current state after a node completes.
*
* @example
* ```typescript
* const agent = new ReactAgent({
* llm: myModel,
* tools: [calculator, webSearch]
* });
*
* const stream = await agent.stream({
* messages: [{ role: "human", content: "What's 2+2 and the weather in NYC?" }]
* });
*
* for await (const chunk of stream) {
* console.log(chunk); // State update from each node
* }
* ```
*/
async stream(state, config) {
const initializedState = await this.#initializeMiddlewareStates(state);
return this.#graph.stream(initializedState, config);
}
/**
* Visualize the graph as a PNG image.
* @param params - Parameters for the drawMermaidPng method.
* @param params.withStyles - Whether to include styles in the graph.
* @param params.curveStyle - The style of the graph's curves.
* @param params.nodeColors - The colors of the graph's nodes.
* @param params.wrapLabelNWords - The maximum number of words to wrap in a node's label.
* @param params.backgroundColor - The background color of the graph.
* @returns PNG image as a buffer
*/
async drawMermaidPng(params) {
const representation = await this.#graph.getGraphAsync();
const image = await representation.drawMermaidPng(params);
const arrayBuffer = await image.arrayBuffer();
const buffer = new Uint8Array(arrayBuffer);
return buffer;
}
/**
* Draw the graph as a Mermaid string.
* @param params - Parameters for the drawMermaid method.
* @param params.withStyles - Whether to include styles in the graph.
* @param params.curveStyle - The style of the graph's curves.
* @param params.nodeColors - The colors of the graph's nodes.
* @param params.wrapLabelNWords - The maximum number of words to wrap in a node's label.
* @param params.backgroundColor - The background color of the graph.
* @returns Mermaid string
*/
async drawMermaid(params) {
const representation = await this.#graph.getGraphAsync();
return representation.drawMermaid(params);
}
/**
* The following are internal methods to enable support for LangGraph Platform.
* They are not part of the createAgent public API.
*
* Note: we intentionally return as `never` to avoid type errors due to type inference.
*/
/**
* @internal
*/
streamEvents(state, config, streamOptions) {
return this.#graph.streamEvents(state, {
...config,
version: config?.version ?? "v2"
}, streamOptions);
}
/**
* @internal
*/
getGraphAsync(config) {
return this.#graph.getGraphAsync(config);
}
/**
* @internal
*/
getState(config, options) {
return this.#graph.getState(config, options);
}
/**
* @internal
*/
getStateHistory(config, options) {
return this.#graph.getStateHistory(config, options);
}
/**
* @internal
*/
getSubgraphs(namespace, recurse) {
return this.#graph.getSubgraphs(namespace, recurse);
}
/**
* @internal
*/
getSubgraphAsync(namespace, recurse) {
return this.#graph.getSubgraphsAsync(namespace, recurse);
}
/**
* @internal
*/
updateState(inputConfig, values, asNode) {
return this.#graph.updateState(inputConfig, values, asNode);
}
};
//#endregion
exports.ReactAgent = ReactAgent;
//# sourceMappingURL=ReactAgent.cjs.map