@langgraph-js/sdk
Version: (not captured in this extract)
The UI SDK for LangGraph - seamlessly integrate your AI agents with frontend interfaces
328 lines (327 loc) • 12.1 kB
TypeScript
import type { Thread, Message, Assistant, Command } from "@langchain/langgraph-sdk";
import { EventEmitter } from "eventemitter3";
import { ToolManager } from "./ToolManager.js";
import { CallToolResult } from "./tool/createTool.js";
import { ILangGraphClient } from "./types.js";
/**
 * Message shape used for streaming rendering in the UI: the SDK `Message`
 * extended with aggregation metadata produced by this client while a stream
 * is consumed.
 */
export type RenderMessage = Message & {
    /** For an AIMessage this is the node name; for a tool node it is the tool name. */
    name?: string;
    /** Name of the node that triggered this tool node. */
    node_name?: string;
    /** Tool input arguments, aggregated from streamed chunks. */
    tool_input?: string;
    additional_kwargs?: {
        // Whether this message has finished streaming — presumably set when the
        // final chunk arrives; confirm against the message processor.
        done?: boolean;
        // Raw tool-call fragments; `arguments` is the (possibly partial) JSON string.
        tool_calls?: {
            function: {
                arguments: string;
            };
        }[];
    };
    /** Messages produced by a sub-agent, nested under this message. */
    sub_agent_messages?: RenderMessage[];
    /** Token usage reported for this message. */
    usage_metadata?: {
        total_tokens: number;
        input_tokens: number;
        output_tokens: number;
    };
    /** Id of the tool call this (tool) message responds to. */
    tool_call_id?: string;
    response_metadata?: {
        // Creation timestamp; format not specified here — TODO confirm (ISO 8601?).
        create_time: string;
    };
    /** Elapsed time for this message (units not specified here — presumably ms; confirm). */
    spend_time?: number;
    /** Unique id used at render time, aggregated from streamed chunks. */
    unique_id?: string;
    /** Whether the tool call has completed. */
    done?: boolean;
};
/** Options accepted by {@link LangGraphClient.sendMessage}. */
export type SendMessageOptions = {
    /** Extra parameters forwarded with the run — presumably merged into the run input/payload; confirm in the implementation. */
    extraParams?: Record<string, any>;
    /** Debug hooks; `streamResponse` can substitute a canned stream response. Internal use. */
    _debug?: {
        streamResponse?: any;
    };
    /** LangGraph `Command` to send, e.g. to resume an interrupted run. */
    command?: Command;
    /** When set, join the existing run with this id instead of starting a new one. */
    joinRunId?: string;
};
/** Configuration for constructing a {@link LangGraphClient}. */
export interface LangGraphClientConfig {
    /** Base URL of the LangGraph API server. */
    apiUrl?: string;
    /** API key sent with requests. */
    apiKey?: string;
    callerOptions?: {
        /**
         * The maximum number of concurrent calls that can be made.
         * Defaults to `Infinity`, which means no limit.
         */
        maxConcurrency?: number;
        /**
         * The maximum number of retries that can be made for a single call,
         * with an exponential backoff between each attempt. Defaults to 6.
         */
        maxRetries?: number;
        onFailedResponseHook?: any;
        /**
         * Specify a custom fetch implementation.
         *
         * By default we expect the `fetch` is available in the global scope.
         */
        fetch?: typeof fetch | ((...args: any[]) => any);
    };
    /** Request timeout in milliseconds. */
    timeoutMs?: number;
    /** Default headers attached to every request; `null`/`undefined` values presumably unset a header — confirm. */
    defaultHeaders?: Record<string, string | null | undefined>;
    /**
     * Custom client implementation; if not provided, the official Client is used.
     * NOTE(review): declared as required here, which contradicts "if not provided" —
     * confirm whether this should be optional (`client?:`).
     */
    client: ILangGraphClient;
}
/** Map of event names to payloads emitted by {@link LangGraphClient} (via EventEmitter). */
export interface LangGraphEvents {
    /** Stream start event. */
    start: {
        event: "start";
    };
    /** Partial message update event. */
    message: {
        event: "messages/partial";
        data: Message[];
    };
    /** Value update event. */
    value: {
        event: "messages/partial" | "values";
        data: {
            messages?: Message[];
        };
    };
    /** Error event. */
    error: {
        event: "error";
        data: any;
    };
    /** Thread creation event. */
    thread: {
        event: "thread/create";
        data: {
            thread: Thread;
        };
    };
    /** Stream completion event. */
    done: {
        event: "done";
    };
}
/**
 * @zh LangGraphClient 类是与 LangGraph 后端交互的主要客户端。
 * @en The LangGraphClient class is the main client for interacting with the LangGraph backend.
 */
export declare class LangGraphClient<TStateType = unknown, TUpdateType = TStateType> extends EventEmitter<LangGraphEvents> {
    // Underlying transport client (the `client` supplied via LangGraphClientConfig).
    private client;
    // Currently selected assistant, if any.
    private currentAssistant;
    // Currently active thread, if any.
    private currentThread;
    /** Manager for frontend-registered tools (see `resume`/`doneFEToolWaiting`). */
    tools: ToolManager;
    /** Abort controller for the in-flight stream; `null` when no stream is active. */
    stopController: AbortController | null;
    /** Key used to store sub-agent state data. */
    subAgentsKey: string;
    /** Message processor. */
    private messageProcessor;
    constructor(config: LangGraphClientConfig);
    /** Proxies the `assistants` API to the internal client. */
    get assistants(): {
        search(query?: {
            graphId?: string;
            metadata?: import("@langchain/langgraph-sdk").Metadata;
            limit?: number;
            offset?: number;
            sortBy?: import("./types.js").AssistantSortBy;
            sortOrder?: import("./types.js").SortOrder;
        }): Promise<Assistant[]>;
        getGraph(assistantId: string, options?: {
            xray?: boolean | number;
        }): Promise<import("@langchain/langgraph-sdk").AssistantGraph>;
    };
    /** Proxies the `threads` API to the internal client. */
    get threads(): {
        create<ValuesType = TStateType>(payload?: {
            metadata?: import("@langchain/langgraph-sdk").Metadata;
            threadId?: string;
            ifExists?: import("@langchain/langgraph-sdk").OnConflictBehavior;
            graphId?: string;
            supersteps?: Array<{
                updates: Array<{
                    values: unknown;
                    command?: Command;
                    asNode: string;
                }>;
            }>;
        }): Promise<Thread<ValuesType>>;
        search<ValuesType = TStateType>(query?: {
            metadata?: import("@langchain/langgraph-sdk").Metadata;
            limit?: number;
            offset?: number;
            status?: import("@langchain/langgraph-sdk").ThreadStatus;
            sortBy?: import("./types.js").ThreadSortBy;
            sortOrder?: import("./types.js").SortOrder;
        }): Promise<Thread<ValuesType>[]>;
        get<ValuesType = TStateType>(threadId: string): Promise<Thread<ValuesType>>;
        delete(threadId: string): Promise<void>;
    };
    /** Proxies the `runs` API to the internal client. */
    get runs(): {
        list(threadId: string, options?: {
            limit?: number;
            offset?: number;
            status?: import("./types.js").RunStatus;
        }): Promise<import("@langchain/langgraph-sdk").Run[]>;
        // Overload: threadId `null` starts a threadless run (no multitask/onCompletion options).
        stream<TStreamMode extends import("@langchain/langgraph-sdk").StreamMode | import("@langchain/langgraph-sdk").StreamMode[] = import("@langchain/langgraph-sdk").StreamMode, TSubgraphs extends boolean = false>(threadId: null, assistantId: string, payload?: {
            input?: Record<string, unknown> | null;
            metadata?: import("@langchain/langgraph-sdk").Metadata;
            config?: import("@langchain/langgraph-sdk").Config;
            checkpointId?: string;
            checkpoint?: Omit<import("@langchain/langgraph-sdk").Checkpoint, "thread_id">;
            checkpointDuring?: boolean;
            interruptBefore?: "*" | string[];
            interruptAfter?: "*" | string[];
            signal?: AbortController["signal"];
            webhook?: string;
            onDisconnect?: import("./types.js").DisconnectMode;
            afterSeconds?: number;
            ifNotExists?: "create" | "reject";
            command?: Command;
            onRunCreated?: (params: {
                run_id: string;
                thread_id?: string;
            }) => void;
            streamMode?: TStreamMode | undefined;
            streamSubgraphs?: TSubgraphs | undefined;
            streamResumable?: boolean;
            feedbackKeys?: string[];
        } | undefined): import("./types.js").TypedAsyncGenerator<TStreamMode, TSubgraphs, TStateType, TUpdateType, unknown>;
        // Overload: stream a run on an existing thread.
        stream<TStreamMode extends import("@langchain/langgraph-sdk").StreamMode | import("@langchain/langgraph-sdk").StreamMode[] = import("@langchain/langgraph-sdk").StreamMode, TSubgraphs extends boolean = false>(threadId: string, assistantId: string, payload?: {
            input?: Record<string, unknown> | null;
            metadata?: import("@langchain/langgraph-sdk").Metadata;
            config?: import("@langchain/langgraph-sdk").Config;
            checkpointId?: string;
            checkpoint?: Omit<import("@langchain/langgraph-sdk").Checkpoint, "thread_id">;
            checkpointDuring?: boolean;
            interruptBefore?: "*" | string[];
            interruptAfter?: "*" | string[];
            multitaskStrategy?: import("./types.js").MultitaskStrategy;
            onCompletion?: import("./types.js").OnCompletionBehavior;
            signal?: AbortController["signal"];
            webhook?: string;
            onDisconnect?: import("./types.js").DisconnectMode;
            afterSeconds?: number;
            ifNotExists?: "create" | "reject";
            command?: Command;
            onRunCreated?: (params: {
                run_id: string;
                thread_id?: string;
            }) => void;
            streamMode?: TStreamMode | undefined;
            streamSubgraphs?: TSubgraphs | undefined;
            streamResumable?: boolean;
            feedbackKeys?: string[];
        } | undefined): import("./types.js").TypedAsyncGenerator<TStreamMode, TSubgraphs, TStateType, TUpdateType, unknown>;
        // Attach to an already-running run's event stream.
        joinStream(threadId: string | undefined | null, runId: string, options?: {
            signal?: AbortSignal;
            cancelOnDisconnect?: boolean;
            lastEventId?: string;
            streamMode?: import("@langchain/langgraph-sdk").StreamMode | import("@langchain/langgraph-sdk").StreamMode[];
        } | AbortSignal): AsyncGenerator<{
            id?: string;
            event: import("@langchain/core/tracers/log_stream").StreamEvent;
            data: any;
        }>;
        cancel(threadId: string, runId: string, wait?: boolean, action?: import("./types.js").CancelAction): Promise<void>;
    };
    /** Assistants available on the backend (populated by `listAssistants`/`initAssistant` — confirm in implementation). */
    availableAssistants: Assistant[];
    private listAssistants;
    /**
     * @zh 初始化 Assistant。
     * @en Initializes the Assistant.
     */
    initAssistant(agentName?: string): Promise<void>;
    /**
     * @zh 创建一个新的 Thread。
     * @en Creates a new Thread.
     */
    createThread({ threadId, }?: {
        threadId?: string;
    }): Promise<Thread<TStateType>>;
    /** Fetches the current assistant's graph for visualization. */
    graphVisualize(): Promise<import("@langchain/langgraph-sdk").AssistantGraph>;
    /**
     * @zh 列出所有的 Thread。
     * @en Lists all Threads.
     */
    listThreads(): Promise<Thread<TStateType>[]>;
    /** Deletes the thread with the given id. */
    deleteThread(threadId: string): Promise<void>;
    /**
     * @zh 从历史中恢复 Thread 数据。
     * @en Resets the Thread data from history.
     */
    resetThread(agent: string, threadId: string): Promise<Thread<TStateType>>;
    /** Resets the current stream state — presumably aborts/clears any in-flight stream; confirm. */
    resetStream(): Promise<void>;
    /** Returns a copy of the given message. */
    cloneMessage(message: Message): Message;
    /**
     * @zh 用于 UI 中的流式渲染中的消息。
     * @en Messages used for streaming rendering in the UI.
     */
    get renderMessage(): RenderMessage[];
    /**
     * @zh 获取 Token 计数器信息。
     * @en Gets the Token counter information.
     */
    get tokenCounter(): {
        total_tokens: number;
        input_tokens: number;
        output_tokens: number;
    };
    /** Locks the panel while a frontend tool is waiting for human interaction. */
    isFELocking(messages: RenderMessage[]): boolean | undefined;
    /** Latest graph state — shape depends on the graph; typed `any` here. */
    graphState: any;
    /** The run currently in flight, if any. */
    currentRun?: {
        run_id: string;
    };
    /**
     * @zh 取消当前的 Run。
     * @en Cancels the current Run.
     */
    cancelRun(): void;
    /**
     * @zh 发送消息到 LangGraph 后端。
     * @en Sends a message to the LangGraph backend.
     */
    sendMessage(input: string | Message[], { joinRunId, extraParams, _debug, command }?: SendMessageOptions): Promise<any[]>;
    /** Current subgraph position; derived from the stream, so not reliable for stable use. */
    private graphPosition;
    /** Returns the current subgraph position path (see `graphPosition` caveat). */
    getGraphPosition(): {
        id: string;
        name: string;
    }[];
    /** Returns the graph node currently being executed (see `graphPosition` caveat). */
    getGraphNodeNow(): {
        id: string;
        name: string;
    };
    // Executes a frontend tool call.
    private runFETool;
    // Invokes a single frontend tool.
    private callFETool;
    /** Extra parameters carried across sends — presumably last value from `sendMessage`; confirm. */
    extraParams: Record<string, any>;
    /**
     * @zh 继续被前端工具中断的流程。
     * @en Resumes a process interrupted by a frontend tool.
     */
    resume(result: CallToolResult): Promise<any[]>;
    /**
     * @zh 标记前端工具等待已完成。
     * @en Marks the frontend tool waiting as completed.
     */
    doneFEToolWaiting(id: string, result: CallToolResult): void;
    /**
     * @zh 获取当前的 Thread。
     * @en Gets the current Thread.
     */
    getCurrentThread(): Thread<TStateType> | null;
    /**
     * @zh 获取当前的 Assistant。
     * @en Gets the current Assistant.
     */
    getCurrentAssistant(): Assistant | null;
    /**
     * @zh 重置客户端状态。
     * @en Resets the client state.
     */
    reset(): Promise<void>;
}