@langgraph-js/sdk
The UI SDK for LangGraph - seamlessly integrate your AI agents with frontend interfaces
227 lines (226 loc) • 7.59 kB
TypeScript
import { RenderMessage } from "./LangGraphClient.js";
import type { Message } from "@langchain/langgraph-sdk";
import { CallToolResult, UnionTool } from "./tool/createTool.js";
import { createChatStore } from "./ui-store/createChatStore.js";
/**
* @zh 测试任务接口
* @en Test task interface
*/
interface TestTask {
/** Whether the task completed successfully */
success: boolean;
/** Function that executes the task */
runTask: (messages: readonly RenderMessage[]) => Promise<void>;
/** Callback invoked when the task fails */
fail: () => void;
}
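/**
* Logger used by TestLangGraphChat for test output; a custom instance can be passed via the constructor's `logger` option.
*
* @example
* ```typescript
* // Minimal sketch (PrefixedTestLogger is a hypothetical subclass, not part of the SDK)
* class PrefixedTestLogger extends TestLogger {
*     info(message: string): void {
*         console.log(`[test] ${message}`);
*     }
*     logMessage(message: RenderMessage): void {
*         console.log(`[test] ${message.type}`, message.content);
*     }
* }
* ```
*/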
export declare class TestLogger {
info(message: string): void;
logMessage(message: RenderMessage): void;
}
/**
* @zh LangGraph 测试工具,可以配合 vitest 等常用框架进行测试
* @en LangGraph testing utility; can be used with vitest and other common test frameworks
*
* @example
* ```typescript
* const testChat = new TestLangGraphChat(createLangGraphClient(), { debug: true });
* await testChat.humanInput("Hello", async () => {
* const aiMessage = await testChat.waitFor("ai");
* expect(aiMessage.content).toBeDefined();
* });
* ```
*/
export declare class TestLangGraphChat {
readonly store: ReturnType<typeof createChatStore>;
/** Whether debug mode is enabled */
private debug;
/** Previous message count, used to detect message changes */
private lastLength;
/** List of pending test tasks */
protected processFunc: TestTask[];
/** Custom logger */
private logger;
/**
* @zh 构造函数,初始化测试环境
* @en Constructor; initializes the test environment
*/
constructor(store: ReturnType<typeof createChatStore>, options: {
debug?: boolean;
tools?: UnionTool<any, any, any>[];
logger?: TestLogger;
});
/**
* @zh 获取当前所有渲染消息
* @en Get all current render messages
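*
* @example
* ```typescript
* // Minimal sketch (assumes a constructed testChat and vitest's expect): inspect the rendered transcript
* const messages = testChat.getMessages();
* expect(messages.length).toBeGreaterThan(0);
* ```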
*/
getMessages(): RenderMessage[];
/**
* @zh 添加工具到测试环境中,会自动包装工具的 execute 方法
* @en Add tools to the test environment; automatically wraps each tool's execute method
*
* @example
* ```typescript
* const tools = [createUITool({ name: "test_tool", ... })];
* testChat.addTools(tools);
* ```
*/
addTools(tools: UnionTool<any, any, any>[]): void;
/**
* @zh 设置额外参数
* @en Set extra parameters to send to LangGraph
*
* @example
* ```typescript
* testChat.setExtraParams({
* extraParam: "value",
* });
* ```
*/
setExtraParams(extraParams: Record<string, any>): void;
/**
* @zh 检查所有待处理的测试任务,只有在消息数量发生变化时才执行检查
* @en Check all pending test tasks; only executes when the message count changes
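*
* @example
* ```typescript
* // Minimal sketch of a manual invocation; skipLengthCheck bypasses the message-count guard
* testChat.checkAllTask(testChat.getMessages(), { skipLengthCheck: true });
* ```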
*/
checkAllTask(messages: readonly RenderMessage[], options?: {
skipLengthCheck?: boolean;
}): void;
private readited;
/**
* @zh 准备测试环境,初始化客户端连接
* @en Prepare the test environment and initialize the client connection
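*
* @example
* ```typescript
* // Minimal sketch: call ready() once before driving any input
* await testChat.ready();
* await testChat.humanInput("Hello", async () => {
*     await testChat.waitFor("ai");
* });
* ```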
*/
ready(): Promise<import("./LangGraphClient.js").LangGraphClient<unknown, unknown>> | undefined;
/**
* @zh 模拟人类输入消息并等待测试任务完成,这是测试的核心方法
* @en Simulate human input and wait for test tasks to complete; this is the core testing method
*
* @example
* ```typescript
* await testChat.humanInput("Please help me think this through", async () => {
* const toolMessage = await testChat.waitFor("tool", "thinking");
* expect(toolMessage.tool_input).toBeDefined();
*
* const aiMessage = await testChat.waitFor("ai");
* expect(aiMessage.content).toContain("think");
* });
* ```
*/
humanInput(text: Message["content"], context: () => Promise<void>): Promise<[void, void]>;
/**
* @zh 等待特定类型的消息出现,创建异步等待任务
* @en Wait for a specific type of message to appear; creates an async waiting task
*
* @example
* ```typescript
* // Wait for the AI reply
* const aiMessage = await testChat.waitFor("ai");
*
* // Wait for a specific tool call
* const toolMessage = await testChat.waitFor("tool", "sequential-thinking");
* ```
*/
waitFor<D extends "tool" | "ai", T extends RenderMessage, N extends D extends "tool" ? string : undefined>(type: D, name?: N): Promise<T>;
/**
* @zh 响应前端工具调用,模拟用户对工具的响应
* @en Respond to frontend tool calls; simulates the user's response to a tool
*
* @example
* ```typescript
* const toolMessage = await testChat.waitFor("tool", "ask_user_for_approve");
* await testChat.responseFeTool(toolMessage, "approved");
* ```
*/
responseFeTool(message: RenderMessage, value: CallToolResult): Promise<RenderMessage>;
/**
* @zh 查找最后一条指定类型的消息,从消息数组末尾开始向前查找
* @en Find the last message of the specified type, searching backwards from the end of the message list
*
* @example
* ```typescript
* // Find the last AI message
* const lastAI = testChat.findLast("ai");
*
* // Find the last human message
* const lastHuman = testChat.findLast("human");
* ```
*/
findLast(type: "human" | "ai" | "tool", options?: {
before?: (item: RenderMessage) => boolean;
}): (import("@langchain/langgraph-sdk").HumanMessage & {
name?: string;
node_name?: string;
tool_input?: string;
additional_kwargs?: {
done?: boolean;
tool_calls?: {
function: {
arguments: string;
};
}[];
};
sub_agent_messages?: RenderMessage[];
usage_metadata?: {
total_tokens: number;
input_tokens: number;
output_tokens: number;
};
tool_call_id?: string;
response_metadata?: {
create_time: string;
};
spend_time?: number;
unique_id?: string;
done?: boolean;
}) | (import("@langchain/langgraph-sdk").AIMessage & {
name?: string;
node_name?: string;
tool_input?: string;
additional_kwargs?: {
done?: boolean;
tool_calls?: {
function: {
arguments: string;
};
}[];
};
sub_agent_messages?: RenderMessage[];
usage_metadata?: {
total_tokens: number;
input_tokens: number;
output_tokens: number;
};
tool_call_id?: string;
response_metadata?: {
create_time: string;
};
spend_time?: number;
unique_id?: string;
done?: boolean;
}) | (import("@langchain/langgraph-sdk").ToolMessage & {
name?: string;
node_name?: string;
tool_input?: string;
additional_kwargs?: {
done?: boolean;
tool_calls?: {
function: {
arguments: string;
};
}[];
};
sub_agent_messages?: RenderMessage[];
usage_metadata?: {
total_tokens: number;
input_tokens: number;
output_tokens: number;
};
tool_call_id?: string;
response_metadata?: {
create_time: string;
};
spend_time?: number;
unique_id?: string;
done?: boolean;
});
}
export {};