UNPKG

@langgraph-js/sdk

Version:

The UI SDK for LangGraph - seamlessly integrate your AI agents with frontend interfaces

318 lines (317 loc) 10.3 kB
import { RenderMessage } from "./LangGraphClient.js";
import type { Message } from "@langchain/langgraph-sdk";
import { CallToolResult, UnionTool } from "./tool/createTool.js";
import { createChatStore } from "./ui-store/createChatStore.js";
/**
 * A pending test task queued by the test harness.
 *
 * Tasks are checked against the current message list (see
 * `TestLangGraphChat.checkAllTask`) until they succeed or fail.
 */
interface TestTask {
    /** Whether the task has completed successfully. */
    success: boolean;
    /** Runs the task against the current render messages; resolves when satisfied. */
    runTask: (messages: readonly RenderMessage[]) => Promise<void>;
    /** Callback invoked when the task fails. */
    fail: () => void;
}
/**
 * Logger used by `TestLangGraphChat` for debug output.
 *
 * Supply a custom instance via the constructor `options.logger` to redirect
 * harness logging (e.g. into a test reporter).
 */
export declare class TestLogger {
    /** Logs a plain informational message. */
    info(message: string): void;
    /** Logs a single render message (used when debug mode is enabled). */
    logMessage(message: RenderMessage): void;
}
/**
 * LangGraph test harness; designed to be used with vitest and other common
 * test frameworks.
 *
 * Wraps a chat store and lets a test simulate human input, intercept tool
 * calls, and await specific message types as they stream in.
 *
 * @example
 * ```typescript
 * const testChat = new TestLangGraphChat(createLangGraphClient(), { debug: true });
 * await testChat.humanInput("Hello", async () => {
 *     const aiMessage = await testChat.waitFor("ai");
 *     expect(aiMessage.content).toBeDefined();
 * });
 * ```
 */
export declare class TestLangGraphChat {
    /** The chat store under test. */
    readonly store: ReturnType<typeof createChatStore>;
    /** Whether debug mode is enabled (set from constructor options). */
    private debug;
    /** Message count seen at the last check; used to detect message changes. */
    private lastLength;
    /** Pending test tasks awaiting new messages. */
    protected processFunc: TestTask[];
    /** Custom logger; presumably falls back to a default TestLogger — confirm in implementation. */
    private logger;
    /**
     * Initializes the test environment around an existing chat store.
     *
     * @param store - The chat store to drive and observe.
     * @param options - Debug flag, tools to register, and an optional logger.
     */
    constructor(store: ReturnType<typeof createChatStore>, options: {
        debug?: boolean;
        tools?: UnionTool<any, any, any>[];
        logger?: TestLogger;
    });
    /** Returns all current render messages. */
    getMessages(): RenderMessage[];
    /**
     * Adds tools to the test environment, automatically wrapping each tool's
     * `execute` method (so the harness can observe calls).
     *
     * @example
     * ```typescript
     * const tools = [createUITool({ name: "test_tool", ... })];
     * testChat.addTools(tools);
     * ```
     */
    addTools(tools: UnionTool<any, any, any>[]): void;
    /**
     * Sets extra state parameters forwarded to LangGraph.
     *
     * @example
     * ```typescript
     * testChat.setExtraParams({
     *     extraParam: "value",
     * });
     * ```
     */
    setExtraParams(extraParams: Record<string, any>): void;
    /**
     * Checks all pending test tasks. Per the upstream docs this only executes
     * when the message count changes, unless `skipLengthCheck` is set.
     */
    checkAllTask(messages: readonly RenderMessage[], options?: {
        skipLengthCheck?: boolean;
    }): void;
    /* NOTE(review): name looks like a typo for "readied" — declaration only, cannot confirm intent here. */
    private readited;
    /**
     * Prepares the test environment and initializes the client connection.
     *
     * @returns The connecting client, or `undefined` (presumably when already
     * initialized — confirm against the implementation).
     */
    ready(): Promise<import("./LangGraphClient.js").LangGraphClient<unknown>> | undefined;
    /**
     * Simulates human input and waits for the queued test tasks to complete.
     * This is the core test method.
     *
     * @param text - The human message content to send.
     * @param context - Async callback in which the test awaits expected messages.
     *
     * @example
     * ```typescript
     * await testChat.humanInput("Please think this through for me", async () => {
     *     const toolMessage = await testChat.waitFor("tool", "thinking");
     *     expect(toolMessage.tool_input).toBeDefined();
     *
     *     const aiMessage = await testChat.waitFor("ai");
     *     expect(aiMessage.content).toContain("thinking");
     * });
     * ```
     */
    humanInput(text: Message["content"], context: () => Promise<void>): Promise<[void, void]>;
    /**
     * Waits for a specific type of message to appear, creating an async
     * waiting task. A tool name may be given only when `type` is `"tool"`
     * (enforced by the conditional type parameter `N`).
     *
     * @example
     * ```typescript
     * // Wait for an AI reply
     * const aiMessage = await testChat.waitFor("ai");
     *
     * // Wait for a specific tool call
     * const toolMessage = await testChat.waitFor("tool", "sequential-thinking");
     * ```
     */
    waitFor<D extends "tool" | "ai", T extends RenderMessage, N extends D extends "tool" ? string : undefined>(type: D, name?: N): Promise<T>;
    /**
     * Responds to a frontend tool call, simulating the user's response.
     *
     * @example
     * ```typescript
     * const toolMessage = await testChat.waitFor("tool", "ask_user_for_approve");
     * await testChat.responseFeTool(toolMessage, "approved");
     * ```
     */
    responseFeTool(message: RenderMessage, value: CallToolResult): Promise<RenderMessage>;
    /**
     * Finds the last message of the given type, searching backwards from the
     * end of the message list. The `before` predicate limits the search to
     * messages preceding the first item for which it returns true
     * (presumably — confirm exact cut-off semantics in the implementation).
     *
     * The return type is the expanded RenderMessage union for the three
     * message kinds (human / ai / tool), each intersected with the
     * render-specific extension fields.
     *
     * @example
     * ```typescript
     * // Find the last AI message
     * const lastAI = testChat.findLast("ai");
     *
     * // Find the last human message
     * const lastHuman = testChat.findLast("human");
     * ```
     */
    findLast(type: "human" | "ai" | "tool", options?: {
        before?: (item: RenderMessage) => boolean;
    }): ({
        additional_kwargs?: {
            [x: string]: unknown;
        } | undefined;
        content: string | ({
            type: "text";
            text: string;
        } | {
            type: "image_url";
            image_url: string | {
                url: string;
                detail?: ("auto" | "low" | "high") | undefined;
            };
        })[];
        id?: string | undefined;
        name?: string | undefined;
        response_metadata?: Record<string, unknown> | undefined;
    } & {
        type: "human";
        example?: boolean | undefined;
    } & {
        name?: string;
        node_name?: string;
        tool_input?: string;
        additional_kwargs?: {
            done?: boolean;
            tool_calls?: {
                function: {
                    arguments: string;
                };
            }[];
        };
        sub_agent_messages?: RenderMessage[];
        usage_metadata?: {
            total_tokens: number;
            input_tokens: number;
            output_tokens: number;
        };
        tool_call_id?: string;
        response_metadata?: {
            create_time: string;
        };
        spend_time?: number;
        unique_id?: string;
        done?: boolean;
    }) | ({
        additional_kwargs?: {
            [x: string]: unknown;
        } | undefined;
        content: string | ({
            type: "text";
            text: string;
        } | {
            type: "image_url";
            image_url: string | {
                url: string;
                detail?: ("auto" | "low" | "high") | undefined;
            };
        })[];
        id?: string | undefined;
        name?: string | undefined;
        response_metadata?: Record<string, unknown> | undefined;
    } & {
        type: "ai";
        example?: boolean | undefined;
        tool_calls?: {
            name: string;
            args: {
                [x: string]: any;
            };
            id?: string | undefined;
            type?: "tool_call" | undefined;
        }[] | undefined;
        invalid_tool_calls?: {
            name?: string | undefined;
            args?: string | undefined;
            id?: string | undefined;
            error?: string | undefined;
            type?: "invalid_tool_call" | undefined;
        }[] | undefined;
        usage_metadata?: {
            input_tokens: number;
            output_tokens: number;
            total_tokens: number;
            input_token_details?: {
                audio?: number | undefined;
                cache_read?: number | undefined;
                cache_creation?: number | undefined;
            } | undefined;
            output_token_details?: {
                audio?: number | undefined;
                reasoning?: number | undefined;
            } | undefined;
        } | undefined;
    } & {
        name?: string;
        node_name?: string;
        tool_input?: string;
        additional_kwargs?: {
            done?: boolean;
            tool_calls?: {
                function: {
                    arguments: string;
                };
            }[];
        };
        sub_agent_messages?: RenderMessage[];
        usage_metadata?: {
            total_tokens: number;
            input_tokens: number;
            output_tokens: number;
        };
        tool_call_id?: string;
        response_metadata?: {
            create_time: string;
        };
        spend_time?: number;
        unique_id?: string;
        done?: boolean;
    }) | ({
        additional_kwargs?: {
            [x: string]: unknown;
        } | undefined;
        content: string | ({
            type: "text";
            text: string;
        } | {
            type: "image_url";
            image_url: string | {
                url: string;
                detail?: ("auto" | "low" | "high") | undefined;
            };
        })[];
        id?: string | undefined;
        name?: string | undefined;
        response_metadata?: Record<string, unknown> | undefined;
    } & {
        type: "tool";
        status?: "error" | "success" | undefined;
        tool_call_id: string;
        artifact?: any;
    } & {
        name?: string;
        node_name?: string;
        tool_input?: string;
        additional_kwargs?: {
            done?: boolean;
            tool_calls?: {
                function: {
                    arguments: string;
                };
            }[];
        };
        sub_agent_messages?: RenderMessage[];
        usage_metadata?: {
            total_tokens: number;
            input_tokens: number;
            output_tokens: number;
        };
        tool_call_id?: string;
        response_metadata?: {
            create_time: string;
        };
        spend_time?: number;
        unique_id?: string;
        done?: boolean;
    });
}
export {};