@langchain/openai
OpenAI integrations for LangChain.js
Source: src/tools/localShell.d.ts

import { z } from "zod/v4";
import { OpenAI as OpenAIClient } from "openai";
import { type DynamicStructuredTool } from "@langchain/core/tools";
/**
 * Re-export action type from OpenAI SDK for convenience.
 * The action contains command details like argv tokens, environment variables,
 * working directory, timeout, and user.
 */
export type LocalShellAction = OpenAIClient.Responses.ResponseOutputItem.LocalShellCall.Action;
export declare const LocalShellExecActionSchema: z.ZodObject<{
    type: z.ZodLiteral<"exec">;
    command: z.ZodArray<z.ZodString>;
    env: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
    working_directory: z.ZodOptional<z.ZodString>;
    timeout_ms: z.ZodOptional<z.ZodNumber>;
    user: z.ZodOptional<z.ZodString>;
}, z.core.$strip>;
export declare const LocalShellActionSchema: z.ZodDiscriminatedUnion<[z.ZodObject<{
    type: z.ZodLiteral<"exec">;
    command: z.ZodArray<z.ZodString>;
    env: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
    working_directory: z.ZodOptional<z.ZodString>;
    timeout_ms: z.ZodOptional<z.ZodNumber>;
    user: z.ZodOptional<z.ZodString>;
}, z.core.$strip>]>;
/**
 * Options for the Local Shell tool.
 */
export interface LocalShellOptions {
    /**
     * Optional execute function that handles shell command execution.
     * This function receives the action input and should return the command output
     * (stdout + stderr combined).
     *
     * If not provided, you'll need to handle action execution manually by
     * checking `local_shell_call` outputs in the response.
     *
     * @example
     * ```typescript
     * // `exec` here is the promisified child_process.exec
     * execute: async (action) => {
     *   const result = await exec(action.command.join(' '), {
     *     cwd: action.working_directory,
     *     env: { ...process.env, ...action.env },
     *     timeout: action.timeout_ms,
     *   });
     *   return result.stdout + result.stderr;
     * }
     * ```
     */
    execute: (action: LocalShellAction) => string | Promise<string>;
}
/**
 * OpenAI Local Shell tool type for the Responses API.
 */
export type LocalShellTool = OpenAIClient.Responses.Tool.LocalShell;
/**
 * Creates a Local Shell tool that allows models to run shell commands locally
 * on a machine you provide. Commands are executed inside your own runtime; the
 * API only returns the instructions and does not execute them on OpenAI
 * infrastructure.
 *
 * **Important**: The local shell tool is designed to work with
 * [Codex CLI](https://github.com/openai/codex) and the `codex-mini-latest` model.
 *
 * **How it works**:
 * The tool operates in a continuous loop:
 * 1. Model sends shell commands (`local_shell_call` with `exec` action)
 * 2. Your code executes the command locally
 * 3. You return the output back to the model
 * 4. Repeat until the task is complete
 *
 * **Security Warning**: Running arbitrary shell commands can be dangerous.
 * Always sandbox execution or add strict allow/deny-lists before forwarding
 * a command to the system shell.
 *
 * @see {@link https://platform.openai.com/docs/guides/tools-local-shell | OpenAI Local Shell Documentation}
 *
 * @param options - Optional configuration for the Local Shell tool
 * @returns A Local Shell tool that can be passed to `bindTools`
 *
 * @example
 * ```typescript
 * import { ChatOpenAI, tools } from "@langchain/openai";
 * import { exec } from "child_process";
 * import { promisify } from "util";
 *
 * const execAsync = promisify(exec);
 * const model = new ChatOpenAI({ model: "codex-mini-latest" });
 *
 * // With execute callback for automatic command handling
 * const shell = tools.localShell({
 *   execute: async (action) => {
 *     const { command, env, working_directory, timeout_ms } = action;
 *     const result = await execAsync(command.join(' '), {
 *       cwd: working_directory ?? process.cwd(),
 *       env: { ...process.env, ...env },
 *       timeout: timeout_ms ?? undefined,
 *     });
 *     return result.stdout + result.stderr;
 *   },
 * });
 *
 * const llmWithShell = model.bindTools([shell]);
 * const response = await llmWithShell.invoke(
 *   "List files in the current directory"
 * );
 * ```
 *
 * @example
 * ```typescript
 * // Without execute callback (manual handling)
 * const shell = tools.localShell();
 *
 * const response = await model.invoke("List files", {
 *   tools: [shell],
 * });
 *
 * // Access the shell call from the response
 * const shellCall = response.additional_kwargs.tool_outputs?.find(
 *   (output) => output.type === "local_shell_call"
 * );
 * if (shellCall) {
 *   console.log("Command to execute:", shellCall.action.command);
 *   // Execute the command manually, then send back the output
 * }
 * ```
 *
 * @example
 * ```typescript
 * // Full shell loop example
 * async function shellLoop(model, task) {
 *   let response = await model.invoke(task, {
 *     tools: [tools.localShell()],
 *   });
 *
 *   while (true) {
 *     const shellCall = response.additional_kwargs.tool_outputs?.find(
 *       (output) => output.type === "local_shell_call"
 *     );
 *
 *     if (!shellCall) break;
 *
 *     // Execute command (with proper sandboxing!)
 *     const output = await executeCommand(shellCall.action);
 *
 *     // Send output back to model
 *     response = await model.invoke([
 *       response,
 *       {
 *         type: "local_shell_call_output",
 *         id: shellCall.call_id,
 *         output: output,
 *       },
 *     ], {
 *       tools: [tools.localShell()],
 *     });
 *   }
 *
 *   return response;
 * }
 * ```
 *
 * @remarks
 * - Only available through the Responses API (not Chat Completions)
 * - Designed for use with the `codex-mini-latest` model
 * - Commands are provided as argv tokens in `action.command`
 * - Action includes: `command`, `env`, `working_directory`, `timeout_ms`, `user`
 * - Always sandbox or validate commands before execution
 * - The `timeout_ms` from the model is only a hint; enforce your own limits
 */
export declare function localShell(options: LocalShellOptions): DynamicStructuredTool<z.ZodDiscriminatedUnion<[z.ZodObject<{
    type: z.ZodLiteral<"exec">;
    command: z.ZodArray<z.ZodString>;
    env: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
    working_directory: z.ZodOptional<z.ZodString>;
    timeout_ms: z.ZodOptional<z.ZodNumber>;
    user: z.ZodOptional<z.ZodString>;
}, z.core.$strip>]>, OpenAIClient.Responses.ResponseOutputItem.LocalShellCall.Action, unknown, string>;
//# sourceMappingURL=localShell.d.ts.map
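The security warning in the JSDoc above asks for sandboxing or strict allow/deny-lists before a command ever reaches the system shell, but none of the bundled examples show one. Below is a minimal sketch of an allow-listed `execute` callback, assuming the `tools.localShell` factory and the action shape documented above; the `ALLOWED_BINARIES` set, the refusal message, and the 10-second timeout ceiling are illustrative choices, not part of the package.

import { execFile } from "child_process";
import { promisify } from "util";
import { tools } from "@langchain/openai";

const execFileAsync = promisify(execFile);

// Illustrative allow-list: only these argv[0] values are ever executed.
const ALLOWED_BINARIES = new Set(["ls", "cat", "pwd", "grep"]);

const shell = tools.localShell({
  execute: async (action) => {
    const [binary, ...args] = action.command;
    if (binary === undefined || !ALLOWED_BINARIES.has(binary)) {
      // Surface the refusal as command output so the model can adjust its plan.
      return `Refused: "${binary ?? "(empty command)"}" is not on the allow-list.`;
    }
    // execFile passes argv tokens directly, avoiding shell interpolation.
    const { stdout, stderr } = await execFileAsync(binary, args, {
      cwd: action.working_directory ?? process.cwd(),
      env: { ...process.env, ...action.env },
      // Treat the model's timeout_ms as a hint and enforce our own ceiling.
      timeout: Math.min(action.timeout_ms ?? 10_000, 10_000),
    });
    return stdout + stderr;
  },
});

Compared with the `exec(command.join(' '))` pattern in the JSDoc examples, `execFile` keeps the argv tokens separate, so the allow-list cannot be bypassed with shell metacharacters smuggled into later arguments.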
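The module also exports `LocalShellExecActionSchema` and `LocalShellActionSchema` but gives neither a usage example. A small sketch of validating a raw `local_shell_call` action before executing it when handling calls manually; the import path and the `parseAction` helper are assumptions for illustration.

// Assumed import path; the schema is declared in the tools/localShell module.
import { LocalShellExecActionSchema } from "@langchain/openai";

// Hypothetical helper: reject anything that does not match the exec action shape
// before it gets anywhere near a shell.
function parseAction(raw: unknown) {
  const parsed = LocalShellExecActionSchema.safeParse(raw);
  if (!parsed.success) {
    throw new Error(`Unexpected local_shell_call action: ${parsed.error.message}`);
  }
  // Typed per the schema above: { type: "exec"; command: string[]; env?; working_directory?; timeout_ms?; user? }
  return parsed.data;
}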