/**
 * ai-sdk-provider-codex-cli — AI SDK v5 provider for the OpenAI Codex CLI
 * with native JSON Schema support.
 *
 * Version: (unspecified)
 * Type declarations: 227 lines (221 loc), ~8 kB, TypeScript.
 */
import { ProviderV2, LanguageModelV2 } from '@ai-sdk/provider';
/**
 * Logger interface for custom logging.
 * Allows consumers to provide their own logging implementation
 * or disable logging entirely (by passing `false` as the `logger` setting).
 *
 * Note: per the member docs below, `debug` and `info` are emitted only
 * when verbose mode is enabled; `warn` and `error` are always emitted.
 *
 * @example
 * ```typescript
 * const customLogger: Logger = {
 *   debug: (message) => myLoggingService.debug(message),
 *   info: (message) => myLoggingService.info(message),
 *   warn: (message) => myLoggingService.warn(message),
 *   error: (message) => myLoggingService.error(message),
 * };
 * ```
 */
interface Logger {
/**
* Log a debug message. Only logged when verbose mode is enabled.
* Used for detailed execution tracing and troubleshooting.
*/
debug: (message: string) => void;
/**
* Log an informational message. Only logged when verbose mode is enabled.
* Used for general execution flow information.
*/
info: (message: string) => void;
/**
* Log a warning message. Always logged regardless of verbose mode.
*/
warn: (message: string) => void;
/**
* Log an error message. Always logged regardless of verbose mode.
*/
error: (message: string) => void;
}
/**
 * When the Codex CLI asks the user for approval before executing commands.
 * NOTE(review): presumably maps to the CLI's approval-policy flag/config — confirm.
 */
type ApprovalMode = 'untrusted' | 'on-failure' | 'on-request' | 'never';
/**
 * Filesystem/network sandbox level for commands run by the CLI.
 * NOTE(review): presumably maps to the CLI's `--sandbox` option — confirm.
 */
type SandboxMode = 'read-only' | 'workspace-write' | 'danger-full-access';
/**
 * Reasoning effort for reasoning-capable models; see
 * `CodexCliSettings.reasoningEffort` (maps to `model_reasoning_effort`).
 */
type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high';
/**
 * Reasoning summary detail level.
 * Note: The API error messages claim 'concise' and 'none' are valid, but they are
 * actually rejected with 400 errors. Only 'auto' and 'detailed' work in practice.
 */
type ReasoningSummary = 'auto' | 'detailed';
/**
 * Reasoning summary format (experimental); maps to
 * `model_reasoning_summary_format`.
 */
type ReasoningSummaryFormat = 'none' | 'experimental';
/**
 * Output length/detail for GPT-5 family models; maps to `model_verbosity`.
 */
type ModelVerbosity = 'low' | 'medium' | 'high';
/**
 * Settings for the Codex CLI provider. Accepted at construction time
 * (via `CodexCliProviderSettings.defaultSettings`) and per model
 * (second argument of the provider call / `languageModel` / `chat`).
 */
interface CodexCliSettings {
/**
 * Path to the `codex` executable.
 * NOTE(review): presumably falls back to `codex` on PATH (and npx when
 * `allowNpx` is set) when omitted — confirm against implementation.
 */
codexPath?: string;
/** Working directory in which the CLI is spawned. Defaults to process cwd — TODO confirm. */
cwd?: string;
/** Approval policy for command execution. See {@link ApprovalMode}. */
approvalMode?: ApprovalMode;
/** Sandbox level for command execution. See {@link SandboxMode}. */
sandboxMode?: SandboxMode;
/**
 * Run in full-auto mode.
 * NOTE(review): presumably maps to `--full-auto` — confirm.
 */
fullAuto?: boolean;
/**
 * Disable all approval prompts AND sandboxing. Dangerous; only for
 * trusted, isolated environments.
 * NOTE(review): presumably maps to `--dangerously-bypass-approvals-and-sandbox` — confirm.
 */
dangerouslyBypassApprovalsAndSandbox?: boolean;
/**
 * Skip the CLI's check that cwd is inside a git repository.
 * NOTE(review): presumably maps to `--skip-git-repo-check` — confirm.
 */
skipGitRepoCheck?: boolean;
/** Color output mode for the CLI. Maps to `--color <value>` — TODO confirm. */
color?: 'always' | 'never' | 'auto';
/**
 * Allow running the CLI via `npx` when no local `codex` binary is found —
 * NOTE(review): inferred from the name; confirm fallback behavior.
 */
allowNpx?: boolean;
/**
 * File path where the CLI writes the last assistant message.
 * NOTE(review): presumably maps to `--output-last-message <file>` — confirm.
 */
outputLastMessageFile?: string;
/** Extra environment variables for the spawned CLI process. */
env?: Record<string, string>;
/** Enable verbose logging (turns on `Logger.debug` / `Logger.info` output). */
verbose?: boolean;
/** Custom logger implementation, or `false` to disable logging entirely. */
logger?: Logger | false;
/**
 * Controls reasoning effort for reasoning-capable models (o3, o4-mini, gpt-5, gpt-5-codex).
 * Higher effort produces more thorough reasoning at the cost of latency.
 *
 * Maps to: `-c model_reasoning_effort=<value>`
 * @see https://platform.openai.com/docs/guides/reasoning
 */
reasoningEffort?: ReasoningEffort;
/**
 * Controls reasoning summary detail level.
 *
 * Valid values: 'auto' | 'detailed'
 * Note: Despite API error messages claiming 'concise' and 'none' are valid,
 * they are rejected with 400 errors in practice.
 *
 * Maps to: `-c model_reasoning_summary=<value>`
 * @see https://platform.openai.com/docs/guides/reasoning#reasoning-summaries
 */
reasoningSummary?: ReasoningSummary;
/**
 * Controls reasoning summary format (experimental).
 *
 * Maps to: `-c model_reasoning_summary_format=<value>`
 */
reasoningSummaryFormat?: ReasoningSummaryFormat;
/**
 * Controls output length/detail for GPT-5 family models.
 * Only applies to models using the Responses API.
 *
 * Maps to: `-c model_verbosity=<value>`
 */
modelVerbosity?: ModelVerbosity;
/**
 * Include experimental plan tool that the model can use to update its current plan.
 *
 * Maps to: `--include-plan-tool`
 */
includePlanTool?: boolean;
/**
 * Configuration profile from config.toml to specify default options.
 *
 * Maps to: `--profile <name>`
 */
profile?: string;
/**
 * Use OSS provider (experimental).
 *
 * Maps to: `--oss`
 */
oss?: boolean;
/**
 * Enable web search tool for the model.
 *
 * Maps to: `-c tools.web_search=true`
 */
webSearch?: boolean;
/**
 * Generic Codex CLI config overrides. Allows setting any config value
 * without updating the provider.
 *
 * Each entry maps to: `-c <key>=<value>`
 *
 * Examples:
 * - `{ experimental_resume: '/tmp/session.jsonl' }`
 * - `{ 'model_providers.custom.base_url': 'http://localhost:8000' }`
 * - `{ 'sandbox_workspace_write': { network_access: true } }`
 *
 * Values are serialized:
 * - string → raw string
 * - number/boolean → String(value)
 * - plain objects → flattened recursively to dotted keys
 * - arrays → JSON.stringify(value)
 * - other objects (Date, RegExp, Map, etc.) → JSON.stringify(value)
 */
configOverrides?: Record<string, string | number | boolean | object>;
}
/**
 * Options accepted by {@link createCodexCli}.
 */
interface CodexCliProviderSettings {
/**
 * Default settings applied to every model created by the provider.
 * Per-model settings passed at call time take precedence — TODO confirm
 * merge semantics against the implementation.
 */
defaultSettings?: CodexCliSettings;
}
/**
 * Per-call overrides supplied through AI SDK providerOptions.
 * These values take precedence over constructor-level CodexCliSettings.
 */
interface CodexCliProviderOptions {
/**
* Per-call override for reasoning depth.
* Maps to `model_reasoning_effort`.
*/
reasoningEffort?: ReasoningEffort;
/**
* Per-call override for reasoning summary detail level.
* Maps to `model_reasoning_summary`.
* Only 'auto' and 'detailed' are accepted in practice; see {@link ReasoningSummary}.
*/
reasoningSummary?: ReasoningSummary;
/**
* Per-call override for reasoning summary format.
* Maps to `model_reasoning_summary_format`.
*/
reasoningSummaryFormat?: ReasoningSummaryFormat;
/**
* AI SDK naming for per-call verbosity overrides.
* Maps to Codex `model_verbosity`.
*/
textVerbosity?: ModelVerbosity;
/**
* Per-call Codex CLI config overrides. These are merged with
* constructor-level overrides with per-call values taking precedence.
*/
configOverrides?: Record<string, string | number | boolean | object>;
}
/**
 * Codex CLI provider. Callable directly — `codexCli('gpt-5')` — and also
 * exposes the AI SDK ProviderV2 factory methods.
 */
interface CodexCliProvider extends ProviderV2 {
/** Create a language model for `modelId`, optionally with per-model settings. */
(modelId: string, settings?: CodexCliSettings): LanguageModelV2;
/** Same as calling the provider directly. */
languageModel(modelId: string, settings?: CodexCliSettings): LanguageModelV2;
/** Alias of {@link languageModel} using the AI SDK `chat` naming. */
chat(modelId: string, settings?: CodexCliSettings): LanguageModelV2;
/** Not supported by this provider (return type `never`; presumably throws — TODO confirm). */
textEmbeddingModel(modelId: string): never;
/** Not supported by this provider (return type `never`; presumably throws — TODO confirm). */
imageModel(modelId: string): never;
}
/**
 * Create a Codex CLI provider with optional default settings.
 *
 * @param options - Provider-level configuration (default settings for all models).
 * @returns A callable {@link CodexCliProvider}.
 */
declare function createCodexCli(options?: CodexCliProviderSettings): CodexCliProvider;
/** Default provider instance created with no options. */
declare const codexCli: CodexCliProvider;
/**
 * Constructor options for {@link CodexCliLanguageModel}.
 */
interface CodexLanguageModelOptions {
/** Model identifier passed through to the Codex CLI (e.g. 'gpt-5-codex' — TODO confirm accepted ids). */
id: string;
/** Settings applied to this model instance. */
settings?: CodexCliSettings;
}
/**
 * AI SDK LanguageModelV2 implementation backed by the OpenAI Codex CLI.
 * Spawns the CLI as a child process and adapts its output to the AI SDK
 * generate/stream contract — NOTE(review): spawning inferred from
 * `handleSpawnError`; confirm against implementation.
 */
declare class CodexCliLanguageModel implements LanguageModelV2 {
readonly specificationVersion: "v2";
readonly provider = "codex-cli";
// Native JSON object generation (see supportsStructuredOutputs below).
readonly defaultObjectGenerationMode: "json";
readonly supportsImageUrls = false;
readonly supportedUrls: {};
// JSON Schema structured outputs are supported natively.
readonly supportsStructuredOutputs = true;
readonly modelId: string;
readonly settings: CodexCliSettings;
// Resolved Logger (custom, default, or no-op when `logger: false`) — TODO confirm.
private logger;
// Codex session id, populated once known; presumably used for resume — TODO confirm.
private sessionId?;
constructor(options: CodexLanguageModelOptions);
// Merges constructor settings with per-call providerOptions — TODO confirm precedence.
private mergeSettings;
private getItemType;
// Builds the codex CLI argument list from settings (flags documented on CodexCliSettings).
private buildArgs;
// Appends a single `-c key=value` override to the argument list.
private addConfigOverride;
/**
* Serialize a config override value into a CLI-safe string.
*/
private serializeConfigValue;
private isPlainObject;
private sanitizeJsonSchema;
private mapWarnings;
private parseExperimentalJsonEvent;
private extractUsage;
private getToolName;
private buildToolInputPayload;
private buildToolResultPayload;
private safeStringify;
private emitToolInvocation;
private emitToolResult;
private handleSpawnError;
/** Non-streaming generation; AI SDK `doGenerate` contract. */
doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
/** Streaming generation; AI SDK `doStream` contract. */
doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
}
declare function isAuthenticationError(err: unknown): boolean;
export { CodexCliLanguageModel, type CodexCliProvider, type CodexCliProviderOptions, type CodexCliProviderSettings, type CodexCliSettings, type Logger, type ModelVerbosity, type ReasoningEffort, type ReasoningSummary, type ReasoningSummaryFormat, codexCli, createCodexCli, isAuthenticationError };