// openai — the official TypeScript library for the OpenAI API.
// (File listing metadata: 1,988 lines, 1,716 loc, 196 kB, text/typescript.)
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import {
type ExtractParsedContentFromParams,
parseResponse,
type ResponseCreateParamsWithTools,
addOutputText,
} from '../../lib/ResponsesParser';
import { ResponseStream, ResponseStreamParams } from '../../lib/responses/ResponseStream';
import { APIResource } from '../../core/resource';
import * as ResponsesAPI from './responses';
import * as Shared from '../shared';
import * as InputItemsAPI from './input-items';
import { InputItemListParams, InputItems, ResponseItemList } from './input-items';
import * as InputTokensAPI from './input-tokens';
import { InputTokenCountParams, InputTokenCountResponse, InputTokens } from './input-tokens';
import { APIPromise } from '../../core/api-promise';
import { CursorPage } from '../../core/pagination';
import { Stream } from '../../core/streaming';
import { buildHeaders } from '../../internal/headers';
import { RequestOptions } from '../../internal/request-options';
import { path } from '../../internal/utils/path';
/**
 * A `ResponseOutputText` content part whose `text` has additionally been
 * parsed into structured data by the SDK's structured-output helpers.
 */
export interface ParsedResponseOutputText<ParsedT> extends ResponseOutputText {
  /** The parsed structured value, or `null` if no parsed value is available. */
  parsed: ParsedT | null;
}
/** A single content part of a parsed assistant message: parsed text or a refusal. */
export type ParsedContent<ParsedT> = ParsedResponseOutputText<ParsedT> | ResponseOutputRefusal;
/**
 * A `ResponseOutputMessage` whose content parts carry parsed structured data
 * (see `ParsedContent`).
 */
export interface ParsedResponseOutputMessage<ParsedT> extends ResponseOutputMessage {
  content: ParsedContent<ParsedT>[];
}
/**
 * A function tool call whose string `arguments` payload has been parsed by
 * the SDK into a value.
 */
export interface ParsedResponseFunctionToolCall extends ResponseFunctionToolCall {
  // NOTE(review): typed `any` by the generator; presumably the JSON-parsed
  // `arguments`, whose shape depends on the tool's declared parameters —
  // confirm against ResponsesParser.
  parsed_arguments: any;
}
/**
 * The union of output items in a parsed response: messages and function tool
 * calls gain parsed variants, while all other item types pass through
 * unchanged.
 */
export type ParsedResponseOutputItem<ParsedT> =
  | ParsedResponseOutputMessage<ParsedT>
  | ParsedResponseFunctionToolCall
  | ResponseFileSearchToolCall
  | ResponseFunctionWebSearch
  | ResponseComputerToolCall
  | ResponseReasoningItem
  | ResponseCompactionItem
  | ResponseOutputItem.ImageGenerationCall
  | ResponseCodeInterpreterToolCall
  | ResponseOutputItem.LocalShellCall
  | ResponseFunctionShellToolCall
  | ResponseFunctionShellToolCallOutput
  | ResponseApplyPatchToolCall
  | ResponseApplyPatchToolCallOutput
  | ResponseOutputItem.McpCall
  | ResponseOutputItem.McpListTools
  | ResponseOutputItem.McpApprovalRequest
  | ResponseCustomToolCall;
/**
 * A `Response` whose `output` items have been run through the SDK's parser,
 * with the aggregate parsed value exposed as `output_parsed`.
 */
export interface ParsedResponse<ParsedT> extends Response {
  output: Array<ParsedResponseOutputItem<ParsedT>>;
  /** The parsed structured output, or `null` if none is available. */
  output_parsed: ParsedT | null;
}
/** Parameters accepted by `Responses.parse` — always non-streaming. */
export type ResponseParseParams = ResponseCreateParamsNonStreaming;
/**
 * API resource for the `/responses` endpoints, plus client-side helpers
 * (`parse`, `stream`) layered on top of the raw HTTP methods.
 */
export class Responses extends APIResource {
  // Sub-resources for `/responses/{id}/input_items` and input token counting.
  inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client);
  inputTokens: InputTokensAPI.InputTokens = new InputTokensAPI.InputTokens(this._client);
  /**
   * Creates a model response. Provide
   * [text](https://platform.openai.com/docs/guides/text) or
   * [image](https://platform.openai.com/docs/guides/images) inputs to generate
   * [text](https://platform.openai.com/docs/guides/text) or
   * [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
   * the model call your own
   * [custom code](https://platform.openai.com/docs/guides/function-calling) or use
   * built-in [tools](https://platform.openai.com/docs/guides/tools) like
   * [web search](https://platform.openai.com/docs/guides/tools-web-search) or
   * [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
   * your own data as input for the model's response.
   *
   * The overloads select the return type from `body.stream`: non-streaming
   * params yield an `APIPromise<Response>`, streaming params yield an
   * `APIPromise<Stream<ResponseStreamEvent>>`.
   *
   * @example
   * ```ts
   * const response = await client.responses.create();
   * ```
   */
  create(body: ResponseCreateParamsNonStreaming, options?: RequestOptions): APIPromise<Response>;
  create(
    body: ResponseCreateParamsStreaming,
    options?: RequestOptions,
  ): APIPromise<Stream<ResponseStreamEvent>>;
  create(
    body: ResponseCreateParamsBase,
    options?: RequestOptions,
  ): APIPromise<Stream<ResponseStreamEvent> | Response>;
  create(
    body: ResponseCreateParams,
    options?: RequestOptions,
  ): APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>> {
    return (
      this._client.post('/responses', { body, ...options, stream: body.stream ?? false }) as
        | APIPromise<Response>
        | APIPromise<Stream<ResponseStreamEvent>>
    )._thenUnwrap((rsp) => {
      // Only full (non-streaming) Response payloads get the `output_text`
      // convenience property; stream objects are passed through untouched.
      if ('object' in rsp && rsp.object === 'response') {
        addOutputText(rsp as Response);
      }
      return rsp;
    }) as APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>>;
  }
  /**
   * Retrieves a model response with the given ID.
   *
   * As with `create`, the overloads select the return type from
   * `query.stream`.
   *
   * @example
   * ```ts
   * const response = await client.responses.retrieve(
   *   'resp_677efb5139a88190b512bc3fef8e535d',
   * );
   * ```
   */
  retrieve(
    responseID: string,
    query?: ResponseRetrieveParamsNonStreaming,
    options?: RequestOptions,
  ): APIPromise<Response>;
  retrieve(
    responseID: string,
    query: ResponseRetrieveParamsStreaming,
    options?: RequestOptions,
  ): APIPromise<Stream<ResponseStreamEvent>>;
  retrieve(
    responseID: string,
    query?: ResponseRetrieveParamsBase | undefined,
    options?: RequestOptions,
  ): APIPromise<Stream<ResponseStreamEvent> | Response>;
  retrieve(
    responseID: string,
    query: ResponseRetrieveParams | undefined = {},
    options?: RequestOptions,
  ): APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>> {
    return (
      this._client.get(path`/responses/${responseID}`, {
        query,
        ...options,
        stream: query?.stream ?? false,
      }) as APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>>
    )._thenUnwrap((rsp) => {
      // Same post-processing as `create`: attach `output_text` to full
      // Response payloads only.
      if ('object' in rsp && rsp.object === 'response') {
        addOutputText(rsp as Response);
      }
      return rsp;
    }) as APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>>;
  }
  /**
   * Deletes a model response with the given ID.
   *
   * @example
   * ```ts
   * await client.responses.delete(
   *   'resp_677efb5139a88190b512bc3fef8e535d',
   * );
   * ```
   */
  delete(responseID: string, options?: RequestOptions): APIPromise<void> {
    return this._client.delete(path`/responses/${responseID}`, {
      ...options,
      // The delete endpoint returns no body; accept anything.
      headers: buildHeaders([{ Accept: '*/*' }, options?.headers]),
    });
  }
  /**
   * Creates a non-streaming model response and parses its output using the
   * structured-output / tool definitions carried in `body` (see
   * `ResponsesParser`). Returns a `ParsedResponse` whose parsed type is
   * inferred from the params.
   */
  parse<Params extends ResponseCreateParamsWithTools, ParsedT = ExtractParsedContentFromParams<Params>>(
    body: Params,
    options?: RequestOptions,
  ): APIPromise<ParsedResponse<ParsedT>> {
    return this._client.responses
      .create(body, options)
      ._thenUnwrap((response) => parseResponse(response as Response, body));
  }
  /**
   * Creates a model response stream
   */
  stream<Params extends ResponseStreamParams, ParsedT = ExtractParsedContentFromParams<Params>>(
    body: Params,
    options?: RequestOptions,
  ): ResponseStream<ParsedT> {
    return ResponseStream.createResponse<ParsedT>(this._client, body, options);
  }
  /**
   * Cancels a model response with the given ID. Only responses created with the
   * `background` parameter set to `true` can be cancelled.
   * [Learn more](https://platform.openai.com/docs/guides/background).
   *
   * @example
   * ```ts
   * const response = await client.responses.cancel(
   *   'resp_677efb5139a88190b512bc3fef8e535d',
   * );
   * ```
   */
  cancel(responseID: string, options?: RequestOptions): APIPromise<Response> {
    return this._client.post(path`/responses/${responseID}/cancel`, options);
  }
  /**
   * Compact a conversation. Returns a compacted response object.
   *
   * Learn when and how to compact long-running conversations in the
   * [conversation state guide](https://platform.openai.com/docs/guides/conversation-state#managing-the-context-window).
   * For ZDR-compatible compaction details, see
   * [Compaction (advanced)](https://platform.openai.com/docs/guides/conversation-state#compaction-advanced).
   *
   * @example
   * ```ts
   * const compactedResponse = await client.responses.compact({
   *   model: 'gpt-5.2',
   * });
   * ```
   */
  compact(body: ResponseCompactParams, options?: RequestOptions): APIPromise<CompactedResponse> {
    return this._client.post('/responses/compact', { body, ...options });
  }
}
/** A cursor-paginated page of response items. */
export type ResponseItemsPage = CursorPage<ResponseItem>;
/**
 * Allows the assistant to create, delete, or update files using unified diffs.
 * This tool definition carries no configuration beyond its type tag.
 */
export interface ApplyPatchTool {
  /**
   * The type of the tool. Always `apply_patch`.
   */
  type: 'apply_patch';
}
/**
 * The result of compacting a conversation via `/responses/compact`.
 */
export interface CompactedResponse {
  /**
   * The unique identifier for the compacted response.
   */
  id: string;
  /**
   * Unix timestamp (in seconds) when the compacted conversation was created.
   */
  created_at: number;
  /**
   * The object type. Always `response.compaction`.
   */
  object: 'response.compaction';
  /**
   * The compacted list of output items. This is a list of all user messages,
   * followed by a single compaction item.
   */
  output: Array<ResponseOutputItem>;
  /**
   * Token accounting for the compaction pass, including cached, reasoning, and total
   * tokens.
   */
  usage: ResponseUsage;
}
/**
 * A tool that controls a virtual computer. Learn more about the
 * [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
 */
export interface ComputerTool {
  /**
   * The height of the computer display.
   */
  display_height: number;
  /**
   * The width of the computer display.
   */
  display_width: number;
  /**
   * The type of computer environment to control.
   */
  environment: 'windows' | 'mac' | 'linux' | 'ubuntu' | 'browser';
  /**
   * The type of the computer use tool. Always `computer_use_preview`.
   */
  type: 'computer_use_preview';
}
/**
 * Configuration for a container that is created automatically for the request
 * (as opposed to referencing an existing container — see `ContainerReference`).
 */
export interface ContainerAuto {
  /**
   * Automatically creates a container for this request
   */
  type: 'container_auto';
  /**
   * An optional list of uploaded files to make available to your code.
   */
  file_ids?: Array<string>;
  /**
   * The memory limit for the container.
   */
  memory_limit?: '1g' | '4g' | '16g' | '64g' | null;
  /**
   * Network access policy for the container.
   */
  network_policy?: ContainerNetworkPolicyDisabled | ContainerNetworkPolicyAllowlist;
  /**
   * An optional list of skills referenced by id or inline data.
   */
  skills?: Array<SkillReference | InlineSkill>;
}
/**
 * Network policy that allows outbound access only to an explicit list of
 * domains, optionally with per-domain secrets.
 */
export interface ContainerNetworkPolicyAllowlist {
  /**
   * A list of allowed domains when type is `allowlist`.
   */
  allowed_domains: Array<string>;
  /**
   * Allow outbound network access only to specified domains. Always `allowlist`.
   */
  type: 'allowlist';
  /**
   * Optional domain-scoped secrets for allowlisted domains.
   */
  domain_secrets?: Array<ContainerNetworkPolicyDomainSecret>;
}
/**
 * Network policy that disables all outbound network access for the container.
 */
export interface ContainerNetworkPolicyDisabled {
  /**
   * Disable outbound network access. Always `disabled`.
   */
  type: 'disabled';
}
/**
 * A secret injected only for requests to a specific allowlisted domain.
 */
export interface ContainerNetworkPolicyDomainSecret {
  /**
   * The domain associated with the secret.
   */
  domain: string;
  /**
   * The name of the secret to inject for the domain.
   */
  name: string;
  /**
   * The secret value to inject for the domain.
   */
  value: string;
}
/**
 * A reference to an existing container created via the `/v1/containers`
 * endpoint (as opposed to `ContainerAuto`).
 */
export interface ContainerReference {
  /**
   * The ID of the referenced container.
   */
  container_id: string;
  /**
   * References a container created with the /v1/containers endpoint
   */
  type: 'container_reference';
}
/**
 * A custom tool that processes input using a specified format. Learn more about
 * [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
 */
export interface CustomTool {
  /**
   * The name of the custom tool, used to identify it in tool calls.
   */
  name: string;
  /**
   * The type of the custom tool. Always `custom`.
   */
  type: 'custom';
  /**
   * Optional description of the custom tool, used to provide more context.
   */
  description?: string;
  /**
   * The input format for the custom tool. Default is unconstrained text.
   */
  format?: Shared.CustomToolInputFormat;
}
/**
 * A message input to the model with a role indicating instruction following
 * hierarchy. Instructions given with the `developer` or `system` role take
 * precedence over instructions given with the `user` role. Messages with the
 * `assistant` role are presumed to have been generated by the model in previous
 * interactions.
 */
export interface EasyInputMessage {
  /**
   * Text, image, or audio input to the model, used to generate a response. Can also
   * contain previous assistant responses.
   */
  content: string | ResponseInputMessageContentList;
  /**
   * The role of the message input. One of `user`, `assistant`, `system`, or
   * `developer`.
   */
  role: 'user' | 'assistant' | 'system' | 'developer';
  /**
   * The phase of an assistant message.
   *
   * Use `commentary` for an intermediate assistant message and `final_answer` for
   * the final assistant message. For follow-up requests with models like
   * `gpt-5.3-codex` and later, preserve and resend phase on all assistant messages.
   * Omitting it can degrade performance. Not used for user messages.
   */
  phase?: 'commentary' | 'final_answer' | null;
  /**
   * The type of the message input. Always `message`.
   */
  type?: 'message';
}
/**
 * A tool that searches for relevant content from uploaded files. Learn more about
 * the
 * [file search tool](https://platform.openai.com/docs/guides/tools-file-search).
 */
export interface FileSearchTool {
  /**
   * The type of the file search tool. Always `file_search`.
   */
  type: 'file_search';
  /**
   * The IDs of the vector stores to search.
   */
  vector_store_ids: Array<string>;
  /**
   * A filter to apply to search results (comparison or compound filter).
   */
  filters?: Shared.ComparisonFilter | Shared.CompoundFilter | null;
  /**
   * The maximum number of results to return. This number should be between 1 and 50
   * inclusive.
   */
  max_num_results?: number;
  /**
   * Ranking options for search.
   */
  ranking_options?: FileSearchTool.RankingOptions;
}
/** Nested option types for `FileSearchTool`. */
export namespace FileSearchTool {
  /**
   * Ranking options for search.
   */
  export interface RankingOptions {
    /**
     * Weights that control how reciprocal rank fusion balances semantic embedding
     * matches versus sparse keyword matches when hybrid search is enabled.
     */
    hybrid_search?: RankingOptions.HybridSearch;
    /**
     * The ranker to use for the file search.
     */
    ranker?: 'auto' | 'default-2024-11-15';
    /**
     * The score threshold for the file search, a number between 0 and 1. Numbers
     * closer to 1 will attempt to return only the most relevant results, but may
     * return fewer results.
     */
    score_threshold?: number;
  }
  export namespace RankingOptions {
    /**
     * Weights that control how reciprocal rank fusion balances semantic embedding
     * matches versus sparse keyword matches when hybrid search is enabled.
     */
    export interface HybridSearch {
      /**
       * The weight of the embedding in the reciprocal ranking fusion.
       */
      embedding_weight: number;
      /**
       * The weight of the text in the reciprocal ranking fusion.
       */
      text_weight: number;
    }
  }
}
/**
 * A tool that allows the model to execute shell commands.
 */
export interface FunctionShellTool {
  /**
   * The type of the shell tool. Always `shell`.
   */
  type: 'shell';
  /**
   * Where the shell commands run: an auto-created container, a local
   * environment, or a referenced existing container.
   */
  environment?: ContainerAuto | LocalEnvironment | ContainerReference | null;
}
/**
 * Defines a function in your own code the model can choose to call. Learn more
 * about
 * [function calling](https://platform.openai.com/docs/guides/function-calling).
 */
export interface FunctionTool {
  /**
   * The name of the function to call.
   */
  name: string;
  /**
   * A JSON schema object describing the parameters of the function.
   */
  parameters: { [key: string]: unknown } | null;
  /**
   * Whether to enforce strict parameter validation. Default `true`.
   */
  strict: boolean | null;
  /**
   * The type of the function tool. Always `function`.
   */
  type: 'function';
  /**
   * A description of the function. Used by the model to determine whether or not to
   * call the function.
   */
  description?: string | null;
}
/**
 * A skill supplied inline with the request as a base64-encoded zip bundle
 * (as opposed to a `SkillReference`).
 */
export interface InlineSkill {
  /**
   * The description of the skill.
   */
  description: string;
  /**
   * The name of the skill.
   */
  name: string;
  /**
   * Inline skill payload
   */
  source: InlineSkillSource;
  /**
   * Defines an inline skill for this request.
   */
  type: 'inline';
}
/**
 * Inline skill payload: a base64-encoded zip bundle.
 */
export interface InlineSkillSource {
  /**
   * Base64-encoded skill zip bundle.
   */
  data: string;
  /**
   * The media type of the inline skill payload. Must be `application/zip`.
   */
  media_type: 'application/zip';
  /**
   * The type of the inline skill source. Must be `base64`.
   */
  type: 'base64';
}
/**
 * Shell-tool environment that runs on a local computer rather than in a
 * container.
 */
export interface LocalEnvironment {
  /**
   * Use a local computer environment.
   */
  type: 'local';
  /**
   * An optional list of skills.
   */
  skills?: Array<LocalSkill>;
}
/**
 * A skill loaded from a directory on the local machine (used with
 * `LocalEnvironment`).
 */
export interface LocalSkill {
  /**
   * The description of the skill.
   */
  description: string;
  /**
   * The name of the skill.
   */
  name: string;
  /**
   * The path to the directory containing the skill.
   */
  path: string;
}
/**
 * A model response returned by the Responses API, as retrieved or created via
 * the `/responses` endpoints.
 */
export interface Response {
  /**
   * Unique identifier for this Response.
   */
  id: string;
  /**
   * Unix timestamp (in seconds) of when this Response was created.
   */
  created_at: number;
  /**
   * Aggregated text output. Populated client-side by the SDK (see
   * `addOutputText` in `ResponsesParser`) rather than sent by the API.
   */
  output_text: string;
  /**
   * An error object returned when the model fails to generate a Response.
   */
  error: ResponseError | null;
  /**
   * Details about why the response is incomplete.
   */
  incomplete_details: Response.IncompleteDetails | null;
  /**
   * A system (or developer) message inserted into the model's context.
   *
   * When using along with `previous_response_id`, the instructions from a previous
   * response will not be carried over to the next response. This makes it simple to
   * swap out system (or developer) messages in new responses.
   */
  instructions: string | Array<ResponseInputItem> | null;
  /**
   * Set of 16 key-value pairs that can be attached to an object. This can be useful
   * for storing additional information about the object in a structured format, and
   * querying for objects via API or the dashboard.
   *
   * Keys are strings with a maximum length of 64 characters. Values are strings with
   * a maximum length of 512 characters.
   */
  metadata: Shared.Metadata | null;
  /**
   * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
   * wide range of models with different capabilities, performance characteristics,
   * and price points. Refer to the
   * [model guide](https://platform.openai.com/docs/models) to browse and compare
   * available models.
   */
  model: Shared.ResponsesModel;
  /**
   * The object type of this resource - always set to `response`.
   */
  object: 'response';
  /**
   * An array of content items generated by the model.
   *
   * - The length and order of items in the `output` array is dependent on the
   *   model's response.
   * - Rather than accessing the first item in the `output` array and assuming it's
   *   an `assistant` message with the content generated by the model, you might
   *   consider using the `output_text` property where supported in SDKs.
   */
  output: Array<ResponseOutputItem>;
  /**
   * Whether to allow the model to run tool calls in parallel.
   */
  parallel_tool_calls: boolean;
  /**
   * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
   * make the output more random, while lower values like 0.2 will make it more
   * focused and deterministic. We generally recommend altering this or `top_p` but
   * not both.
   */
  temperature: number | null;
  /**
   * How the model should select which tool (or tools) to use when generating a
   * response. See the `tools` parameter to see how to specify which tools the model
   * can call.
   */
  tool_choice:
    | ToolChoiceOptions
    | ToolChoiceAllowed
    | ToolChoiceTypes
    | ToolChoiceFunction
    | ToolChoiceMcp
    | ToolChoiceCustom
    | ToolChoiceApplyPatch
    | ToolChoiceShell;
  /**
   * An array of tools the model may call while generating a response. You can
   * specify which tool to use by setting the `tool_choice` parameter.
   *
   * We support the following categories of tools:
   *
   * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
   *   capabilities, like
   *   [web search](https://platform.openai.com/docs/guides/tools-web-search) or
   *   [file search](https://platform.openai.com/docs/guides/tools-file-search).
   *   Learn more about
   *   [built-in tools](https://platform.openai.com/docs/guides/tools).
   * - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
   *   predefined connectors such as Google Drive and SharePoint. Learn more about
   *   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
   * - **Function calls (custom tools)**: Functions that are defined by you, enabling
   *   the model to call your own code with strongly typed arguments and outputs.
   *   Learn more about
   *   [function calling](https://platform.openai.com/docs/guides/function-calling).
   *   You can also use custom tools to call your own code.
   */
  tools: Array<Tool>;
  /**
   * An alternative to sampling with temperature, called nucleus sampling, where the
   * model considers the results of the tokens with top_p probability mass. So 0.1
   * means only the tokens comprising the top 10% probability mass are considered.
   *
   * We generally recommend altering this or `temperature` but not both.
   */
  top_p: number | null;
  /**
   * Whether to run the model response in the background.
   * [Learn more](https://platform.openai.com/docs/guides/background).
   */
  background?: boolean | null;
  /**
   * Unix timestamp (in seconds) of when this Response was completed. Only present
   * when the status is `completed`.
   */
  completed_at?: number | null;
  /**
   * The conversation that this response belonged to. Input items and output items
   * from this response were automatically added to this conversation.
   */
  conversation?: Response.Conversation | null;
  /**
   * An upper bound for the number of tokens that can be generated for a response,
   * including visible output tokens and
   * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
   */
  max_output_tokens?: number | null;
  /**
   * The unique ID of the previous response to the model. Use this to create
   * multi-turn conversations. Learn more about
   * [conversation state](https://platform.openai.com/docs/guides/conversation-state).
   * Cannot be used in conjunction with `conversation`.
   */
  previous_response_id?: string | null;
  /**
   * Reference to a prompt template and its variables.
   * [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
   */
  prompt?: ResponsePrompt | null;
  /**
   * Used by OpenAI to cache responses for similar requests to optimize your cache
   * hit rates. Replaces the `user` field.
   * [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
   */
  prompt_cache_key?: string;
  /**
   * The retention policy for the prompt cache. Set to `24h` to enable extended
   * prompt caching, which keeps cached prefixes active for longer, up to a maximum
   * of 24 hours.
   * [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
   */
  prompt_cache_retention?: 'in-memory' | '24h' | null;
  /**
   * **gpt-5 and o-series models only**
   *
   * Configuration options for
   * [reasoning models](https://platform.openai.com/docs/guides/reasoning).
   */
  reasoning?: Shared.Reasoning | null;
  /**
   * A stable identifier used to help detect users of your application that may be
   * violating OpenAI's usage policies. The IDs should be a string that uniquely
   * identifies each user, with a maximum length of 64 characters. We recommend
   * hashing their username or email address, in order to avoid sending us any
   * identifying information.
   * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
   */
  safety_identifier?: string;
  /**
   * Specifies the latency tier to use for processing the request. This parameter is
   * relevant for customers subscribed to the scale tier service:
   *
   * - If set to 'auto', then the request will be processed with the service tier
   *   configured in the Project settings. Unless otherwise configured, the Project
   *   will use 'default'.
   * - If set to 'default', then the request will be processed with the standard
   *   pricing and performance for the selected model.
   * - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
   *   '[priority](https://openai.com/api-priority-processing/)', then the request
   *   will be processed with the corresponding service tier.
   * - When not set, the default behavior is 'auto'.
   *
   * When this parameter is set, the response body will include the `service_tier`
   * utilized.
   *
   * NOTE(review): the type also allows 'scale', which the prose above does not
   * describe — confirm against the API reference.
   */
  service_tier?: 'auto' | 'default' | 'flex' | 'scale' | 'priority' | null;
  /**
   * The status of the response generation. One of `completed`, `failed`,
   * `in_progress`, `cancelled`, `queued`, or `incomplete`.
   */
  status?: ResponseStatus;
  /**
   * Configuration options for a text response from the model. Can be plain text or
   * structured JSON data. Learn more:
   *
   * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
   * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
   */
  text?: ResponseTextConfig;
  /**
   * The truncation strategy to use for the model response.
   *
   * - `auto`: If the input to this Response exceeds the model's context window size,
   *   the model will truncate the response to fit the context window by dropping
   *   items from the beginning of the conversation.
   * - `disabled` (default): If the input size will exceed the context window size
   *   for a model, the request will fail with a 400 error.
   */
  truncation?: 'auto' | 'disabled' | null;
  /**
   * Represents token usage details including input tokens, output tokens, a
   * breakdown of output tokens, and the total tokens used.
   */
  usage?: ResponseUsage;
  /**
   * @deprecated This field is being replaced by `safety_identifier` and
   * `prompt_cache_key`. Use `prompt_cache_key` instead to maintain caching
   * optimizations. A stable identifier for your end-users. Used to boost cache hit
   * rates by better bucketing similar requests and to help OpenAI detect and prevent
   * abuse.
   * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
   */
  user?: string;
}
/** Nested object types referenced by the `Response` interface. */
export namespace Response {
  /**
   * Details about why the response is incomplete.
   */
  export interface IncompleteDetails {
    /**
     * The reason why the response is incomplete.
     */
    reason?: 'max_output_tokens' | 'content_filter';
  }
  /**
   * The conversation that this response belonged to. Input items and output items
   * from this response were automatically added to this conversation.
   */
  export interface Conversation {
    /**
     * The unique ID of the conversation that this response was associated with.
     */
    id: string;
  }
}
/**
 * A tool call that applies file diffs by creating, deleting, or updating files.
 */
export interface ResponseApplyPatchToolCall {
  /**
   * The unique ID of the apply patch tool call. Populated when this item is returned
   * via API.
   */
  id: string;
  /**
   * The unique ID of the apply patch tool call generated by the model.
   */
  call_id: string;
  /**
   * One of the create_file, delete_file, or update_file operations applied via
   * apply_patch.
   */
  operation:
    | ResponseApplyPatchToolCall.CreateFile
    | ResponseApplyPatchToolCall.DeleteFile
    | ResponseApplyPatchToolCall.UpdateFile;
  /**
   * The status of the apply patch tool call. One of `in_progress` or `completed`.
   */
  status: 'in_progress' | 'completed';
  /**
   * The type of the item. Always `apply_patch_call`.
   */
  type: 'apply_patch_call';
  /**
   * The ID of the entity that created this tool call.
   */
  created_by?: string;
}
/** The three file operations an apply_patch tool call can perform. */
export namespace ResponseApplyPatchToolCall {
  /**
   * Instruction describing how to create a file via the apply_patch tool.
   */
  export interface CreateFile {
    /**
     * Diff to apply.
     */
    diff: string;
    /**
     * Path of the file to create.
     */
    path: string;
    /**
     * Create a new file with the provided diff.
     */
    type: 'create_file';
  }
  /**
   * Instruction describing how to delete a file via the apply_patch tool.
   */
  export interface DeleteFile {
    /**
     * Path of the file to delete.
     */
    path: string;
    /**
     * Delete the specified file.
     */
    type: 'delete_file';
  }
  /**
   * Instruction describing how to update a file via the apply_patch tool.
   */
  export interface UpdateFile {
    /**
     * Diff to apply.
     */
    diff: string;
    /**
     * Path of the file to update.
     */
    path: string;
    /**
     * Update an existing file with the provided diff.
     */
    type: 'update_file';
  }
}
/**
 * The output emitted by an apply patch tool call.
 */
export interface ResponseApplyPatchToolCallOutput {
  /**
   * The unique ID of the apply patch tool call output. Populated when this item is
   * returned via API.
   */
  id: string;
  /**
   * The unique ID of the apply patch tool call generated by the model.
   */
  call_id: string;
  /**
   * The status of the apply patch tool call output. One of `completed` or `failed`.
   */
  status: 'completed' | 'failed';
  /**
   * The type of the item. Always `apply_patch_call_output`.
   */
  type: 'apply_patch_call_output';
  /**
   * The ID of the entity that created this tool call output.
   */
  created_by?: string;
  /**
   * Optional textual output returned by the apply patch tool.
   */
  output?: string | null;
}
/**
 * Streaming event emitted when there is a partial audio response.
 */
export interface ResponseAudioDeltaEvent {
  /**
   * A chunk of Base64 encoded response audio bytes.
   */
  delta: string;
  /**
   * A sequence number for this chunk of the stream response.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.audio.delta`.
   */
  type: 'response.audio.delta';
}
/**
 * Streaming event emitted when the audio response is complete.
 */
export interface ResponseAudioDoneEvent {
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.audio.done`.
   */
  type: 'response.audio.done';
}
/**
 * Streaming event emitted when there is a partial transcript of audio.
 */
export interface ResponseAudioTranscriptDeltaEvent {
  /**
   * The partial transcript of the audio response.
   */
  delta: string;
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.audio.transcript.delta`.
   */
  type: 'response.audio.transcript.delta';
}
/**
 * Streaming event emitted when the full audio transcript is completed.
 */
export interface ResponseAudioTranscriptDoneEvent {
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.audio.transcript.done`.
   */
  type: 'response.audio.transcript.done';
}
/**
 * Streaming event emitted when a partial code snippet is streamed by the code
 * interpreter.
 */
export interface ResponseCodeInterpreterCallCodeDeltaEvent {
  /**
   * The partial code snippet being streamed by the code interpreter.
   */
  delta: string;
  /**
   * The unique identifier of the code interpreter tool call item.
   */
  item_id: string;
  /**
   * The index of the output item in the response for which the code is being
   * streamed.
   */
  output_index: number;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.code_interpreter_call_code.delta`.
   */
  type: 'response.code_interpreter_call_code.delta';
}
/**
 * Streaming event emitted when the code snippet is finalized by the code
 * interpreter.
 */
export interface ResponseCodeInterpreterCallCodeDoneEvent {
  /**
   * The final code snippet output by the code interpreter.
   */
  code: string;
  /**
   * The unique identifier of the code interpreter tool call item.
   */
  item_id: string;
  /**
   * The index of the output item in the response for which the code is finalized.
   */
  output_index: number;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.code_interpreter_call_code.done`.
   */
  type: 'response.code_interpreter_call_code.done';
}
/**
 * Streaming event emitted when the code interpreter call is completed.
 */
export interface ResponseCodeInterpreterCallCompletedEvent {
  /**
   * The unique identifier of the code interpreter tool call item.
   */
  item_id: string;
  /**
   * The index of the output item in the response for which the code interpreter call
   * is completed.
   */
  output_index: number;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.code_interpreter_call.completed`.
   */
  type: 'response.code_interpreter_call.completed';
}
/**
 * Streaming event emitted when a code interpreter call is in progress.
 */
export interface ResponseCodeInterpreterCallInProgressEvent {
  /**
   * The unique identifier of the code interpreter tool call item.
   */
  item_id: string;
  /**
   * The index of the output item in the response for which the code interpreter call
   * is in progress.
   */
  output_index: number;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.code_interpreter_call.in_progress`.
   */
  type: 'response.code_interpreter_call.in_progress';
}
/**
 * Streaming event emitted when the code interpreter is actively interpreting
 * the code snippet.
 */
export interface ResponseCodeInterpreterCallInterpretingEvent {
  /**
   * The unique identifier of the code interpreter tool call item.
   */
  item_id: string;
  /**
   * The index of the output item in the response for which the code interpreter is
   * interpreting code.
   */
  output_index: number;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.code_interpreter_call.interpreting`.
   */
  type: 'response.code_interpreter_call.interpreting';
}
/**
 * A tool call to run code.
 */
export interface ResponseCodeInterpreterToolCall {
  /**
   * The unique ID of the code interpreter tool call.
   */
  id: string;
  /**
   * The code to run, or null if not available.
   */
  code: string | null;
  /**
   * The ID of the container used to run the code.
   */
  container_id: string;
  /**
   * The outputs generated by the code interpreter, such as logs or images. Can be
   * null if no outputs are available.
   */
  outputs: Array<ResponseCodeInterpreterToolCall.Logs | ResponseCodeInterpreterToolCall.Image> | null;
  /**
   * The status of the code interpreter tool call. Valid values are `in_progress`,
   * `completed`, `incomplete`, `interpreting`, and `failed`.
   */
  status: 'in_progress' | 'completed' | 'incomplete' | 'interpreting' | 'failed';
  /**
   * The type of the code interpreter tool call. Always `code_interpreter_call`.
   */
  type: 'code_interpreter_call';
}
export namespace ResponseCodeInterpreterToolCall {
/**
* The logs output from the code interpreter.
*/
export interface Logs {
/**
* The logs output from the code interpreter.
*/
logs: string;
/**
* The type of the output. Always `logs`.
*/
type: 'logs';
}
/**
* The image output from the code interpreter.
*/
export interface Image {
/**
* The type of the output. Always `image`.
*/
type: 'image';
/**
* The URL of the image output from the code interpreter.
*/
url: string;
}
}
/**
 * A compaction item generated by the
 * [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
 */
export interface ResponseCompactionItem {
  /**
   * The type of the item. Always `compaction`.
   */
  type: 'compaction';

  /**
   * Unique ID of the compaction item.
   */
  id: string;

  /**
   * Encrypted content produced by the compaction.
   */
  encrypted_content: string;

  /**
   * Identifier of the actor that created the item.
   */
  created_by?: string;
}
/**
 * A compaction item generated by the
 * [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
 */
export interface ResponseCompactionItemParam {
  /**
   * The type of the item. Always `compaction`.
   */
  type: 'compaction';

  /**
   * Encrypted content of the compaction summary.
   */
  encrypted_content: string;

  /**
   * ID of the compaction item.
   */
  id?: string | null;
}
/**
 * Streaming event fired when the model response has fully completed.
 */
export interface ResponseCompletedEvent {
  /**
   * The type of the event. Always `response.completed`.
   */
  type: 'response.completed';

  /**
   * The completed response object.
   */
  response: Response;

  /**
   * Sequence number used to order streaming events.
   */
  sequence_number: number;
}
/**
 * A tool call to a computer use tool. See the
 * [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
 * for more information.
 */
export interface ResponseComputerToolCall {
  /**
   * The unique ID of the computer call.
   */
  id: string;

  /**
   * The action the model wants performed — one of click, double click, drag,
   * keypress, move, screenshot, scroll, type, or wait. Each variant is
   * discriminated by its `type` field.
   */
  action:
    | ResponseComputerToolCall.Click
    | ResponseComputerToolCall.DoubleClick
    | ResponseComputerToolCall.Drag
    | ResponseComputerToolCall.Keypress
    | ResponseComputerToolCall.Move
    | ResponseComputerToolCall.Screenshot
    | ResponseComputerToolCall.Scroll
    | ResponseComputerToolCall.Type
    | ResponseComputerToolCall.Wait;

  /**
   * An identifier used when responding to the tool call with output.
   */
  call_id: string;

  /**
   * The pending safety checks for the computer call.
   */
  pending_safety_checks: Array<ResponseComputerToolCall.PendingSafetyCheck>;

  /**
   * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
   * Populated when items are returned via API.
   */
  status: 'in_progress' | 'completed' | 'incomplete';

  /**
   * The type of the computer call. Always `computer_call`.
   */
  type: 'computer_call';
}
export namespace ResponseComputerToolCall {
  /**
   * A single mouse click at a screen coordinate.
   */
  export interface Click {
    /**
     * Specifies the event type. For a click action, this property is always `click`.
     */
    type: 'click';

    /**
     * Which mouse button was pressed during the click: `left`, `right`, `wheel`,
     * `back`, or `forward`.
     */
    button: 'left' | 'right' | 'wheel' | 'back' | 'forward';

    /**
     * The x-coordinate where the click occurred.
     */
    x: number;

    /**
     * The y-coordinate where the click occurred.
     */
    y: number;
  }

  /**
   * A double click at a screen coordinate.
   */
  export interface DoubleClick {
    /**
     * Specifies the event type. For a double click action, this property is always
     * set to `double_click`.
     */
    type: 'double_click';

    /**
     * The x-coordinate where the double click occurred.
     */
    x: number;

    /**
     * The y-coordinate where the double click occurred.
     */
    y: number;
  }

  /**
   * A drag gesture along a path of coordinates.
   */
  export interface Drag {
    /**
     * Specifies the event type. For a drag action, this property is always set to
     * `drag`.
     */
    type: 'drag';

    /**
     * The coordinates traversed by the drag, in order, e.g.
     *
     * ```
     * [
     *   { x: 100, y: 200 },
     *   { x: 200, y: 300 }
     * ]
     * ```
     */
    path: Array<Drag.Path>;
  }

  export namespace Drag {
    /**
     * An x/y coordinate pair, e.g. `{ x: 100, y: 200 }`.
     */
    export interface Path {
      /**
       * The x-coordinate.
       */
      x: number;

      /**
       * The y-coordinate.
       */
      y: number;
    }
  }

  /**
   * A combination of keys the model wants pressed simultaneously.
   */
  export interface Keypress {
    /**
     * Specifies the event type. For a keypress action, this property is always set
     * to `keypress`.
     */
    type: 'keypress';

    /**
     * The keys the model is requesting to be pressed, each represented as a string.
     */
    keys: Array<string>;
  }

  /**
   * A mouse move to a screen coordinate.
   */
  export interface Move {
    /**
     * Specifies the event type. For a move action, this property is always set to
     * `move`.
     */
    type: 'move';

    /**
     * The x-coordinate to move to.
     */
    x: number;

    /**
     * The y-coordinate to move to.
     */
    y: number;
  }

  /**
   * A request to capture a screenshot of the current screen.
   */
  export interface Screenshot {
    /**
     * Specifies the event type. For a screenshot action, this property is always
     * set to `screenshot`.
     */
    type: 'screenshot';
  }

  /**
   * A scroll gesture at a screen coordinate.
   */
  export interface Scroll {
    /**
     * Specifies the event type. For a scroll action, this property is always set to
     * `scroll`.
     */
    type: 'scroll';

    /**
     * The horizontal scroll distance.
     */
    scroll_x: number;

    /**
     * The vertical scroll distance.
     */
    scroll_y: number;

    /**
     * The x-coordinate where the scroll occurred.
     */
    x: number;

    /**
     * The y-coordinate where the scroll occurred.
     */
    y: number;
  }

  /**
   * A request to type literal text.
   */
  export interface Type {
    /**
     * Specifies the event type. For a type action, this property is always set to
     * `type`.
     */
    type: 'type';

    /**
     * The text to type.
     */
    text: string;
  }

  /**
   * A request to wait before performing the next action.
   */
  export interface Wait {
    /**
     * Specifies the event type. For a wait action, this property is always set to
     * `wait`.
     */
    type: 'wait';
  }

  /**
   * A pending safety check for the computer call.
   */
  export interface PendingSafetyCheck {
    /**
     * The ID of the pending safety check.
     */
    id: string;

    /**
     * The type of the pending safety check.
     */
    code?: string | null;

    /**
     * Details about the pending safety check.
     */
    message?: string | null;
  }
}
/**
 * The output item returned for a computer tool call: a screenshot of the
 * resulting screen state, plus any safety checks the developer acknowledged.
 */
export interface ResponseComputerToolCallOutputItem {
  /**
   * The unique ID of the computer call tool output.
   */
  id: string;

  /**
   * The ID of the computer tool call that produced the output.
   */
  call_id: string;

  /**
   * A computer screenshot image used with the computer use tool.
   */
  output: ResponseComputerToolCallOutputScreenshot;

  /**
   * The type of the computer tool call output. Always `computer_call_output`.
   */
  type: 'computer_call_output';

  /**
   * The safety checks reported by the API that have been acknowledged by the
   * developer.
   */
  acknowledged_safety_checks?: Array<ResponseComputerToolCallOutputItem.AcknowledgedSafetyCheck>;

  /**
   * The status of the item. One of `in_progress`, `completed`, or
   * `incomplete`. Populated when items are returned via API.
   */
  status?: 'in_progress' | 'completed' | 'incomplete';
}
export namespace ResponseComputerToolCallOutputItem {
  /**
   * A safety check for the computer call that the developer has acknowledged.
   */
  export interface AcknowledgedSafetyCheck {
    /**
     * The ID of the pending safety check.
     */
    id: string;

    /**
     * The type of the pending safety check.
     */
    code?: string | null;

    /**
     * Details about the pending safety check.
     */
    message?: string | null;
  }
}
/**
 * A computer screenshot image used with the computer use tool.
 */
export interface ResponseComputerToolCallOutputScreenshot {
  /**
   * Specifies the event type. For a computer screenshot, this property is always
   * set to `computer_screenshot`.
   */
  type: 'computer_screenshot';

  /**
   * Identifier of an uploaded file that contains the screenshot.
   */
  file_id?: string;

  /**
   * URL of the screenshot image.
   */
  image_url?: string;
}
/**
 * Represents a container created with /v1/containers.
 */
export interface ResponseContainerReference {
  /**
   * The environment type. Always `container_reference`.
   */
  type: 'container_reference';

  // ID of the referenced container.
  container_id: string;
}
/**
 * Multi-modal input and output contents.
 */
export type ResponseContent =
  | ResponseInputText
  | ResponseInputImage
  | ResponseInputFile
  | ResponseOutputText
  | ResponseOutputRefusal
  | ResponseContent.ReasoningTextContent;

export namespace ResponseContent {
  /**
   * Reasoning text from the model.
   */
  export interface ReasoningTextContent {
    /**
     * The type of the reasoning text. Always `reasoning_text`.
     */
    type: 'reasoning_text';

    /**
     * The reasoning text from the model.
     */
    text: string;
  }
}
/**
 * Streaming event fired when a new content part is added to an output item.
 */
export interface ResponseContentPartAddedEvent {
  /**
   * The type of the event. Always `response.content_part.added`.
   */
  type: 'response.content_part.added';

  /**
   * ID of the output item that the content part was added to.
   */
  item_id: string;

  /**
   * Index of the output item that the content part was added to.
   */
  output_index: number;

  /**
   * Index of the content part that was added.
   */
  content_index: number;

  /**
   * The content part that was added.
   */
  part: ResponseOutputText | ResponseOutputRefusal | ResponseContentPartAddedEvent.ReasoningText;

  /**
   * Sequence number used to order streaming events.
   */
  sequence_number: number;
}

export namespace ResponseContentPartAddedEvent {
  /**
   * Reasoning text from the model.
   */
  export interface ReasoningText {
    /**
     * The type of the reasoning text. Always `reasoning_text`.
     */
    type: 'reasoning_text';

    /**
     * The reasoning text from the model.
     */
    text: string;
  }
}
/**
 * Streaming event fired when a content part is complete.
 */
export interface ResponseContentPartDoneEvent {
  /**
   * The type of the event. Always `response.content_part.done`.
   */
  type: 'response.content_part.done';

  /**
   * ID of the output item that the content part was added to.
   */
  item_id: string;

  /**
   * Index of the output item that the content part was added to.
   */
  output_index: number;

  /**
   * Index of the content part that is done.
   */
  content_index: number;

  /**
   * The finished content part.
   */
  part: ResponseOutputText | ResponseOutputRefusal | ResponseContentPartDoneEvent.ReasoningText;

  /**
   * Sequence number used to order streaming events.
   */
  sequence_number: number;
}

export namespace ResponseContentPartDoneEvent {
  /**
   * Reasoning text from the model.
   */
  export interface ReasoningText {
    /**
     * The type of the reasoning text. Always `reasoning_text`.
     */
    type: 'reasoning_text';

    /**
     * The reasoning text from the model.
     */
    text: string;
  }
}
/**
 * Reference to the conversation that this response belongs to.
 */
export interface ResponseConversationParam {
  // Unique ID of the conversation.
  id: string;
}
/**
 * Streaming event emitted when a response is first created.
 */
export interface ResponseCreatedEvent {
  /**
   * The type of the event. Always `response.created`.
   */
  type: 'response.created';

  /**
   * The response that was created.
   */
  response: Response;

  /**
   * Sequence number used to order streaming events.
   */
  sequence_number: number;
}
/**
 * A call to a custom tool created by the model.
 */
export interface ResponseCustomToolCall {
  /**
   * The type of the custom tool call. Always `custom_tool_call`.
   */
  type: 'custom_tool_call';

  /**
   * The name of the custom tool being called.
   */
  name: string;

  /**
   * The input for the custom tool call generated by the model.
   */
  input: string;

  /**
   * Identifier used to map this custom tool call to a tool call output.
   */
  call_id: string;

  /**
   * Unique ID of the custom tool call in the OpenAI platform.
   */
  id?: string;
}
/**
 * Streaming event carrying a partial update (delta) to the input of a custom
 * tool call.
 */
export interface ResponseCustomToolCallInputDeltaEvent {
  /**
   * The event type identifier.
   */
  type: 'response.custom_tool_call_input.delta';

  /**
   * The incremental input data (delta) for the custom tool call.
   */
  delta: string;

  /**
   * Unique identifier for the API item associated with this event.
   */
  item_id: string;

  /**
   * Index of the output this delta applies to.
   */
  output_index: number;

  /**
   * Sequence number used to order streaming events.
   */
  sequence_number: number;
}
/**
* Event indicating that input for a custom tool call is complete.
*/
export interface ResponseCustomToolCallInputDoneEvent {
/**
* The complete input data for the custom tool call.
*/
input: string;
/**
* Unique identifier for the API item associated with this event.
*/
item_id: string;
/**
* The index of the output this event applies to.
*/
output_index: number;