// openai — The official TypeScript library for the OpenAI API
// Version: (unspecified)
// 1,983 lines (1,714 loc) • 146 kB • text/typescript
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import {
type ExtractParsedContentFromParams,
parseResponse,
type ResponseCreateParamsWithTools,
addOutputText,
} from '../../lib/ResponsesParser';
import { ResponseStream, ResponseStreamParams } from '../../lib/responses/ResponseStream';
import { APIResource } from '../../core/resource';
import * as ResponsesAPI from './responses';
import * as Shared from '../shared';
import * as InputItemsAPI from './input-items';
import { InputItemListParams, InputItems, ResponseItemList } from './input-items';
import { APIPromise } from '../../core/api-promise';
import { CursorPage } from '../../core/pagination';
import { Stream } from '../../core/streaming';
import { buildHeaders } from '../../internal/headers';
import { RequestOptions } from '../../internal/request-options';
import { path } from '../../internal/utils/path';
/**
 * A `ResponseOutputText` content part whose `text` has additionally been
 * parsed into a structured value (`null` when no parsed value is available).
 */
export interface ParsedResponseOutputText<ParsedT> extends ResponseOutputText {
  /** The structured value parsed from `text`, or `null`. */
  parsed: ParsedT | null;
}
export type ParsedContent<ParsedT> = ParsedResponseOutputText<ParsedT> | ResponseOutputRefusal;
/**
 * A `ResponseOutputMessage` whose text content parts carry parsed values.
 */
export interface ParsedResponseOutputMessage<ParsedT> extends ResponseOutputMessage {
  /** The message content, with output text parts augmented by `parsed`. */
  content: ParsedContent<ParsedT>[];
}
/**
 * A `ResponseFunctionToolCall` whose JSON `arguments` string has additionally
 * been parsed.
 */
export interface ParsedResponseFunctionToolCall extends ResponseFunctionToolCall {
  // Parsed form of `arguments`. Typed `any` because the schema is defined by
  // the caller-supplied tool definition; narrow at the call site.
  parsed_arguments: any;
}
/**
 * An output item of a parsed response. Message and function tool call items
 * are replaced by their parsed counterparts; all other item kinds pass
 * through unchanged.
 */
export type ParsedResponseOutputItem<ParsedT> =
  | ParsedResponseOutputMessage<ParsedT>
  | ParsedResponseFunctionToolCall
  | ResponseFileSearchToolCall
  | ResponseFunctionWebSearch
  | ResponseComputerToolCall
  | ResponseReasoningItem
  | ResponseOutputItem.ImageGenerationCall
  | ResponseCodeInterpreterToolCall
  | ResponseOutputItem.LocalShellCall
  | ResponseOutputItem.McpCall
  | ResponseOutputItem.McpListTools
  | ResponseOutputItem.McpApprovalRequest
  | ResponseCustomToolCall;
/**
 * A `Response` whose output items carry parsed content, plus the parsed
 * structured output value (when one was produced) as `output_parsed`.
 */
export interface ParsedResponse<ParsedT> extends Response {
  /** Output items with message/function-call items in parsed form. */
  output: Array<ParsedResponseOutputItem<ParsedT>>;
  /** The parsed structured output of the response, or `null`. */
  output_parsed: ParsedT | null;
}
export type ResponseParseParams = ResponseCreateParamsNonStreaming;
export class Responses extends APIResource {
  /** Sub-resource for listing the input items of a response. */
  inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client);
  /**
   * Creates a model response. Provide
   * [text](https://platform.openai.com/docs/guides/text) or
   * [image](https://platform.openai.com/docs/guides/images) inputs to generate
   * [text](https://platform.openai.com/docs/guides/text) or
   * [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
   * the model call your own
   * [custom code](https://platform.openai.com/docs/guides/function-calling) or use
   * built-in [tools](https://platform.openai.com/docs/guides/tools) like
   * [web search](https://platform.openai.com/docs/guides/tools-web-search) or
   * [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
   * your own data as input for the model's response.
   *
   * @example
   * ```ts
   * const response = await client.responses.create();
   * ```
   */
  create(body: ResponseCreateParamsNonStreaming, options?: RequestOptions): APIPromise<Response>;
  create(
    body: ResponseCreateParamsStreaming,
    options?: RequestOptions,
  ): APIPromise<Stream<ResponseStreamEvent>>;
  create(
    body: ResponseCreateParamsBase,
    options?: RequestOptions,
  ): APIPromise<Stream<ResponseStreamEvent> | Response>;
  create(
    body: ResponseCreateParams,
    options?: RequestOptions,
  ): APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>> {
    return (
      this._client.post('/responses', { body, ...options, stream: body.stream ?? false }) as
        | APIPromise<Response>
        | APIPromise<Stream<ResponseStreamEvent>>
    )._thenUnwrap((rsp) => {
      // Only non-streaming results are full `Response` objects; augment them
      // in place with the aggregated `output_text` convenience field.
      if ('object' in rsp && rsp.object === 'response') {
        addOutputText(rsp as Response);
      }
      return rsp;
    }) as APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>>;
  }
  /**
   * Retrieves a model response with the given ID.
   *
   * @example
   * ```ts
   * const response = await client.responses.retrieve(
   *   'resp_677efb5139a88190b512bc3fef8e535d',
   * );
   * ```
   */
  retrieve(
    responseID: string,
    query?: ResponseRetrieveParamsNonStreaming,
    options?: RequestOptions,
  ): APIPromise<Response>;
  retrieve(
    responseID: string,
    query: ResponseRetrieveParamsStreaming,
    options?: RequestOptions,
  ): APIPromise<Stream<ResponseStreamEvent>>;
  retrieve(
    responseID: string,
    query?: ResponseRetrieveParamsBase | undefined,
    options?: RequestOptions,
  ): APIPromise<Stream<ResponseStreamEvent> | Response>;
  retrieve(
    responseID: string,
    query: ResponseRetrieveParams | undefined = {},
    options?: RequestOptions,
  ): APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>> {
    return (
      this._client.get(path`/responses/${responseID}`, {
        query,
        ...options,
        stream: query?.stream ?? false,
      }) as APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>>
    )._thenUnwrap((rsp) => {
      // Same post-processing as `create`: attach `output_text` to full
      // (non-streaming) response objects.
      if ('object' in rsp && rsp.object === 'response') {
        addOutputText(rsp as Response);
      }
      return rsp;
    }) as APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>>;
  }
  /**
   * Deletes a model response with the given ID.
   *
   * @example
   * ```ts
   * await client.responses.delete(
   *   'resp_677efb5139a88190b512bc3fef8e535d',
   * );
   * ```
   */
  delete(responseID: string, options?: RequestOptions): APIPromise<void> {
    return this._client.delete(path`/responses/${responseID}`, {
      ...options,
      // No response body is expected, so accept any content type.
      headers: buildHeaders([{ Accept: '*/*' }, options?.headers]),
    });
  }
  /**
   * Creates a non-streaming model response and parses the structured output
   * and function tool call arguments declared in `body` (see
   * `lib/ResponsesParser`). The parsed values are exposed on the returned
   * `ParsedResponse` via `output_parsed` and per-item `parsed` fields.
   */
  parse<Params extends ResponseCreateParamsWithTools, ParsedT = ExtractParsedContentFromParams<Params>>(
    body: Params,
    options?: RequestOptions,
  ): APIPromise<ParsedResponse<ParsedT>> {
    return this._client.responses
      .create(body, options)
      ._thenUnwrap((response) => parseResponse(response as Response, body));
  }
  /**
   * Creates a model response stream
   */
  stream<Params extends ResponseStreamParams, ParsedT = ExtractParsedContentFromParams<Params>>(
    body: Params,
    options?: RequestOptions,
  ): ResponseStream<ParsedT> {
    return ResponseStream.createResponse<ParsedT>(this._client, body, options);
  }
  /**
   * Cancels a model response with the given ID. Only responses created with the
   * `background` parameter set to `true` can be cancelled.
   * [Learn more](https://platform.openai.com/docs/guides/background).
   *
   * @example
   * ```ts
   * const response = await client.responses.cancel(
   *   'resp_677efb5139a88190b512bc3fef8e535d',
   * );
   * ```
   */
  cancel(responseID: string, options?: RequestOptions): APIPromise<Response> {
    return this._client.post(path`/responses/${responseID}/cancel`, options);
  }
}
export type ResponseItemsPage = CursorPage<ResponseItem>;
/**
 * A tool that controls a virtual computer. Learn more about the
 * [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
 */
export interface ComputerTool {
  /**
   * The height of the computer display.
   */
  display_height: number;
  /**
   * The width of the computer display.
   */
  display_width: number;
  /**
   * The type of computer environment to control. One of `windows`, `mac`,
   * `linux`, `ubuntu`, or `browser`.
   */
  environment: 'windows' | 'mac' | 'linux' | 'ubuntu' | 'browser';
  /**
   * The type of the computer use tool. Always `computer_use_preview`.
   */
  type: 'computer_use_preview';
}
/**
 * A custom tool that processes input using a specified format. Learn more about
 * [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools).
 */
export interface CustomTool {
  /**
   * The name of the custom tool, used to identify it in tool calls.
   */
  name: string;
  /**
   * The type of the custom tool. Always `custom`.
   */
  type: 'custom';
  /**
   * Optional description of the custom tool, used to provide more context.
   */
  description?: string;
  /**
   * The input format for the custom tool. When omitted, the input is
   * unconstrained text.
   */
  format?: Shared.CustomToolInputFormat;
}
/**
 * A message input to the model with a role indicating instruction following
 * hierarchy. Instructions given with the `developer` or `system` role take
 * precedence over instructions given with the `user` role. Messages with the
 * `assistant` role are presumed to have been generated by the model in previous
 * interactions.
 */
export interface EasyInputMessage {
  /**
   * Text, image, or audio input to the model, used to generate a response. Can also
   * contain previous assistant responses. A plain string or a list of content
   * parts.
   */
  content: string | ResponseInputMessageContentList;
  /**
   * The role of the message input. One of `user`, `assistant`, `system`, or
   * `developer`.
   */
  role: 'user' | 'assistant' | 'system' | 'developer';
  /**
   * The type of the message input. Always `message`. Optional.
   */
  type?: 'message';
}
/**
 * A tool that searches for relevant content from uploaded files. Learn more about
 * the
 * [file search tool](https://platform.openai.com/docs/guides/tools-file-search).
 */
export interface FileSearchTool {
  /**
   * The type of the file search tool. Always `file_search`.
   */
  type: 'file_search';
  /**
   * The IDs of the vector stores to search.
   */
  vector_store_ids: Array<string>;
  /**
   * A filter to apply: either a comparison filter or a compound filter.
   */
  filters?: Shared.ComparisonFilter | Shared.CompoundFilter | null;
  /**
   * The maximum number of results to return. This number should be between 1 and 50
   * inclusive.
   */
  max_num_results?: number;
  /**
   * Ranking options for search.
   */
  ranking_options?: FileSearchTool.RankingOptions;
}
/** Types nested under `FileSearchTool`. */
export namespace FileSearchTool {
  /**
   * Ranking options for search.
   */
  export interface RankingOptions {
    /**
     * The ranker to use for the file search. One of `auto` or
     * `default-2024-11-15`.
     */
    ranker?: 'auto' | 'default-2024-11-15';
    /**
     * The score threshold for the file search, a number between 0 and 1. Numbers
     * closer to 1 will attempt to return only the most relevant results, but may
     * return fewer results.
     */
    score_threshold?: number;
  }
}
/**
 * Defines a function in your own code the model can choose to call. Learn more
 * about
 * [function calling](https://platform.openai.com/docs/guides/function-calling).
 */
export interface FunctionTool {
  /**
   * The name of the function to call.
   */
  name: string;
  /**
   * A JSON schema object describing the parameters of the function.
   */
  parameters: { [key: string]: unknown } | null;
  /**
   * Whether to enforce strict parameter validation. Default `true`.
   */
  strict: boolean | null;
  /**
   * The type of the function tool. Always `function`.
   */
  type: 'function';
  /**
   * A description of the function. Used by the model to determine whether or not to
   * call the function.
   */
  description?: string | null;
}
/**
 * A model response returned by the Responses API.
 */
export interface Response {
  /**
   * Unique identifier for this Response.
   */
  id: string;
  /**
   * Unix timestamp (in seconds) of when this Response was created.
   */
  created_at: number;
  /**
   * SDK convenience field populated client-side (see `addOutputText` in
   * `lib/ResponsesParser`). NOTE(review): presumably the concatenated text of
   * the response's output text parts — confirm against the parser
   * implementation.
   */
  output_text: string;
  /**
   * An error object returned when the model fails to generate a Response.
   */
  error: ResponseError | null;
  /**
   * Details about why the response is incomplete.
   */
  incomplete_details: Response.IncompleteDetails | null;
  /**
   * A system (or developer) message inserted into the model's context.
   *
   * When using along with `previous_response_id`, the instructions from a previous
   * response will not be carried over to the next response. This makes it simple to
   * swap out system (or developer) messages in new responses.
   */
  instructions: string | Array<ResponseInputItem> | null;
  /**
   * Set of 16 key-value pairs that can be attached to an object. This can be useful
   * for storing additional information about the object in a structured format, and
   * querying for objects via API or the dashboard.
   *
   * Keys are strings with a maximum length of 64 characters. Values are strings with
   * a maximum length of 512 characters.
   */
  metadata: Shared.Metadata | null;
  /**
   * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
   * wide range of models with different capabilities, performance characteristics,
   * and price points. Refer to the
   * [model guide](https://platform.openai.com/docs/models) to browse and compare
   * available models.
   */
  model: Shared.ResponsesModel;
  /**
   * The object type of this resource - always set to `response`.
   */
  object: 'response';
  /**
   * An array of content items generated by the model.
   *
   * - The length and order of items in the `output` array is dependent on the
   *   model's response.
   * - Rather than accessing the first item in the `output` array and assuming it's
   *   an `assistant` message with the content generated by the model, you might
   *   consider using the `output_text` property where supported in SDKs.
   */
  output: Array<ResponseOutputItem>;
  /**
   * Whether to allow the model to run tool calls in parallel.
   */
  parallel_tool_calls: boolean;
  /**
   * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
   * make the output more random, while lower values like 0.2 will make it more
   * focused and deterministic. We generally recommend altering this or `top_p` but
   * not both.
   */
  temperature: number | null;
  /**
   * How the model should select which tool (or tools) to use when generating a
   * response. See the `tools` parameter to see how to specify which tools the model
   * can call.
   */
  tool_choice:
    | ToolChoiceOptions
    | ToolChoiceAllowed
    | ToolChoiceTypes
    | ToolChoiceFunction
    | ToolChoiceMcp
    | ToolChoiceCustom;
  /**
   * An array of tools the model may call while generating a response. You can
   * specify which tool to use by setting the `tool_choice` parameter.
   *
   * We support the following categories of tools:
   *
   * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
   *   capabilities, like
   *   [web search](https://platform.openai.com/docs/guides/tools-web-search) or
   *   [file search](https://platform.openai.com/docs/guides/tools-file-search).
   *   Learn more about
   *   [built-in tools](https://platform.openai.com/docs/guides/tools).
   * - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
   *   predefined connectors such as Google Drive and SharePoint. Learn more about
   *   [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
   * - **Function calls (custom tools)**: Functions that are defined by you, enabling
   *   the model to call your own code with strongly typed arguments and outputs.
   *   Learn more about
   *   [function calling](https://platform.openai.com/docs/guides/function-calling).
   *   You can also use custom tools to call your own code.
   */
  tools: Array<Tool>;
  /**
   * An alternative to sampling with temperature, called nucleus sampling, where the
   * model considers the results of the tokens with top_p probability mass. So 0.1
   * means only the tokens comprising the top 10% probability mass are considered.
   *
   * We generally recommend altering this or `temperature` but not both.
   */
  top_p: number | null;
  /**
   * Whether to run the model response in the background.
   * [Learn more](https://platform.openai.com/docs/guides/background).
   */
  background?: boolean | null;
  /**
   * The conversation that this response belongs to. Input items and output items
   * from this response are automatically added to this conversation.
   */
  conversation?: Response.Conversation | null;
  /**
   * An upper bound for the number of tokens that can be generated for a response,
   * including visible output tokens and
   * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
   */
  max_output_tokens?: number | null;
  /**
   * The unique ID of the previous response to the model. Use this to create
   * multi-turn conversations. Learn more about
   * [conversation state](https://platform.openai.com/docs/guides/conversation-state).
   * Cannot be used in conjunction with `conversation`.
   */
  previous_response_id?: string | null;
  /**
   * Reference to a prompt template and its variables.
   * [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
   */
  prompt?: ResponsePrompt | null;
  /**
   * Used by OpenAI to cache responses for similar requests to optimize your cache
   * hit rates. Replaces the `user` field.
   * [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
   */
  prompt_cache_key?: string;
  /**
   * **gpt-5 and o-series models only**
   *
   * Configuration options for
   * [reasoning models](https://platform.openai.com/docs/guides/reasoning).
   */
  reasoning?: Shared.Reasoning | null;
  /**
   * A stable identifier used to help detect users of your application that may be
   * violating OpenAI's usage policies. The IDs should be a string that uniquely
   * identifies each user. We recommend hashing their username or email address, in
   * order to avoid sending us any identifying information.
   * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
   */
  safety_identifier?: string;
  /**
   * Specifies the latency tier to use for processing the request. This parameter is
   * relevant for customers subscribed to the scale tier service:
   *
   * - If set to 'auto', then the request will be processed with the service tier
   *   configured in the Project settings. Unless otherwise configured, the Project
   *   will use 'default'.
   * - If set to 'default', then the request will be processed with the standard
   *   pricing and performance for the selected model.
   * - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
   *   '[priority](https://openai.com/api-priority-processing/)', then the request
   *   will be processed with the corresponding service tier.
   * - When not set, the default behavior is 'auto'.
   *
   * When this parameter is set, the response body will include the `service_tier`
   * utilized.
   */
  service_tier?: 'auto' | 'default' | 'flex' | 'scale' | 'priority' | null;
  /**
   * The status of the response generation. One of `completed`, `failed`,
   * `in_progress`, `cancelled`, `queued`, or `incomplete`.
   */
  status?: ResponseStatus;
  /**
   * Configuration options for a text response from the model. Can be plain text or
   * structured JSON data. Learn more:
   *
   * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
   * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
   */
  text?: ResponseTextConfig;
  /**
   * The truncation strategy to use for the model response.
   *
   * - `auto`: If the context of this response and previous ones exceeds the model's
   *   context window size, the model will truncate the response to fit the context
   *   window by dropping input items in the middle of the conversation.
   * - `disabled` (default): If a model response will exceed the context window size
   *   for a model, the request will fail with a 400 error.
   */
  truncation?: 'auto' | 'disabled' | null;
  /**
   * Represents token usage details including input tokens, output tokens, a
   * breakdown of output tokens, and the total tokens used.
   */
  usage?: ResponseUsage;
  /**
   * @deprecated This field is being replaced by `safety_identifier` and
   * `prompt_cache_key`. Use `prompt_cache_key` instead to maintain caching
   * optimizations. A stable identifier for your end-users. Used to boost cache hit
   * rates by better bucketing similar requests and to help OpenAI detect and prevent
   * abuse.
   * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
   */
  user?: string;
}
/** Types nested under `Response`. */
export namespace Response {
  /**
   * Details about why the response is incomplete.
   */
  export interface IncompleteDetails {
    /**
     * The reason why the response is incomplete. One of `max_output_tokens` or
     * `content_filter`.
     */
    reason?: 'max_output_tokens' | 'content_filter';
  }
  /**
   * The conversation that this response belongs to. Input items and output items
   * from this response are automatically added to this conversation.
   */
  export interface Conversation {
    /**
     * The unique ID of the conversation.
     */
    id: string;
  }
}
/**
 * Emitted when there is a partial audio response.
 */
export interface ResponseAudioDeltaEvent {
  /**
   * A chunk of Base64 encoded response audio bytes.
   */
  delta: string;
  /**
   * A sequence number for this chunk of the stream response, used to order
   * streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.audio.delta`.
   */
  type: 'response.audio.delta';
}
/**
 * Emitted when the audio response is complete.
 */
export interface ResponseAudioDoneEvent {
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.audio.done`.
   */
  type: 'response.audio.done';
}
/**
 * Emitted when there is a partial transcript of audio.
 */
export interface ResponseAudioTranscriptDeltaEvent {
  /**
   * The partial transcript of the audio response.
   */
  delta: string;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.audio.transcript.delta`.
   */
  type: 'response.audio.transcript.delta';
}
/**
 * Emitted when the full audio transcript is completed.
 */
export interface ResponseAudioTranscriptDoneEvent {
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.audio.transcript.done`.
   */
  type: 'response.audio.transcript.done';
}
/**
 * Emitted when a partial code snippet is streamed by the code interpreter.
 */
export interface ResponseCodeInterpreterCallCodeDeltaEvent {
  /**
   * The partial code snippet being streamed by the code interpreter.
   */
  delta: string;
  /**
   * The unique identifier of the code interpreter tool call item.
   */
  item_id: string;
  /**
   * The index of the output item in the response for which the code is being
   * streamed.
   */
  output_index: number;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.code_interpreter_call_code.delta`.
   */
  type: 'response.code_interpreter_call_code.delta';
}
/**
 * Emitted when the code snippet is finalized by the code interpreter.
 */
export interface ResponseCodeInterpreterCallCodeDoneEvent {
  /**
   * The final code snippet output by the code interpreter.
   */
  code: string;
  /**
   * The unique identifier of the code interpreter tool call item.
   */
  item_id: string;
  /**
   * The index of the output item in the response for which the code is finalized.
   */
  output_index: number;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.code_interpreter_call_code.done`.
   */
  type: 'response.code_interpreter_call_code.done';
}
/**
 * Emitted when the code interpreter call is completed.
 */
export interface ResponseCodeInterpreterCallCompletedEvent {
  /**
   * The unique identifier of the code interpreter tool call item.
   */
  item_id: string;
  /**
   * The index of the output item in the response for which the code interpreter call
   * is completed.
   */
  output_index: number;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.code_interpreter_call.completed`.
   */
  type: 'response.code_interpreter_call.completed';
}
/**
 * Emitted when a code interpreter call is in progress.
 */
export interface ResponseCodeInterpreterCallInProgressEvent {
  /**
   * The unique identifier of the code interpreter tool call item.
   */
  item_id: string;
  /**
   * The index of the output item in the response for which the code interpreter call
   * is in progress.
   */
  output_index: number;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.code_interpreter_call.in_progress`.
   */
  type: 'response.code_interpreter_call.in_progress';
}
/**
 * Emitted when the code interpreter is actively interpreting the code snippet.
 */
export interface ResponseCodeInterpreterCallInterpretingEvent {
  /**
   * The unique identifier of the code interpreter tool call item.
   */
  item_id: string;
  /**
   * The index of the output item in the response for which the code interpreter is
   * interpreting code.
   */
  output_index: number;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.code_interpreter_call.interpreting`.
   */
  type: 'response.code_interpreter_call.interpreting';
}
/**
 * A tool call to run code.
 */
export interface ResponseCodeInterpreterToolCall {
  /**
   * The unique ID of the code interpreter tool call.
   */
  id: string;
  /**
   * The code to run, or null if not available.
   */
  code: string | null;
  /**
   * The ID of the container used to run the code.
   */
  container_id: string;
  /**
   * The outputs generated by the code interpreter, such as logs or images. Can be
   * null if no outputs are available.
   */
  outputs: Array<ResponseCodeInterpreterToolCall.Logs | ResponseCodeInterpreterToolCall.Image> | null;
  /**
   * The status of the code interpreter tool call. Valid values are `in_progress`,
   * `completed`, `incomplete`, `interpreting`, and `failed`.
   */
  status: 'in_progress' | 'completed' | 'incomplete' | 'interpreting' | 'failed';
  /**
   * The type of the code interpreter tool call. Always `code_interpreter_call`.
   */
  type: 'code_interpreter_call';
}
/** Output types nested under `ResponseCodeInterpreterToolCall`. */
export namespace ResponseCodeInterpreterToolCall {
  /**
   * The logs output from the code interpreter.
   */
  export interface Logs {
    /**
     * The logs output from the code interpreter.
     */
    logs: string;
    /**
     * The type of the output. Always `logs`.
     */
    type: 'logs';
  }
  /**
   * The image output from the code interpreter.
   */
  export interface Image {
    /**
     * The type of the output. Always `image`.
     */
    type: 'image';
    /**
     * The URL of the image output from the code interpreter.
     */
    url: string;
  }
}
/**
 * Emitted when the model response is complete.
 */
export interface ResponseCompletedEvent {
  /**
   * Properties of the completed response.
   */
  response: Response;
  /**
   * The sequence number for this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.completed`.
   */
  type: 'response.completed';
}
/**
 * A tool call to a computer use tool. See the
 * [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
 * for more information.
 */
export interface ResponseComputerToolCall {
  /**
   * The unique ID of the computer call.
   */
  id: string;
  /**
   * The action to perform: one of click, double click, drag, keypress, move,
   * screenshot, scroll, type, or wait.
   */
  action:
    | ResponseComputerToolCall.Click
    | ResponseComputerToolCall.DoubleClick
    | ResponseComputerToolCall.Drag
    | ResponseComputerToolCall.Keypress
    | ResponseComputerToolCall.Move
    | ResponseComputerToolCall.Screenshot
    | ResponseComputerToolCall.Scroll
    | ResponseComputerToolCall.Type
    | ResponseComputerToolCall.Wait;
  /**
   * An identifier used when responding to the tool call with output.
   */
  call_id: string;
  /**
   * The pending safety checks for the computer call.
   */
  pending_safety_checks: Array<ResponseComputerToolCall.PendingSafetyCheck>;
  /**
   * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
   * Populated when items are returned via API.
   */
  status: 'in_progress' | 'completed' | 'incomplete';
  /**
   * The type of the computer call. Always `computer_call`.
   */
  type: 'computer_call';
}
/** Action and safety-check types nested under `ResponseComputerToolCall`. */
export namespace ResponseComputerToolCall {
  /**
   * A click action.
   */
  export interface Click {
    /**
     * Indicates which mouse button was pressed during the click. One of `left`,
     * `right`, `wheel`, `back`, or `forward`.
     */
    button: 'left' | 'right' | 'wheel' | 'back' | 'forward';
    /**
     * Specifies the event type. For a click action, this property is always set to
     * `click`.
     */
    type: 'click';
    /**
     * The x-coordinate where the click occurred.
     */
    x: number;
    /**
     * The y-coordinate where the click occurred.
     */
    y: number;
  }
  /**
   * A double click action.
   */
  export interface DoubleClick {
    /**
     * Specifies the event type. For a double click action, this property is always set
     * to `double_click`.
     */
    type: 'double_click';
    /**
     * The x-coordinate where the double click occurred.
     */
    x: number;
    /**
     * The y-coordinate where the double click occurred.
     */
    y: number;
  }
  /**
   * A drag action.
   */
  export interface Drag {
    /**
     * An array of coordinates representing the path of the drag action. Coordinates
     * will appear as an array of objects, eg
     *
     * ```
     * [
     *   { x: 100, y: 200 },
     *   { x: 200, y: 300 }
     * ]
     * ```
     */
    path: Array<Drag.Path>;
    /**
     * Specifies the event type. For a drag action, this property is always set to
     * `drag`.
     */
    type: 'drag';
  }
  /** Types nested under `Drag`. */
  export namespace Drag {
    /**
     * A series of x/y coordinate pairs in the drag path.
     */
    export interface Path {
      /**
       * The x-coordinate.
       */
      x: number;
      /**
       * The y-coordinate.
       */
      y: number;
    }
  }
  /**
   * A collection of keypresses the model would like to perform.
   */
  export interface Keypress {
    /**
     * The combination of keys the model is requesting to be pressed. This is an array
     * of strings, each representing a key.
     */
    keys: Array<string>;
    /**
     * Specifies the event type. For a keypress action, this property is always set to
     * `keypress`.
     */
    type: 'keypress';
  }
  /**
   * A mouse move action.
   */
  export interface Move {
    /**
     * Specifies the event type. For a move action, this property is always set to
     * `move`.
     */
    type: 'move';
    /**
     * The x-coordinate to move to.
     */
    x: number;
    /**
     * The y-coordinate to move to.
     */
    y: number;
  }
  /**
   * A screenshot action.
   */
  export interface Screenshot {
    /**
     * Specifies the event type. For a screenshot action, this property is always set
     * to `screenshot`.
     */
    type: 'screenshot';
  }
  /**
   * A scroll action.
   */
  export interface Scroll {
    /**
     * The horizontal scroll distance.
     */
    scroll_x: number;
    /**
     * The vertical scroll distance.
     */
    scroll_y: number;
    /**
     * Specifies the event type. For a scroll action, this property is always set to
     * `scroll`.
     */
    type: 'scroll';
    /**
     * The x-coordinate where the scroll occurred.
     */
    x: number;
    /**
     * The y-coordinate where the scroll occurred.
     */
    y: number;
  }
  /**
   * An action to type in text.
   */
  export interface Type {
    /**
     * The text to type.
     */
    text: string;
    /**
     * Specifies the event type. For a type action, this property is always set to
     * `type`.
     */
    type: 'type';
  }
  /**
   * A wait action.
   */
  export interface Wait {
    /**
     * Specifies the event type. For a wait action, this property is always set to
     * `wait`.
     */
    type: 'wait';
  }
  /**
   * A pending safety check for the computer call.
   */
  export interface PendingSafetyCheck {
    /**
     * The ID of the pending safety check.
     */
    id: string;
    /**
     * The type of the pending safety check.
     */
    code: string;
    /**
     * Details about the pending safety check.
     */
    message: string;
  }
}
/**
 * The output of a computer tool call: a screenshot, plus any safety checks the
 * developer has acknowledged.
 */
export interface ResponseComputerToolCallOutputItem {
  /**
   * The unique ID of the computer call tool output.
   */
  id: string;
  /**
   * The ID of the computer tool call that produced the output.
   */
  call_id: string;
  /**
   * A computer screenshot image used with the computer use tool.
   */
  output: ResponseComputerToolCallOutputScreenshot;
  /**
   * The type of the computer tool call output. Always `computer_call_output`.
   */
  type: 'computer_call_output';
  /**
   * The safety checks reported by the API that have been acknowledged by the
   * developer.
   */
  acknowledged_safety_checks?: Array<ResponseComputerToolCallOutputItem.AcknowledgedSafetyCheck>;
  /**
   * The status of the message input. One of `in_progress`, `completed`, or
   * `incomplete`. Populated when input items are returned via API.
   */
  status?: 'in_progress' | 'completed' | 'incomplete';
}
/** Types nested under `ResponseComputerToolCallOutputItem`. */
export namespace ResponseComputerToolCallOutputItem {
  /**
   * A safety check for the computer call that has been acknowledged by the
   * developer.
   */
  export interface AcknowledgedSafetyCheck {
    /**
     * The ID of the safety check.
     */
    id: string;
    /**
     * The type of the safety check.
     */
    code: string;
    /**
     * Details about the safety check.
     */
    message: string;
  }
}
/**
 * A computer screenshot image used with the computer use tool.
 */
export interface ResponseComputerToolCallOutputScreenshot {
  /**
   * Specifies the event type. For a computer screenshot, this property is always set
   * to `computer_screenshot`.
   */
  type: 'computer_screenshot';
  /**
   * The identifier of an uploaded file that contains the screenshot.
   */
  file_id?: string;
  /**
   * The URL of the screenshot image.
   */
  image_url?: string;
}
/**
 * Multi-modal input and output contents: text, image, file, and audio inputs,
 * plus output text and refusals.
 */
export type ResponseContent =
  | ResponseInputText
  | ResponseInputImage
  | ResponseInputFile
  | ResponseInputAudio
  | ResponseOutputText
  | ResponseOutputRefusal;
/**
 * Emitted when a new content part is added.
 */
export interface ResponseContentPartAddedEvent {
  /**
   * The index of the content part that was added.
   */
  content_index: number;
  /**
   * The ID of the output item that the content part was added to.
   */
  item_id: string;
  /**
   * The index of the output item that the content part was added to.
   */
  output_index: number;
  /**
   * The content part that was added: output text or a refusal.
   */
  part: ResponseOutputText | ResponseOutputRefusal;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.content_part.added`.
   */
  type: 'response.content_part.added';
}
/**
 * Emitted when a content part is done.
 */
export interface ResponseContentPartDoneEvent {
  /**
   * The index of the content part that is done.
   */
  content_index: number;
  /**
   * The ID of the output item that the content part was added to.
   */
  item_id: string;
  /**
   * The index of the output item that the content part was added to.
   */
  output_index: number;
  /**
   * The content part that is done: output text or a refusal.
   */
  part: ResponseOutputText | ResponseOutputRefusal;
  /**
   * The sequence number of this event, used to order streaming events.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.content_part.done`.
   */
  type: 'response.content_part.done';
}
/**
 * The conversation that this response belongs to, referenced by its unique ID.
 */
export interface ResponseConversationParam {
  /**
   * The unique ID of the conversation.
   */
  id: string;
}
/**
 * An event that is emitted when a response is created.
 */
export interface ResponseCreatedEvent {
  /**
   * The response that was created.
   */
  response: Response;
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.created`.
   */
  type: 'response.created';
}
/**
 * A call to a custom tool created by the model.
 */
export interface ResponseCustomToolCall {
  /**
   * An identifier used to map this custom tool call to a tool call output
   * (see `ResponseCustomToolCallOutput.call_id`).
   */
  call_id: string;
  /**
   * The input for the custom tool call generated by the model.
   */
  input: string;
  /**
   * The name of the custom tool being called.
   */
  name: string;
  /**
   * The type of the custom tool call. Always `custom_tool_call`.
   */
  type: 'custom_tool_call';
  /**
   * The unique ID of the custom tool call in the OpenAI platform.
   */
  id?: string;
}
/**
 * Event representing a delta (partial update) to the input of a custom tool call.
 */
export interface ResponseCustomToolCallInputDeltaEvent {
  /**
   * The incremental input data (delta) for the custom tool call.
   */
  delta: string;
  /**
   * Unique identifier for the API item associated with this event.
   */
  item_id: string;
  /**
   * The index of the output this delta applies to.
   */
  output_index: number;
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.custom_tool_call_input.delta`.
   */
  type: 'response.custom_tool_call_input.delta';
}
/**
 * Event indicating that input for a custom tool call is complete.
 */
export interface ResponseCustomToolCallInputDoneEvent {
  /**
   * The complete input data for the custom tool call.
   */
  input: string;
  /**
   * Unique identifier for the API item associated with this event.
   */
  item_id: string;
  /**
   * The index of the output this event applies to.
   */
  output_index: number;
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.custom_tool_call_input.done`.
   */
  type: 'response.custom_tool_call_input.done';
}
/**
 * The output of a custom tool call from your code, being sent back to the model.
 */
export interface ResponseCustomToolCallOutput {
  /**
   * The call ID, used to map this custom tool call output to a custom tool call
   * (see `ResponseCustomToolCall.call_id`).
   */
  call_id: string;
  /**
   * The output from the custom tool call generated by your code.
   */
  output: string;
  /**
   * The type of the custom tool call output. Always `custom_tool_call_output`.
   */
  type: 'custom_tool_call_output';
  /**
   * The unique ID of the custom tool call output in the OpenAI platform.
   */
  id?: string;
}
/**
 * An error object returned when the model fails to generate a Response.
 */
export interface ResponseError {
  /**
   * The error code for the response — a closed set of machine-readable failure
   * causes (server/rate-limit/prompt errors and a family of image-input errors).
   */
  code:
    | 'server_error'
    | 'rate_limit_exceeded'
    | 'invalid_prompt'
    | 'vector_store_timeout'
    | 'invalid_image'
    | 'invalid_image_format'
    | 'invalid_base64_image'
    | 'invalid_image_url'
    | 'image_too_large'
    | 'image_too_small'
    | 'image_parse_error'
    | 'image_content_policy_violation'
    | 'invalid_image_mode'
    | 'image_file_too_large'
    | 'unsupported_image_media_type'
    | 'empty_image_file'
    | 'failed_to_download_image'
    | 'image_file_not_found';
  /**
   * A human-readable description of the error.
   */
  message: string;
}
/**
 * Emitted when an error occurs.
 */
export interface ResponseErrorEvent {
  /**
   * The error code (`null` if absent).
   */
  code: string | null;
  /**
   * The error message.
   */
  message: string;
  /**
   * The error parameter (`null` if absent).
   */
  param: string | null;
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `error`.
   */
  type: 'error';
}
/**
 * An event that is emitted when a response fails.
 */
export interface ResponseFailedEvent {
  /**
   * The response that failed.
   */
  response: Response;
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.failed`.
   */
  type: 'response.failed';
}
/**
 * Emitted when a file search call is completed (results found).
 */
export interface ResponseFileSearchCallCompletedEvent {
  /**
   * The ID of the output item that the file search call is associated with.
   */
  item_id: string;
  /**
   * The index of the output item that the file search call is associated with.
   */
  output_index: number;
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.file_search_call.completed`.
   */
  type: 'response.file_search_call.completed';
}
/**
 * Emitted when a file search call is initiated.
 */
export interface ResponseFileSearchCallInProgressEvent {
  /**
   * The ID of the output item that the file search call is associated with.
   */
  item_id: string;
  /**
   * The index of the output item that the file search call is associated with.
   */
  output_index: number;
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.file_search_call.in_progress`.
   */
  type: 'response.file_search_call.in_progress';
}
/**
 * Emitted when a file search is currently searching.
 */
export interface ResponseFileSearchCallSearchingEvent {
  /**
   * The ID of the output item that the file search call is associated with.
   */
  item_id: string;
  /**
   * The index of the output item that the file search call is associated with.
   */
  output_index: number;
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.file_search_call.searching`.
   */
  type: 'response.file_search_call.searching';
}
/**
 * The results of a file search tool call. See the
 * [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
 * for more information.
 */
export interface ResponseFileSearchToolCall {
  /**
   * The unique ID of the file search tool call.
   */
  id: string;
  /**
   * The queries used to search for files.
   */
  queries: Array<string>;
  /**
   * The status of the file search tool call. One of `in_progress`, `searching`,
   * `completed`, `incomplete` or `failed`.
   */
  status: 'in_progress' | 'searching' | 'completed' | 'incomplete' | 'failed';
  /**
   * The type of the file search tool call. Always `file_search_call`.
   */
  type: 'file_search_call';
  /**
   * The results of the file search tool call.
   */
  results?: Array<ResponseFileSearchToolCall.Result> | null;
}
export namespace ResponseFileSearchToolCall {
  /**
   * A single file result returned by the file search tool call.
   */
  export interface Result {
    /**
     * Set of 16 key-value pairs that can be attached to an object. This can be useful
     * for storing additional information about the object in a structured format, and
     * querying for objects via API or the dashboard. Keys are strings with a maximum
     * length of 64 characters. Values are strings with a maximum length of 512
     * characters, booleans, or numbers.
     */
    attributes?: { [key: string]: string | number | boolean } | null;
    /**
     * The unique ID of the file.
     */
    file_id?: string;
    /**
     * The name of the file.
     */
    filename?: string;
    /**
     * The relevance score of the file - a value between 0 and 1.
     */
    score?: number;
    /**
     * The text that was retrieved from the file.
     */
    text?: string;
  }
}
/**
 * An object specifying the format that the model must output — a union of the
 * plain-text, JSON-schema, and legacy JSON-object formats.
 *
 * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
 * ensures the model will match your supplied JSON schema. Learn more in the
 * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 *
 * The default format is `{ "type": "text" }` with no additional options.
 *
 * **Not recommended for gpt-4o and newer models:**
 *
 * Setting to `{ "type": "json_object" }` enables the older JSON mode, which
 * ensures the message the model generates is valid JSON. Using `json_schema` is
 * preferred for models that support it.
 */
export type ResponseFormatTextConfig =
  | Shared.ResponseFormatText
  | ResponseFormatTextJSONSchemaConfig
  | Shared.ResponseFormatJSONObject;
/**
 * JSON Schema response format. Used to generate structured JSON responses. Learn
 * more about
 * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
 * One of the variants of `ResponseFormatTextConfig`.
 */
export interface ResponseFormatTextJSONSchemaConfig {
  /**
   * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores
   * and dashes, with a maximum length of 64.
   */
  name: string;
  /**
   * The schema for the response format, described as a JSON Schema object. Learn how
   * to build JSON schemas [here](https://json-schema.org/).
   */
  schema: { [key: string]: unknown };
  /**
   * The type of response format being defined. Always `json_schema`.
   */
  type: 'json_schema';
  /**
   * A description of what the response format is for, used by the model to determine
   * how to respond in the format.
   */
  description?: string;
  /**
   * Whether to enable strict schema adherence when generating the output. If set to
   * true, the model will always follow the exact schema defined in the `schema`
   * field. Only a subset of JSON Schema is supported when `strict` is `true`. To
   * learn more, read the
   * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
   */
  strict?: boolean | null;
}
/**
 * Emitted when there is a partial function-call arguments delta.
 */
export interface ResponseFunctionCallArgumentsDeltaEvent {
  /**
   * The function-call arguments delta that is added.
   */
  delta: string;
  /**
   * The ID of the output item that the function-call arguments delta is added to.
   */
  item_id: string;
  /**
   * The index of the output item that the function-call arguments delta is added to.
   */
  output_index: number;
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.function_call_arguments.delta`.
   */
  type: 'response.function_call_arguments.delta';
}
/**
 * Emitted when function-call arguments are finalized.
 */
export interface ResponseFunctionCallArgumentsDoneEvent {
  /**
   * The finalized function-call arguments.
   */
  arguments: string;
  /**
   * The ID of the item.
   */
  item_id: string;
  /**
   * The index of the output item.
   */
  output_index: number;
  /**
   * The sequence number of this event.
   */
  sequence_number: number;
  /**
   * The type of the event. Always `response.function_call_arguments.done`.
   */
  type: 'response.function_call_arguments.done';
}
/**
 * A tool call to run a function. See the
 * [function calling guide](https://platform.openai.com/docs/guides/function-calling)
 * for more information.
 */
export interface ResponseFunctionToolCall {
  /**
   * A JSON string of the arguments to pass to the function.
   */
  arguments: string;
  /**
   * The unique ID of the function tool call generated by the model. Used to map
   * this call to its `function_call_output` item.
   */
  call_id: string;
  /**
   * The name of the function to run.
   */
  name: string;
  /**
   * The type of the function tool call. Always `function_call`.
   */
  type: 'function_call';
  /**
   * The unique ID of the function tool call.
   */
  id?: string;
  /**
   * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
   * Populated when items are returned via API.
   */
  status?: 'in_progress' | 'completed' | 'incomplete';
}
/**
 * A tool call to run a function. See the
 * [function calling guide](https://platform.openai.com/docs/guides/function-calling)
 * for more information.
 *
 * Identical to `ResponseFunctionToolCall` except that `id` is required rather
 * than optional.
 */
export interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall {
  /**
   * The unique ID of the function tool call.
   */
  id: string;
}
/**
 * The output of a function tool call, as returned via the API.
 */
export interface ResponseFunctionToolCallOutputItem {
  /**
   * The unique ID of the function call tool output.
   */
  id: string;
  /**
   * The unique ID of the function tool call generated by the model
   * (see `ResponseFunctionToolCall.call_id`).
   */
  call_id: string;
  /**
   * A JSON string of the output of the function tool call.
   */
  output: string;
  /**
   * The type of the function tool call output. Always `function_call_output`.
   */
  type: 'function_call_output';
  /**
   * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
   * Populated when items are returned via API.
   */
  status?: 'in_progress' | 'completed' | 'incomplete';
}
/**
 * The results of a web search tool call. See the
 * [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for
 * more information.
 */
export interface ResponseFunctionWebSearch {
  /**
   * The unique ID of the web search tool call.
   */
  id: string;
  /**
   * The status of the web search tool call. One of `in_progress`, `searching`,
   * `completed`, or `failed`.
   */
  status: 'in_progress' | 'searching' | 'completed' | 'failed';
  /**
   * The type of the web search tool call. Always `web_search_call`.
   */
  type: 'web_search_call';
}
export namespace ResponseFunctionWebSearch {
/**
* Action type "search" - Performs a web search query.
*/
export interface Search {
/**
* The search query.
*/
query: string;
/**
* The action type.
*/
type: 'search';
/**
* The sources used in the search.
*/
sources?: Array<Search.Source>;
}
export namespace Search {
  /**
   * A source used in the search.
   */
  export interface Source {
    /**
     * The type of source. Always `url`.
     */
    type: 'url';
    /**
     * The URL of the source.
     */
    url: string;
  }
}
/**
* Action type "open_page" - Opens a specific URL from search results.
*/
e