@openai/agents-core
The OpenAI Agents SDK is a lightweight yet powerful framework for building multi-agent workflows.
import { Agent, AgentOutputType } from './agent';
import { InputGuardrail, OutputGuardrail } from './guardrail';
import { HandoffInputFilter } from './handoff';
import { Model, ModelProvider, ModelSettings } from './model';
import { RunContext } from './runContext';
import { AgentInputItem } from './types';
import { RunResult, StreamedRunResult } from './result';
import { RunHooks } from './lifecycle';
import { RunItem } from './items';
import { RunState } from './runState';
/**
 * Configures settings for the entire agent run.
 */
export type RunConfig = {
    /**
     * The model to use for the entire agent run. If set, it will override the model set on every
     * agent. The modelProvider below must be able to resolve this model name.
     */
    model?: string | Model;
    /**
     * The model provider to use when looking up string model names. Defaults to OpenAI.
     */
    modelProvider: ModelProvider;
    /**
     * Configure global model settings. Any non-null values will override the agent-specific model
     * settings.
     */
    modelSettings?: ModelSettings;
    /**
     * A global input filter to apply to all handoffs. If `Handoff.inputFilter` is set, then that
     * will take precedence. The input filter allows you to edit the inputs that are sent to the new
     * agent. See the documentation in `Handoff.inputFilter` for more details.
     */
    handoffInputFilter?: HandoffInputFilter;
    /**
     * A list of input guardrails to run on the initial run input.
     */
    inputGuardrails?: InputGuardrail[];
    /**
     * A list of output guardrails to run on the final output of the run.
     */
    outputGuardrails?: OutputGuardrail<AgentOutputType<unknown>>[];
    /**
     * Whether tracing is disabled for the agent run. If disabled, we will not trace the agent run.
     */
    tracingDisabled: boolean;
    /**
     * Whether we include potentially sensitive data (for example: inputs/outputs of tool calls or
     * LLM generations) in traces. If false, we'll still create spans for these events, but the
     * sensitive data will not be included.
     */
    traceIncludeSensitiveData: boolean;
    /**
     * The name of the run, used for tracing. Should be a logical name for the run, like
     * "Code generation workflow" or "Customer support agent".
     */
    workflowName?: string;
    /**
     * A custom trace ID to use for tracing. If not provided, we will generate a new trace ID.
     */
    traceId?: string;
    /**
     * A grouping identifier to use for tracing, to link multiple traces from the same conversation
     * or process. For example, you might use a chat thread ID.
     */
    groupId?: string;
    /**
     * An optional dictionary of additional metadata to include with the trace.
     */
    traceMetadata?: Record<string, string>;
};
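/*
 * Usage sketch (not part of the declarations): the Runner constructor declared below
 * accepts a Partial<RunConfig>, so only the fields you want to override need to be set.
 * The model name and the other values here are illustrative assumptions, not SDK defaults.
 *
 *   const runner = new Runner({
 *     model: 'gpt-4.1-mini',                  // any model name the modelProvider can resolve
 *     workflowName: 'Customer support agent', // used for tracing
 *     groupId: 'thread_123',                  // links traces from the same conversation
 *     traceMetadata: { team: 'support' },
 *   });
 */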
type SharedRunOptions<TContext = undefined> = {
    context?: TContext | RunContext<TContext>;
    maxTurns?: number;
    signal?: AbortSignal;
    previousResponseId?: string;
};
export type StreamRunOptions<TContext = undefined> = SharedRunOptions<TContext> & {
    /**
     * Whether to stream the run. If true, the run will emit events as the model responds.
     */
    stream: true;
};
export type NonStreamRunOptions<TContext = undefined> = SharedRunOptions<TContext> & {
    /**
     * Whether to stream the run. If true, the run will emit events as the model responds.
     */
    stream?: false;
};
export type IndividualRunOptions<TContext = undefined> = StreamRunOptions<TContext> | NonStreamRunOptions<TContext>;
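/*
 * Usage sketch: the `stream` flag selects which overload of `run` (declared below) applies.
 * `agent` stands in for any configured Agent instance; the inputs are illustrative.
 *
 *   const result = await run(agent, 'Hello', { stream: false, maxTurns: 10 }); // RunResult
 *   const streamed = await run(agent, 'Hello', { stream: true });              // StreamedRunResult
 */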
export declare function getTurnInput(originalInput: string | AgentInputItem[], generatedItems: RunItem[]): AgentInputItem[];
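/*
 * Usage sketch (assumed behavior): getTurnInput appears to combine the original input with
 * the run items generated so far to form the input for the next turn. `previousItems` is a
 * hypothetical RunItem[] collected from an earlier turn.
 *
 *   const nextInput = getTurnInput('What is the weather in Tokyo?', previousItems);
 */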
/**
 * A Runner is responsible for running an agent workflow.
 */
export declare class Runner extends RunHooks<any, AgentOutputType<unknown>> {
    #private;
    readonly config: RunConfig;
    private readonly inputGuardrailDefs;
    private readonly outputGuardrailDefs;
    constructor(config?: Partial<RunConfig>);
    /**
     * Run a workflow starting at the given agent. The agent will run in a loop until a final
     * output is generated. The loop runs like so:
     * 1. The agent is invoked with the given input.
     * 2. If there is a final output (i.e. the agent produces something of type
     *    `agent.outputType`), the loop terminates.
     * 3. If there's a handoff, we run the loop again, with the new agent.
     * 4. Else, we run tool calls (if any), and re-run the loop.
     *
     * In two cases, the agent may raise an exception:
     * 1. If the maxTurns is exceeded, a MaxTurnsExceeded exception is raised.
     * 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised.
     *
     * Note that only the first agent's input guardrails are run.
     *
     * @param agent - The starting agent to run.
     * @param input - The initial input to the agent. You can pass a string or an array of
     *   `AgentInputItem`.
     * @param options - The options for the run.
     * @param options.stream - Whether to stream the run. If true, the run will emit events as the
     *   model responds.
     * @param options.context - The context to run the agent with.
     * @param options.maxTurns - The maximum number of turns to run the agent.
     * @returns The result of the run.
     */
    run<TAgent extends Agent<any, any>, TContext = undefined>(agent: TAgent, input: string | AgentInputItem[] | RunState<TContext, TAgent>, options?: NonStreamRunOptions<TContext>): Promise<RunResult<TContext, TAgent>>;
    run<TAgent extends Agent<any, any>, TContext = undefined>(agent: TAgent, input: string | AgentInputItem[] | RunState<TContext, TAgent>, options?: StreamRunOptions<TContext>): Promise<StreamedRunResult<TContext, TAgent>>;
}
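/*
 * Usage sketch for the loop described above: a hypothetical `supportAgent` is run with a
 * per-run context and a turn limit. `finalOutput` is assumed to be exposed by RunResult;
 * see the `./result` module for the actual shape.
 *
 *   const runner = new Runner({ workflowName: 'Customer support agent' });
 *   const result = await runner.run(supportAgent, 'My order is late', {
 *     context: { userId: 'u_123' },
 *     maxTurns: 5,
 *   });
 *   console.log(result.finalOutput);
 */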
export declare function selectModel(agentModel: string | Model, runConfigModel: string | Model | undefined): string | Model;
export declare function run<TAgent extends Agent<any, any>, TContext = undefined>(agent: TAgent, input: string | AgentInputItem[] | RunState<TContext, TAgent>, options?: NonStreamRunOptions<TContext>): Promise<RunResult<TContext, TAgent>>;
export declare function run<TAgent extends Agent<any, any>, TContext = undefined>(agent: TAgent, input: string | AgentInputItem[] | RunState<TContext, TAgent>, options?: StreamRunOptions<TContext>): Promise<StreamedRunResult<TContext, TAgent>>;
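/*
 * Usage sketch: the standalone `run` helper above has the same overloads as `Runner.run`.
 * Iterating the streamed result with `for await` is an assumption about StreamedRunResult,
 * shown for illustration only.
 *
 *   const streamed = await run(agent, 'Summarize this thread', { stream: true });
 *   for await (const event of streamed) {
 *     // handle events as the model responds
 *   }
 */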
export {};