/**
 * agentswarm — an LLM-agnostic TypeScript framework for creating OpenAI-style
 * Swarm agents with the Vercel AI SDK.
 *
 * Version: (not captured in this extract)
 * 101 lines (100 loc) • 3.94 kB • TypeScript (generated declaration file)
 */
import type { LanguageModel, CoreAssistantMessage, CoreSystemMessage, CoreUserMessage, CoreToolMessage, UserContent, StepResult } from 'ai';
import { Agent } from './agent';
import { type ExtendedTextStreamPart, type JSONSerializableObject } from './utils';
/**
 * A single entry in the swarm's conversation history: any of the Vercel AI SDK
 * core message types. Assistant messages additionally carry an optional
 * `sender` — presumably the name of the agent that produced the message
 * (TODO confirm against the implementation).
 */
export type SwarmMessage = (CoreAssistantMessage & {
    sender?: string;
}) | CoreUserMessage | CoreToolMessage | CoreSystemMessage;
/**
 * Construction options for a {@link Swarm}.
 */
export type SwarmOptions<SWARM_CONTEXT extends object = JSONSerializableObject> = {
    /** Fallback model used when an agent does not specify its own. */
    defaultModel?: LanguageModel;
    /** The root agent of the swarm; initial recipient of invocations. */
    queen: Agent<SWARM_CONTEXT>;
    /** Initial value of the shared context visible to agents and tools. */
    initialContext: SWARM_CONTEXT;
    /** Optional pre-existing message history to seed the swarm with. */
    messages?: Array<SwarmMessage>;
    /** Optional human-readable name for this swarm. */
    name?: string;
    /** Upper bound on agent turns per invocation — presumably guards against
     * infinite hand-off loops; confirm default in the implementation. */
    maxTurns?: number;
    /** NOTE(review): presumably returns control to the queen agent after each
     * invocation — confirm against the implementation. */
    returnToQueen?: boolean;
};
/**
 * Options shared by every swarm invocation (generate or stream), independent
 * of whether the caller supplies raw `content` or a full `messages` array.
 */
export type BaseSwarmInvocationOptions<SWARM_CONTEXT extends object = JSONSerializableObject> = {
    /** Partial update merged into the swarm context before the run. */
    contextUpdate?: Partial<SWARM_CONTEXT>;
    /** Override the active agent for this invocation. */
    setAgent?: Agent<SWARM_CONTEXT>;
    /** Per-invocation override of the swarm's `maxTurns`. */
    maxTurns?: number;
    /** Per-invocation override of the swarm's `returnToQueen` behavior. */
    returnToQueen?: boolean;
    /** Callback fired after each generation step, receiving the step result
     * and the current (possibly tool-updated) context. */
    onStepFinish?: (event: StepResult<any>, context: SWARM_CONTEXT) => Promise<void> | void;
};
/**
 * Invocation payload variant: a single user `content` value.
 * `messages` is pinned to `undefined` so the union with
 * `SwarmInvocationWithMessages` is mutually exclusive — callers must supply
 * exactly one of `content` or `messages`.
 */
type SwarmInvocationWithContent = {
    content: UserContent;
    messages?: undefined;
};
/**
 * Invocation payload variant: a full `messages` array.
 * `content` is pinned to `undefined` to keep the union with
 * `SwarmInvocationWithContent` mutually exclusive.
 */
type SwarmInvocationWithMessages = {
    content?: undefined;
    messages: Array<SwarmMessage>;
};
/**
 * Full invocation options: the shared base options plus exactly one of
 * `content` (single user input) or `messages` (explicit history).
 */
export type SwarmInvocationOptions<SWARM_CONTEXT extends object> = BaseSwarmInvocationOptions<SWARM_CONTEXT> & (SwarmInvocationWithContent | SwarmInvocationWithMessages);
/**
 * Streaming-only options, accepted by {@link Swarm.streamText} in addition to
 * the normal invocation options.
 */
export type SwarmStreamingOptions = {
    /** Presumably forwarded to the AI SDK's experimental tool-call streaming
     * flag — confirm against the implementation. */
    experimental_toolCallStreaming?: boolean;
};
/**
 * The swarm is the callable that can generate text, generate objects, or stream text.
 *
 * A swarm owns a tree of agents rooted at the {@link queen}, a shared context
 * object of type `SWARM_CONTEXT`, and the running message history. Invocations
 * are routed to the active agent, which may hand off to other agents until a
 * final response is produced (bounded by `maxTurns`).
 */
export declare class Swarm<SWARM_CONTEXT extends object = any> {
    /** Fallback model used when the active agent does not specify its own. */
    readonly defaultModel: LanguageModel;
    /** Optional human-readable name for this swarm. */
    readonly name?: string;
    /** The root agent; initial recipient of invocations. */
    readonly queen: Agent<SWARM_CONTEXT>;
    /** Shared mutable context visible to agents and their tools. */
    protected context: SWARM_CONTEXT;
    /** Running conversation history. */
    protected messages: Array<SwarmMessage>;
    /** Upper bound on agent turns per invocation. */
    protected readonly maxTurns: number;
    /** NOTE(review): presumably returns control to the queen after each run —
     * confirm against the implementation. */
    protected readonly returnToQueen: boolean;
    constructor(options: SwarmOptions<SWARM_CONTEXT>);
    /** The agent currently handling messages; updated on agent hand-off. */
    protected _activeAgent: Agent<SWARM_CONTEXT>;
    /** Read-only view of the currently active agent. */
    get activeAgent(): Readonly<Agent<SWARM_CONTEXT>>;
    /**
     * Use the swarm to generate text / tool calls.
     *
     * @param options invocation options (exactly one of `content`/`messages`)
     * @returns the finish reason, the agent that produced the final text, the
     *          final text, the updated message history, and the final context
     */
    generateText(options: SwarmInvocationOptions<SWARM_CONTEXT>): Promise<{
        finishReason: import("@ai-sdk/provider").LanguageModelV1FinishReason;
        activeAgent: Agent<SWARM_CONTEXT>;
        text: string;
        messages: SwarmMessage[];
        context: SWARM_CONTEXT;
    }>;
    /**
     * Stream from the swarm.
     *
     * Returns promises that resolve once the stream completes, plus the text
     * and full event streams for incremental consumption.
     * @param options invocation options plus streaming-specific flags
     */
    streamText(options: SwarmInvocationOptions<SWARM_CONTEXT> & SwarmStreamingOptions): {
        finishReason: Promise<import("@ai-sdk/provider").LanguageModelV1FinishReason>;
        // FIX: was Promise<Readonly<Agent<JSONSerializableObject>>>, which
        // dropped the class's SWARM_CONTEXT type parameter (a declaration-emit
        // inference artifact). Now consistent with generateText() and the
        // activeAgent getter.
        activeAgent: Promise<Readonly<Agent<SWARM_CONTEXT>>>;
        text: Promise<string>;
        messages: Promise<SwarmMessage[]>;
        context: Promise<SWARM_CONTEXT>;
        textStream: import("./utils").AsyncIterableStream<any>;
        fullStream: import("./utils").AsyncIterableStream<ExtendedTextStreamPart<any>>;
    };
    /**
     * Return a read-only version of the context.
     */
    getContext(): Readonly<SWARM_CONTEXT>;
    /**
     * Update context, and receive a readonly version of it.
     * @param update partial context merged into the current context
     */
    updateContext(update: Partial<SWARM_CONTEXT>): Readonly<SWARM_CONTEXT>;
    /**
     * Handle updating and overriding models configurations based on invocation options
     * @param invocationOptions
     * @private
     */
    private handleUpdatesAndOverrides;
    /**
     * wrap the agent's tools to hide the swarmContext property that they can request to get access to the swarm's
     * context; so that the LLM doesn't see it and try to generate it. this requires modifying the JSON schema, and
     * wrapping the executor.
     * @param tools
     * @private
     */
    private wrapTools;
    /** Read-only view of the current message history. */
    getMessages(): Readonly<Array<SwarmMessage>>;
}
export {};