@convex-dev/agent
An agent component for Convex.
TypeScript
import {
  type CallSettings,
  type GenerateObjectResult,
  type IdGenerator,
  type LanguageModel,
  type ModelMessage,
  type StepResult,
  type StopCondition,
  type ToolSet,
} from "ai";
import type { ActionCtx, AgentComponent, Config, Options } from "./types.js";
import type { Message, MessageDoc } from "../validators.js";
import { type ModelOrMetadata } from "../shared.js";
import type { Agent } from "./index.js";
export declare function startGeneration<T, Tools extends ToolSet = ToolSet, CustomCtx extends object = object>(ctx: ActionCtx & CustomCtx, component: AgentComponent,
/**
 * These are the arguments you'll pass to the LLM call, such as
 * `generateText` or `streamText`. This function will look up the context
 * and provide functions to save the steps, abort the generation, and more.
 * The type of the returned arguments is inferred from the type of the
 * arguments you pass here.
 */
args: T & {
/**
 * If provided, this message will be used as the "prompt" for the LLM call
 * instead of the `prompt` or `messages` arguments.
 * This is useful if you want to save a user message first, then use it as
 * the prompt for the LLM call in a separate call.
 */
promptMessageId?: string;
/**
* The model to use for the LLM calls. This will override the model specified
* in the Agent constructor.
*/
model?: LanguageModel;
/**
* The tools to use for the tool calls. This will override tools specified
* in the Agent constructor or createThread / continueThread.
*/
tools?: Tools;
/**
 * The prompt message(s) to use for the LLM call. These will be the last
 * message(s) in the context. If it's a string, it is treated as a user
 * message.
 */
prompt?: string | (ModelMessage | Message)[];
/**
* If provided alongside prompt, the ordering will be:
* 1. system prompt
* 2. search context
* 3. recent messages
* 4. these messages
* 5. prompt messages, including those already on the same `order` as
* the promptMessageId message, if provided.
*/
messages?: (ModelMessage | Message)[];
/**
 * The abort signal to pass to the LLM call. If it is triggered, the
 * pending message will be marked as failed. If the generation is aborted
 * asynchronously, this signal will be triggered once that is detected.
 */
abortSignal?: AbortSignal;
stopWhen?: StopCondition<Tools> | Array<StopCondition<Tools>>;
_internal?: {
generateId?: IdGenerator;
};
}, { threadId, ...opts }: Options & Config & {
userId?: string | null;
threadId?: string;
languageModel?: LanguageModel;
agentName: string;
agentForToolCtx?: Agent;
}): Promise<{
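/**
 * The arguments to spread into the LLM call (e.g. `generateText`), with
 * the system prompt, context messages, and any tools already resolved.
 */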
args: T & {
system?: string;
model: LanguageModel;
messages: ModelMessage[];
prompt?: never;
tools?: Tools;
} & CallSettings;
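/** The order and step order assigned to the pending message. */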
order: number;
stepOrder: number;
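/** The userId associated with the thread, if any. */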
userId: string | undefined;
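/** The ID of the message being used as the prompt, if any. */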
promptMessageId: string | undefined;
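/** Update the model (or model metadata) recorded for this generation. */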
updateModel: (model: ModelOrMetadata | undefined) => void;
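/** Save a step result (e.g. from `generateText`) or a generated object to the thread. */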
save: <TOOLS extends ToolSet>(toSave: {
step: StepResult<TOOLS>;
} | {
object: GenerateObjectResult<unknown>;
}, createPendingMessage?: boolean) => Promise<void>;
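/** Mark the pending message as failed with the given reason. */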
fail: (reason: string) => Promise<void>;
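/** Return the messages that have been saved so far. */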
getSavedMessages: () => MessageDoc[];
}>;
//# sourceMappingURL=start.d.ts.map
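
Example: below is a minimal, illustrative sketch of calling `startGeneration` from a Convex action and spreading the prepared `args` into `generateText`. It is an assumption-laden sketch rather than documented usage: the root export of `startGeneration`, the `components.agent` reference, the `openai.chat("gpt-4o-mini")` model, and the `"assistant"` agent name all depend on your app's setup.

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider choice
import { v } from "convex/values";
import { startGeneration } from "@convex-dev/agent"; // assumed export path
import { components } from "./_generated/api";
import { action } from "./_generated/server";

export const reply = action({
  args: { threadId: v.string(), prompt: v.string() },
  handler: async (ctx, { threadId, prompt }) => {
    // Look up thread context and get the bookkeeping helpers.
    const { args, save, fail } = await startGeneration(
      ctx,
      components.agent,
      { prompt, model: openai.chat("gpt-4o-mini") },
      { threadId, agentName: "assistant" },
    );
    try {
      // `args` already carries the system prompt, context messages,
      // tools, and call settings, so it can be passed through directly.
      const result = await generateText(args);
      // Persist each completed step back to the thread.
      for (const step of result.steps) {
        await save({ step });
      }
      return result.text;
    } catch (err) {
      // Record the failure on the pending message before rethrowing.
      await fail(String(err));
      throw err;
    }
  },
});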