@copilotkit/runtime
import { ReplaySubject } from 'rxjs';
import { Parameter, Action, CopilotKitLowLevelError, CopilotErrorHandler, PartialBy, MaybePromise, NonEmptyRecord } from '@copilotkit/shared';
import OpenAI from 'openai';
import { BaseMessageChunk, AIMessage, AIMessageChunk, BaseMessage } from '@langchain/core/messages';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { IterableReadableStream, IterableReadableStreamInterface } from '@langchain/core/utils/stream';
import { Groq } from 'groq-sdk';
import Anthropic from '@anthropic-ai/sdk';
import * as graphql from 'graphql';
import * as pino from 'pino';
import { YogaInitialContext, createYoga } from 'graphql-yoga';
import { CopilotRuntimeOptions, CopilotRuntime as CopilotRuntime$1, AgentRunner } from '@copilotkitnext/runtime';
import { AbstractAgent } from '@ag-ui/client';
import * as http from 'http';
import { ServerResponse, IncomingMessage } from 'http';
import { Readable } from 'node:stream';
declare enum MessageRole {
assistant = "assistant",
developer = "developer",
system = "system",
tool = "tool",
user = "user"
}
declare enum ActionInputAvailability {
disabled = "disabled",
enabled = "enabled",
remote = "remote"
}
declare class BaseMessageInput {
id: string;
createdAt: Date;
}
declare class MessageInput extends BaseMessageInput {
textMessage?: TextMessageInput;
actionExecutionMessage?: ActionExecutionMessageInput;
resultMessage?: ResultMessageInput;
agentStateMessage?: AgentStateMessageInput;
imageMessage?: ImageMessageInput;
}
declare class TextMessageInput {
content: string;
parentMessageId?: string;
role: MessageRole;
}
declare class ActionExecutionMessageInput {
name: string;
arguments: string;
parentMessageId?: string;
scope?: string;
}
declare class ResultMessageInput {
actionExecutionId: string;
actionName: string;
parentMessageId?: string;
result: string;
}
declare class AgentStateMessageInput {
threadId: string;
agentName: string;
role: MessageRole;
state: string;
running: boolean;
nodeName: string;
runId: string;
active: boolean;
}
declare class ImageMessageInput {
format: string;
bytes: string;
parentMessageId?: string;
role: MessageRole;
}
declare enum MessageStatusCode {
Pending = "pending",
Success = "success",
Failed = "failed"
}
declare class BaseMessageStatus {
code: MessageStatusCode;
}
declare class PendingMessageStatus extends BaseMessageStatus {
code: MessageStatusCode;
}
declare class SuccessMessageStatus extends BaseMessageStatus {
code: MessageStatusCode;
}
declare class FailedMessageStatus extends BaseMessageStatus {
code: MessageStatusCode;
reason: string;
}
declare const MessageStatusUnion: PendingMessageStatus | SuccessMessageStatus | FailedMessageStatus;
type MessageStatus = typeof MessageStatusUnion;
declare enum ResponseStatusCode {
Pending = "pending",
Success = "success",
Failed = "failed"
}
declare abstract class BaseResponseStatus {
code: ResponseStatusCode;
}
declare enum FailedResponseStatusReason {
GUARDRAILS_VALIDATION_FAILED = "GUARDRAILS_VALIDATION_FAILED",
MESSAGE_STREAM_INTERRUPTED = "MESSAGE_STREAM_INTERRUPTED",
UNKNOWN_ERROR = "UNKNOWN_ERROR"
}
declare class FailedResponseStatus extends BaseResponseStatus {
code: ResponseStatusCode;
reason: FailedResponseStatusReason;
details?: Record<string, any>;
}
/**
* The extensions response is used to receive additional information from the copilot runtime, specific to a
* service adapter or agent framework.
*
* Next time a request to the runtime is made, the extensions response will be included in the request as input.
*/
declare class ExtensionsResponse {
openaiAssistantAPI?: OpenAIApiAssistantAPIResponse;
}
declare class OpenAIApiAssistantAPIResponse {
runId?: string;
threadId?: string;
}
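/**
 * A minimal sketch of the round-trip described above, assuming you keep the
 * runtime response handy between requests (the storage here is hypothetical):
 *
 * ```ts
 * // Shape mirrors ExtensionsResponse / ExtensionsInput in this module.
 * let saved: { openaiAssistantAPI?: { runId?: string; threadId?: string } } | undefined;
 *
 * // 1. After a runtime response arrives, keep its extensions.
 * function onRuntimeResponse(response: { extensions?: typeof saved }) {
 *   saved = response.extensions;
 * }
 *
 * // 2. Include them as input on the next request to the runtime.
 * function buildNextRequestExtensions() {
 *   return saved;
 * }
 * ```
 */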
declare abstract class BaseMessageOutput {
id: string;
createdAt: Date;
status: typeof MessageStatusUnion;
}
type MessageType = "TextMessage" | "ActionExecutionMessage" | "ResultMessage" | "AgentStateMessage" | "ImageMessage";
declare class Message {
type: MessageType;
id: BaseMessageOutput["id"];
createdAt: BaseMessageOutput["createdAt"];
status: MessageStatus;
constructor(props: any);
isTextMessage(): this is TextMessage;
isActionExecutionMessage(): this is ActionExecutionMessage;
isResultMessage(): this is ResultMessage;
isAgentStateMessage(): this is AgentStateMessage;
isImageMessage(): this is ImageMessage;
}
type MessageConstructorOptions = Partial<Message>;
type TextMessageConstructorOptions = MessageConstructorOptions & TextMessageInput;
declare class TextMessage extends Message implements TextMessageConstructorOptions {
content: TextMessageInput["content"];
parentMessageId: TextMessageInput["parentMessageId"];
role: TextMessageInput["role"];
type: "TextMessage";
constructor(props: TextMessageConstructorOptions);
}
declare class ActionExecutionMessage extends Message implements Omit<ActionExecutionMessageInput, "arguments" | "scope"> {
type: MessageType;
name: string;
arguments: Record<string, any>;
parentMessageId?: string;
}
declare class ResultMessage extends Message implements ResultMessageInput {
type: MessageType;
actionExecutionId: string;
actionName: string;
result: string;
static encodeResult(result: any, error?: {
code: string;
message: string;
} | string | Error): string;
static decodeResult(result: string): {
error?: {
code: string;
message: string;
};
result: string;
};
hasError(): boolean;
getError(): {
code: string;
message: string;
} | undefined;
}
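/**
 * A short sketch of the error round-trip above, assuming `ResultMessage` is
 * in scope (it is not part of this module's export list); the result payload
 * and error are placeholders:
 *
 * ```ts
 * const encoded = ResultMessage.encodeResult(
 *   { rows: [] },
 *   { code: "NOT_FOUND", message: "No matching rows" },
 * );
 *
 * const decoded = ResultMessage.decodeResult(encoded);
 * if (decoded.error) {
 *   console.error(decoded.error.code, decoded.error.message);
 * }
 * ```
 */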
declare class AgentStateMessage extends Message implements Omit<AgentStateMessageInput, "state"> {
type: MessageType;
threadId: string;
agentName: string;
nodeName: string;
runId: string;
active: boolean;
role: MessageRole;
state: any;
running: boolean;
}
declare class ImageMessage extends Message implements ImageMessageInput {
type: MessageType;
format: string;
bytes: string;
role: MessageRole;
parentMessageId?: string;
}
declare enum RuntimeEventTypes {
TextMessageStart = "TextMessageStart",
TextMessageContent = "TextMessageContent",
TextMessageEnd = "TextMessageEnd",
ActionExecutionStart = "ActionExecutionStart",
ActionExecutionArgs = "ActionExecutionArgs",
ActionExecutionEnd = "ActionExecutionEnd",
ActionExecutionResult = "ActionExecutionResult",
AgentStateMessage = "AgentStateMessage",
MetaEvent = "MetaEvent",
RunError = "RunError"
}
declare enum RuntimeMetaEventName {
LangGraphInterruptEvent = "LangGraphInterruptEvent",
LangGraphInterruptResumeEvent = "LangGraphInterruptResumeEvent",
CopilotKitLangGraphInterruptEvent = "CopilotKitLangGraphInterruptEvent"
}
type RuntimeMetaEvent = {
type: RuntimeEventTypes.MetaEvent;
name: RuntimeMetaEventName.LangGraphInterruptEvent;
value: string;
} | {
type: RuntimeEventTypes.MetaEvent;
name: RuntimeMetaEventName.CopilotKitLangGraphInterruptEvent;
data: {
value: string;
messages: (TextMessage | ActionExecutionMessage | ResultMessage)[];
};
} | {
type: RuntimeEventTypes.MetaEvent;
name: RuntimeMetaEventName.LangGraphInterruptResumeEvent;
data: string;
};
type RuntimeErrorEvent = {
type: RuntimeEventTypes.RunError;
message: string;
code?: string;
};
type RuntimeEvent = {
type: RuntimeEventTypes.TextMessageStart;
messageId: string;
parentMessageId?: string;
} | {
type: RuntimeEventTypes.TextMessageContent;
messageId: string;
content: string;
} | {
type: RuntimeEventTypes.TextMessageEnd;
messageId: string;
} | {
type: RuntimeEventTypes.ActionExecutionStart;
actionExecutionId: string;
actionName: string;
parentMessageId?: string;
} | {
type: RuntimeEventTypes.ActionExecutionArgs;
actionExecutionId: string;
args: string;
} | {
type: RuntimeEventTypes.ActionExecutionEnd;
actionExecutionId: string;
} | {
type: RuntimeEventTypes.ActionExecutionResult;
actionName: string;
actionExecutionId: string;
result: string;
} | {
type: RuntimeEventTypes.AgentStateMessage;
threadId: string;
agentName: string;
nodeName: string;
runId: string;
active: boolean;
role: string;
state: string;
running: boolean;
} | RuntimeMetaEvent | RuntimeErrorEvent;
type EventSourceCallback = (eventStream$: RuntimeEventSubject) => Promise<void>;
declare class RuntimeEventSubject extends ReplaySubject<RuntimeEvent> {
constructor();
sendTextMessageStart({ messageId, parentMessageId, }: {
messageId: string;
parentMessageId?: string;
}): void;
sendTextMessageContent({ messageId, content }: {
messageId: string;
content: string;
}): void;
sendTextMessageEnd({ messageId }: {
messageId: string;
}): void;
sendTextMessage(messageId: string, content: string): void;
sendActionExecutionStart({ actionExecutionId, actionName, parentMessageId, }: {
actionExecutionId: string;
actionName: string;
parentMessageId?: string;
}): void;
sendActionExecutionArgs({ actionExecutionId, args, }: {
actionExecutionId: string;
args: string;
}): void;
sendActionExecutionEnd({ actionExecutionId }: {
actionExecutionId: string;
}): void;
sendActionExecution({ actionExecutionId, actionName, args, parentMessageId, }: {
actionExecutionId: string;
actionName: string;
args: string;
parentMessageId?: string;
}): void;
sendActionExecutionResult({ actionExecutionId, actionName, result, error, }: {
actionExecutionId: string;
actionName: string;
result?: string;
error?: {
code: string;
message: string;
};
}): void;
sendAgentStateMessage({ threadId, agentName, nodeName, runId, active, role, state, running, }: {
threadId: string;
agentName: string;
nodeName: string;
runId: string;
active: boolean;
role: string;
state: string;
running: boolean;
}): void;
}
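/**
 * A sketch of the canonical event order for one tool call, using the helper
 * methods above; the IDs, action name, and payloads are placeholders:
 *
 * ```ts
 * function emitToolCall(eventStream$: RuntimeEventSubject) {
 *   eventStream$.sendActionExecutionStart({
 *     actionExecutionId: "call-1",
 *     actionName: "getWeather",
 *   });
 *   eventStream$.sendActionExecutionArgs({
 *     actionExecutionId: "call-1",
 *     args: JSON.stringify({ city: "Berlin" }),
 *   });
 *   eventStream$.sendActionExecutionEnd({ actionExecutionId: "call-1" });
 *   eventStream$.sendActionExecutionResult({
 *     actionExecutionId: "call-1",
 *     actionName: "getWeather",
 *     result: JSON.stringify({ tempC: 21 }),
 *   });
 * }
 * ```
 */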
declare class RuntimeEventSource {
private eventStream$;
private callback;
private errorHandler?;
private errorContext?;
constructor(params?: {
errorHandler?: (error: any, context: any) => Promise<void>;
errorContext?: any;
});
stream(callback: EventSourceCallback): Promise<void>;
}
declare class ActionInput {
name: string;
description: string;
jsonSchema: string;
available?: ActionInputAvailability;
}
declare class ForwardedParametersInput {
model?: string;
maxTokens?: number;
stop?: string[];
toolChoice?: string;
toolChoiceFunctionName?: string;
temperature?: number;
}
/**
* The extensions input is used to pass additional information to the copilot runtime, specific to a
* service adapter or agent framework.
*/
declare class ExtensionsInput {
openaiAssistantAPI?: OpenAIApiAssistantAPIInput;
}
declare class OpenAIApiAssistantAPIInput {
runId?: string;
threadId?: string;
}
declare class AgentSessionInput {
agentName: string;
threadId?: string;
nodeName?: string;
}
declare class AgentStateInput {
agentName: string;
state: string;
config?: string;
}
interface CopilotRuntimeChatCompletionRequest {
eventSource: RuntimeEventSource;
messages: Message[];
actions: ActionInput[];
model?: string;
threadId?: string;
runId?: string;
forwardedParameters?: ForwardedParametersInput;
extensions?: ExtensionsInput;
agentSession?: AgentSessionInput;
agentStates?: AgentStateInput[];
}
interface CopilotRuntimeChatCompletionResponse {
threadId: string;
runId?: string;
extensions?: ExtensionsResponse;
}
interface CopilotServiceAdapter {
provider?: string;
model?: string;
process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
name?: string;
}
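/**
 * A minimal sketch of a custom adapter (the `EchoAdapter` name is
 * hypothetical): it streams one canned text message instead of calling an LLM.
 *
 * ```ts
 * import {
 *   CopilotServiceAdapter,
 *   CopilotRuntimeChatCompletionRequest,
 *   CopilotRuntimeChatCompletionResponse,
 * } from "@copilotkit/runtime";
 * import { randomUUID } from "node:crypto";
 *
 * class EchoAdapter implements CopilotServiceAdapter {
 *   get name() {
 *     return "EchoAdapter";
 *   }
 *
 *   async process(
 *     request: CopilotRuntimeChatCompletionRequest,
 *   ): Promise<CopilotRuntimeChatCompletionResponse> {
 *     await request.eventSource.stream(async (eventStream$) => {
 *       // Emits TextMessageStart, TextMessageContent, and TextMessageEnd.
 *       eventStream$.sendTextMessage(randomUUID(), "Hello from EchoAdapter!");
 *       eventStream$.complete();
 *     });
 *     return { threadId: request.threadId ?? randomUUID() };
 *   }
 * }
 * ```
 */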
/**
* Copilot Runtime adapter for OpenAI.
*
* ## Example
*
* ```ts
* import { CopilotRuntime, OpenAIAdapter } from "@copilotkit/runtime";
* import OpenAI from "openai";
*
* const copilotKit = new CopilotRuntime();
*
* const openai = new OpenAI({
* organization: "<your-organization-id>", // optional
* apiKey: "<your-api-key>",
* });
*
* return new OpenAIAdapter({ openai });
* ```
*
* ## Example with Azure OpenAI
*
* ```ts
* import { CopilotRuntime, OpenAIAdapter } from "@copilotkit/runtime";
* import OpenAI from "openai";
*
* // The name of your Azure OpenAI Instance.
* // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
* const instance = "<your instance name>";
*
* // Corresponds to your Model deployment within your OpenAI resource, e.g. my-gpt35-16k-deployment
* // Navigate to the Azure OpenAI Studio to deploy a model.
* const model = "<your model>";
*
* const apiKey = process.env["AZURE_OPENAI_API_KEY"];
* if (!apiKey) {
* throw new Error("The AZURE_OPENAI_API_KEY environment variable is missing or empty.");
* }
*
* const copilotKit = new CopilotRuntime();
*
* const openai = new OpenAI({
* apiKey,
* baseURL: `https://${instance}.openai.azure.com/openai/deployments/${model}`,
* defaultQuery: { "api-version": "2024-04-01-preview" },
* defaultHeaders: { "api-key": apiKey },
* });
*
* return new OpenAIAdapter({ openai });
* ```
*/
interface OpenAIAdapterParams {
/**
* An optional OpenAI instance to use. If not provided, a new instance will be
* created.
*/
openai?: OpenAI;
/**
* The model to use.
*/
model?: string;
/**
* Whether to disable parallel tool calls.
* You can disable parallel tool calls to force the model to execute tool calls sequentially.
* This is useful if you want to execute tool calls in a specific order so that the state changes
* introduced by one tool call (e.g. new actions or readables) are visible to the next.
*
* @default false
*/
disableParallelToolCalls?: boolean;
/**
* Whether to keep the role in system messages as "system".
* By default, it is converted to "developer", the role newer OpenAI models expect.
*
* @default false
*/
keepSystemRole?: boolean;
}
declare class OpenAIAdapter implements CopilotServiceAdapter {
model: string;
provider: string;
private disableParallelToolCalls;
private _openai;
private keepSystemRole;
get openai(): OpenAI;
get name(): string;
constructor(params?: OpenAIAdapterParams);
private ensureOpenAI;
process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
}
type LangChainBaseMessageChunkStream = IterableReadableStream<BaseMessageChunk>;
type LangChainAIMessageChunkStream = IterableReadableStreamInterface<AIMessageChunk>;
type LangChainReturnType = LangChainBaseMessageChunkStream | LangChainAIMessageChunkStream | BaseMessageChunk | string | AIMessage;
/**
* Copilot Runtime adapter for LangChain.
*
* ## Example
*
* ```ts
* import { CopilotRuntime, LangChainAdapter } from "@copilotkit/runtime";
* import { ChatOpenAI } from "@langchain/openai";
*
* const copilotKit = new CopilotRuntime();
*
* const model = new ChatOpenAI({
* model: "gpt-4o",
* apiKey: "<your-api-key>",
* });
*
* return new LangChainAdapter({
* chainFn: async ({ messages, tools }) => {
* return model.bindTools(tools).stream(messages);
* // or optionally enable strict mode
* // return model.bindTools(tools, { strict: true }).stream(messages);
* }
* });
* ```
*
* The asynchronous handler function (`chainFn`) can return any of the following:
*
* - A simple `string` response
* - A LangChain stream (`IterableReadableStream`)
* - A LangChain `BaseMessageChunk` object
* - A LangChain `AIMessage` object
*/
interface ChainFnParameters {
model: string;
messages: BaseMessage[];
tools: DynamicStructuredTool[];
threadId?: string;
runId?: string;
}
interface LangChainAdapterOptions {
/**
* A function that uses the LangChain API to generate a response.
*/
chainFn: (parameters: ChainFnParameters) => Promise<LangChainReturnType>;
}
declare class LangChainAdapter implements CopilotServiceAdapter {
private options;
/**
* To use LangChain as a backend, provide a handler function to the adapter with your custom LangChain logic.
*/
get name(): string;
constructor(options: LangChainAdapterOptions);
process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
}
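/**
 * Besides streams, `chainFn` may resolve to a plain string; a minimal sketch:
 *
 * ```ts
 * import { LangChainAdapter } from "@copilotkit/runtime";
 *
 * const staticAdapter = new LangChainAdapter({
 *   chainFn: async ({ messages }) => {
 *     return `You sent ${messages.length} message(s).`;
 *   },
 * });
 * ```
 */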
/**
* Copilot Runtime adapter for Google Generative AI (e.g. Gemini).
*
* ## Example
*
* ```ts
* import { CopilotRuntime, GoogleGenerativeAIAdapter } from "@copilotkit/runtime";
*
* const copilotKit = new CopilotRuntime();
*
* return new GoogleGenerativeAIAdapter({
*   model: "gemini-2.5-flash",
*   apiVersion: "v1",
*   apiKey: process.env["GOOGLE_API_KEY"],
* });
* ```
*/
interface GoogleGenerativeAIAdapterOptions {
/**
* A custom Google Generative AI model to use.
*/
model?: string;
/**
* The API version to use (e.g. "v1" or "v1beta"). Defaults to "v1".
*/
apiVersion?: "v1" | "v1beta";
/**
* The API key to use.
*/
apiKey?: string;
}
declare class GoogleGenerativeAIAdapter extends LangChainAdapter {
provider: string;
model: string;
constructor(options?: GoogleGenerativeAIAdapterOptions);
}
/**
* Copilot Runtime adapter for the OpenAI Assistant API.
*
* ## Example
*
* ```ts
* import { CopilotRuntime, OpenAIAssistantAdapter } from "@copilotkit/runtime";
* import OpenAI from "openai";
*
* const copilotKit = new CopilotRuntime();
*
* const openai = new OpenAI({
* organization: "<your-organization-id>",
* apiKey: "<your-api-key>",
* });
*
* return new OpenAIAssistantAdapter({
* openai,
* assistantId: "<your-assistant-id>",
* codeInterpreterEnabled: true,
* fileSearchEnabled: true,
* });
* ```
*/
interface OpenAIAssistantAdapterParams {
/**
* The ID of the assistant to use.
*/
assistantId: string;
/**
* An optional OpenAI instance to use. If not provided, a new instance will be created.
*/
openai?: OpenAI;
/**
* Whether to enable code interpretation.
* @default true
*/
codeInterpreterEnabled?: boolean;
/**
* Whether to enable file search.
* @default true
*/
fileSearchEnabled?: boolean;
/**
* Whether to disable parallel tool calls.
* You can disable parallel tool calls to force the model to execute tool calls sequentially.
* This is useful if you want to execute tool calls in a specific order so that the state changes
* introduced by one tool call (e.g. new actions or readables) are visible to the next.
*
* @default false
*/
disableParallelToolCalls?: boolean;
/**
* Whether to keep the role in system messages as "system".
* By default, it is converted to "developer", the role newer OpenAI models expect.
*
* @default false
*/
keepSystemRole?: boolean;
}
declare class OpenAIAssistantAdapter implements CopilotServiceAdapter {
private _openai;
private codeInterpreterEnabled;
private assistantId;
private fileSearchEnabled;
private disableParallelToolCalls;
private keepSystemRole;
get name(): string;
constructor(params: OpenAIAssistantAdapterParams);
private ensureOpenAI;
process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
private submitToolOutputs;
private submitUserMessage;
private streamResponse;
}
/**
* Copilot Runtime adapter for Unify.
*
* ## Example
*
* ```ts
* import { CopilotRuntime, UnifyAdapter } from "@copilotkit/runtime";
*
* const copilotKit = new CopilotRuntime();
*
* // Set the model to use by passing it to the constructor.
* return new UnifyAdapter({ model: "llama-3-8b-chat@fireworks-ai" });
* ```
*/
interface UnifyAdapterParams {
apiKey?: string;
model: string;
}
declare class UnifyAdapter implements CopilotServiceAdapter {
private apiKey;
model: string;
private start;
provider: string;
get name(): string;
constructor(options?: UnifyAdapterParams);
process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
}
/**
* Copilot Runtime adapter for Groq.
*
* ## Example
*
* ```ts
* import { CopilotRuntime, GroqAdapter } from "@copilotkit/runtime";
* import { Groq } from "groq-sdk";
*
* const groq = new Groq({ apiKey: process.env["GROQ_API_KEY"] });
*
* const copilotKit = new CopilotRuntime();
*
* return new GroqAdapter({ groq, model: "<model-name>" });
* ```
*/
interface GroqAdapterParams {
/**
* An optional Groq instance to use.
*/
groq?: Groq;
/**
* The model to use.
*/
model?: string;
/**
* Whether to disable parallel tool calls.
* You can disable parallel tool calls to force the model to execute tool calls sequentially.
* This is useful if you want to execute tool calls in a specific order so that the state changes
* introduced by one tool call (e.g. new actions or readables) are visible to the next.
*
* @default false
*/
disableParallelToolCalls?: boolean;
}
declare class GroqAdapter implements CopilotServiceAdapter {
model: string;
provider: string;
private disableParallelToolCalls;
private _groq;
get groq(): Groq;
get name(): string;
constructor(params?: GroqAdapterParams);
private ensureGroq;
process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
}
interface RemoteChainParameters {
name: string;
description: string;
chainUrl: string;
parameters?: Parameter[];
parameterType?: "single" | "multi";
}
declare class RemoteChain {
name: string;
description: string;
chainUrl: string;
parameters?: Parameter[];
parameterType: "single" | "multi";
constructor(options: RemoteChainParameters);
toAction(): Promise<Action<any>>;
inferLangServeParameters(): Promise<void>;
}
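/**
 * A sketch of wrapping a LangServe chain as a CopilotKit action; the URL,
 * name, and parameter are placeholders:
 *
 * ```ts
 * import { RemoteChain } from "@copilotkit/runtime";
 *
 * const chain = new RemoteChain({
 *   name: "jokeChain",
 *   description: "Tells a joke about a topic",
 *   chainUrl: "http://localhost:8000/joke",
 *   parameters: [{ name: "topic", type: "string" }],
 * });
 *
 * const action = await chain.toAction();
 * ```
 */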
/**
* Converts service adapter errors to structured CopilotKitError format using HTTP status codes.
* This provides consistent error classification across all service adapters.
*/
declare function convertServiceAdapterError(error: any, adapterName: string): CopilotKitLowLevelError;
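/**
 * A sketch of normalizing a provider error inside a custom adapter's
 * `process()` method (the adapter name is a placeholder):
 *
 * ```ts
 * import { convertServiceAdapterError } from "@copilotkit/runtime";
 *
 * try {
 *   // await the provider call here, e.g. openai.chat.completions.create(...)
 * } catch (error) {
 *   throw convertServiceAdapterError(error, "EchoAdapter");
 * }
 * ```
 */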
/**
* Copilot Runtime adapter for Anthropic.
*
* ## Example
*
* ```ts
* import { CopilotRuntime, AnthropicAdapter } from "@copilotkit/runtime";
* import Anthropic from "@anthropic-ai/sdk";
*
* const copilotKit = new CopilotRuntime();
*
* const anthropic = new Anthropic({
* apiKey: "<your-api-key>",
* });
*
* return new AnthropicAdapter({
* anthropic,
* promptCaching: {
* enabled: true,
* debug: true
* }
* });
* ```
*/
interface AnthropicPromptCachingConfig {
/**
* Whether to enable prompt caching.
*/
enabled: boolean;
/**
* Whether to enable debug logging for cache operations.
*/
debug?: boolean;
}
interface AnthropicAdapterParams {
/**
* An optional Anthropic instance to use. If not provided, a new instance will be
* created.
*/
anthropic?: Anthropic;
/**
* The model to use.
*/
model?: string;
/**
* Configuration for prompt caching.
* See: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
*/
promptCaching?: AnthropicPromptCachingConfig;
}
declare class AnthropicAdapter implements CopilotServiceAdapter {
model: string;
provider: string;
private promptCaching;
private _anthropic;
get anthropic(): Anthropic;
get name(): string;
constructor(params?: AnthropicAdapterParams);
private ensureAnthropic;
/**
* Adds cache control to system prompt
*/
private addSystemPromptCaching;
/**
* Adds cache control to the final message
*/
private addIncrementalMessageCaching;
private shouldGenerateFallbackResponse;
process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
}
interface OllamaAdapterOptions {
model?: string;
}
declare class ExperimentalOllamaAdapter implements CopilotServiceAdapter {
model: string;
provider: string;
get name(): string;
constructor(options?: OllamaAdapterOptions);
process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
}
/**
* Copilot Runtime adapter for AWS Bedrock.
*
* ## Example
*
* ```ts
* import { CopilotRuntime, BedrockAdapter } from "@copilotkit/runtime";
*
* const copilotKit = new CopilotRuntime();
*
* return new BedrockAdapter({
* model: "amazon.nova-lite-v1:0",
* region: "us-east-1",
* credentials: {
* accessKeyId: process.env.AWS_ACCESS_KEY_ID,
* secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY
* }
* });
* ```
*/
interface BedrockAdapterParams {
/**
* AWS Bedrock model ID to use.
* @default "amazon.nova-lite-v1:0"
*/
model?: string;
/**
* AWS region where Bedrock is available.
* @default "us-east-1"
*/
region?: string;
/**
* AWS credentials for Bedrock access.
*/
credentials?: {
accessKeyId?: string;
secretAccessKey?: string;
};
}
declare class BedrockAdapter extends LangChainAdapter {
provider: string;
model: string;
constructor(options?: BedrockAdapterParams);
}
/**
* CopilotKit Empty Adapter
*
* This adapter preserves adherence to runtime requirements while doing nothing.
* It is ideal if you don't want to connect an LLM to the runtime and only want to use your LangGraph agent.
* Be aware that Copilot Suggestions will not work if you use this adapter.
*
* ## Example
*
* ```ts
* import { CopilotRuntime, EmptyAdapter } from "@copilotkit/runtime";
*
* const copilotKit = new CopilotRuntime();
*
* return new EmptyAdapter();
* ```
*/
declare class EmptyAdapter implements CopilotServiceAdapter {
process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
get name(): string;
}
declare const ExperimentalEmptyAdapter: typeof EmptyAdapter;
interface BaseEndpointDefinition<TActionType extends EndpointType> {
type?: TActionType;
}
interface CopilotKitEndpoint extends BaseEndpointDefinition<EndpointType.CopilotKit> {
url: string;
onBeforeRequest?: ({ ctx }: {
ctx: GraphQLContext;
}) => {
headers?: Record<string, string> | undefined;
};
}
interface LangGraphPlatformAgent {
name: string;
description: string;
assistantId?: string;
}
interface LangGraphPlatformEndpoint extends BaseEndpointDefinition<EndpointType.LangGraphPlatform> {
deploymentUrl: string;
langsmithApiKey?: string | null;
agents: LangGraphPlatformAgent[];
}
type EndpointDefinition = CopilotKitEndpoint | LangGraphPlatformEndpoint;
declare enum EndpointType {
CopilotKit = "copilotKit",
LangGraphPlatform = "langgraph-platform"
}
interface LLMRequestData {
threadId?: string;
runId?: string;
model?: string;
messages: any[];
actions?: any[];
forwardedParameters?: any;
timestamp: number;
provider?: string;
[key: string]: any;
}
interface LLMResponseData {
threadId: string;
runId?: string;
model?: string;
output: any;
latency: number;
timestamp: number;
provider?: string;
isProgressiveChunk?: boolean;
isFinalResponse?: boolean;
[key: string]: any;
}
interface LLMErrorData {
threadId?: string;
runId?: string;
model?: string;
error: Error | string;
timestamp: number;
provider?: string;
[key: string]: any;
}
interface CopilotObservabilityHooks {
handleRequest: (data: LLMRequestData) => void | Promise<void>;
handleResponse: (data: LLMResponseData) => void | Promise<void>;
handleError: (data: LLMErrorData) => void | Promise<void>;
}
/**
* Configuration for CopilotKit logging functionality.
*
* @remarks
* Custom logging handlers require a valid CopilotKit public API key.
* Sign up at https://docs.copilotkit.ai/quickstart#get-a-copilot-cloud-public-api-key to get your key.
*/
interface CopilotObservabilityConfig {
/**
* Enable or disable logging functionality.
*
* @default false
*/
enabled: boolean;
/**
* Controls whether logs are streamed progressively or buffered.
* - When true: Each token and update is logged as it's generated (real-time)
* - When false: Complete responses are logged after completion (batched)
*
* @default true
*/
progressive: boolean;
/**
* Custom observability hooks for request, response, and error events.
*
* @remarks
* Using custom observability hooks requires a valid CopilotKit public API key.
*/
hooks: CopilotObservabilityHooks;
}
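/**
 * A sketch of an observability configuration matching the interfaces above;
 * the console sink stands in for a real telemetry backend:
 *
 * ```ts
 * import { CopilotRuntime } from "@copilotkit/runtime";
 *
 * const runtime = new CopilotRuntime({
 *   observability_c: {
 *     enabled: true,
 *     progressive: false, // buffer complete responses
 *     hooks: {
 *       handleRequest: (data) => console.log("LLM request", data.threadId),
 *       handleResponse: (data) => console.log("LLM response after", data.latency, "ms"),
 *       handleError: (data) => console.error("LLM error", data.error),
 *     },
 *   },
 * });
 * ```
 */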
/**
* Represents a tool provided by an MCP server.
*/
interface MCPTool {
description?: string;
/** Schema defining parameters, mirroring the MCP structure. */
schema?: {
parameters?: {
properties?: Record<string, any>;
required?: string[];
jsonSchema?: Record<string, any>;
};
};
/** The function to call to execute the tool on the MCP server. */
execute(params: any): Promise<any>;
}
/**
* Defines the contract for *any* MCP client implementation the user might provide.
*/
interface MCPClient {
/** A method that returns a map of tool names to MCPTool objects available from the connected MCP server. */
tools(): Promise<Record<string, MCPTool>>;
/** An optional method for cleanup if the underlying client requires explicit disconnection. */
close?(): Promise<void>;
}
/**
* Configuration for connecting to an MCP endpoint.
*/
interface MCPEndpointConfig {
endpoint: string;
apiKey?: string;
}
/**
* Extracts CopilotKit-compatible parameters from an MCP tool schema.
* @param toolOrSchema The schema object from an MCPTool or the full MCPTool object.
* @returns An array of Parameter objects.
*/
declare function extractParametersFromSchema(toolOrSchema?: MCPTool | MCPTool["schema"]): Parameter[];
/**
* Converts a map of MCPTools into an array of CopilotKit Actions.
* @param mcpTools A record mapping tool names to MCPTool objects.
* @param mcpEndpoint The endpoint URL from which these tools were fetched.
* @returns An array of Action<any> objects.
*/
declare function convertMCPToolsToActions(mcpTools: Record<string, MCPTool>, mcpEndpoint: string): Action<any>[];
/**
* Generate better instructions for using MCP tools
* This is used to enhance the system prompt with tool documentation
*/
declare function generateMcpToolInstructions(toolsMap: Record<string, MCPTool>): string;
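/**
 * A sketch of converting a hand-written MCP tool map into CopilotKit actions
 * plus prompt instructions; the `add` tool and endpoint URL are hypothetical:
 *
 * ```ts
 * import {
 *   MCPTool,
 *   convertMCPToolsToActions,
 *   generateMcpToolInstructions,
 * } from "@copilotkit/runtime";
 *
 * const tools: Record<string, MCPTool> = {
 *   add: {
 *     description: "Add two numbers",
 *     schema: {
 *       parameters: {
 *         properties: { a: { type: "number" }, b: { type: "number" } },
 *         required: ["a", "b"],
 *       },
 *     },
 *     execute: async ({ a, b }) => a + b,
 *   },
 * };
 *
 * const actions = convertMCPToolsToActions(tools, "https://example.com/mcp");
 * const instructions = generateMcpToolInstructions(tools);
 * ```
 */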
/**
* <Callout type="info">
* This is the reference for the `CopilotRuntime` class. For more information and example code snippets, please see [Concept: Copilot Runtime](/concepts/copilot-runtime).
* </Callout>
*
* ## Usage
*
* ```tsx
* import { CopilotRuntime } from "@copilotkit/runtime";
*
* const copilotKit = new CopilotRuntime();
* ```
*/
type CreateMCPClientFunction = (config: MCPEndpointConfig) => Promise<MCPClient>;
type ActionsConfiguration<T extends Parameter[] | [] = []> = Action<T>[] | ((ctx: {
properties: any;
url?: string;
}) => Action<T>[]);
interface OnBeforeRequestOptions {
threadId?: string;
runId?: string;
inputMessages: Message[];
properties: any;
url?: string;
}
type OnBeforeRequestHandler = (options: OnBeforeRequestOptions) => void | Promise<void>;
interface OnAfterRequestOptions {
threadId: string;
runId?: string;
inputMessages: Message[];
outputMessages: Message[];
properties: any;
url?: string;
}
type OnAfterRequestHandler = (options: OnAfterRequestOptions) => void | Promise<void>;
interface OnStopGenerationOptions {
threadId: string;
runId?: string;
url?: string;
agentName?: string;
lastMessage: MessageInput;
}
type OnStopGenerationHandler = (options: OnStopGenerationOptions) => void | Promise<void>;
interface Middleware {
/**
* A function that is called before the request is processed.
*
* @deprecated This middleware hook is deprecated and will be removed in a future version.
* Use updated middleware integration methods in CopilotRuntimeVNext instead.
*/
onBeforeRequest?: OnBeforeRequestHandler;
/**
* A function that is called after the request is processed.
*
* @deprecated This middleware hook is deprecated and will be removed in a future version.
* Use updated middleware integration methods in CopilotRuntimeVNext instead.
*/
onAfterRequest?: OnAfterRequestHandler;
}
interface CopilotRuntimeConstructorParams_BASE<T extends Parameter[] | [] = []> {
/**
* Middleware to be used by the runtime.
*
* ```ts
* onBeforeRequest: (options: {
* threadId?: string;
* runId?: string;
* inputMessages: Message[];
* properties: any;
* }) => void | Promise<void>;
* ```
*
* ```ts
* onAfterRequest: (options: {
* threadId?: string;
* runId?: string;
* inputMessages: Message[];
* outputMessages: Message[];
* properties: any;
* }) => void | Promise<void>;
* ```
*
* @deprecated This middleware hook is deprecated and will be removed in a future version.
* Use updated middleware integration methods in CopilotRuntimeVNext instead.
*/
middleware?: Middleware;
actions?: ActionsConfiguration<T>;
remoteActions?: CopilotKitEndpoint[];
remoteEndpoints?: EndpointDefinition[];
langserve?: RemoteChainParameters[];
agents?: Record<string, AbstractAgent>;
delegateAgentProcessingToServiceAdapter?: boolean;
/**
* Configuration for LLM request/response logging.
* Requires publicApiKey from CopilotKit component to be set:
*
* ```tsx
* <CopilotKit publicApiKey="ck_pub_..." />
* ```
*
* Example observability config:
* ```ts
* observability_c: {
*   enabled: true, // Enable or disable logging
*   progressive: true, // Set to false for buffered logging
*   hooks: {
*     handleRequest: (data) => langfuse.trace({ name: "LLM Request", input: data }),
*     handleResponse: (data) => langfuse.trace({ name: "LLM Response", output: data }),
*     handleError: (errorData) => langfuse.trace({ name: "LLM Error", metadata: errorData }),
*   },
* }
* ```
*/
observability_c?: CopilotObservabilityConfig;
/**
* Configuration for connecting to Model Context Protocol (MCP) servers.
* Allows fetching and using tools defined on external MCP-compliant servers.
* Requires providing the `createMCPClient` function during instantiation.
* @experimental
*/
mcpServers?: MCPEndpointConfig[];
/**
* A function that creates an MCP client instance for a given endpoint configuration.
* This function is responsible for using the appropriate MCP client library
* (e.g., `@copilotkit/runtime`, `ai`) to establish a connection.
* Required if `mcpServers` is provided.
*
* ```typescript
* import { experimental_createMCPClient } from "ai"; // Import from the Vercel AI SDK
* // ...
* const runtime = new CopilotRuntime({
* mcpServers: [{ endpoint: "..." }],
* async createMCPClient(config) {
* return await experimental_createMCPClient({
* transport: {
* type: "sse",
* url: config.endpoint,
* headers: config.apiKey
* ? { Authorization: `Bearer ${config.apiKey}` }
* : undefined,
* },
* });
* }
* });
* ```
*/
createMCPClient?: CreateMCPClientFunction;
/**
* Optional error handler for comprehensive debugging and observability.
*
* **Requires publicApiKey**: Error handling only works when requests include a valid publicApiKey.
* This is a premium Copilot Cloud feature.
*
* @param errorEvent - Structured error event with rich debugging context
*
* @example
* ```typescript
* const runtime = new CopilotRuntime({
* onError: (errorEvent) => {
* debugDashboard.capture(errorEvent);
* }
* });
* ```
*/
onError?: CopilotErrorHandler;
onStopGeneration?: OnStopGenerationHandler;
}
interface CopilotRuntimeConstructorParams<T extends Parameter[] | [] = []> extends Omit<CopilotRuntimeConstructorParams_BASE<T>, "agents">, Omit<CopilotRuntimeOptions, "agents" | "transcriptionService"> {
/**
* TODO: un-omit `transcriptionService` above once it's supported
*
* This `agents` type satisfies:
* – the optional constraint in `CopilotRuntimeConstructorParams_BASE`
* – the `MaybePromise<NonEmptyRecord<T>>` constraint in `CopilotRuntimeOptionsVNext`
* – the `Record<string, AbstractAgent>` constraint in both
*/
agents?: MaybePromise<NonEmptyRecord<Record<string, AbstractAgent>>>;
}
/**
* Central runtime object passed to all request handlers.
*/
declare class CopilotRuntime<const T extends Parameter[] | [] = []> {
params?: CopilotRuntimeConstructorParams<T>;
private observability?;
private mcpToolsCache;
private runtimeArgs;
private _instance;
constructor(params?: CopilotRuntimeConstructorParams<T> & PartialBy<CopilotRuntimeOptions, "agents">);
get instance(): CopilotRuntime$1;
private assignEndpointsToAgents;
handleServiceAdapter(serviceAdapter: CopilotServiceAdapter): void;
private getToolsFromActions;
private assignToolsToAgents;
private createOnBeforeRequestHandler;
private createOnAfterRequestHandler;
/**
* Log LLM request if observability is enabled
*/
private logObservabilityBeforeRequest;
/**
* Log final LLM response after request completes
*/
private logObservabilityAfterRequest;
private getToolsFromMCP;
}
declare function copilotKitEndpoint(config: Omit<CopilotKitEndpoint, "type">): CopilotKitEndpoint;
declare function langGraphPlatformEndpoint(config: Omit<LangGraphPlatformEndpoint, "type">): LangGraphPlatformEndpoint;
declare function resolveEndpointType(endpoint: EndpointDefinition): EndpointType;
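/**
 * A sketch of the two endpoint helpers; the URLs, key, and agent names are
 * placeholders:
 *
 * ```ts
 * import { copilotKitEndpoint, langGraphPlatformEndpoint } from "@copilotkit/runtime";
 *
 * const selfHosted = copilotKitEndpoint({
 *   url: "http://localhost:8000/copilotkit",
 * });
 *
 * const platform = langGraphPlatformEndpoint({
 *   deploymentUrl: "https://my-deployment.langgraph.app",
 *   langsmithApiKey: process.env.LANGSMITH_API_KEY ?? null,
 *   agents: [{ name: "researcher", description: "Research agent" }],
 * });
 * ```
 */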
interface CopilotCloudOptions {
baseUrl?: string;
publicApiKey?: string;
}
type LogLevel = "debug" | "info" | "warn" | "error";
type CopilotRuntimeLogger = ReturnType<typeof createLogger>;
declare function createLogger(options?: {
level?: LogLevel;
component?: string;
}): pino.Logger<never>;
declare const logger: pino.Logger<never>;
declare const addCustomHeaderPlugin: {
onResponse({ response }: {
response: any;
}): void;
};
type AnyPrimitive = string | boolean | number | null;
type CopilotRequestContextProperties = Record<string, AnyPrimitive | Record<string, AnyPrimitive>>;
type GraphQLContext = YogaInitialContext & {
_copilotkit: CreateCopilotRuntimeServerOptions;
properties: CopilotRequestContextProperties;
logger: typeof logger;
};
interface CreateCopilotRuntimeServerOptions {
runtime: CopilotRuntime<any>;
serviceAdapter?: CopilotServiceAdapter;
endpoint: string;
baseUrl?: string;
cloud?: CopilotCloudOptions;
properties?: CopilotRequestContextProperties;
logLevel?: LogLevel;
}
declare function createContext(initialContext: YogaInitialContext, copilotKitContext: CreateCopilotRuntimeServerOptions, contextLogger: typeof logger, properties?: CopilotRequestContextProperties): Promise<Partial<GraphQLContext>>;
declare function buildSchema(options?: {
emitSchemaFile?: string;
}): graphql.GraphQLSchema;
type CommonConfig = {
logging: typeof logger;
schema: ReturnType<typeof buildSchema>;
plugins: Parameters<typeof createYoga>[0]["plugins"];
context: (ctx: YogaInitialContext) => Promise<Partial<GraphQLContext>>;
maskedErrors: {
maskError: (error: any, message: string, isDev?: boolean) => any;
};
};
declare function getCommonConfig(options: CreateCopilotRuntimeServerOptions): CommonConfig;
declare function copilotRuntimeNextJSAppRouterEndpoint(options: CreateCopilotRuntimeServerOptions): {
handleRequest: (req: Request) => Response | Promise<Response>;
};
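/**
 * A sketch of a Next.js App Router route handler (for example
 * `app/api/copilotkit/route.ts`; the path is an assumption):
 *
 * ```ts
 * import {
 *   CopilotRuntime,
 *   OpenAIAdapter,
 *   copilotRuntimeNextJSAppRouterEndpoint,
 * } from "@copilotkit/runtime";
 *
 * const runtime = new CopilotRuntime();
 * const serviceAdapter = new OpenAIAdapter();
 *
 * export const POST = async (req: Request) => {
 *   const { handleRequest } = copilotRuntimeNextJSAppRouterEndpoint({
 *     runtime,
 *     serviceAdapter,
 *     endpoint: "/api/copilotkit",
 *   });
 *   return handleRequest(req);
 * };
 * ```
 */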
declare const config: {
api: {
bodyParser: boolean;
};
};
declare function copilotRuntimeNextJSPagesRouterEndpoint(options: CreateCopilotRuntimeServerOptions): (req: http.IncomingMessage & {
body?: unknown;
complete?: boolean;
}, res: http.ServerResponse) => Promise<void>;
type IncomingWithBody = IncomingMessage & {
body?: unknown;
complete?: boolean;
};
declare function readableStreamToNodeStream(webStream: ReadableStream): Readable;
declare function copilotRuntimeNodeHttpEndpoint(options: CreateCopilotRuntimeServerOptions): (req: IncomingWithBody, res: ServerResponse) => Promise<void>;
declare function copilotRuntimeNodeExpressEndpoint(options: CreateCopilotRuntimeServerOptions): (req: http.IncomingMessage & {
body?: unknown;
complete?: boolean;
}, res: http.ServerResponse) => Promise<void>;
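/**
 * A sketch of mounting the runtime in an Express app; the path and port are
 * assumptions:
 *
 * ```ts
 * import express from "express";
 * import {
 *   CopilotRuntime,
 *   OpenAIAdapter,
 *   copilotRuntimeNodeExpressEndpoint,
 * } from "@copilotkit/runtime";
 *
 * const app = express();
 *
 * app.use("/copilotkit", (req, res, next) => {
 *   const handler = copilotRuntimeNodeExpressEndpoint({
 *     runtime: new CopilotRuntime(),
 *     serviceAdapter: new OpenAIAdapter(),
 *     endpoint: "/copilotkit",
 *   });
 *   handler(req, res).catch(next);
 * });
 *
 * app.listen(4000);
 * ```
 */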
declare function copilotRuntimeNestEndpoint(options: CreateCopilotRuntimeServerOptions): (req: http.IncomingMessage & {
body?: unknown;
complete?: boolean;
}, res: http.ServerResponse) => Promise<void>;
/**
* TelemetryAgentRunner - A wrapper around AgentRunner that adds telemetry
* for agent execution streams.
*
* This captures the following telemetry events:
* - oss.runtime.agent_execution_stream_started - when an agent execution starts
* - oss.runtime.agent_execution_stream_ended - when an agent execution completes
* - oss.runtime.agent_execution_stream_errored - when an agent execution fails
*/
/**
* Configuration options for TelemetryAgentRunner
*/
interface TelemetryAgentRunnerConfig {
/**
* The underlying runner to delegate to
* If not provided, defaults to InMemoryAgentRunner
*/
runner?: AgentRunner;
/**
* Optional LangSmith API key (will be hashed for telemetry)
*/
langsmithApiKey?: string;
}
/**
* An AgentRunner wrapper that adds telemetry tracking for agent executions.
*
* Usage:
* ```ts
* const runtime = new CopilotRuntime({
* runner: new TelemetryAgentRunner(),
* // or with custom runner:
* runner: new TelemetryAgentRunner({ runner: customRunner }),
* });
* ```
*/
declare class TelemetryAgentRunner implements AgentRunner {
private readonly _runner;
private readonly hashedLgcKey;
constructor(config?: TelemetryAgentRunnerConfig);
/**
* Runs an agent with telemetry tracking.
* Wraps the underlying runner's Observable stream with telemetry events.
*/
run(...args: Parameters<AgentRunner["run"]>): ReturnType<AgentRunner["run"]>;
/**
* Delegates to the underlying runner's connect method
*/
connect(...args: Parameters<AgentRunner["connect"]>): ReturnType<AgentRunner["connect"]>;
/**
* Delegates to the underlying runner's isRunning method
*/
isRunning(...args: Parameters<AgentRunner["isRunning"]>): ReturnType<AgentRunner["isRunning"]>;
/**
* Delegates to the underlying runner's stop method
*/
stop(...args: Parameters<AgentRunner["stop"]>): ReturnType<AgentRunner["stop"]>;
}
/**
* @deprecated LangGraphAgent import from `@copilotkit/runtime` is deprecated. Please import it from `@copilotkit/runtime/langgraph` instead
*/
declare class LangGraphAgent {
constructor();
}
/**
* @deprecated LangGraphHttpAgent import from `@copilotkit/runtime` is deprecated. Please import it from `@copilotkit/runtime/langgraph` instead
*/
declare class LangGraphHttpAgent {
constructor();
}
/**
* @deprecated TextMessageEvents import from `@copilotkit/runtime` is deprecated. Please import it from `@copilotkit/runtime/langgraph` instead
*/
type TextMessageEvents = any;
/**
* @deprecated ToolCallEvents import from `@copilotkit/runtime` is deprecated. Please import it from `@copilotkit/runtime/langgraph` instead
*/
type ToolCallEvents = any;
/**
* @deprecated CustomEventNames import from `@copilotkit/runtime` is deprecated. Please import it from `@copilotkit/runtime/langgraph` instead
*/
type CustomEventNames = any;
/**
* @deprecated PredictStateTool import from `@copilotkit/runtime` is deprecated. Please import it from `@copilotkit/runtime/langgraph` instead
*/
type PredictStateTool = any;
declare class GuardrailsValidationFailureResponse extends FailedResponseStatus {
reason: FailedResponseStatusReason;
details: {
guardrailsReason: string;
};
constructor({ guardrailsReason }: {
guardrailsReason: any;
});
}
declare class MessageStreamInterruptedResponse extends FailedResponseStatus {
reason: FailedResponseStatusReason;
details: {
messageId: string;
description: string;
};
constructor({ messageId }: {
messageId: string;
});
}
declare class UnknownErrorResponse extends FailedResponseStatus {
reason: FailedResponseStatusReason;
details: {
description?: string;
originalError?: {
code?: string;
statusCode?: number;
severity?: string;
visibility?: string;
originalErrorType?: string;
extensions?: any;
};
};
constructor({ description, originalError, }: {
description?: string;
originalError?: {
code?: string;
statusCode?: number;
severity?: string;
visibility?: string;
originalErrorType?: string;
extensions?: any;
};
});
}
export { AnthropicAdapter, AnthropicAdapterParams, AnthropicPromptCachingConfig, BedrockAdapter, BedrockAdapterParams, CommonConfig, CopilotRequestContextProperties, CopilotRuntime, CopilotRuntimeChatCompletionRequest, CopilotRuntimeChatCompletionResponse, CopilotRuntimeConstructorParams_BASE, CopilotRuntimeLogger, CopilotServiceAdapter, CreateCopilotRuntimeServerOptions, CustomEventNames, EmptyAdapter, ExperimentalEmptyAdapter, ExperimentalOllamaAdapter, GoogleGenerativeAIAdapter, GraphQLContext, GroqAdapter, GroqAdapterParams, GuardrailsValidationFailureResponse, LangChainAdapter, LangGraphAgent, LangGraphHttpAgent, LogLevel, MCPClient, MCPEndpointConfig, MCPTool, MessageStreamInterruptedResponse, OpenAIAdapter, OpenAIAdapterParams, OpenAIAssistantAdapter, OpenAIAssistantAdapterParams, PredictStateTool, RemoteChain, RemoteChainParameters, TelemetryAgentRunner, TelemetryAgentRunnerConfig, TextMessageEvents, ToolCallEvents, UnifyAdapter, UnifyAdapterParams, UnknownErrorResponse, addCustomHeaderPlugin, buildSchema, config, convertMCPToolsToActions, convertServiceAdapterError, copilotKitEndpoint, copilotRuntimeNestEndpoint, copilotRuntimeNextJSAppRouterEndpoint, copilotRuntimeNextJSPagesRouterEndpoint, copilotRuntimeNodeExpressEndpoint, copilotRuntimeNodeHttpEndpoint, createContext, createLogger, extractParametersFromSchema, generateMcpToolInstructions, getCommonConfig, langGraphPlatformEndpoint, readableStreamToNodeStream, resolveEndpointType };