// @langchain/core: core LangChain.js abstractions and schemas
import { BaseMessage, BaseMessageChunk, type MessageType, BaseMessageFields } from "./base.js";
import { InvalidToolCall, ToolCall, ToolCallChunk } from "./tool.js";
export type AIMessageFields = BaseMessageFields & {
  tool_calls?: ToolCall[];
  invalid_tool_calls?: InvalidToolCall[];
  usage_metadata?: UsageMetadata;
};
/**
 * Breakdown of input token counts.
 *
 * Does *not* need to sum to full input token count. Does *not* need to have all keys.
 */
export type InputTokenDetails = {
  /**
   * Audio input tokens.
   */
  audio?: number;
  /**
   * Input tokens that were cached, and for which there was a cache hit.
   *
   * Since there was a cache hit, the tokens were read from the cache.
   * More precisely, the model state given these tokens was read from the cache.
   */
  cache_read?: number;
  /**
   * Input tokens that were cached, but for which there was a cache miss.
   *
   * Since there was a cache miss, the cache was created from these tokens.
   */
  cache_creation?: number;
};
/**
* Breakdown of output token counts.
*
* Does *not* need to sum to full output token count. Does *not* need to have all keys.
*/
export type OutputTokenDetails = {
  /**
   * Audio output tokens.
   */
  audio?: number;
  /**
   * Reasoning output tokens.
   *
   * Tokens generated by the model in a chain-of-thought process (e.g. by
   * OpenAI's o1 models) that are not returned as part of the model output.
   */
  reasoning?: number;
};
/**
* Usage metadata for a message, such as token counts.
*/
export type UsageMetadata = {
  /**
   * Count of input (or prompt) tokens. Sum of all input token types.
   */
  input_tokens: number;
  /**
   * Count of output (or completion) tokens. Sum of all output token types.
   */
  output_tokens: number;
  /**
   * Total token count. Sum of input_tokens and output_tokens.
   */
  total_tokens: number;
  /**
   * Breakdown of input token counts.
   *
   * Does *not* need to sum to full input token count. Does *not* need to have all keys.
   */
  input_token_details?: InputTokenDetails;
  /**
   * Breakdown of output token counts.
   *
   * Does *not* need to sum to full output token count. Does *not* need to have all keys.
   */
  output_token_details?: OutputTokenDetails;
};
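/**
 * Example (illustrative sketch): a hand-built `UsageMetadata` value. In practice
 * these numbers come from the chat model provider; the figures below are made up.
 * Note that the detail breakdowns need not sum to the top-level counts.
 *
 *   const usage: UsageMetadata = {
 *     input_tokens: 350,
 *     output_tokens: 240,
 *     total_tokens: 590,
 *     input_token_details: { audio: 10, cache_creation: 200, cache_read: 100 },
 *     output_token_details: { audio: 10, reasoning: 200 },
 *   };
 */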
/**
* Represents an AI message in a conversation.
*/
export declare class AIMessage extends BaseMessage {
  tool_calls?: ToolCall[];
  invalid_tool_calls?: InvalidToolCall[];
  /**
   * If provided, token usage information associated with the message.
   */
  usage_metadata?: UsageMetadata;
  get lc_aliases(): Record<string, string>;
  constructor(
    fields: string | AIMessageFields,
    /** @deprecated */
    kwargs?: Record<string, unknown>
  );
  static lc_name(): string;
  _getType(): MessageType;
  get _printableFields(): Record<string, unknown>;
}
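/**
 * Example (illustrative sketch): constructing an `AIMessage`. A plain string sets
 * the message content; the fields object form also carries tool calls. The tool
 * name and arguments below are hypothetical.
 *
 *   const simple = new AIMessage("Hello!");
 *   const withToolCall = new AIMessage({
 *     content: "",
 *     tool_calls: [{ name: "get_weather", args: { city: "Paris" }, id: "call_1" }],
 *   });
 */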
export declare function isAIMessage(x: BaseMessage): x is AIMessage;
export declare function isAIMessageChunk(x: BaseMessageChunk): x is AIMessageChunk;
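/**
 * Example (illustrative sketch): `isAIMessage` is a TypeScript type guard, so it
 * narrows a `BaseMessage` to `AIMessage` inside the conditional.
 *
 *   function logUsage(msg: BaseMessage): void {
 *     if (isAIMessage(msg)) {
 *       console.log(msg.usage_metadata?.total_tokens);
 *     }
 *   }
 */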
export type AIMessageChunkFields = AIMessageFields & {
  tool_call_chunks?: ToolCallChunk[];
};
/**
* Represents a chunk of an AI message, which can be concatenated with
* other AI message chunks.
*/
export declare class AIMessageChunk extends BaseMessageChunk {
  tool_calls?: ToolCall[];
  invalid_tool_calls?: InvalidToolCall[];
  tool_call_chunks?: ToolCallChunk[];
  /**
   * If provided, token usage information associated with the message.
   */
  usage_metadata?: UsageMetadata;
  constructor(fields: string | AIMessageChunkFields);
  get lc_aliases(): Record<string, string>;
  static lc_name(): string;
  _getType(): MessageType;
  get _printableFields(): Record<string, unknown>;
  concat(chunk: AIMessageChunk): AIMessageChunk;
}
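/**
 * Example (illustrative sketch): merging streamed chunks with `concat`. Assumes
 * `model` is a LangChain.js chat model whose `stream()` yields `AIMessageChunk`s.
 * `concat` merges content, tool_call_chunks, and usage_metadata as it goes.
 *
 *   let full: AIMessageChunk | undefined;
 *   for await (const chunk of await model.stream("Tell me a joke")) {
 *     full = full ? full.concat(chunk) : chunk;
 *   }
 */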