@flatfile/improv
A powerful TypeScript library for building AI agents with multi-threaded conversations, tool execution, and event handling capabilities
1,438 lines (1,428 loc) • 1.59 MB
JavaScript
"use strict";
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
var __esm = (fn, res) => function __init() {
return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
};
var __commonJS = (cb, mod) => function __require() {
return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
};
var __export = (target, all) => {
for (var name in all)
__defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
if (from && typeof from === "object" || typeof from === "function") {
for (let key of __getOwnPropNames(from))
if (!__hasOwnProp.call(to, key) && key !== except)
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
}
return to;
};
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
// If the importer is in node compatibility mode or this is not an ESM
// file that has been converted to a CommonJS file using a Babel-
// compatible transform (i.e. "__esModule" has not been set), then set
// "default" to the CommonJS "module.exports" for node compatibility.
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
mod
));
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// src/event-source.ts
var import_eventemitter2, EventSource;
var init_event_source = __esm({
"src/event-source.ts"() {
"use strict";
import_eventemitter2 = require("eventemitter2");
EventSource = class extends import_eventemitter2.EventEmitter2 {
static {
__name(this, "EventSource");
}
constructor() {
super({
wildcard: true
});
}
/**
* Forward all events from a source EventEmitter2 instance
* Preserves the original event name and merges any additional context
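*
* @example
* // Illustrative usage (not from the original source); `child` may be any
* // EventEmitter2 instance, and the context object is merged into every
* // forwarded payload:
* // parent.forwardEvents(child, { threadId: "abc123" });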
*/
forwardEvents(source, context2 = {}) {
const self = this;
source.on("**", function(data) {
const eventName = this.event;
self.emit(eventName, {
...data,
...context2
});
});
}
debug(message, data) {
if (process.env.NODE_ENV === "development") {
console.log(message);
if (data) {
console.dir(data, {
depth: null
});
}
}
}
error(...args) {
if (process.env.NODE_ENV === "development") {
console.error(...args);
}
}
};
}
});
// src/message.ts
var Message;
var init_message = __esm({
"src/message.ts"() {
"use strict";
Message = class {
static {
__name(this, "Message");
}
_content;
_role;
_reasoning;
_toolCalls;
_toolResults;
_attachments;
_cache;
constructor({ content, role = "user", toolCalls = [], toolResults = [], attachments = [], reasoning = [], cache: cache2 = false }) {
this._content = content;
this._role = role;
this._toolCalls = toolCalls;
this._reasoning = reasoning;
this._toolResults = toolResults;
this._attachments = attachments;
this._cache = cache2;
}
get content() {
return this._content;
}
get role() {
return this._role;
}
get toolCalls() {
return this._toolCalls;
}
get toolResults() {
return this._toolResults;
}
get attachments() {
return this._attachments;
}
get cache() {
return this._cache;
}
get reasoning() {
return this._reasoning;
}
isToolResponse() {
return this._toolResults.length > 0;
}
isAssistantMessage() {
return this._role === "assistant";
}
isUserMessage() {
return this._role === "user";
}
isSystemMessage() {
return this._role === "system";
}
isToolCall() {
return this._toolCalls.length > 0;
}
/**
* Get attachments of a specific type
*/
getAttachmentsByType(type) {
return this._attachments.filter((a) => a.type === type);
}
/**
* Add an attachment to the message
*/
addAttachment(attachment) {
this._attachments.push(attachment);
}
/**
* Remove an attachment from the message
*/
removeAttachment(index) {
if (index >= 0 && index < this._attachments.length) {
this._attachments.splice(index, 1);
}
}
/**
* Attempts to parse and return JSON content from the message
* Supports multiple formats:
* 1. Direct JSON string
* 2. JSON within markdown code blocks
* 3. JSON object embedded in surrounding text (matched from the first "{" to the last "}")
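*
* @example
* // Illustrative usage (not from the original source):
* // const m = new Message({ role: "assistant", content: '```json\n{"ok": true}\n```' });
* // m.json; // => { ok: true }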
*/
get json() {
if (!this._content) {
return null;
}
// 1. Direct JSON string
try {
return JSON.parse(this._content);
} catch {
}
// 2. JSON within a markdown code block
const codeBlockMatch = this._content.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
if (codeBlockMatch?.[1]) {
try {
return JSON.parse(codeBlockMatch[1]);
} catch {
}
}
// 3. JSON object embedded in surrounding text (first "{" to last "}")
const jsonMatch = this._content.match(/\{[\s\S]*\}/);
if (jsonMatch) {
try {
return JSON.parse(jsonMatch[0]);
} catch {
}
}
return null;
}
};
}
});
// src/tracing/tracer.ts
var NullTracer, defaultTracer;
var init_tracer = __esm({
"src/tracing/tracer.ts"() {
"use strict";
NullTracer = class {
static {
__name(this, "NullTracer");
}
traceable(fn, _metadata) {
return fn;
}
};
defaultTracer = new NullTracer();
}
});
// src/tracing/registry.ts
var registry_exports = {};
__export(registry_exports, {
TracerRegistry: () => TracerRegistry,
getTracer: () => getTracer,
isTracingEnabled: () => isTracingEnabled
});
function getTracer() {
return TracerRegistry.getInstance().getActiveTracer();
}
function isTracingEnabled() {
return TracerRegistry.getInstance().isTracingEnabled();
}
var TracerRegistry;
var init_registry = __esm({
"src/tracing/registry.ts"() {
"use strict";
init_tracer();
TracerRegistry = class _TracerRegistry {
static {
__name(this, "TracerRegistry");
}
static instance;
tracers = /* @__PURE__ */ new Map();
activeTracer = "null";
constructor() {
this.register("null", defaultTracer);
}
/**
* Get the singleton instance of the registry
*/
static getInstance() {
if (!_TracerRegistry.instance) {
_TracerRegistry.instance = new _TracerRegistry();
}
return _TracerRegistry.instance;
}
/**
* Register a tracer with the registry
* @param name Name of the tracer
* @param tracer The tracer implementation
*/
register(name, tracer) {
this.tracers.set(name, tracer);
}
/**
* Set the active tracer by name
* @param name Name of the tracer to activate
* @throws Error if the tracer doesn't exist
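*
* @example
* // Illustrative usage (not from the original source); `myTracer` is a
* // hypothetical object implementing `traceable(fn, metadata)`:
* // const registry = TracerRegistry.getInstance();
* // registry.register("custom", myTracer);
* // registry.setActiveTracer("custom");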
*/
setActiveTracer(name) {
if (!this.tracers.has(name)) {
throw new Error(`Tracer '${name}' is not registered`);
}
this.activeTracer = name;
}
/**
* Get the currently active tracer
*/
getActiveTracer() {
return this.tracers.get(this.activeTracer) || defaultTracer;
}
/**
* Check if tracing is enabled (i.e., active tracer is not the null tracer)
*/
isTracingEnabled() {
return this.activeTracer !== "null";
}
};
__name(getTracer, "getTracer");
__name(isTracingEnabled, "isTracingEnabled");
}
});
// src/tracing/index.ts
var tracing_exports = {};
__export(tracing_exports, {
NullTracer: () => NullTracer,
TracerRegistry: () => TracerRegistry,
defaultTracer: () => defaultTracer,
getTracer: () => getTracer,
isTracingEnabled: () => isTracingEnabled,
traceable: () => traceable
});
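/**
* Wrap a function with the active tracer, returning it unchanged when
* tracing is disabled.
*
* @example
* // Illustrative usage (not from the original source); `lookupUser` and the
* // metadata values are hypothetical:
* // const traced = traceable(lookupUser, { name: "lookupUser", run_type: "tool" });
* // await traced("user-123");
*/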
function traceable(fn, metadata) {
if (!isTracingEnabled()) {
return fn;
}
return getTracer().traceable(fn, metadata);
}
var init_tracing = __esm({
"src/tracing/index.ts"() {
"use strict";
init_registry();
init_tracer();
__name(traceable, "traceable");
}
});
// src/model.drivers/base.ts
var BaseModelDriver;
var init_base = __esm({
"src/model.drivers/base.ts"() {
"use strict";
init_event_source();
BaseModelDriver = class extends EventSource {
static {
__name(this, "BaseModelDriver");
}
/**
* Check if the abort signal has been triggered and throw an error if so
*/
checkAbortSignal(abortSignal) {
if (abortSignal?.aborted) {
throw new Error("Thread execution aborted");
}
}
/**
* Execute an async operation with abort signal checking before and after
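*
* @example
* // Illustrative usage (not from the original source); `fetchCompletion` is
* // a hypothetical async operation:
* // const result = await this.withAbortCheck(abortSignal, () => fetchCompletion());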
*/
async withAbortCheck(abortSignal, operation) {
this.checkAbortSignal(abortSignal);
const result = await operation();
this.checkAbortSignal(abortSignal);
return result;
}
/**
* Wrap an async generator with abort signal checking on each iteration
*/
async *wrapStreamWithAbort(abortSignal, stream) {
try {
for await (const chunk of stream) {
this.checkAbortSignal(abortSignal);
yield chunk;
}
} finally {
this.checkAbortSignal(abortSignal);
}
}
};
}
});
// src/model.drivers/openai.ts
var openai_exports = {};
__export(openai_exports, {
OpenAIThreadDriver: () => OpenAIThreadDriver
});
var import_nanoid2, import_openai, import_zod3, DEFAULT_MAX_PROMPT_CHARACTERS, JSON_SCHEMA_URL, OpenAIThreadDriver;
var init_openai = __esm({
"src/model.drivers/openai.ts"() {
"use strict";
import_nanoid2 = require("nanoid");
import_openai = __toESM(require("openai"), 1);
import_zod3 = require("zod");
init_message();
init_tracing();
init_base();
DEFAULT_MAX_PROMPT_CHARACTERS = 1e6;
JSON_SCHEMA_URL = "http://json-schema.org/draft-07/schema#";
OpenAIThreadDriver = class extends BaseModelDriver {
static {
__name(this, "OpenAIThreadDriver");
}
/**
* OpenAI API Client
*/
client;
/**
* OpenAI model to use
*/
model;
/**
* Temperature for response generation
*/
temperature;
/**
* Maximum number of tokens to generate
*/
maxTokens;
/**
* Whether to cache responses
*/
cache;
/**
* Schema for structured output
*/
responseSchema;
/**
* Provider-specific options
*/
providerOptions;
/**
* Maximum character limit for input validation
*/
maxPromptCharacters;
/**
* Returns a list of available OpenAI models
*/
static getAvailableModels() {
return [
"gpt-4o",
"gpt-4o-mini",
"gpt-4o-mini-audio-preview",
"gpt-4",
"gpt-4-turbo",
"gpt-3.5-turbo",
"gpt-4-vision-preview",
"gpt-4.1",
"gpt-5",
// Reasoning models
"o1",
"o1-mini",
"o1-preview",
"o3",
"o3-mini",
"o3-pro",
"o4-mini"
];
}
/**
* Create a new OpenAI driver
* @param config Configuration options
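*
* @example
* // Illustrative construction (not from the original source); falls back to
* // the OPENAI_API_KEY environment variable when no apiKey is given:
* // const driver = new OpenAIThreadDriver({ model: "gpt-4o", temperature: 0.2 });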
*/
constructor(config = {}) {
super();
const apiKey = config.apiKey || process.env.OPENAI_API_KEY;
if (!apiKey) {
throw new Error("OpenAI API Key must be provided in config or as OPENAI_API_KEY environment variable");
}
const baseClient = new import_openai.default({
apiKey,
// Use API version that supports prompt caching (automatic for prompts >1024 tokens)
defaultQuery: {
"api-version": "2024-10-01-preview"
}
});
if (config.trace ?? false) {
this.client = new Proxy(baseClient, {
get: /* @__PURE__ */ __name((target, prop, receiver) => {
const value = Reflect.get(target, prop, receiver);
if (prop === "chat" && value && typeof value === "object") {
return new Proxy(value, {
get: /* @__PURE__ */ __name((chatTarget, chatProp, chatReceiver) => {
const chatValue = Reflect.get(chatTarget, chatProp, chatReceiver);
if (chatProp === "completions" && chatValue && typeof chatValue === "object") {
return new Proxy(chatValue, {
get: /* @__PURE__ */ __name((completionsTarget, completionsProp, completionsReceiver) => {
const completionsValue = Reflect.get(completionsTarget, completionsProp, completionsReceiver);
if (completionsProp === "create" && typeof completionsValue === "function") {
return traceable(completionsValue.bind(completionsTarget), {
run_type: "llm",
name: "openai",
...config.traceMetadata,
aggregator: /* @__PURE__ */ __name((args, result) => {
const params = args[0] || {};
if (params.stream) {
return {
model: params.model,
input: {
messages: params.messages,
temperature: params.temperature,
max_tokens: params.max_tokens,
tools: params.tools ? params.tools.length : 0
},
streaming: true
};
}
return {
model: params.model,
input: {
messages: params.messages,
temperature: params.temperature,
max_tokens: params.max_tokens,
tools: params.tools ? params.tools.length : 0
},
output: result.choices?.[0]?.message,
metrics: result.usage ? {
prompt_tokens: result.usage.prompt_tokens,
completion_tokens: result.usage.completion_tokens,
total_tokens: result.usage.total_tokens
} : void 0,
finish_reason: result.choices?.[0]?.finish_reason
};
}, "aggregator")
});
}
return completionsValue;
}, "get")
});
}
return chatValue;
}, "get")
});
}
return value;
}, "get")
});
} else {
this.client = baseClient;
}
this.model = config.model || "gpt-4o";
this.temperature = config.temperature ?? 0.7;
this.maxTokens = config.maxTokens;
this.cache = config.cache ?? false;
this.responseSchema = config.responseSchema;
this.providerOptions = config.providerOptions;
this.maxPromptCharacters = config.maxPromptCharacters ?? DEFAULT_MAX_PROMPT_CHARACTERS;
}
/**
* Send a thread to the LLM and get a response
* @param thread Thread to send
* @returns Updated thread with LLM response
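*
* @example
* // Illustrative usage (not from the original source); `thread` is assumed
* // to be a Thread instance from this package:
* // thread.push(new Message({ role: "user", content: "What is 2 + 2?" }));
* // const updated = await driver.sendThread(thread);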
*/
async sendThread(thread, abortSignal) {
if (thread.all().length === 0) {
throw new Error("Cannot send an empty thread");
}
this.checkAbortSignal(abortSignal);
const messages = this.formatMessagesForAPI(thread);
const totalCharacters = messages.reduce((sum, msg) => sum + (typeof msg.content === "string" ? msg.content.length : 0), 0);
if (this.maxPromptCharacters !== void 0 && totalCharacters > this.maxPromptCharacters) {
throw new Error(`The thread content is too long (${totalCharacters} characters). Try a shorter conversation or increase maxPromptCharacters.`);
}
try {
const params = {
model: this.model,
messages,
temperature: this.temperature
};
if (this.maxTokens) {
params.max_tokens = this.maxTokens;
}
if (this.providerOptions) {
if (this.providerOptions.reasoning) {
Object.assign(params, {
reasoning: this.providerOptions.reasoning
});
}
if (this.providerOptions.reasoning_effort) {
params.reasoning_effort = this.providerOptions.reasoning_effort;
}
if (this.providerOptions.verbosity) {
params.verbosity = this.providerOptions.verbosity;
}
if (this.providerOptions.max_completion_tokens) {
params.max_completion_tokens = this.providerOptions.max_completion_tokens;
}
if (this.providerOptions.cfg) {
Object.assign(params, {
cfg: this.providerOptions.cfg
});
}
if (this.providerOptions.freeform_tools !== void 0) {
params.freeform_tools = this.providerOptions.freeform_tools;
}
if (this.providerOptions.mcp) {
Object.assign(params, {
mcp: this.providerOptions.mcp
});
}
if (this.providerOptions.modality) {
params.modality = this.providerOptions.modality;
}
if (this.providerOptions.prediction) {
params.prediction = this.providerOptions.prediction;
}
if (this.providerOptions.store !== void 0) {
params.store = this.providerOptions.store;
}
const { reasoning, reasoning_effort, verbosity, max_completion_tokens, cfg, freeform_tools, mcp, modality, prediction, store, ...otherOptions } = this.providerOptions;
Object.assign(params, otherOptions);
}
if (this.responseSchema) {
params.response_format = {
type: "json_schema",
json_schema: {
name: "response",
schema: this.responseSchema,
strict: true
}
};
}
const tools = thread.getTools();
if (tools.length > 0) {
params.tools = this.createToolDefinitions(tools);
params.tool_choice = "auto";
}
const response = await this.client.chat.completions.create(params);
const firstChoice = response.choices[0];
if (!firstChoice) {
throw new Error("No response from OpenAI API");
}
const choice = firstChoice.message;
if (!choice) {
throw new Error("Empty response from OpenAI API");
}
const message = new Message({
role: "assistant",
content: choice.content || "",
toolCalls: this.parseToolCalls(choice.tool_calls),
cache: this.cache
});
thread.push(message);
if (response.usage) {
const toolCalls = this.parseToolCalls(choice.tool_calls);
const enhancedTokenUsage = {
inputTokens: response.usage.prompt_tokens,
outputTokens: response.usage.completion_tokens,
totalTokens: response.usage.total_tokens,
cachedTokens: response.usage.prompt_tokens_details?.cached_tokens || void 0,
reasoningTokens: response.usage.completion_tokens_details?.reasoning_tokens || void 0
};
const cachedTokens = enhancedTokenUsage.cachedTokens || 0;
enhancedTokenUsage.newContentTokens = enhancedTokenUsage.inputTokens - cachedTokens;
const toolTokenBreakdown = this.generateToolTokenBreakdown(toolCalls, enhancedTokenUsage, thread);
if (toolTokenBreakdown.length > 0) {
enhancedTokenUsage.toolTokenBreakdown = toolTokenBreakdown;
}
thread.updateTokenUsage(enhancedTokenUsage);
this.debug("OpenAI API usage metrics", {
inputTokens: enhancedTokenUsage.inputTokens,
outputTokens: enhancedTokenUsage.outputTokens,
totalTokens: enhancedTokenUsage.totalTokens,
cachedTokens: enhancedTokenUsage.cachedTokens,
reasoningTokens: enhancedTokenUsage.reasoningTokens,
newContentTokens: enhancedTokenUsage.newContentTokens,
toolBreakdownCount: toolTokenBreakdown.length
});
}
return thread;
} catch (error2) {
this.error("Error sending message to OpenAI API", {
error: error2
});
throw error2;
}
}
/**
* Stream a thread to the LLM and get a streaming response
* @param thread Thread to send
* @returns AsyncGenerator yielding the stream and updated thread
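*
* @example
* // Illustrative consumption (not from the original source):
* // for await (const { stream, message } of driver.streamThread(thread)) {
* //   for await (const chunk of stream) process.stdout.write(chunk);
* //   // `message` now holds the accumulated assistant response
* // }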
*/
async *streamThread(thread) {
if (thread.all().length === 0) {
throw new Error("Cannot stream an empty thread");
}
this.debug("Streaming message to OpenAI API", {
model: this.model
});
const messages = this.formatMessagesForAPI(thread);
const totalCharacters = messages.reduce((sum, msg) => sum + (typeof msg.content === "string" ? msg.content.length : 0), 0);
if (this.maxPromptCharacters !== void 0 && totalCharacters > this.maxPromptCharacters) {
throw new Error(`The thread content is too long (${totalCharacters} characters). Try a shorter conversation or increase maxPromptCharacters.`);
}
try {
const params = {
model: this.model,
messages,
temperature: this.temperature,
stream: true,
stream_options: {
include_usage: true
}
};
if (this.maxTokens) {
params.max_tokens = this.maxTokens;
}
if (this.providerOptions) {
if (this.providerOptions.reasoning) {
Object.assign(params, {
reasoning: this.providerOptions.reasoning
});
}
if (this.providerOptions.reasoning_effort) {
params.reasoning_effort = this.providerOptions.reasoning_effort;
}
if (this.providerOptions.verbosity) {
params.verbosity = this.providerOptions.verbosity;
}
if (this.providerOptions.max_completion_tokens) {
params.max_completion_tokens = this.providerOptions.max_completion_tokens;
}
if (this.providerOptions.cfg) {
Object.assign(params, {
cfg: this.providerOptions.cfg
});
}
if (this.providerOptions.freeform_tools !== void 0) {
params.freeform_tools = this.providerOptions.freeform_tools;
}
if (this.providerOptions.mcp) {
Object.assign(params, {
mcp: this.providerOptions.mcp
});
}
if (this.providerOptions.modality) {
params.modality = this.providerOptions.modality;
}
if (this.providerOptions.prediction) {
params.prediction = this.providerOptions.prediction;
}
if (this.providerOptions.store !== void 0) {
params.store = this.providerOptions.store;
}
const { reasoning, reasoning_effort, verbosity, max_completion_tokens, cfg, freeform_tools, mcp, modality, prediction, store, ...otherOptions } = this.providerOptions;
Object.assign(params, otherOptions);
}
if (this.responseSchema) {
params.response_format = {
type: "json_schema",
json_schema: {
name: "response",
schema: this.responseSchema,
strict: true
}
};
}
const tools = thread.getTools();
if (tools.length > 0) {
params.tools = this.createToolDefinitions(tools);
params.tool_choice = "auto";
}
const message = new Message({
role: "assistant",
content: "",
cache: this.cache
});
const stream = await this.client.chat.completions.create(params);
const streamContent = this.createStreamGenerator(stream, message, thread);
yield {
stream: streamContent,
message
};
thread.push(message);
return thread;
} catch (error2) {
this.error("Error streaming message to OpenAI API", {
error: error2
});
throw error2;
}
}
/**
* Create a stream generator that handles updates to the message
*/
async *createStreamGenerator(stream, message, thread) {
let toolCallsStarted = false;
let currentToolCalls = [];
let streamUsage = null;
try {
for await (const chunk of stream) {
const delta = chunk.choices[0]?.delta;
if (chunk.usage) {
streamUsage = chunk.usage;
}
if (!delta) continue;
if (delta.content) {
const updatedMessage = new Message({
role: "assistant",
content: (message.content || "") + delta.content,
toolCalls: message.toolCalls,
cache: this.cache
});
Object.assign(message, updatedMessage);
if (delta.content !== "") {
yield delta.content;
}
}
if (delta.tool_calls) {
if (!toolCallsStarted) {
toolCallsStarted = true;
currentToolCalls = delta.tool_calls.map((call) => ({
...call
}));
} else {
for (const toolCall of delta.tool_calls) {
const existingCall = currentToolCalls.find((c) => c.index === toolCall.index);
if (existingCall) {
if (toolCall.function) {
if (!existingCall.function) {
existingCall.function = {};
}
if (toolCall.function.name && !existingCall.function.name) {
existingCall.function.name = toolCall.function.name;
}
if (toolCall.function.arguments) {
existingCall.function.arguments = (existingCall.function.arguments || "") + toolCall.function.arguments;
}
}
} else {
currentToolCalls.push({
...toolCall
});
}
}
}
}
}
if (currentToolCalls.length > 0) {
const parsedToolCalls = this.parseToolCalls(currentToolCalls);
const updatedMessage = new Message({
role: "assistant",
content: message.content || "",
toolCalls: parsedToolCalls,
cache: this.cache
});
Object.assign(message, updatedMessage);
}
if (streamUsage) {
const toolCalls = this.parseToolCalls(currentToolCalls);
const enhancedTokenUsage = {
inputTokens: streamUsage.prompt_tokens,
outputTokens: streamUsage.completion_tokens,
totalTokens: streamUsage.total_tokens,
cachedTokens: streamUsage.prompt_tokens_details?.cached_tokens || void 0,
reasoningTokens: streamUsage.completion_tokens_details?.reasoning_tokens || void 0
};
const cachedTokens = enhancedTokenUsage.cachedTokens || 0;
enhancedTokenUsage.newContentTokens = enhancedTokenUsage.inputTokens - cachedTokens;
const toolTokenBreakdown = this.generateToolTokenBreakdown(toolCalls, enhancedTokenUsage, thread);
if (toolTokenBreakdown.length > 0) {
enhancedTokenUsage.toolTokenBreakdown = toolTokenBreakdown;
}
thread.updateTokenUsage(enhancedTokenUsage);
this.debug("OpenAI Streaming API usage metrics", {
inputTokens: enhancedTokenUsage.inputTokens,
outputTokens: enhancedTokenUsage.outputTokens,
totalTokens: enhancedTokenUsage.totalTokens,
cachedTokens: enhancedTokenUsage.cachedTokens,
reasoningTokens: enhancedTokenUsage.reasoningTokens,
newContentTokens: enhancedTokenUsage.newContentTokens,
toolBreakdownCount: toolTokenBreakdown.length
});
}
} catch (error2) {
this.error("Error processing OpenAI stream", {
error: error2
});
throw error2;
}
}
/**
* Format messages from the Thread object to the OpenAI API format
*/
formatMessagesForAPI(thread) {
const messages = [];
for (const message of thread.all()) {
const hasAttachments = message.attachments && message.attachments.length > 0;
if (message.role === "system") {
messages.push({
role: "system",
content: this.combineTextWithAttachmentSummaries(message)
});
continue;
}
if (message.toolResults && message.toolResults.length > 0) {
for (const toolResult of message.toolResults) {
let toolContent = toolResult.result;
if (toolContent == null) {
toolContent = {
error: "Tool result was null or undefined"
};
}
const serializedAttachments = hasAttachments ? this.serializeAttachmentsForToolMessage(message.attachments || []) : void 0;
if (serializedAttachments && typeof toolContent === "object" && toolContent !== null) {
toolContent = {
...toolContent,
attachments: serializedAttachments
};
} else if (serializedAttachments) {
toolContent = {
value: toolContent,
attachments: serializedAttachments
};
}
const toolCallId = toolResult.toolUseId.startsWith("call_") ? toolResult.toolUseId : `call_${toolResult.toolUseId}`;
messages.push({
role: "tool",
tool_call_id: toolCallId,
content: JSON.stringify(toolContent)
});
}
continue;
}
if (message.role === "assistant" && message.toolCalls && message.toolCalls.length > 0) {
const toolCalls = message.toolCalls.map((call) => ({
id: call.toolUseId.startsWith("call_") ? call.toolUseId : `call_${call.toolUseId}`,
type: "function",
function: {
name: call.name,
arguments: JSON.stringify(call.arguments)
}
}));
const assistantContent = hasAttachments ? this.combineTextWithAttachmentSummaries(message) : message.content || null;
messages.push({
role: "assistant",
content: assistantContent,
tool_calls: toolCalls
});
continue;
}
if (hasAttachments && message.role === "user") {
const contentParts = this.buildUserAttachmentContentParts(message);
messages.push({
role: "user",
content: contentParts
});
continue;
}
if (hasAttachments && message.role !== "tool") {
const contentWithSummaries = this.combineTextWithAttachmentSummaries(message);
if (message.role === "assistant") {
messages.push({
role: "assistant",
content: contentWithSummaries || "(Empty response)"
});
} else if (message.role === "user") {
messages.push({
role: "user",
content: contentWithSummaries || "(Empty message)"
});
} else {
messages.push({
role: message.role,
content: contentWithSummaries || "(Empty message)"
});
}
continue;
}
if (message.role === "user") {
messages.push({
role: "user",
content: message.content || "(Empty message)"
});
} else if (message.role === "assistant") {
messages.push({
role: "assistant",
content: message.content || "(Empty response)"
});
}
}
return messages;
}
/**
* Build a multimodal content array for a user message with attachments
*/
buildUserAttachmentContentParts(message) {
const contentParts = [];
if (message.content) {
contentParts.push({
type: "text",
text: message.content
});
}
for (const attachment of message.attachments || []) {
const processedParts = this.processAttachmentForUserMessage(attachment);
contentParts.push(...processedParts);
}
if (contentParts.length === 0) {
contentParts.push({
type: "text",
text: "(Empty message)"
});
}
return contentParts;
}
/**
* Convert an attachment to one or more OpenAI content parts for user messages
*/
processAttachmentForUserMessage(attachment) {
if (attachment.type === "image") {
return [
this.processImageAttachment(attachment)
];
}
if (attachment.type === "document") {
return this.processDocumentAttachment(attachment);
}
if (attachment.type === "video") {
return [
this.createUnsupportedAttachmentTextPart("Video attachments are not supported by the OpenAI Chat Completions API. Provide key frames as images or a transcript instead.")
];
}
return [
this.createUnsupportedAttachmentTextPart("Encountered an attachment type that could not be processed.")
];
}
/**
* Convert an image attachment into an OpenAI content part
*/
processImageAttachment(attachment) {
const base64Data = this.getAttachmentBase64(attachment);
if (base64Data) {
const mediaType = this.getImageMediaType(attachment.format);
return {
type: "image_url",
image_url: {
url: `data:${mediaType};base64,${base64Data}`,
detail: "auto"
}
};
}
if (attachment.source.uri) {
return {
type: "image_url",
image_url: {
url: attachment.source.uri,
detail: "auto"
}
};
}
return this.createUnsupportedAttachmentTextPart("Image attachment missing data or URI. Unable to include in request.");
}
/**
* Convert a document attachment into OpenAI content parts
*/
processDocumentAttachment(attachment) {
const fileId = this.extractOpenAIFileId(attachment.source.uri);
if (fileId) {
return [
{
type: "file",
file: {
file_id: fileId,
filename: attachment.name
}
}
];
}
const base64Data = this.getAttachmentBase64(attachment);
if (base64Data) {
if (this.isTextDocumentFormat(attachment.format)) {
const decoded = this.decodeBase64ToUtf8(base64Data);
if (decoded) {
const label = attachment.name ? `${attachment.format.toUpperCase()} Document \u2014 ${attachment.name}` : `${attachment.format.toUpperCase()} Document`;
return [
{
type: "text",
text: `[${label}]
${decoded}`
}
];
}
}
return [
{
type: "file",
file: {
file_data: base64Data,
filename: attachment.name || `document.${attachment.format}`
}
}
];
}
if (attachment.source.uri) {
return [
this.createUnsupportedAttachmentTextPart(`Document attachment (${attachment.name || attachment.format}) could not be embedded. Reference URI: ${attachment.source.uri}`)
];
}
return [
this.createUnsupportedAttachmentTextPart(`Document attachment (${attachment.name || attachment.format}) could not be embedded because no data source was provided.`)
];
}
/**
* Combine message text with attachment summaries for roles that only support text
*/
combineTextWithAttachmentSummaries(message) {
const baseText = message.content || this.getDefaultPlaceholderForRole(message.role);
if (!message.attachments || message.attachments.length === 0) {
return baseText;
}
const summaryLines = message.attachments.map((attachment, index) => this.describeAttachment(attachment, index + 1));
const attachmentsSummary = [
`Attachments:`,
...summaryLines
].join("\n");
if (baseText) {
return `${baseText}
${attachmentsSummary}`;
}
return attachmentsSummary;
}
/**
* Provide a readable description for an attachment
*/
describeAttachment(attachment, position) {
const sourceDescription = this.describeAttachmentSource(attachment);
if (attachment.type === "document") {
return `${position}. Document (${attachment.format}) \u2014 ${attachment.name || "unnamed"} \u2014 ${sourceDescription}`;
}
if (attachment.type === "image") {
return `${position}. Image (${attachment.format}) \u2014 ${sourceDescription}`;
}
if (attachment.type === "video") {
return `${position}. Video (${attachment.format}) \u2014 ${sourceDescription}`;
}
return `${position}. Attachment \u2014 ${sourceDescription}`;
}
/**
* Summarize the attachment source without leaking raw data
*/
describeAttachmentSource(attachment) {
if (attachment.source.bytes) {
return "embedded binary bytes";
}
if (attachment.source.uri?.startsWith("data:")) {
return "embedded data URI";
}
if (attachment.source.uri) {
return `uri: ${attachment.source.uri}`;
}
return "no source provided";
}
/**
* Serialize attachments for inclusion alongside tool message payloads
*/
serializeAttachmentsForToolMessage(attachments) {
if (!attachments || attachments.length === 0) {
return void 0;
}
const serialized = attachments.map((attachment) => {
const base = {
type: attachment.type
};
if (attachment.type === "document") {
base.format = attachment.format;
base.name = attachment.name;
} else if (attachment.type === "image" || attachment.type === "video") {
base.format = attachment.format;
}
const base64Data = this.getAttachmentBase64(attachment);
if (base64Data) {
base.base64 = base64Data;
}
if (attachment.source.uri) {
base.uri = attachment.source.uri;
}
if (attachment.source.bucketOwner) {
base.bucketOwner = attachment.source.bucketOwner;
}
return base;
});
return serialized.length > 0 ? serialized : void 0;
}
/**
* Create a text content part for unsupported attachments
*/
createUnsupportedAttachmentTextPart(text) {
return {
type: "text",
text
};
}
/**
* Extract base64 data from an attachment source if available
*/
getAttachmentBase64(attachment) {
if (attachment.source.bytes && attachment.source.bytes.length > 0) {
return Buffer.from(attachment.source.bytes).toString("base64");
}
if (attachment.source.uri?.startsWith("data:")) {
const commaIndex = attachment.source.uri.indexOf(",");
if (commaIndex !== -1) {
return attachment.source.uri.substring(commaIndex + 1);
}
}
return null;
}
/**
* Check whether a document format is text-based
*/
isTextDocumentFormat(format) {
const textFormats = /* @__PURE__ */ new Set([
"txt",
"md",
"markdown",
"csv",
"html"
]);
return textFormats.has(format.toLowerCase());
}
/**
* Decode base64 data to UTF-8 safely
*/
decodeBase64ToUtf8(base64) {
try {
return Buffer.from(base64, "base64").toString("utf-8");
} catch (error2) {
this.debug("Failed to decode base64 document attachment", {
error: error2
});
return null;
}
}
/**
* Attempt to extract an OpenAI file ID from a URI
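*
* @example
* // Derived from the checks below:
* // "file-abc123"                 => "file-abc123"
* // "openai://file/file-abc123"   => "file-abc123"
* // "https://example.com/doc.pdf" => undefined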
*/
extractOpenAIFileId(uri) {
if (!uri) {
return void 0;
}
if (uri.startsWith("file-")) {
return uri;
}
const openAISchemePrefix = "openai://file/";
if (uri.startsWith(openAISchemePrefix)) {
const candidate = uri.substring(openAISchemePrefix.length);
return candidate || void 0;
}
return void 0;
}
/**
* Map image formats to media types
*/
getImageMediaType(format) {
const normalized = format.toLowerCase();
const formatMap = {
jpg: "image/jpeg",
jpeg: "image/jpeg",
png: "image/png",
gif: "image/gif",
webp: "image/webp"
};
return formatMap[normalized] || `image/${normalized}`;
}
/**
* Provide default placeholder text for roles that require content
*/
getDefaultPlaceholderForRole(role) {
if (role === "assistant") {
return "(Empty response)";
}
if (role === "user") {
return "(Empty message)";
}
if (role === "system") {
return "(Empty system message)";
}
return "";
}
/**
* Create tool definitions for the OpenAI API
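*
* @example
* // Illustrative shape (not from the original source); a tool whose zod
* // parameters are z.object({ city: z.string() }) becomes roughly:
* // { type: "function", function: { name, description,
* //   parameters: { type: "object", properties: { city: { type: "string" } }, ..., $schema } } }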
*/
createToolDefinitions(tools) {
return tools.map((tool) => ({
type: "function",
function: {
name: tool.getName(),
description: tool.getDescription(),
parameters: {
...import_zod3.z.toJSONSchema(tool.getParameters()),
$schema: JSON_SCHEMA_URL
}
}
}));
}
/**
* Parse tool calls from the OpenAI API response
*/
parseToolCalls(toolCalls) {
if (!toolCalls || toolCalls.length === 0) {
return void 0;
}
return toolCalls.map((toolCall) => {
if (toolCall.type === "function" && toolCall.function.name) {
try {
const args = JSON.parse(toolCall.function.arguments || "{}");
return {
toolUseId: toolCall.id,
name: toolCall.function.name,
arguments: args
};
} catch (error2) {
this.error("Error parsing tool call arguments", {
error: error2,
toolCall
});
return {
toolUseId: toolCall.id,
name: toolCall.function.name,
arguments: {}
};
}
}
this.error("Unknown tool call type", {
toolCall
});
return {
toolUseId: toolCall.id || (0, import_nanoid2.nanoid)(),
name: toolCall.function?.name || "unknown",
arguments: {}
};
});
}
/**
* Generate per-tool token breakdown by distributing total usage across tools
* Since the OpenAI API doesn't report per-tool usage, we estimate it by splitting the total evenly across tool calls
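*
* @example
* // With 3 tool calls and usage of 10 input / 5 output tokens, the split is
* // 4/3/3 input tokens and 2/2/1 output tokens across the three tools.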
*/
generateToolTokenBreakdown(toolCalls, totalUsage, _thread) {
if (!toolCalls || toolCalls.length === 0) {
return [];
}
const breakdown = [];
const toolCount = toolCalls.length;
const baseInputPerTool = Math.floor(totalUsage.inputTokens / toolCount);
const baseOutputPerTool = Math.floor(totalUsage.outputTokens / toolCount);
let remainderInput = totalUsage.inputTokens % toolCount;
let remainderOutput = totalUsage.outputTokens % toolCount;
toolCalls.forEach((toolCall) => {
const inputTokens = baseInputPerTool + (remainderInput > 0 ? 1 : 0);
const outputTokens = baseOutputPerTool + (remainderOutput > 0 ? 1 : 0);
if (remainderInput > 0) remainderInput--;
if (remainderOutput > 0) remainderOutput--;
const toolBreakdown = {
toolName: toolCall.name,
toolUseId: toolCall.toolUseId,
inputTokens,
outputTokens,
totalTokens: inputTokens + outputTokens
};
breakdown.push(toolBreakdown);
});
return breakdown;
}
/**
* Get reasoning token count from the last message, or 0 if not available
*/
getLastReasoningTokens(thread) {
const lastUsage = thread.getLastMessageTokenUsage();
return lastUsage?.reasoningTokens || 0;
}
/**
* Get total reasoning tokens for the entire thread
*/
getTotalReasoningTokens(thread) {
const totalUsage = thread.getTokenUsage();
return totalUsage?.reasoningTokens || 0;
}
/**
* Calculate recommended character limit based on desired token limit
* @param targetTokens Target token limit (e.g., 100_000 for 100K tokens)
* @param contentType Type of content - affects character-to-token ratio
* @returns Recommended character limit
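*
* @example
* // Derived from the ratios below:
* // calculateCharacterLimit(100_000, "text")       => 400_000
* // calculateCharacterLimit(100_000, "structured") => 250_000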
*/
static calculateCharacterLimit(targetTokens, contentType = "text") {
const ratios = {
text: 4,
code: 3.2,
structured: 2.5
};
return Math.floor(targetTokens * ratios[contentType]);
}
};
}
});
// src/model.drivers/anthropic.ts
var anthropic_exports = {};
__export(anthropic_exports, {
AnthropicThreadDriver: () => AnthropicThreadDriver
});
var import_sdk, import_nanoid3, import_zod4, DEFAULT_MAX_PROMPT_CHARACTERS2, AnthropicThreadDriver;
var init_anthropic = __esm({
"src/model.drivers/anthropic.ts"() {
"use strict";
import_sdk = __toESM(require("@anthropic-ai/sdk"), 1);
import_nanoid3 = require("nanoid");
import_zod4 = require("zod");
init_message();
init_tracing();
init_base();
DEFAULT_MAX_PROMPT_CHARACTERS2 = 1e6;
AnthropicThreadDriver = class extends BaseModelDriver {
static {
__name(this, "AnthropicThreadDriver");
}
/**
* Anthropic API Client
*/
client;
/**
* Anthropic model to use
*/
model;
/**
* Temperature for response generation
*/
temperature;
/**
* Maximum number of tokens to generate
*/
maxTokens;
/**
* Whether to cache responses
*/
cache;
/**
* Tool choice mode
*/
toolChoice;
/**
* Schema for structured output
*/
responseSchema;
/**
* Provider-specific options
*/
providerOptions;
/**
* Caching configuration
*/
cachingConfig;
/**
* Cac