@flatfile/improv
Version:
A powerful TypeScript library for building AI agents with multi-threaded conversations, tool execution, and event handling capabilities
1,456 lines (1,447 loc) • 1.35 MB
JavaScript
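/*
* Bundled JavaScript output of @flatfile/improv. A rough usage sketch of the pieces
* defined below (the import path is the package name above and the OPENAI_API_KEY
* fallback is taken from this file; treating the driver as a public entry point is
* an assumption, not a documented API):
*
*   import { OpenAIThreadDriver } from "@flatfile/improv";
*
*   const driver = new OpenAIThreadDriver({ model: "gpt-4o", temperature: 0.2 });
*   driver.on("**", (event) => console.log("driver event", event));
*   // a thread of messages (defined elsewhere in the package) is then passed to
*   // driver.sendThread(thread) to run a single model turn
*/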
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
}) : x)(function(x) {
if (typeof require !== "undefined") return require.apply(this, arguments);
throw Error('Dynamic require of "' + x + '" is not supported');
});
var __esm = (fn, res) => function __init() {
return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
};
var __commonJS = (cb, mod) => function __require2() {
return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
};
var __export = (target, all) => {
for (var name in all)
__defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
if (from && typeof from === "object" || typeof from === "function") {
for (let key of __getOwnPropNames(from))
if (!__hasOwnProp.call(to, key) && key !== except)
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
}
return to;
};
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
// If the importer is in node compatibility mode or this is not an ESM
// file that has been converted to a CommonJS file using a Babel-
// compatible transform (i.e. "__esModule" has not been set), then set
// "default" to the CommonJS "module.exports" for node compatibility.
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
mod
));
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// src/event-source.ts
import { EventEmitter2 } from "eventemitter2";
var EventSource;
var init_event_source = __esm({
"src/event-source.ts"() {
"use strict";
EventSource = class extends EventEmitter2 {
static {
__name(this, "EventSource");
}
constructor() {
super({
wildcard: true
});
}
/**
* Forward all events from a source EventEmitter2 instance
* Preserves the original event name and merges any additional context
*/
forwardEvents(source, context = {}) {
const self = this;
source.on("**", function(data) {
const eventName = this.event;
self.emit(eventName, {
...data,
...context
});
});
}
debug(message, data) {
if (process.env.NODE_ENV === "development") {
console.log(message);
if (data) {
console.dir(data, {
depth: null
});
}
}
}
error(...args) {
if (process.env.NODE_ENV === "development") {
console.error(...args);
}
}
};
}
});
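/*
* Usage sketch for EventSource (names taken from the class above; the event name and
* context payload are illustrative):
*
*   const child = new EventSource();
*   const parent = new EventSource();
*   parent.forwardEvents(child, { threadId: "t-1" });
*   parent.on("tool.executed", (data) => console.log(data)); // { name: "lookup", threadId: "t-1" }
*   child.emit("tool.executed", { name: "lookup" });
*/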
// src/message.ts
var Message;
var init_message = __esm({
"src/message.ts"() {
"use strict";
Message = class {
static {
__name(this, "Message");
}
_content;
_role;
_reasoning;
_toolCalls;
_toolResults;
_attachments;
_cache;
constructor({ content, role = "user", toolCalls = [], toolResults = [], attachments = [], reasoning = [], cache: cache2 = false }) {
this._content = content;
this._role = role;
this._toolCalls = toolCalls;
this._reasoning = reasoning;
this._toolResults = toolResults;
this._attachments = attachments;
this._cache = cache2;
}
get content() {
return this._content;
}
get role() {
return this._role;
}
get toolCalls() {
return this._toolCalls;
}
get toolResults() {
return this._toolResults;
}
get attachments() {
return this._attachments;
}
get cache() {
return this._cache;
}
get reasoning() {
return this._reasoning;
}
isToolResponse() {
return this._toolResults.length > 0;
}
isAssistantMessage() {
return this._role === "assistant";
}
isUserMessage() {
return this._role === "user";
}
isSystemMessage() {
return this._role === "system";
}
isToolCall() {
return this._toolCalls.length > 0;
}
/**
* Get attachments of a specific type
*/
getAttachmentsByType(type) {
return this._attachments.filter((a) => a.type === type);
}
/**
* Add an attachment to the message
*/
addAttachment(attachment) {
this._attachments.push(attachment);
}
/**
* Remove an attachment from the message
*/
removeAttachment(index) {
if (index >= 0 && index < this._attachments.length) {
this._attachments.splice(index, 1);
}
}
/**
* Attempts to parse and return JSON content from the message
* Supports multiple formats:
* 1. Direct JSON string
* 2. JSON within markdown code blocks
* 3. JSON within specific delimiters
*/
get json() {
try {
if (this._content) {
return JSON.parse(this._content);
}
} catch {
try {
if (this._content) {
const codeBlockMatch = this._content.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
if (codeBlockMatch?.[1]) {
return JSON.parse(codeBlockMatch[1]);
}
}
} catch {
try {
if (this._content) {
const jsonMatch = this._content.match(/\{[\s\S]*\}/);
if (jsonMatch) {
return JSON.parse(jsonMatch[0]);
}
}
} catch {
return null;
}
}
}
return null;
}
};
}
});
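/*
* Sketch of the Message.json extraction described above (the content value is illustrative):
*
*   const msg = new Message({
*     role: "assistant",
*     content: "Here you go:\n```json\n{ \"status\": \"ok\", \"rows\": 3 }\n```",
*   });
*   msg.json; // -> { status: "ok", rows: 3 }; falls back to a bare {...} match, else null
*/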
// src/tracing/tracer.ts
var NullTracer, defaultTracer;
var init_tracer = __esm({
"src/tracing/tracer.ts"() {
"use strict";
NullTracer = class {
static {
__name(this, "NullTracer");
}
traceable(fn, _metadata) {
return fn;
}
};
defaultTracer = new NullTracer();
}
});
// src/tracing/registry.ts
var registry_exports = {};
__export(registry_exports, {
TracerRegistry: () => TracerRegistry,
getTracer: () => getTracer,
isTracingEnabled: () => isTracingEnabled
});
function getTracer() {
return TracerRegistry.getInstance().getActiveTracer();
}
function isTracingEnabled() {
return TracerRegistry.getInstance().isTracingEnabled();
}
var TracerRegistry;
var init_registry = __esm({
"src/tracing/registry.ts"() {
"use strict";
init_tracer();
TracerRegistry = class _TracerRegistry {
static {
__name(this, "TracerRegistry");
}
static instance;
tracers = /* @__PURE__ */ new Map();
activeTracer = "null";
constructor() {
this.register("null", defaultTracer);
}
/**
* Get the singleton instance of the registry
*/
static getInstance() {
if (!_TracerRegistry.instance) {
_TracerRegistry.instance = new _TracerRegistry();
}
return _TracerRegistry.instance;
}
/**
* Register a tracer with the registry
* @param name Name of the tracer
* @param tracer The tracer implementation
*/
register(name, tracer) {
this.tracers.set(name, tracer);
}
/**
* Set the active tracer by name
* @param name Name of the tracer to activate
* @throws Error if the tracer doesn't exist
*/
setActiveTracer(name) {
if (!this.tracers.has(name)) {
throw new Error(`Tracer '${name}' is not registered`);
}
this.activeTracer = name;
}
/**
* Get the currently active tracer
*/
getActiveTracer() {
return this.tracers.get(this.activeTracer) || defaultTracer;
}
/**
* Check if tracing is enabled (i.e., active tracer is not the null tracer)
*/
isTracingEnabled() {
return this.activeTracer !== "null";
}
};
__name(getTracer, "getTracer");
__name(isTracingEnabled, "isTracingEnabled");
}
});
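/*
* Registry usage sketch. The custom tracer below only has to provide the
* traceable(fn, metadata) method that NullTracer defines; everything else is illustrative:
*
*   const registry = TracerRegistry.getInstance();
*   registry.register("console", {
*     traceable: (fn, metadata) => (...args) => {
*       console.log("calling", metadata?.name, args);
*       return fn(...args);
*     },
*   });
*   registry.setActiveTracer("console");
*   isTracingEnabled(); // true, since the active tracer is no longer "null"
*/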
// src/tracing/index.ts
var tracing_exports = {};
__export(tracing_exports, {
NullTracer: () => NullTracer,
TracerRegistry: () => TracerRegistry,
defaultTracer: () => defaultTracer,
getTracer: () => getTracer,
isTracingEnabled: () => isTracingEnabled,
traceable: () => traceable
});
function traceable(fn, metadata) {
if (!isTracingEnabled()) {
return fn;
}
return getTracer().traceable(fn, metadata);
}
var init_tracing = __esm({
"src/tracing/index.ts"() {
"use strict";
init_registry();
init_tracer();
__name(traceable, "traceable");
}
});
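/*
* traceable() is a pass-through until a non-null tracer is activated. Sketch
* (fetchRows and sheetId are hypothetical names, not part of this package):
*
*   const tracedFetchRows = traceable(fetchRows, { run_type: "tool", name: "fetchRows" });
*   await tracedFetchRows(sheetId); // identical to fetchRows(sheetId) while tracing is disabled
*/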
// src/model.drivers/base.ts
var BaseModelDriver;
var init_base = __esm({
"src/model.drivers/base.ts"() {
"use strict";
init_event_source();
BaseModelDriver = class extends EventSource {
static {
__name(this, "BaseModelDriver");
}
/**
* Check if the abort signal has been triggered and throw an error if so
*/
checkAbortSignal(abortSignal) {
if (abortSignal?.aborted) {
throw new Error("Thread execution aborted");
}
}
/**
* Execute an async operation with abort signal checking before and after
*/
async withAbortCheck(abortSignal, operation) {
this.checkAbortSignal(abortSignal);
const result = await operation();
this.checkAbortSignal(abortSignal);
return result;
}
/**
* Wrap an async generator with abort signal checking on each iteration
*/
async *wrapStreamWithAbort(abortSignal, stream) {
try {
for await (const chunk of stream) {
this.checkAbortSignal(abortSignal);
yield chunk;
}
} finally {
this.checkAbortSignal(abortSignal);
}
}
};
}
});
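/*
* Subclass sketch for the abort helpers above (fetchCompletion is a hypothetical
* stand-in for a provider API call):
*
*   class MyDriver extends BaseModelDriver {
*     async sendThread(thread, abortSignal) {
*       return this.withAbortCheck(abortSignal, () => fetchCompletion(thread));
*     }
*   }
*/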
// src/model.drivers/openai.ts
var openai_exports = {};
__export(openai_exports, {
OpenAIThreadDriver: () => OpenAIThreadDriver
});
import { nanoid as nanoid2 } from "nanoid";
import OpenAI from "openai";
import { zodToJsonSchema } from "zod-to-json-schema";
var MAX_PROMPT_CHARACTERS, JSON_SCHEMA_URL, OpenAIThreadDriver;
var init_openai = __esm({
"src/model.drivers/openai.ts"() {
"use strict";
init_message();
init_tracing();
init_base();
MAX_PROMPT_CHARACTERS = 1e5;
JSON_SCHEMA_URL = "http://json-schema.org/draft-07/schema#";
OpenAIThreadDriver = class extends BaseModelDriver {
static {
__name(this, "OpenAIThreadDriver");
}
/**
* OpenAI API Client
*/
client;
/**
* OpenAI model to use
*/
model;
/**
* Temperature for response generation
*/
temperature;
/**
* Maximum number of tokens to generate
*/
maxTokens;
/**
* Whether to cache responses
*/
cache;
/**
* Schema for structured output
*/
responseSchema;
/**
* Returns a list of available OpenAI models
*/
static getAvailableModels() {
return [
"gpt-4o",
"gpt-4o-mini",
"gpt-4",
"gpt-4-turbo",
"gpt-3.5-turbo",
"gpt-4-vision-preview"
];
}
/**
* Create a new OpenAI driver
* @param config Configuration options
*/
constructor(config = {}) {
super();
const apiKey = config.apiKey || process.env.OPENAI_API_KEY;
if (!apiKey) {
throw new Error("OpenAI API Key must be provided in config or as OPENAI_API_KEY environment variable");
}
const baseClient = new OpenAI({
apiKey
});
if (config.trace ?? false) {
this.client = new Proxy(baseClient, {
get: /* @__PURE__ */ __name((target, prop, receiver) => {
const value = Reflect.get(target, prop, receiver);
if (prop === "chat" && value && typeof value === "object") {
return new Proxy(value, {
get: /* @__PURE__ */ __name((chatTarget, chatProp, chatReceiver) => {
const chatValue = Reflect.get(chatTarget, chatProp, chatReceiver);
if (chatProp === "completions" && chatValue && typeof chatValue === "object") {
return new Proxy(chatValue, {
get: /* @__PURE__ */ __name((completionsTarget, completionsProp, completionsReceiver) => {
const completionsValue = Reflect.get(completionsTarget, completionsProp, completionsReceiver);
if (completionsProp === "create" && typeof completionsValue === "function") {
return traceable(completionsValue.bind(completionsTarget), {
run_type: "llm",
name: "openai",
...config.traceMetadata,
aggregator: /* @__PURE__ */ __name((args, result) => {
const params = args[0] || {};
if (params.stream) {
return {
model: params.model,
input: {
messages: params.messages,
temperature: params.temperature,
max_tokens: params.max_tokens,
tools: params.tools ? params.tools.length : 0
},
streaming: true
};
}
return {
model: params.model,
input: {
messages: params.messages,
temperature: params.temperature,
max_tokens: params.max_tokens,
tools: params.tools ? params.tools.length : 0
},
output: result.choices?.[0]?.message,
metrics: result.usage ? {
prompt_tokens: result.usage.prompt_tokens,
completion_tokens: result.usage.completion_tokens,
total_tokens: result.usage.total_tokens
} : void 0,
finish_reason: result.choices?.[0]?.finish_reason
};
}, "aggregator")
});
}
return completionsValue;
}, "get")
});
}
return chatValue;
}, "get")
});
}
return value;
}, "get")
});
} else {
this.client = baseClient;
}
this.model = config.model || "gpt-4o";
this.temperature = config.temperature ?? 0.7;
this.maxTokens = config.maxTokens;
this.cache = config.cache ?? false;
this.responseSchema = config.responseSchema;
}
/**
* Send a thread to the LLM and get a response
* @param thread Thread to send
* @returns Updated thread with LLM response
*/
async sendThread(thread, abortSignal) {
if (thread.all().length === 0) {
throw new Error("Cannot send an empty thread");
}
this.checkAbortSignal(abortSignal);
const messages = this.formatMessagesForAPI(thread);
const totalCharacters = messages.reduce((sum, msg) => sum + (typeof msg.content === "string" ? msg.content.length : 0), 0);
if (totalCharacters > MAX_PROMPT_CHARACTERS) {
throw new Error(`The thread content is too long (${totalCharacters} characters). Try a shorter conversation.`);
}
try {
const params = {
model: this.model,
messages,
temperature: this.temperature
};
if (this.maxTokens) {
params.max_tokens = this.maxTokens;
}
if (this.responseSchema) {
params.response_format = {
type: "json_schema",
json_schema: {
name: "response",
schema: this.responseSchema,
strict: true
}
};
}
const tools = thread.getTools();
if (tools.length > 0) {
params.tools = this.createToolDefinitions(tools);
params.tool_choice = "auto";
}
const response = await this.client.chat.completions.create(params);
const lastChoice = response.choices[0];
if (!lastChoice) {
throw new Error("No response from OpenAI API");
}
const choice = lastChoice.message;
if (!choice) {
throw new Error("Empty response from OpenAI API");
}
const message = new Message({
role: "assistant",
content: choice.content || "",
toolCalls: this.parseToolCalls(choice.tool_calls),
cache: this.cache
});
thread.push(message);
if (response.usage) {
// usage metrics are available here but are not recorded on the thread (compare the Anthropic driver below, which calls thread.updateTokenUsage)
}
return thread;
} catch (error2) {
this.error("Error sending message to OpenAI API", {
error: error2
});
throw error2;
}
}
/**
* Stream a thread to the LLM and get a streaming response
* @param thread Thread to send
* @returns AsyncGenerator yielding the stream and updated thread
*/
async *streamThread(thread) {
if (thread.all().length === 0) {
throw new Error("Cannot stream an empty thread");
}
this.debug("Streaming message to OpenAI API", {
model: this.model
});
const messages = this.formatMessagesForAPI(thread);
const totalCharacters = messages.reduce((sum, msg) => sum + (typeof msg.content === "string" ? msg.content.length : 0), 0);
if (totalCharacters > MAX_PROMPT_CHARACTERS) {
throw new Error(`The thread content is too long (${totalCharacters} characters). Try a shorter conversation.`);
}
try {
const params = {
model: this.model,
messages,
temperature: this.temperature,
stream: true
};
if (this.maxTokens) {
params.max_tokens = this.maxTokens;
}
if (this.responseSchema) {
params.response_format = {
type: "json_schema",
json_schema: {
name: "response",
schema: this.responseSchema,
strict: true
}
};
}
const tools = thread.getTools();
if (tools.length > 0) {
params.tools = this.createToolDefinitions(tools);
params.tool_choice = "auto";
}
const message = new Message({
role: "assistant",
content: "",
cache: this.cache
});
const stream = await this.client.chat.completions.create(params);
const streamContent = this.createStreamGenerator(stream, message);
yield {
stream: streamContent,
message
};
thread.push(message);
return thread;
} catch (error2) {
this.error("Error streaming message to OpenAI API", {
error: error2
});
throw error2;
}
}
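/*
* Streaming consumption sketch for streamThread above (driver is an OpenAIThreadDriver
* instance and thread is as in sendThread; illustrative only):
*
*   for await (const { stream, message } of driver.streamThread(thread)) {
*     for await (const token of stream) process.stdout.write(token);
*     console.log("\nfinal tool calls:", message.toolCalls);
*   }
*/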
/**
* Create a stream generator that handles updates to the message
*/
async *createStreamGenerator(stream, message) {
let toolCallsStarted = false;
let currentToolCalls = [];
try {
for await (const chunk of stream) {
const delta = chunk.choices[0]?.delta;
if (!delta) continue;
if (delta.content) {
const updatedMessage = new Message({
role: "assistant",
content: (message.content || "") + delta.content,
toolCalls: message.toolCalls,
cache: this.cache
});
Object.assign(message, updatedMessage);
yield delta.content;
}
if (delta.tool_calls) {
if (!toolCallsStarted) {
toolCallsStarted = true;
currentToolCalls = delta.tool_calls.map((call) => ({
...call
}));
} else {
for (const toolCall of delta.tool_calls) {
const existingCall = currentToolCalls.find((c) => c.index === toolCall.index);
if (existingCall) {
if (toolCall.function) {
if (!existingCall.function) {
existingCall.function = {};
}
if (toolCall.function.name && !existingCall.function.name) {
existingCall.function.name = toolCall.function.name;
}
if (toolCall.function.arguments) {
existingCall.function.arguments = (existingCall.function.arguments || "") + toolCall.function.arguments;
}
}
} else {
currentToolCalls.push({
...toolCall
});
}
}
}
}
}
if (currentToolCalls.length > 0) {
const parsedToolCalls = this.parseToolCalls(currentToolCalls);
const updatedMessage = new Message({
role: "assistant",
content: message.content || "",
toolCalls: parsedToolCalls,
cache: this.cache
});
Object.assign(message, updatedMessage);
}
} catch (error2) {
this.error("Error processing OpenAI stream", {
error: error2
});
throw error2;
}
}
/**
* Format messages from the Thread object to the OpenAI API format
*/
formatMessagesForAPI(thread) {
const messages = [];
for (const message of thread.all()) {
if (message.attachments && message.attachments.length > 0) {
const contentParts = [];
if (message.content) {
contentParts.push({
type: "text",
text: message.content
});
}
for (const attachment of message.attachments) {
if (attachment.type === "image" && attachment.source.uri) {
contentParts.push({
type: "image_url",
image_url: {
url: attachment.source.uri,
detail: "auto"
}
});
}
}
if (message.role === "user") {
messages.push({
role: "user",
content: contentParts
});
} else if (message.role === "assistant") {
messages.push({
role: "assistant",
content: contentParts
});
} else if (message.role === "system") {
messages.push({
role: "system",
content: contentParts
});
}
continue;
}
if (message.toolResults && message.toolResults.length > 0) {
if (message.content) {
messages.push({
role: "user",
content: message.content
});
}
for (const toolResult of message.toolResults) {
messages.push({
role: "tool",
tool_call_id: toolResult.toolUseId,
content: JSON.stringify(toolResult.result)
});
}
continue;
}
if (message.role === "assistant" && message.toolCalls && message.toolCalls.length > 0) {
const toolCalls = message.toolCalls.map((call) => ({
id: call.toolUseId,
type: "function",
function: {
name: call.name,
arguments: JSON.stringify(call.arguments)
}
}));
messages.push({
role: "assistant",
content: message.content || null,
tool_calls: toolCalls
});
continue;
}
if (message.role === "user") {
messages.push({
role: "user",
content: message.content || ""
});
} else if (message.role === "assistant") {
messages.push({
role: "assistant",
content: message.content || ""
});
} else if (message.role === "system") {
messages.push({
role: "system",
content: message.content || ""
});
}
}
return messages;
}
/**
* Create tool definitions for the OpenAI API
*/
createToolDefinitions(tools) {
return tools.map((tool) => ({
type: "function",
function: {
name: tool.getName(),
description: tool.getDescription(),
parameters: {
...zodToJsonSchema(tool.getParameters()),
$schema: JSON_SCHEMA_URL
}
}
}));
}
/**
* Parse tool calls from the OpenAI API response
*/
parseToolCalls(toolCalls) {
if (!toolCalls || toolCalls.length === 0) {
return void 0;
}
return toolCalls.map((toolCall) => {
if (toolCall.type === "function" && toolCall.function.name) {
try {
const args = JSON.parse(toolCall.function.arguments || "{}");
return {
toolUseId: toolCall.id,
name: toolCall.function.name,
arguments: args
};
} catch (error2) {
this.error("Error parsing tool call arguments", {
error: error2,
toolCall
});
return {
toolUseId: toolCall.id,
name: toolCall.function.name,
arguments: {}
};
}
}
this.error("Unknown tool call type", {
toolCall
});
return {
toolUseId: toolCall.id || nanoid2(),
name: toolCall.function?.name || "unknown",
arguments: {}
};
});
}
};
}
});
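/*
* Driver usage sketch. The thread object's shape is inferred from the calls made in
* sendThread above (all(), getTools(), push()); it is not a documented interface here:
*
*   const driver = new OpenAIThreadDriver({
*     apiKey: process.env.OPENAI_API_KEY,
*     model: "gpt-4o-mini",
*     temperature: 0,
*     maxTokens: 512,
*   });
*   const updated = await driver.sendThread(thread);
*   const reply = updated.all().at(-1);
*   console.log(reply.content, reply.toolCalls);
*/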
// src/model.drivers/anthropic.ts
var anthropic_exports = {};
__export(anthropic_exports, {
AnthropicThreadDriver: () => AnthropicThreadDriver
});
import { nanoid as nanoid3 } from "nanoid";
import { zodToJsonSchema as zodToJsonSchema2 } from "zod-to-json-schema";
var Anthropic, MAX_PROMPT_CHARACTERS2, AnthropicThreadDriver;
var init_anthropic = __esm({
"src/model.drivers/anthropic.ts"() {
"use strict";
init_message();
init_tracing();
init_base();
Anthropic = __require("@anthropic-ai/sdk").default;
MAX_PROMPT_CHARACTERS2 = 1e5;
AnthropicThreadDriver = class extends BaseModelDriver {
static {
__name(this, "AnthropicThreadDriver");
}
/**
* Anthropic API Client
*/
client;
/**
* Anthropic model to use
*/
model;
/**
* Temperature for response generation
*/
temperature;
/**
* Maximum number of tokens to generate
*/
maxTokens;
/**
* Whether to cache responses
*/
cache;
/**
* Tool choice mode
*/
toolChoice;
/**
* Schema for structured output
*/
responseSchema;
/**
* Returns a list of available Anthropic models
*/
static getAvailableModels() {
return [
// Claude 3 models
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-20240307",
"claude-3-5-sonnet-20240620",
"claude-3-7-sonnet-20250219",
// Claude 2 models
"claude-2.0",
"claude-2.1",
// Tagged versions
"claude-3-opus-20240229-v1:0",
"claude-3-sonnet-20240229-v1:0",
"claude-3-haiku-20240307-v1:0"
];
}
/**
* Create a new Anthropic driver
* @param config Configuration options
*/
constructor(config) {
super();
const apiKey = config?.apiKey || process.env.ANTHROPIC_API_KEY;
if (!apiKey) {
throw new Error("Anthropic API Key must be provided in config or as ANTHROPIC_API_KEY environment variable");
}
const baseClient = new Anthropic({
apiKey,
// This is needed because we're in a Node.js environment, in a server context
dangerouslyAllowBrowser: false
});
if (config?.trace ?? false) {
this.client = new Proxy(baseClient, {
get: /* @__PURE__ */ __name((target, prop, receiver) => {
const value = Reflect.get(target, prop, receiver);
if (prop === "messages" && value && typeof value === "object") {
return new Proxy(value, {
get: /* @__PURE__ */ __name((messagesTarget, messagesProp, messagesReceiver) => {
const messagesValue = Reflect.get(messagesTarget, messagesProp, messagesReceiver);
if (messagesProp === "create" && typeof messagesValue === "function") {
return traceable(messagesValue.bind(messagesTarget), {
run_type: "llm",
name: "anthropic",
...config?.traceMetadata,
aggregator: /* @__PURE__ */ __name((args, result) => {
const params = args[0] || {};
if (params.stream) {
return {
model: params.model,
input: {
messages: params.messages,
temperature: params.temperature,
max_tokens: params.max_tokens,
tools: params.tools ? params.tools.length : 0
},
streaming: true
};
}
return {
model: params.model,
input: {
messages: params.messages,
temperature: params.temperature,
max_tokens: params.max_tokens,
tools: params.tools ? params.tools.length : 0
},
output: result.content,
metrics: result.usage ? {
prompt_tokens: result.usage.input_tokens,
completion_tokens: result.usage.output_tokens,
total_tokens: result.usage.input_tokens + result.usage.output_tokens
} : void 0,
finish_reason: result.stop_reason
};
}, "aggregator")
});
}
return messagesValue;
}, "get")
});
}
return value;
}, "get")
});
} else {
this.client = baseClient;
}
this.model = config?.model || "claude-3-5-sonnet-20240620";
this.temperature = config?.temperature ?? 0.7;
this.maxTokens = config?.maxTokens ?? 1024;
this.cache = config?.cache ?? false;
this.toolChoice = config?.toolChoice;
this.responseSchema = config?.responseSchema;
}
/**
* Send a thread to the LLM and get a response
* @param thread Thread to send
* @returns Updated thread with LLM response
*/
async sendThread(thread, abortSignal) {
if (thread.all().length === 0) {
throw new Error("Cannot send an empty thread");
}
const { messages, systemMessage } = this.formatMessagesForAPI(thread);
const totalCharacters = messages.reduce((sum, msg) => sum + (Array.isArray(msg.content) ? msg.content.reduce((s, c) => s + (typeof c.text === "string" ? c.text.length : 0), 0) : typeof msg.content === "string" ? msg.content.length : 0), 0);
if (totalCharacters > MAX_PROMPT_CHARACTERS2) {
throw new Error(`The thread content is too long (${totalCharacters} characters). Try a shorter conversation.`);
}
try {
this.checkAbortSignal(abortSignal);
const params = {
model: this.model,
messages,
temperature: this.temperature,
max_tokens: this.maxTokens
};
if (systemMessage) {
params.system = systemMessage;
}
const tools = thread.getTools();
if (tools.length > 0) {
params.tools = this.createToolDefinitions(tools);
if (this.toolChoice === "any") {
params.tool_choice = {
type: "any"
};
} else if (this.toolChoice === "none") {
params.tool_choice = {
type: "none"
};
} else {
params.tool_choice = {
type: "auto"
};
}
}
if (this.responseSchema && (!tools || tools.length === 0)) {
params.tools = [
{
name: "structured_response",
description: "Return a structured response following the specified schema",
input_schema: this.responseSchema
}
];
params.tool_choice = {
type: "tool",
name: "structured_response"
};
}
this.checkAbortSignal(abortSignal);
const response = await this.client.messages.create(params);
let content = Array.isArray(response.content) ? response.content.map((block) => block.text).join("") : response.content;
let toolCalls = this.parseToolCalls(response);
if (this.responseSchema && toolCalls && toolCalls.length > 0) {
const structuredCall = toolCalls.find((call) => call.name === "structured_response");
if (structuredCall) {
content = JSON.stringify(structuredCall.arguments);
toolCalls = toolCalls.filter((call) => call.name !== "structured_response");
if (toolCalls.length === 0) {
toolCalls = void 0;
}
}
}
const message = new Message({
role: "assistant",
content,
toolCalls,
cache: this.cache
});
thread.push(message);
if (response.usage) {
const tokenUsage = {
inputTokens: response.usage.input_tokens,
outputTokens: response.usage.output_tokens,
totalTokens: response.usage.input_tokens + response.usage.output_tokens
};
thread.updateTokenUsage(tokenUsage);
this.debug("Anthropic API usage metrics", tokenUsage);
}
return thread;
} catch (error2) {
this.error("Error sending message to Anthropic API", {
error: error2
});
throw error2;
}
}
/**
* Stream a thread to the LLM and get a streaming response
* @param thread Thread to send
* @returns AsyncGenerator yielding the stream and updated thread
*/
async *streamThread(thread, abortSignal) {
if (thread.all().length === 0) {
throw new Error("Cannot stream an empty thread");
}
this.debug("Streaming message to Anthropic API", {
model: this.model
});
const { messages, systemMessage } = this.formatMessagesForAPI(thread);
const totalCharacters = messages.reduce((sum, msg) => sum + (Array.isArray(msg.content) ? msg.content.reduce((s, c) => s + (typeof c.text === "string" ? c.text.length : 0), 0) : typeof msg.content === "string" ? msg.content.length : 0), 0);
if (totalCharacters > MAX_PROMPT_CHARACTERS2) {
throw new Error(`The thread content is too long (${totalCharacters} characters). Try a shorter conversation.`);
}
try {
this.checkAbortSignal(abortSignal);
const params = {
model: this.model,
messages,
temperature: this.temperature,
stream: true,
max_tokens: this.maxTokens
};
if (systemMessage) {
params.system = systemMessage;
}
const tools = thread.getTools();
if (tools.length > 0) {
params.tools = this.createToolDefinitions(tools);
if (this.toolChoice === "any") {
params.tool_choice = {
type: "any"
};
} else if (this.toolChoice === "none") {
params.tool_choice = {
type: "none"
};
} else {
params.tool_choice = {
type: "auto"
};
}
}
if (this.responseSchema && (!tools || tools.length === 0)) {
params.tools = [
{
name: "structured_response",
description: "Return a structured response following the specified schema",
input_schema: this.responseSchema
}
];
params.tool_choice = {
type: "tool",
name: "structured_response"
};
}
this.checkAbortSignal(abortSignal);
const message = new Message({
role: "assistant",
content: "",
cache: this.cache
});
const stream = await this.client.messages.create(params);
const streamContent = this.createStreamGenerator(stream, message);
yield {
stream: streamContent,
message
};
thread.push(message);
return thread;
} catch (error2) {
this.error("Error streaming message to Anthropic API", {
error: error2
});
throw error2;
}
}
/**
* Create a stream generator that handles updates to the message
*/
async *createStreamGenerator(stream, message) {
let toolCallsStarted = false;
const currentToolCalls = [];
let contentAccumulated = "";
try {
for await (const chunk of stream) {
if (chunk.type === "content_block_delta") {
const delta = chunk.delta?.text || "";
contentAccumulated += delta;
Object.assign(message, new Message({
role: "assistant",
content: contentAccumulated,
toolCalls: message.toolCalls,
cache: message.cache
}));
yield delta;
}
if (chunk.type === "tool_use") {
if (!toolCallsStarted) {
toolCallsStarted = true;
currentToolCalls.push({
id: chunk.id,
name: chunk.name,
input: chunk.input
});
}
}
}
if (currentToolCalls.length > 0) {
let parsedToolCalls = currentToolCalls.map((call) => ({
toolUseId: call.id,
name: call.name,
arguments: call.input
}));
if (this.responseSchema && parsedToolCalls.length > 0) {
const structuredCall = parsedToolCalls.find((call) => call.name === "structured_response");
if (structuredCall) {
contentAccumulated = JSON.stringify(structuredCall.arguments);
parsedToolCalls = parsedToolCalls.filter((call) => call.name !== "structured_response");
if (parsedToolCalls.length === 0) {
parsedToolCalls = void 0;
}
}
}
const updatedMessage = new Message({
role: "assistant",
content: contentAccumulated,
toolCalls: parsedToolCalls,
cache: message.cache
});
Object.assign(message, updatedMessage);
}
} catch (error2) {
this.error("Error processing Anthropic stream", {
error: error2
});
throw error2;
}
}
/**
* Format messages from the Thread object to the Anthropic API format
*/
formatMessagesForAPI(thread) {
const formattedMessages = [];
let systemMessage;
const toolUseIds = /* @__PURE__ */ new Set();
for (const message of thread.all()) {
if (message.role === "system") {
systemMessage = message.content || "";
continue;
}
if (message.role === "assistant" && message.toolCalls && message.toolCalls.length > 0) {
const contentBlocks = [];
if (message.content) {
contentBlocks.push({
type: "text",
text: message.content
});
}
for (const toolCall of message.toolCalls) {
toolUseIds.add(toolCall.toolUseId);
contentBlocks.push({
type: "tool_use",
id: toolCall.toolUseId,
name: toolCall.name,
input: toolCall.arguments
});
}
formattedMessages.push({
role: "assistant",
content: contentBlocks.length > 0 ? contentBlocks : [
{
type: "text",
text: "I'll use a tool to help with that."
}
]
});
}
if (message.role === "assistant" && message.toolResults && message.toolResults.length > 0) {
const toolResultBlocks = [];
for (const toolResult of message.toolResults) {
const toolResultBlock = {
type: "tool_result",
tool_use_id: toolResult.toolUseId,
content: JSON.stringify(toolResult.result || toolResult.error || "No result")
};
toolResultBlocks.push(toolResultBlock);
toolUseIds.delete(toolResult.toolUseId);
}
if (toolResultBlocks.length > 0) {
formattedMessages.push({
role: "user",
content: toolResultBlocks
});
}
continue;
}
if (message.role === "tool" && message.toolResults && message.toolResults.length > 0) {
const toolResultBlocks = [];
for (const toolResult of message.toolResults) {
const toolResultBlock = {
type: "tool_result",
tool_use_id: toolResult.toolUseId,
content: JSON.stringify(toolResult.result || toolResult.error || "No result")
};
toolResultBlocks.push(toolResultBlock);
toolUseIds.delete(toolResult.toolUseId);
}
if (toolResultBlocks.length > 0) {
formattedMessages.push({
role: "user",
content: toolResultBlocks
});
}
continue;
}
if (message.role === "user" && message.toolResults && message.toolResults.length > 0) {
const validToolResults = message.toolResults;
if (validToolResults.length > 0) {
const contentBlocks = [];
if (message.content) {
contentBlocks.push({
type: "text",
text: message.content
});
}
for (const toolResult of validToolResults) {
contentBlocks.push({
type: "tool_result",
tool_use_id: toolResult.toolUseId,
content: JSON.stringify(toolResult.result || toolResult.error || "No result")
});
toolUseIds.delete(toolResult.toolUseId);
}
formattedMessages.push({
role: "user",
content: contentBlocks
});
} else if (message.content) {
formattedMessages.push({
role: "user",
content: message.content
});
}
continue;
}
if (message.attachments && message.attachments.length > 0) {
const contentParts = [];
if (message.content) {
contentParts.push({
type: "text",
text: message.content
});
}
for (const attachment of message.attachments) {
if (attachment.type === "image" && attachment.source.uri) {
contentParts.push({
type: "image",
source: {
type: "base64",
media_type: `image/${attachment.format}`,
data: attachment.source.uri.replace(/^data:image\/\w+;base64,/, "")
}
});
}
}
formattedMessages.push({
role: message.role === "user" ? "user" : "assistant",
content: contentParts
});
continue;
}
if (message.role === "user" || message.role === "assistant") {
const hasToolCalls = message.toolCalls && message.toolCalls.length > 0;
const hasToolResults = message.toolResults && message.toolResults.length > 0;
if (!hasToolCalls && !hasToolResults) {
formattedMessages.push({
role: message.role,
content: message.content || ""
});
} else {
// messages carrying tool calls or tool results were already emitted by the branches above
}
}
}
if (toolUseIds.size > 0) {
// some tool_use blocks have no matching tool_result; they are sent to the API as-is
}
return {
messages: formattedMessages,
systemMessage
};
}
/**
* Create tool definitions for the Anthropic API
*/
createToolDefinitions(tools) {
return tools.map((tool) => ({
name: tool.getName(),
description: tool.getDescription(),
input_schema: zodToJsonSchema2(tool.getParameters())
}));
}
/**
* Parse tool calls from the Anthropic API response
*/
parseToolCalls(response) {
if (!response.content || !Array.isArray(response.content)) {
return void 0;
}
const toolCalls = [];
for (const block of response.content) {
if (block.type === "tool_use") {
try {
const toolCall = {
toolUseId: block.id || nanoid3(),
name: block.name,
arguments: block.input || {}
};
toolCalls.push(toolCall);
} catch (error2) {
this.error("Error parsing tool call arguments", {
error: error2,
block
});
}
}
}
return toolCalls.length > 0 ? toolCalls : void 0;
}
};
}
});
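/*
* Structured-output sketch for the Anthropic driver, mirroring the structured_response
* tool path in sendThread above (schema and thread contents are illustrative;
* ANTHROPIC_API_KEY is assumed to be set in the environment):
*
*   const driver = new AnthropicThreadDriver({
*     model: "claude-3-5-sonnet-20240620",
*     maxTokens: 1024,
*     responseSchema: {
*       type: "object",
*       properties: { sentiment: { type: "string" } },
*       required: ["sentiment"],
*     },
*   });
*   const updated = await driver.sendThread(thread);
*   updated.all().at(-1).json; // -> { sentiment: "..." } when the model follows the schema
*/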
// src/model.drivers/gemini.ts
var gemini_exports = {};
__export(gemini_exports, {
GeminiThreadDriver: () => GeminiThreadDriver,
Type: () => Type
});
import { GoogleGenAI } from "@google/genai";
import { nanoid as nanoid4 } from "nanoid";
import { zodToJsonSchema as zodToJsonSchema3 } from "zod-to-json-schema";
import { Type } from "@google/genai";
var DEFAULT_MODEL, MAX_PROMPT_CHARACTERS3, GeminiThreadDriver;
var init_gemini = __esm({
"src/model.drivers/gemini.ts"() {
"use strict";
init_message();
init_tracing();
init_base();
DEFAULT_MODEL = "gemini-2.5-flash";
MAX_PROMPT_CHARACTERS3 = 32e4;
GeminiThreadDriver = class extends BaseModelDriver {
static {
__name(this, "GeminiThreadDriver");
}
genAI;
model;
temperatur