@flatfile/improv
Version:
A powerful TypeScript library for building AI agents with multi-threaded conversations, tool execution, and event handling capabilities
1 line • 2.3 MB
Source Map (JSON)
{"version":3,"sources":["../src/event-source.ts","../src/message.ts","../src/tracing/tracer.ts","../src/tracing/registry.ts","../src/tracing/index.ts","../src/model.drivers/base.ts","../src/model.drivers/openai.ts","../src/model.drivers/anthropic.ts","../src/model.drivers/gemini.ts","../src/model.drivers/cerebras.ts","../node_modules/retry/lib/retry_operation.js","../node_modules/retry/lib/retry.js","../node_modules/retry/index.js","../node_modules/p-retry/index.js","../node_modules/eventemitter3/index.js","../node_modules/p-finally/index.js","../node_modules/p-timeout/index.js","../node_modules/p-queue/dist/lower-bound.js","../node_modules/p-queue/dist/priority-queue.js","../node_modules/p-queue/dist/index.js","../node_modules/langsmith/dist/singletons/fetch.cjs","../node_modules/langsmith/dist/utils/async_caller.cjs","../node_modules/langsmith/dist/utils/messages.cjs","../node_modules/langsmith/dist/utils/_uuid.cjs","../node_modules/langsmith/dist/utils/warn.cjs","../node_modules/semver/internal/constants.js","../node_modules/semver/internal/debug.js","../node_modules/semver/internal/re.js","../node_modules/semver/internal/parse-options.js","../node_modules/semver/internal/identifiers.js","../node_modules/semver/classes/semver.js","../node_modules/semver/functions/parse.js","../node_modules/semver/functions/valid.js","../node_modules/semver/functions/clean.js","../node_modules/semver/functions/inc.js","../node_modules/semver/functions/diff.js","../node_modules/semver/functions/major.js","../node_modules/semver/functions/minor.js","../node_modules/semver/functions/patch.js","../node_modules/semver/functions/prerelease.js","../node_modules/semver/functions/compare.js","../node_modules/semver/functions/rcompare.js","../node_modules/semver/functions/compare-loose.js","../node_modules/semver/functions/compare-build.js","../node_modules/semver/functions/sort.js","../node_modules/semver/functions/rsort.js","../node_modules/semver/functions/gt.js","../node_modules/semver/functions/lt.js","../node_modules/semver/functions/eq.js","../node_modules/semver/functions/neq.js","../node_modules/semver/functions/gte.js","../node_modules/semver/functions/lte.js","../node_modules/semver/functions/cmp.js","../node_modules/semver/functions/coerce.js","../node_modules/semver/internal/lrucache.js","../node_modules/semver/classes/range.js","../node_modules/semver/classes/comparator.js","../node_modules/semver/functions/satisfies.js","../node_modules/semver/ranges/to-comparators.js","../node_modules/semver/ranges/max-satisfying.js","../node_modules/semver/ranges/min-satisfying.js","../node_modules/semver/ranges/min-version.js","../node_modules/semver/ranges/valid.js","../node_modules/semver/ranges/outside.js","../node_modules/semver/ranges/gtr.js","../node_modules/semver/ranges/ltr.js","../node_modules/semver/ranges/intersects.js","../node_modules/semver/ranges/simplify.js","../node_modules/semver/ranges/subset.js","../node_modules/semver/index.js","../node_modules/langsmith/dist/utils/prompts.cjs","../node_modules/langsmith/dist/utils/error.cjs","../node_modules/langsmith/dist/utils/fast-safe-stringify/index.cjs","../node_modules/langsmith/dist/client.cjs","../node_modules/langsmith/dist/index.cjs","../node_modules/langsmith/dist/utils/env.cjs","../node_modules/langsmith/dist/env.cjs","../node_modules/langsmith/dist/singletons/constants.cjs","../node_modules/langsmith/dist/run_trees.cjs","../node_modules/langsmith/dist/singletons/traceable.cjs","../node_modules/langsmith/dist/utils/asserts.cjs","../node_modules/l
angsmith/dist/traceable.cjs","../node_modules/langsmith/traceable.cjs","../node_modules/ms/index.js","../node_modules/debug/src/common.js","../node_modules/debug/src/browser.js","../node_modules/has-flag/index.js","../node_modules/supports-color/index.js","../node_modules/debug/src/node.js","../node_modules/debug/src/index.js","../node_modules/@kwsites/file-exists/src/index.ts","../node_modules/@kwsites/file-exists/index.ts","../node_modules/@kwsites/promise-deferred/src/index.ts","../node_modules/simple-git/src/lib/args/pathspec.ts","../node_modules/simple-git/src/lib/errors/git-error.ts","../node_modules/simple-git/src/lib/errors/git-response-error.ts","../node_modules/simple-git/src/lib/errors/task-configuration-error.ts","../node_modules/simple-git/src/lib/utils/util.ts","../node_modules/simple-git/src/lib/utils/argument-filters.ts","../node_modules/simple-git/src/lib/utils/exit-codes.ts","../node_modules/simple-git/src/lib/utils/git-output-streams.ts","../node_modules/simple-git/src/lib/utils/line-parser.ts","../node_modules/simple-git/src/lib/utils/simple-git-options.ts","../node_modules/simple-git/src/lib/utils/task-options.ts","../node_modules/simple-git/src/lib/utils/task-parser.ts","../node_modules/simple-git/src/lib/utils/index.ts","../node_modules/simple-git/src/lib/tasks/check-is-repo.ts","../node_modules/simple-git/src/lib/responses/CleanSummary.ts","../node_modules/simple-git/src/lib/tasks/task.ts","../node_modules/simple-git/src/lib/tasks/clean.ts","../node_modules/simple-git/src/lib/responses/ConfigList.ts","../node_modules/simple-git/src/lib/tasks/config.ts","../node_modules/simple-git/src/lib/tasks/diff-name-status.ts","../node_modules/simple-git/src/lib/tasks/grep.ts","../node_modules/simple-git/src/lib/tasks/reset.ts","../node_modules/simple-git/src/lib/git-logger.ts","../node_modules/simple-git/src/lib/runners/tasks-pending-queue.ts","../node_modules/simple-git/src/lib/runners/git-executor-chain.ts","../node_modules/simple-git/src/lib/runners/git-executor.ts","../node_modules/simple-git/src/lib/task-callback.ts","../node_modules/simple-git/src/lib/tasks/change-working-directory.ts","../node_modules/simple-git/src/lib/tasks/checkout.ts","../node_modules/simple-git/src/lib/tasks/count-objects.ts","../node_modules/simple-git/src/lib/parsers/parse-commit.ts","../node_modules/simple-git/src/lib/tasks/commit.ts","../node_modules/simple-git/src/lib/tasks/first-commit.ts","../node_modules/simple-git/src/lib/tasks/hash-object.ts","../node_modules/simple-git/src/lib/responses/InitSummary.ts","../node_modules/simple-git/src/lib/tasks/init.ts","../node_modules/simple-git/src/lib/args/log-format.ts","../node_modules/simple-git/src/lib/responses/DiffSummary.ts","../node_modules/simple-git/src/lib/parsers/parse-diff-summary.ts","../node_modules/simple-git/src/lib/parsers/parse-list-log-summary.ts","../node_modules/simple-git/src/lib/tasks/diff.ts","../node_modules/simple-git/src/lib/tasks/log.ts","../node_modules/simple-git/src/lib/responses/MergeSummary.ts","../node_modules/simple-git/src/lib/responses/PullSummary.ts","../node_modules/simple-git/src/lib/parsers/parse-remote-objects.ts","../node_modules/simple-git/src/lib/parsers/parse-remote-messages.ts","../node_modules/simple-git/src/lib/parsers/parse-pull.ts","../node_modules/simple-git/src/lib/parsers/parse-merge.ts","../node_modules/simple-git/src/lib/tasks/merge.ts","../node_modules/simple-git/src/lib/parsers/parse-push.ts","../node_modules/simple-git/src/lib/tasks/push.ts","../node_modules/simple-git/src/lib/tasks/show.ts",".
./node_modules/simple-git/src/lib/responses/FileStatusSummary.ts","../node_modules/simple-git/src/lib/responses/StatusSummary.ts","../node_modules/simple-git/src/lib/tasks/status.ts","../node_modules/simple-git/src/lib/tasks/version.ts","../node_modules/simple-git/src/lib/simple-git-api.ts","../node_modules/simple-git/src/lib/runners/scheduler.ts","../node_modules/simple-git/src/lib/tasks/apply-patch.ts","../node_modules/simple-git/src/lib/responses/BranchDeleteSummary.ts","../node_modules/simple-git/src/lib/parsers/parse-branch-delete.ts","../node_modules/simple-git/src/lib/responses/BranchSummary.ts","../node_modules/simple-git/src/lib/parsers/parse-branch.ts","../node_modules/simple-git/src/lib/tasks/branch.ts","../node_modules/simple-git/src/lib/responses/CheckIgnore.ts","../node_modules/simple-git/src/lib/tasks/check-ignore.ts","../node_modules/simple-git/src/lib/tasks/clone.ts","../node_modules/simple-git/src/lib/parsers/parse-fetch.ts","../node_modules/simple-git/src/lib/tasks/fetch.ts","../node_modules/simple-git/src/lib/parsers/parse-move.ts","../node_modules/simple-git/src/lib/tasks/move.ts","../node_modules/simple-git/src/lib/tasks/pull.ts","../node_modules/simple-git/src/lib/responses/GetRemoteSummary.ts","../node_modules/simple-git/src/lib/tasks/remote.ts","../node_modules/simple-git/src/lib/tasks/stash-list.ts","../node_modules/simple-git/src/lib/tasks/sub-module.ts","../node_modules/simple-git/src/lib/responses/TagList.ts","../node_modules/simple-git/src/lib/tasks/tag.ts","../node_modules/simple-git/src/git.js","../node_modules/simple-git/src/lib/api.ts","../node_modules/simple-git/src/lib/errors/git-construct-error.ts","../node_modules/simple-git/src/lib/errors/git-plugin-error.ts","../node_modules/simple-git/src/lib/plugins/abort-plugin.ts","../node_modules/simple-git/src/lib/plugins/block-unsafe-operations-plugin.ts","../node_modules/simple-git/src/lib/plugins/command-config-prefixing-plugin.ts","../node_modules/simple-git/src/lib/plugins/completion-detection.plugin.ts","../node_modules/simple-git/src/lib/plugins/custom-binary.plugin.ts","../node_modules/simple-git/src/lib/plugins/error-detection.plugin.ts","../node_modules/simple-git/src/lib/plugins/plugin-store.ts","../node_modules/simple-git/src/lib/plugins/progress-monitor-plugin.ts","../node_modules/simple-git/src/lib/plugins/spawn-options-plugin.ts","../node_modules/simple-git/src/lib/plugins/timout-plugin.ts","../node_modules/simple-git/src/lib/plugins/suffix-paths.plugin.ts","../node_modules/simple-git/src/lib/git-factory.ts","../node_modules/simple-git/src/lib/runners/promise-wrapped.ts","../node_modules/simple-git/src/esm.mjs","../node_modules/@braintrust/core/dist/index.mjs","../node_modules/@asteasolutions/zod-to-openapi/dist/lib/zod-is-type.js","../node_modules/@asteasolutions/zod-to-openapi/dist/zod-extensions.js","../node_modules/@asteasolutions/zod-to-openapi/dist/lib/object-set.js","../node_modules/@asteasolutions/zod-to-openapi/dist/lib/lodash.js","../node_modules/@asteasolutions/zod-to-openapi/dist/openapi-metadata.js","../node_modules/@asteasolutions/zod-to-openapi/dist/openapi-registry.js","../node_modules/@asteasolutions/zod-to-openapi/dist/errors.js","../node_modules/@asteasolutions/zod-to-openapi/dist/lib/enum-info.js","../node_modules/@asteasolutions/zod-to-openapi/dist/openapi-generator.js","../node_modules/@asteasolutions/zod-to-openapi/dist/v3.0/specifics.js","../node_modules/@asteasolutions/zod-to-openapi/dist/v3.0/openapi-generator.js","../node_modules/@asteasolutions/zod-to-openapi/dist/v3.1/
specifics.js","../node_modules/@asteasolutions/zod-to-openapi/dist/v3.1/openapi-generator.js","../node_modules/@asteasolutions/zod-to-openapi/dist/index.js","../node_modules/@braintrust/core/typespecs/dist/index.mjs","../node_modules/@vercel/functions/headers.js","../node_modules/@vercel/functions/get-env.js","../node_modules/@vercel/functions/get-context.js","../node_modules/@vercel/functions/wait-until.js","../node_modules/@vercel/functions/middleware.js","../node_modules/@vercel/functions/index.js","../node_modules/mustache/mustache.mjs","../node_modules/eventsource-parser/src/parse.ts","../node_modules/color-name/index.js","../node_modules/color-convert/conversions.js","../node_modules/color-convert/route.js","../node_modules/color-convert/index.js","../node_modules/ansi-styles/index.js","../node_modules/chalk/source/util.js","../node_modules/chalk/source/templates.js","../node_modules/chalk/source/index.js","../node_modules/pluralize/pluralize.js","../node_modules/cli-progress/lib/eta.js","../node_modules/cli-progress/lib/terminal.js","../node_modules/ansi-regex/index.js","../node_modules/strip-ansi/index.js","../node_modules/is-fullwidth-code-point/index.js","../node_modules/emoji-regex/index.js","../node_modules/string-width/index.js","../node_modules/cli-progress/lib/format-value.js","../node_modules/cli-progress/lib/format-bar.js","../node_modules/cli-progress/lib/format-time.js","../node_modules/cli-progress/lib/formatter.js","../node_modules/cli-progress/lib/options.js","../node_modules/cli-progress/lib/generic-bar.js","../node_modules/cli-progress/lib/single-bar.js","../node_modules/cli-progress/lib/multi-bar.js","../node_modules/cli-progress/presets/legacy.js","../node_modules/cli-progress/presets/shades-classic.js","../node_modules/cli-progress/presets/shades-grey.js","../node_modules/cli-progress/presets/rect.js","../node_modules/cli-progress/presets/index.js","../node_modules/cli-progress/cli-progress.js","../node_modules/slugify/slugify.js","../node_modules/braintrust/dist/index.mjs","../src/index.ts","../src/tool.ts","../src/events.ts","../src/agent.ts","../src/decorators.ts","../src/thread.ts","../src/agent.tool.ts","../src/evaluator.ts","../src/solo.ts","../src/gig.ts","../src/piece.ts","../src/evaluators/three-keyed-lock.ts","../src/evaluators/agent-tool.evaluator.ts","../src/model.drivers/index.ts","../src/model.drivers/cohere.ts","../src/model.drivers/bedrock.ts","../src/model.drivers/huggingface.ts","../src/model.drivers/groq.ts","../src/tracing/api.ts","../src/tracing/adapters/index.ts","../src/tracing/adapters/langsmith.ts","../src/tracing/adapters/braintrust.ts"],"sourcesContent":["import { EventEmitter2 } from \"eventemitter2\";\n\n/**\n * Abstract base class for all event-emitting classes\n * Provides event forwarding functionality and type-safe event emission\n */\nexport abstract class EventSource extends EventEmitter2 implements EventSourceInterface {\n constructor() {\n super({\n wildcard: true,\n });\n }\n\n /**\n * Forward all events from a source EventEmitter2 instance\n * Preserves the original event name and merges any additional context\n */\n protected forwardEvents(source: EventSourceInterface, context: Record<string, any> = {}): void {\n const self = this;\n source.on(\"**\", function (data) {\n // @ts-ignore - event property is available in the wildcard listener context\n const eventName = this.event as string;\n // Forward the event with the same name and add context\n self.emit(eventName, { ...data, ...context });\n });\n }\n\n protected 
src/message.ts:

/**
 * Implement a class that represents a message in the thread.
 * - content - the content of the message
 * - json - the json content of the message (if any) - support a number of ways of finding it
 * - role - the role of the message (user, assistant, tool)
 * - toolCalls - the tool calls that were made in the message
 * - toolResults - the results of the tool calls
 * - attachments - array of attachments (documents, images, videos)
 * - cache: boolean - whether the message is cached
 */
export type MessageRole = "system" | "user" | "assistant" | "tool";

export type AttachmentType = "document" | "image" | "video";

export type DocumentFormat =
  | "pdf"
  | "csv"
  | "doc"
  | "docx"
  | "xls"
  | "xlsx"
  | "html"
  | "txt"
  | "md";
export type ImageFormat = "png" | "jpeg" | "gif" | "webp";
export type VideoFormat =
  | "mkv"
  | "mov"
  | "mp4"
  | "webm"
  | "flv"
  | "mpeg"
  | "mpg"
  | "wmv"
  | "three_gp";

export interface AttachmentSource {
  bytes?: Uint8Array;
  uri?: string;
  bucketOwner?: string;
}

export interface BaseAttachment {
  type: AttachmentType;
  source: AttachmentSource;
}

export interface DocumentAttachment extends BaseAttachment {
  type: "document";
  format: DocumentFormat;
  name: string;
}

export interface ImageAttachment extends BaseAttachment {
  type: "image";
  format: ImageFormat;
}

export interface VideoAttachment extends BaseAttachment {
  type: "video";
  format: VideoFormat;
}

export type Attachment = DocumentAttachment | ImageAttachment | VideoAttachment;

export interface ToolCall {
  name: string;
  toolUseId: string;
  arguments: Record<string, any>;
}

export interface Reasoning {
  text: string;
  type: "text";
  signature: string;
}

export interface ToolResult {
  name: string;
  toolUseId: string;
  result: any;
  error?: string;
}

export class Message {
  private _content?: string;
  private _role: MessageRole;
  private _reasoning: Reasoning[];
  private _toolCalls: ToolCall[];
  private _toolResults: ToolResult[];
  private _attachments: Attachment[];
  private _cache: boolean;

  constructor({
    content,
    role = "user",
    toolCalls = [],
    toolResults = [],
    attachments = [],
    reasoning = [],
    cache = false,
  }: {
    content?: string | undefined;
    role?: MessageRole;
    reasoning?: Reasoning[];
    toolCalls?: ToolCall[];
    toolResults?: ToolResult[];
    attachments?: Attachment[];
    cache?: boolean;
  }) {
    this._content = content;
    this._role = role;
    this._toolCalls = toolCalls;
    this._reasoning = reasoning;
    this._toolResults = toolResults;
    this._attachments = attachments;
    this._cache = cache;
  }

  get content(): string | undefined {
    return this._content;
  }

  get role(): MessageRole {
    return this._role;
  }

  get toolCalls(): ToolCall[] {
    return this._toolCalls;
  }

  get toolResults(): ToolResult[] {
    return this._toolResults;
  }

  get attachments(): Attachment[] {
    return this._attachments;
  }

  get cache(): boolean {
    return this._cache;
  }

  get reasoning(): Reasoning[] {
    return this._reasoning;
  }

  isToolResponse(): boolean {
    return this._toolResults.length > 0;
  }

  isAssistantMessage(): boolean {
    return this._role === "assistant";
  }

  isUserMessage(): boolean {
    return this._role === "user";
  }

  isSystemMessage(): boolean {
    return this._role === "system";
  }

  isToolCall(): boolean {
    // Add debug logging
    // console.debug(
    //   `[DEBUG] isToolCall check: ${this._role} has ${this._toolCalls.length} tool calls`,
    // );
    // if (this._toolCalls.length > 0) {
    //   console.debug(`[DEBUG] Tool calls found:`, JSON.stringify(this._toolCalls, null, 2));
    // }
    return this._toolCalls.length > 0;
  }

  /**
   * Get attachments of a specific type
   */
  public getAttachmentsByType<T extends Attachment>(type: AttachmentType): T[] {
    return this._attachments.filter((a) => a.type === type) as T[];
  }

  /**
   * Add an attachment to the message
   */
  public addAttachment(attachment: Attachment): void {
    this._attachments.push(attachment);
  }

  /**
   * Remove an attachment from the message
   */
  public removeAttachment(index: number): void {
    if (index >= 0 && index < this._attachments.length) {
      this._attachments.splice(index, 1);
    }
  }

  /**
   * Attempts to parse and return JSON content from the message
   * Supports multiple formats:
   * 1. Direct JSON string
   * 2. JSON within markdown code blocks
   * 3. JSON within specific delimiters
   */
  get json(): any | null {
    try {
      // Try direct JSON parse
      if (this._content) {
        return JSON.parse(this._content);
      }
    } catch {
      try {
        // Try extracting from markdown code blocks
        if (this._content) {
          const codeBlockMatch = this._content.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
          if (codeBlockMatch?.[1]) {
            return JSON.parse(codeBlockMatch[1]);
          }
        }
      } catch {
        try {
          // Try extracting from specific delimiters
          if (this._content) {
            const jsonMatch = this._content.match(/\{[\s\S]*\}/);
            if (jsonMatch) {
              return JSON.parse(jsonMatch[0]);
            }
          }
        } catch {
          // If all parsing attempts fail, return null
          return null;
        }
      }
    }
    return null;
  }
}
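The json getter is strictly ordered: a direct parse first, then a fenced code block, and, only when a matched block fails to parse, the outermost brace-delimited span; anything else yields null. A short sketch of the common paths, assuming Message is exported from the package root:

import { Message } from "@flatfile/improv"; // assumed export

// Path 1: the whole content is JSON
const direct = new Message({ role: "assistant", content: '{"status": "ok"}' });
console.log(direct.json); // { status: "ok" }

// Path 2: JSON wrapped in a fenced code block
const fenced = new Message({
  role: "assistant",
  content: 'Here is the result:\n```json\n{"status": "ok"}\n```',
});
console.log(fenced.json); // { status: "ok" }

// No parseable JSON anywhere
const prose = new Message({ role: "assistant", content: "No data today." });
console.log(prose.json); // null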
src/tracing/tracer.ts:

/**
 * Interface for tracing function executions in Improv
 */
export interface Tracer {
  /**
   * Trace a function execution
   * @param fn The function to trace
   * @param metadata Metadata about the trace
   * @returns The wrapped function that will be traced
   */
  traceable<T extends (...args: any[]) => any>(fn: T, metadata: TraceMetadata): T;
}

/**
 * Metadata for a trace
 */
export interface TraceMetadata {
  /** The name of the trace */
  name: string;
  /** The type of run (e.g., "tool", "llm", "agent") */
  run_type?: string;
  /** Any additional metadata */
  [key: string]: unknown;
}

/**
 * A tracer that does nothing
 */
export class NullTracer implements Tracer {
  traceable<T extends (...args: any[]) => any>(fn: T, _metadata: TraceMetadata): T {
    // Simply returns the original function without any tracing
    return fn;
  }
}

// The default tracer does nothing
export const defaultTracer = new NullTracer();

src/tracing/registry.ts:

import { type Tracer, defaultTracer } from "./tracer";

/**
 * Registry for managing tracers in the system
 */
export class TracerRegistry {
  private static instance: TracerRegistry;
  private tracers: Map<string, Tracer> = new Map();
  private activeTracer = "null";

  private constructor() {
    // Register the default null tracer
    this.register("null", defaultTracer);
  }

  /**
   * Get the singleton instance of the registry
   */
  public static getInstance(): TracerRegistry {
    if (!TracerRegistry.instance) {
      TracerRegistry.instance = new TracerRegistry();
    }
    return TracerRegistry.instance;
  }

  /**
   * Register a tracer with the registry
   * @param name Name of the tracer
   * @param tracer The tracer implementation
   */
  public register(name: string, tracer: Tracer): void {
    this.tracers.set(name, tracer);
  }

  /**
   * Set the active tracer by name
   * @param name Name of the tracer to activate
   * @throws Error if the tracer doesn't exist
   */
  public setActiveTracer(name: string): void {
    if (!this.tracers.has(name)) {
      throw new Error(`Tracer '${name}' is not registered`);
    }
    this.activeTracer = name;
  }

  /**
   * Get the currently active tracer
   */
  public getActiveTracer(): Tracer {
    return this.tracers.get(this.activeTracer) || defaultTracer;
  }

  /**
   * Check if tracing is enabled (i.e., active tracer is not the null tracer)
   */
  public isTracingEnabled(): boolean {
    return this.activeTracer !== "null";
  }
}

// Export a convenience function to get the active tracer
export function getTracer(): Tracer {
  return TracerRegistry.getInstance().getActiveTracer();
}

// Export a convenience function to check if tracing is enabled
export function isTracingEnabled(): boolean {
  return TracerRegistry.getInstance().isTracingEnabled();
}

src/tracing/index.ts:

import { TracerRegistry, getTracer, isTracingEnabled } from "./registry";
import { NullTracer, type TraceMetadata, type Tracer, defaultTracer } from "./tracer";

/**
 * Make a function traceable using the currently active tracer
 *
 * @param fn The function to trace
 * @param metadata Metadata about the trace
 * @returns The traced function
 */
export function traceable<T extends (...args: any[]) => any>(fn: T, metadata: TraceMetadata): T {
  // Only trace if tracing is enabled
  if (!isTracingEnabled()) {
    return fn;
  }

  // Get the active tracer and trace the function
  return getTracer().traceable(fn, metadata);
}

// Export everything from the tracer module
export { NullTracer, defaultTracer, TracerRegistry, getTracer, isTracingEnabled };
export type { Tracer, TraceMetadata };
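The registry is a name-keyed singleton with "null" as the inactive default, and traceable() consults whichever tracer is active. A sketch of plugging in a custom tracer; ConsoleTracer and the "console" key are illustrative, the import path is assumed, and the map shows LangSmith and Braintrust adapters under src/tracing/adapters for the real integrations:

import { TracerRegistry, traceable, type TraceMetadata, type Tracer } from "@flatfile/improv"; // assumed export

class ConsoleTracer implements Tracer {
  traceable<T extends (...args: any[]) => any>(fn: T, metadata: TraceMetadata): T {
    // Log the trace name and run type around every call, then delegate
    return ((...args: any[]) => {
      console.log(`[trace:${metadata.run_type ?? "fn"}] ${metadata.name}`);
      return fn(...args);
    }) as T;
  }
}

const registry = TracerRegistry.getInstance();
registry.register("console", new ConsoleTracer());
registry.setActiveTracer("console"); // throws if the name was never registered

// traceable() now routes through ConsoleTracer instead of returning fn untouched
const add = traceable((a: number, b: number) => a + b, { name: "add", run_type: "tool" });
add(1, 2); // logs "[trace:tool] add" and returns 3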
src/model.drivers/base.ts:

import { EventSource } from "../event-source";
import type { Message } from "../message";
import type { Thread, ThreadDriver } from "../thread";

/**
 * Abstract base class for model drivers.
 * Implements common functionality and defines the interface for model-specific implementations.
 */
export abstract class BaseModelDriver extends EventSource implements ThreadDriver {
  /**
   * Check if the abort signal has been triggered and throw an error if so
   */
  protected checkAbortSignal(abortSignal?: AbortSignal): void {
    if (abortSignal?.aborted) {
      throw new Error("Thread execution aborted");
    }
  }

  /**
   * Execute an async operation with abort signal checking before and after
   */
  protected async withAbortCheck<T>(
    abortSignal: AbortSignal | undefined,
    operation: () => Promise<T>,
  ): Promise<T> {
    this.checkAbortSignal(abortSignal);
    const result = await operation();
    this.checkAbortSignal(abortSignal);
    return result;
  }

  /**
   * Wrap an async generator with abort signal checking on each iteration
   */
  protected async *wrapStreamWithAbort<T>(
    abortSignal: AbortSignal | undefined,
    stream: AsyncGenerator<T>,
  ): AsyncGenerator<T> {
    try {
      for await (const chunk of stream) {
        this.checkAbortSignal(abortSignal);
        yield chunk;
      }
    } finally {
      this.checkAbortSignal(abortSignal);
    }
  }

  /**
   * Process and send a thread to the model and return the updated thread
   */
  abstract sendThread(thread: Thread, abortSignal?: AbortSignal): Promise<Thread>;

  /**
   * Stream a response from the model
   */
  abstract streamThread(
    thread: Thread,
    abortSignal?: AbortSignal,
  ): AsyncGenerator<{ stream: AsyncGenerator<string, void>; message: Message }, Thread>;
}
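The three protected helpers define the cancellation contract every driver shares: one-shot calls are checked before and after, and streams are re-checked on every chunk. A minimal sketch of a concrete driver built on them; EchoDriver is hypothetical and the import paths are assumed, not taken from the package:

import { BaseModelDriver, Message, type Thread } from "@flatfile/improv"; // assumed exports

class EchoDriver extends BaseModelDriver {
  async sendThread(thread: Thread, abortSignal?: AbortSignal): Promise<Thread> {
    // withAbortCheck throws "Thread execution aborted" if the signal fired
    // before the operation started or while it ran
    return this.withAbortCheck(abortSignal, async () => {
      thread.push(new Message({ role: "assistant", content: "echo" }));
      return thread;
    });
  }

  async *streamThread(
    thread: Thread,
    abortSignal?: AbortSignal,
  ): AsyncGenerator<{ stream: AsyncGenerator<string, void>; message: Message }, Thread> {
    async function* tokens(): AsyncGenerator<string, void> {
      yield "ec";
      yield "ho";
    }
    const message = new Message({ role: "assistant", content: "echo" });
    // wrapStreamWithAbort re-checks the signal as each chunk passes through
    yield { stream: this.wrapStreamWithAbort(abortSignal, tokens()), message };
    thread.push(message);
    return thread;
  }
}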
src/model.drivers/openai.ts:

/**
 * Implement a class that represents an OpenAI client
 * and provides a set of tools for converting messages to and from the LLM.
 * - sendThread(thread: Thread): Thread - send a message to the LLM
 * - streamThread(thread: Thread): AsyncGenerator<string, Thread> - stream a message to the LLM
 */

import { nanoid } from "nanoid";
import OpenAI from "openai";
import { zodToJsonSchema } from "zod-to-json-schema";
import { Message, type ToolCall } from "../message";
import type { Thread } from "../thread";
import type { Tool } from "../tool";
import { traceable } from "../tracing";
import { BaseModelDriver } from "./base";

// Constants
const MAX_PROMPT_CHARACTERS = 100_000;
const JSON_SCHEMA_URL = "http://json-schema.org/draft-07/schema#";

/**
 * Available OpenAI models
 */
export type OpenAIModel =
  | "gpt-4o"
  | "gpt-4o-mini"
  | "gpt-4"
  | "gpt-4-turbo"
  | "gpt-3.5-turbo"
  | "gpt-4-vision-preview";

/**
 * Configuration options for the OpenAI driver
 */
/**
 * Schema for structured output
 */
export interface OpenAIResponseSchema {
  type: "string" | "integer" | "number" | "boolean" | "array" | "object";
  format?: string;
  description?: string;
  nullable?: boolean;
  enum?: string[];
  maxItems?: string;
  minItems?: string;
  properties?: Record<string, OpenAIResponseSchema>;
  required?: string[];
  propertyOrdering?: string[];
  items?: OpenAIResponseSchema;
}

export interface OpenAIConfig {
  /**
   * OpenAI API Key
   */
  apiKey?: string;

  /**
   * Model to use
   * @default "gpt-4o"
   */
  model?: OpenAIModel;

  /**
   * Temperature for response generation
   * @default 0.7
   */
  temperature?: number;

  /**
   * Maximum number of tokens to generate
   */
  maxTokens?: number;

  /**
   * Whether to cache responses
   * @default false
   */
  cache?: boolean;

  /**
   * Whether to enable tracing
   * @default false
   */
  trace?: boolean;

  /**
   * Additional metadata for tracing
   */
  traceMetadata?: Record<string, unknown>;

  /**
   * Schema for structured output
   */
  responseSchema?: OpenAIResponseSchema;
}

/**
 * Represents an OpenAI client and provides tools for converting messages to/from the LLM
 */
export class OpenAIThreadDriver extends BaseModelDriver {
  /**
   * OpenAI API Client
   */
  private client: OpenAI;

  /**
   * OpenAI model to use
   */
  private model: OpenAIModel | string;

  /**
   * Temperature for response generation
   */
  private temperature: number;

  /**
   * Maximum number of tokens to generate
   */
  private maxTokens?: number;

  /**
   * Whether to cache responses
   */
  private cache: boolean;

  /**
   * Schema for structured output
   */
  private responseSchema?: OpenAIResponseSchema;

  /**
   * Returns a list of available OpenAI models
   */
  static getAvailableModels(): OpenAIModel[] {
    return [
      "gpt-4o",
      "gpt-4o-mini",
      "gpt-4",
      "gpt-4-turbo",
      "gpt-3.5-turbo",
      "gpt-4-vision-preview",
    ];
  }

  /**
   * Create a new OpenAI driver
   * @param config Configuration options
   */
  constructor(config: OpenAIConfig = {}) {
    super();

    // Get API Key from config or environment variable
    const apiKey = config.apiKey || process.env.OPENAI_API_KEY;
    if (!apiKey) {
      throw new Error(
        "OpenAI API Key must be provided in config or as OPENAI_API_KEY environment variable",
      );
    }

    // Create a base client
    const baseClient = new OpenAI({
      apiKey,
    });

    // Only wrap with tracing if trace is enabled (default to false)
    if (config.trace ?? false) {
      // Create a proxy around the OpenAI client to intercept API calls
      this.client = new Proxy(baseClient, {
        get: (target, prop, receiver) => {
          const value = Reflect.get(target, prop, receiver);

          // Handle nested properties (chat.completions.create)
          if (prop === "chat" && value && typeof value === "object") {
            return new Proxy(value, {
              get: (chatTarget, chatProp, chatReceiver) => {
                const chatValue = Reflect.get(chatTarget, chatProp, chatReceiver);

                // Handle completions property
                if (chatProp === "completions" && chatValue && typeof chatValue === "object") {
                  return new Proxy(chatValue, {
                    get: (completionsTarget, completionsProp, completionsReceiver) => {
                      const completionsValue = Reflect.get(
                        completionsTarget,
                        completionsProp,
                        completionsReceiver,
                      );

                      // Wrap the create method with tracing
                      if (completionsProp === "create" && typeof completionsValue === "function") {
                        return traceable(completionsValue.bind(completionsTarget), {
                          run_type: "llm",
                          name: "openai",
                          ...config.traceMetadata,
                          aggregator: (args: any[], result: any) => {
                            // Extract relevant data for tracing
                            const params = args[0] || {};

                            // For streaming responses, we can't easily get metrics, so just return the input
                            if (params.stream) {
                              return {
                                model: params.model,
                                input: {
                                  messages: params.messages,
                                  temperature: params.temperature,
                                  max_tokens: params.max_tokens,
                                  tools: params.tools ? params.tools.length : 0,
                                },
                                streaming: true,
                              };
                            }

                            // For non-streaming, include response data
                            return {
                              model: params.model,
                              input: {
                                messages: params.messages,
                                temperature: params.temperature,
                                max_tokens: params.max_tokens,
                                tools: params.tools ? params.tools.length : 0,
                              },
                              output: result.choices?.[0]?.message,
                              metrics: result.usage
                                ? {
                                    prompt_tokens: result.usage.prompt_tokens,
                                    completion_tokens: result.usage.completion_tokens,
                                    total_tokens: result.usage.total_tokens,
                                  }
                                : undefined,
                              finish_reason: result.choices?.[0]?.finish_reason,
                            };
                          },
                        });
                      }

                      return completionsValue;
                    },
                  });
                }

                return chatValue;
              },
            });
          }

          return value;
        },
      });
    } else {
      this.client = baseClient;
    }

    // Set configuration options
    this.model = config.model || "gpt-4o";
    this.temperature = config.temperature ?? 0.7;
    this.maxTokens = config.maxTokens;
    this.cache = config.cache ?? false;
    this.responseSchema = config.responseSchema;
  }

  /**
   * Send a thread to the LLM and get a response
   * @param thread Thread to send
   * @returns Updated thread with LLM response
   */
  async sendThread(thread: Thread, abortSignal?: AbortSignal): Promise<Thread> {
    if (thread.all().length === 0) {
      throw new Error("Cannot send an empty thread");
    }

    // Check if execution has been aborted
    this.checkAbortSignal(abortSignal);

    // this.debug("Sending message to OpenAI API", { model: this.model });

    // Format messages for OpenAI API
    const messages = this.formatMessagesForAPI(thread);

    // Check for potential token issues
    const totalCharacters = messages.reduce(
      (sum, msg) => sum + (typeof msg.content === "string" ? msg.content.length : 0),
      0,
    );

    if (totalCharacters > MAX_PROMPT_CHARACTERS) {
      throw new Error(
        `The thread content is too long (${totalCharacters} characters). Try a shorter conversation.`,
      );
    }

    try {
      // Create parameters for the API call
      const params: OpenAI.Chat.ChatCompletionCreateParams = {
        model: this.model,
        messages,
        temperature: this.temperature,
      };

      // Add max tokens if specified
      if (this.maxTokens) {
        params.max_tokens = this.maxTokens;
      }

      // Add structured output if responseSchema is defined
      if (this.responseSchema) {
        params.response_format = {
          type: "json_schema",
          json_schema: {
            name: "response",
            schema: this.responseSchema,
            strict: true,
          },
        } as any;
      }

      // Add tools if there are any in the thread
      const tools = thread.getTools();
      if (tools.length > 0) {
        params.tools = this.createToolDefinitions(tools);
        params.tool_choice = "auto";
      }

      // Make the API call
      const response = await this.client.chat.completions.create(params);

      // Get the last choice from the response
      const lastChoice = response.choices[0];
      if (!lastChoice) {
        throw new Error("No response from OpenAI API");
      }

      const choice = lastChoice.message;
      if (!choice) {
        throw new Error("Empty response from OpenAI API");
      }

      // Create a new message from the response
      const message = new Message({
        role: "assistant",
        content: choice.content || "",
        toolCalls: this.parseToolCalls(choice.tool_calls),
        cache: this.cache,
      });

      // Add the message to the thread
      thread.push(message);

      // Log usage metrics if available
      if (response.usage) {
        // this.debug("OpenAI API usage metrics", {
        //   inputTokens: response.usage.prompt_tokens,
        //   outputTokens: response.usage.completion_tokens,
        //   totalTokens: response.usage.total_tokens,
        // });
      }

      return thread;
    } catch (error) {
      this.error("Error sending message to OpenAI API", { error });
      throw error;
    }
  }

  /**
   * Stream a thread to the LLM and get a streaming response
   * @param thread Thread to send
   * @returns AsyncGenerator yielding the stream and updated thread
   */
  async *streamThread(
    thread: Thread,
  ): AsyncGenerator<{ stream: AsyncGenerator<string, void>; message: Message }, Thread> {
    if (thread.all().length === 0) {
      throw new Error("Cannot stream an empty thread");
    }

    this.debug("Streaming message to OpenAI API", { model: this.model });

    // Format messages for OpenAI API
    const messages = this.formatMessagesForAPI(thread);

    // Check for potential token issues
    const totalCharacters = messages.reduce(
      (sum, msg) => sum + (typeof msg.content === "string" ? msg.content.length : 0),
      0,
    );

    if (totalCharacters > MAX_PROMPT_CHARACTERS) {
      throw new Error(
        `The thread content is too long (${totalCharacters} characters). Try a shorter conversation.`,
      );
    }

    try {
      // Create parameters for the API call
      const params: OpenAI.Chat.ChatCompletionCreateParams = {
        model: this.model,
        messages,
        temperature: this.temperature,
        stream: true,
      };

      // Add max tokens if specified
      if (this.maxTokens) {
        params.max_tokens = this.maxTokens;
      }

      // Add structured output if responseSchema is defined
      if (this.responseSchema) {
        params.response_format = {
          type: "json_schema",
          json_schema: {
            name: "response",
            schema: this.responseSchema,
            strict: true,
          },
        } as any;
      }

      // Add tools if there are any in the thread
      const tools = thread.getTools();
      if (tools.length > 0) {
        params.tools = this.createToolDefinitions(tools);
        params.tool_choice = "auto";
      }

      // Create a new message to add to the thread
      const message = new Message({
        role: "assistant",
        content: "",
        cache: this.cache,
      });

      // Make the streaming API call
      const stream = await this.client.chat.completions.create(params);

      // Create a streaming content generator
      const streamContent = this.createStreamGenerator(stream, message);

      // Yield the stream
      yield { stream: streamContent, message };

      // Add the message to the thread
      thread.push(message);

      // Return the updated thread
      return thread;
    } catch (error) {
      this.error("Error streaming message to OpenAI API", { error });
      throw error;
    }
  }

  /**
   * Create a stream generator that handles updates to the message
   */
  private async *createStreamGenerator(
    stream: AsyncIterable<OpenAI.Chat.ChatCompletionChunk>,
    message: Message,
  ): AsyncGenerator<string, void> {
    let toolCallsStarted = false;
    let currentToolCalls: any[] = [];

    try {
      for await (const chunk of stream) {
        const delta = chunk.choices[0]?.delta;

        if (!delta) continue;

        // Handle content updates
        if (delta.content) {
          // Create a new message with updated content
          const updatedMessage = new Message({
            role: "assistant",
            content: (message.content || "") + delta.content,
            toolCalls: message.toolCalls,
            cache: this.cache,
          });

          // Copy the updated message properties
          Object.assign(message, updatedMessage);

          yield delta.content;
        }

        // Handle tool call updates
        if (delta.tool_calls) {
          if (!toolCallsStarted) {
            // First tool call chunk
            toolCallsStarted = true;
            currentToolCalls = delta.tool_calls.map((call) => ({ ...call }));
          } else {
            // Subsequent tool call chunks - need to merge them
            for (const toolCall of delta.tool_calls) {
              const existingCall = currentToolCalls.find((c) => c.index === toolCall.index);

              if (existingCall) {
                // Update function content if provided
                if (toolCall.function) {
                  if (!existingCall.function) {
                    existingCall.function = {};
                  }

                  if (toolCall.function.name && !existingCall.function.name) {
                    existingCall.function.name = toolCall.function.name;
                  }

                  if (toolCall.function.arguments) {
                    existingCall.function.arguments =
                      (existingCall.function.arguments || "") + toolCall.function.arguments;
                  }
                }
              } else {
                // New tool call
                currentToolCalls.push({ ...toolCall });
              }
            }
          }
        }
      }

      // Process the tool calls after the stream completes
      if (currentToolCalls.length > 0) {
        const parsedToolCalls = this.parseToolCalls(currentToolCalls);

        // Create a new message with the tool calls
        const updatedMessage = new Message({
          role: "assistant",
          content: message.content || "",
          toolCalls: parsedToolCalls,
          cache: this.cache,
        });

        // Copy the updated message properties
        Object.assign(message, updatedMessage);
      }
    } catch (error) {
      this.error("Error processing OpenAI stream", { error });
      throw error;
    }
  }

  /**
   * Format messages from the Thread object to the OpenAI API format
   */
  private formatMessagesForAPI(thread: Thread): OpenAI.Chat.ChatCompletionMessageParam[] {
    const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];

    for (const message of thread.all()) {
      // Process attachments if they exist
      if (message.attachments && message.attachments.length > 0) {
        const contentParts: OpenAI.Chat.ChatCompletionContentPart[] = [];

        // Add text content if it exists
        if (message.content) {
          contentParts.push({
            type: "text",
            text: message.content,
          });
        }

        // Add each attachment
        for (const attachment of message.attachments) {
          if (attachment.type === "image" && attachment.source.uri) {
            contentParts.push({
              type: "image_url",
              image_url: {
                url: attachment.source.uri,
                detail: "auto",
              },
            });
          }
        }

        // OpenAI has specific typing requirements for different roles
        if (message.role === "user") {
          messages.push({
            role: "user",
            content: contentParts as any, // Type cast to bypass strict typing
          });
        } else if (message.role === "assistant") {
          messages.push({
            role: "assistant",
            content: contentParts as any, // Type cast to bypass strict typing
          });
        } else if (message.role === "system") {
          messages.push({
            role: "system",
            content: contentParts as any, // Type cast to bypass strict typing
          });
        }
        continue;
      }

      // Handle tool results (these are created from user messages with toolResults)
      if (message.toolResults && message.toolResults.length > 0) {
        // First add the user message if it has content
        if (message.content) {
          messages.push({
            role: "user",
            content: message.content,
          });
        }

        // Then add each tool result as a separate message
        for (const toolResult of message.toolResults) {
          messages.push({
            role: "tool",
            tool_call_id: toolResult.toolUseId,
            content: JSON.stringify(toolResult.result),
          });
        }
        continue;
      }

      // Handle tool calls in assistant messages
      if (message.role === "assistant" && message.toolCalls && message.toolCalls.length > 0) {
        const toolCalls = message.toolCalls.map((call) => ({
          id: call.toolUseId,
          type: "function" as const,
          function: {
            name: call.name,
            arguments: JSON.stringify(call.arguments),
          },
        }));

        messages.push({
          role: "assistant",
          content: message.content || null,
          tool_calls: toolCalls,
        });
        continue;
      }

      // Default case for simple messages
      if (message.role === "user") {
        messages.push({
          role: "user",
          content: message.content || "",
        });
      } else if (message.role === "assistant") {
        messages.push({
          role: "assistant",
          content: message.content || "",
        });
      } else if (message.role === "system") {
        messages.push({
          role: "system",
          content: message.content || "",
        });
      }
    }

    return messages;
  }

  /**
   * Create tool definitions for the OpenAI API
   */
  private createToolDefinitions(tools: Tool[]): OpenAI.Chat.ChatCompletionTool[] {
    return tools.map((tool) => ({
      type: "function",
      function: {
        name: tool.getName(),
        description: tool.getDescription(),
        parameters: {
          ...zodToJsonSchema(tool.getParameters()),
          $schema: JSON_SCHEMA_URL,
        },
      },
    }));
  }

  /**
   * Parse tool calls from the OpenAI API response
   */
  private parseToolCalls(
    toolCalls?: OpenAI.Chat.ChatCompletionMessageToolCall[],
  ): ToolCall[] | undefined {
    if (!toolCalls || toolCalls.length === 0) {
      return undefined;
    }

    return toolCalls.map((toolCall) => {
      if (toolCall.type === "function" && toolCall.function.name) {
        try {
          const args = JSON.parse(toolCall.function.arguments || "{}");
          return {
            toolUseId: toolCall.id,
            name: toolCall.function.name,
            arguments: args,
          };
        } catch (error) {
          this.error("Error parsing tool call arguments", {
            error,
            toolCall,
          });
          return {
            toolUseId: toolCall.id,
            name: toolCall.function.name,
            arguments: {},
          };
        }
      }

      this.error("Unknown tool call type", { toolCall });
      return {
        toolUseId: toolCall.id || nanoid(),
        name: toolCall.function?.name || "unknown",
        arguments: {},
      };
    });
  }
}

src/model.drivers/anthropic.ts (the excerpt breaks off mid-file):

/**
 * Implement a class that represents an Anthropic client
 * and provides a set of tools for converting messages to and from the LLM.
 * - sendThread(thread: Thread): Thread - send a message to the LLM
 * - streamThread(thread: Thread): AsyncGenerator<string, Thread> - stream a message to the LLM
 */

// Use require if module not found error
const Anthropic = require("@anthropic-ai/sdk").default;
import { nanoid } from "nanoid";
import { zodToJsonSchema } from "zod-to-json-schema";
import { Message, type ToolCall } from "../message";
import type { Thread } from "../thread";
import type { Tool } from "../tool";
import { traceable } from "../tracing";
import { BaseModelDriver } from "./base";

// Constants
const MAX_PROMPT_CHARACTERS = 100_000;

/**
 * Schema for structured output
 */
export interface AnthropicResponseSchema {
  type: "string" | "integer" | "number" | "boolean" | "array" | "object";
  format?: string;
  description?: string;
  nullable?: boolean;
  enum?: string[];
  maxItems?: string;
  minItems?: string;
  properties?: Record<string, AnthropicResponseSchema>;
  required?: string[];
  propertyOrdering?: string[];
  items?: AnthropicResponseSchema;
}

/**
 * Available Anthropic models
 */
export type AnthropicModel =
  // Claude 3 models
  | "claude-3-opus-20240229"
  | "claude-3-sonnet-20240229"
  | "claude-3-haiku-20240307"
  | "claude-3-5-sonnet-20240620"
  | "claude-3-5-sonnet-20241022" // Claude 3.5 Sonnet v2
  | "claude-3-7-sonnet-20250219"
  // Claude Opus 4
  | "claude-opus-4-20250514"
  // Claude 2 models
  | "claude-2.0"
  | "claude-2.1"
  // Tagged versions
  | "claude-3-opus-20240229-v1:0"
  | "claude-3-sonnet-20240229-v1:0"
  | "claude-3-haiku-20240307-v1:0";

/**
 * Configuration options for the
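Putting the recovered pieces together, a hedged sketch of driving a conversation through the OpenAI driver. The Thread construction API is not part of this excerpt (src/thread.ts appears later in the map), so the thread-building lines and the import paths are assumptions:

import { Message, OpenAIThreadDriver, Thread } from "@flatfile/improv"; // assumed exports

async function main() {
  const driver = new OpenAIThreadDriver({
    model: "gpt-4o-mini", // defaults to "gpt-4o" when omitted
    temperature: 0.2,     // defaults to 0.7
    maxTokens: 512,
    // Optional: forces a json_schema response_format on the API call
    responseSchema: {
      type: "object",
      properties: { answer: { type: "string" } },
      required: ["answer"],
    },
    // apiKey falls back to the OPENAI_API_KEY environment variable when omitted
  });

  const thread = new Thread(); // hypothetical constructor, not shown in this excerpt
  thread.push(new Message({ role: "system", content: "Answer tersely." }));
  thread.push(new Message({ role: "user", content: "What is a source map?" }));

  const controller = new AbortController();
  const updated = await driver.sendThread(thread, controller.signal);
  console.log(updated.all().at(-1)?.json); // e.g. { answer: "..." }
}

main().catch(console.error);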