@flatfile/improv

A powerful TypeScript library for building AI agents with multi-threaded conversations, tool execution, and event handling capabilities

The file shown here is the source map published with the package's bundled build: a single line of JSON, roughly 2.64 MB in size.
It is a version-3 map. Its "sources" array lists every module that went into the bundle: the bundled third-party dependencies (eventemitter2 and eventemitter3, retry and p-retry, p-finally, p-timeout, p-queue, langsmith, semver, braintrust, debug/ms/supports-color, simple-git with @kwsites/file-exists and @kwsites/promise-deferred, @vercel/functions, mustache, eventsource-parser, chalk with ansi-styles and color-convert, pluralize, cli-progress, slugify) together with the library's own sources under src/:

  index.ts, event-source.ts, message.ts, thread.ts, tool.ts, agent.tool.ts, events.ts, agent.ts, decorators.ts, evaluator.ts, solo.ts, gig.ts, piece.ts
  evaluators/: three-keyed-lock.ts, agent-tool.evaluator.ts
  model.drivers/: base.ts, openai.ts, anthropic.ts, gemini.ts, cerebras.ts, cohere.ts, bedrock.ts, huggingface.ts, groq.ts, index.ts
  tracing/: tracer.ts, registry.ts, index.ts, api.ts, adapters/index.ts, adapters/langsmith.ts, adapters/braintrust.ts

The "sourcesContent" array embeds the original TypeScript for each entry. The portion visible in this excerpt covers the library's own modules; they are reproduced or summarized below in readable form.

src/event-source.ts defines the abstract EventSource base class that the library's event-emitting classes extend. It wraps EventEmitter2 in wildcard mode, can forward every event from another emitter while merging extra context into the payload, and gates its debug/error helpers on NODE_ENV === "development":

```ts
import { EventEmitter2 } from "eventemitter2";

/**
 * Abstract base class for all event-emitting classes
 * Provides event forwarding functionality and type-safe event emission
 */
export abstract class EventSource extends EventEmitter2 implements EventSourceInterface {
  constructor() {
    super({
      wildcard: true,
    });
  }

  /**
   * Forward all events from a source EventEmitter2 instance
   * Preserves the original event name and merges any additional context
   */
  protected forwardEvents(source: EventSourceInterface, context: Record<string, any> = {}): void {
    const self = this;
    source.on("**", function (data) {
      // @ts-ignore - event property is available in the wildcard listener context
      const eventName = this.event as string;
      // Forward the event with the same name and add context
      self.emit(eventName, { ...data, ...context });
    });
  }

  protected debug(message: string, data?: any) {
    if (process.env.NODE_ENV === "development") {
      console.log(message);
      if (data) {
        console.dir(data, { depth: null });
      }
    }
  }

  protected error(...args: any[]) {
    if (process.env.NODE_ENV === "development") {
      console.error(...args);
    }
  }
}

export interface EventSourceInterface {
  on(event: string, listener: (...args: any[]) => void): any;
}
```
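A minimal usage sketch of the forwarding behaviour above. CsvWorker, Supervisor, and the "@flatfile/improv" import path are illustrative assumptions; only EventSource and its forwardEvents helper come from the embedded source.

```ts
import { EventSource } from "@flatfile/improv"; // export path assumed

// Hypothetical subclasses for illustration - not part of the library.
class CsvWorker extends EventSource {}

class Supervisor extends EventSource {
  watch(worker: CsvWorker, name: string): void {
    // forwardEvents re-emits every wildcard ("**") event from `worker`,
    // merging the extra context into the payload.
    this.forwardEvents(worker, { worker: name });
  }
}

const worker = new CsvWorker();
const supervisor = new Supervisor();
supervisor.watch(worker, "csv-import");

supervisor.on("task.done", (payload) => {
  console.log(payload); // { rows: 120, worker: "csv-import" }
});

worker.emit("task.done", { rows: 120 });
```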
src/message.ts models a single message in a conversation thread. It declares the MessageRole union ("system" | "user" | "assistant" | "tool"); attachment types for documents (pdf, csv, doc, docx, xls, xlsx, html, txt, md), images (png, jpeg, gif, webp), and videos (mkv, mov, mp4, webm, flv, mpeg, mpg, wmv, three_gp), each backed by an AttachmentSource of raw bytes, a URI, or a bucket owner; and the ToolCall (name, toolUseId, arguments, plus optional thread/message/tool references), ToolResult (name, toolUseId, result, optional error), and Reasoning (text, type "text", signature) interfaces. The Message class holds content, role (default "user"), reasoning, toolCalls, toolResults, attachments, and a cache flag behind read-only getters. It exposes predicates (isToolCall, isToolResponse, isAssistantMessage, isUserMessage, isSystemMessage), attachment helpers (getAttachmentsByType, addAttachment, removeAttachment), and a json getter that tries JSON.parse on the raw content, then a JSON markdown code block, then the first { ... } span, and returns null when nothing parses.
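A short sketch of the json getter in use, assuming the package re-exports Message from its root (the embedded source alone does not show the public entry point):

```ts
import { Message } from "@flatfile/improv"; // export path assumed

// An assistant reply that wraps its JSON in a markdown code block.
const reply = new Message({
  role: "assistant",
  content: 'Here is the result:\n```json\n{ "status": "ok", "rows": 3 }\n```',
});

// The getter first tries JSON.parse on the whole content, then falls back to
// extracting a ```json ... ``` block, then the first { ... } span.
console.log(reply.json); // { status: "ok", rows: 3 }
console.log(reply.isToolCall()); // false - no tool calls attached
```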
src/tracing/tracer.ts defines the tracing contract. A Tracer wraps a function via traceable(fn, metadata); TraceMetadata carries a required name, an optional run_type (for example "tool", "llm", or "agent"), and any extra keys; and NullTracer, exported as defaultTracer, is the no-op implementation:

```ts
/**
 * Interface for tracing function executions in Improv
 */
export interface Tracer {
  /**
   * Trace a function execution
   * @param fn The function to trace
   * @param metadata Metadata about the trace
   * @returns The wrapped function that will be traced
   */
  traceable<T extends (...args: any[]) => any>(fn: T, metadata: TraceMetadata): T;
}

/**
 * Metadata for a trace
 */
export interface TraceMetadata {
  /** The name of the trace */
  name: string;
  /** The type of run (e.g., "tool", "llm", "agent") */
  run_type?: string;
  /** Any additional metadata */
  [key: string]: unknown;
}

/**
 * A tracer that does nothing
 */
export class NullTracer implements Tracer {
  traceable<T extends (...args: any[]) => any>(fn: T, _metadata: TraceMetadata): T {
    // Simply returns the original function without any tracing
    return fn;
  }
}

// The default tracer does nothing
export const defaultTracer = new NullTracer();
```

src/tracing/registry.ts adds a singleton TracerRegistry (via getInstance) that registers tracers by name, pre-registers the "null" tracer, activates one with setActiveTracer (throwing if the name is unknown), returns the active tracer from getActiveTracer, and reports isTracingEnabled whenever the active tracer is not "null"; getTracer() and isTracingEnabled() are exported as convenience functions. src/tracing/index.ts re-exports all of this and adds traceable(fn, metadata), which returns fn unchanged when tracing is disabled and otherwise delegates to the active tracer's traceable implementation; a usage sketch follows.
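A hedged sketch of plugging in a custom tracer, assuming TracerRegistry, traceable, and the types are re-exported from the package root; the console tracer itself is hypothetical:

```ts
import { TracerRegistry, traceable } from "@flatfile/improv"; // export path assumed
import type { Tracer, TraceMetadata } from "@flatfile/improv";

// A hypothetical logging tracer - anything implementing the Tracer interface works.
const consoleTracer: Tracer = {
  traceable<T extends (...args: any[]) => any>(fn: T, metadata: TraceMetadata): T {
    const wrapped = (...args: any[]) => {
      console.log(`[trace] ${metadata.name} (${metadata.run_type ?? "fn"})`);
      return fn(...args);
    };
    return wrapped as T;
  },
};

const registry = TracerRegistry.getInstance();
registry.register("console", consoleTracer);
registry.setActiveTracer("console");

// traceable() is a no-op while the "null" tracer is active; with "console"
// active it delegates to consoleTracer.traceable.
const add = traceable((a: number, b: number) => a + b, { name: "add", run_type: "tool" });
console.log(add(2, 3)); // logs "[trace] add (tool)" and prints 5
```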
src/model.drivers/base.ts defines the abstract BaseModelDriver that each provider driver extends. It implements three helpers for cooperative cancellation: checkAbortSignal throws "Thread execution aborted" once an AbortSignal has fired, withAbortCheck checks the signal before and after an async operation, and wrapStreamWithAbort checks it on every iteration of an async generator. It also declares the two methods every driver must implement: sendThread(thread, abortSignal?) resolving to the updated Thread, and streamThread(thread, abortSignal?) returning an async generator that yields { stream, message } pairs and finally returns the Thread. A sketch of the abort flow follows.
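A sketch of the abort flow under the same export-path assumption; sendWithTimeout is a hypothetical helper, not a library function:

```ts
import { OpenAIThreadDriver } from "@flatfile/improv"; // export path assumed
import type { Thread } from "@flatfile/improv";        // export path assumed

// Cancel a long-running completion after `ms` milliseconds. The driver checks
// the signal at defined points and throws "Thread execution aborted" if it fired.
async function sendWithTimeout(
  driver: OpenAIThreadDriver,
  thread: Thread,
  ms: number,
): Promise<Thread> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), ms);
  try {
    return await driver.sendThread(thread, controller.signal);
  } finally {
    clearTimeout(timer);
  }
}
```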
The largest embedded module is src/model.drivers/openai.ts, which implements this contract for the OpenAI API (it imports nanoid, the openai SDK, zod, and the Message, Thread, and Tool types). It defines a DEFAULT_MAX_PROMPT_CHARACTERS constant of 1,000,000 characters (roughly 200K-500K tokens depending on content) and an OpenAIModel union covering gpt-4o, gpt-4o-mini, gpt-4o-mini-audio-preview, gpt-4, gpt-4-turbo, gpt-3.5-turbo, gpt-4-vision-preview, gpt-4.1, gpt-5, and the reasoning models o1, o1-mini, o1-preview, o3, o3-mini, o3-pro, and o4-mini.

Four interfaces describe the request surface. OpenAIResponseSchema is a JSON-Schema-like shape for structured output. OpenAIProviderOptions carries provider-specific parameters: nested reasoning.effort or flat reasoning_effort, verbosity, max_completion_tokens, cfg.grammar, freeform_tools, an mcp block (server, headers, approval_required), modality, prediction, store, and arbitrary passthrough keys. OpenAIEnhancedTokenUsage extends the usual input/output/total counts with cachedTokens, reasoningTokens, newContentTokens, and a per-tool toolTokenBreakdown. OpenAIConfig collects apiKey, model (default "gpt-4o"), temperature (default 0.7), maxTokens, cache and trace (both default false), traceMetadata, responseSchema, providerOptions, and maxPromptCharacters (default 1,000,000; set it to undefined to disable the guard).

The OpenAIThreadDriver class exposes a static getAvailableModels() list. Its constructor resolves the API key from the config or the OPENAI_API_KEY environment variable (throwing if neither is set) and builds an OpenAI client with a default "api-version": "2024-10-01-preview" query parameter for prompt caching. When trace is enabled, it wraps chat.completions.create in a Proxy so every call runs through traceable() with run_type "llm", the configured traceMetadata, and an aggregator that records the model, messages, temperature, max_tokens, tool count, response message, usage metrics, and finish reason (inputs only for streaming calls). A configuration sketch follows.
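A configuration sketch grounded in the OpenAIConfig fields above; the import path is assumed, and no request is made here:

```ts
import { OpenAIThreadDriver } from "@flatfile/improv"; // export path assumed

// The key may also come from the OPENAI_API_KEY environment variable.
const driver = new OpenAIThreadDriver({
  apiKey: process.env.OPENAI_API_KEY,
  model: "gpt-4o",              // default "gpt-4o"
  temperature: 0.2,             // default 0.7
  maxTokens: 2048,
  maxPromptCharacters: 500_000, // lower the default 1,000,000-character input guard
  trace: false,                 // true wraps chat.completions.create with the active tracer
});

// Reasoning models take provider-specific options instead, e.g.:
// new OpenAIThreadDriver({ model: "o3-mini", providerOptions: { reasoning: { effort: "high" } } });

console.log(OpenAIThreadDriver.getAvailableModels());
```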
sendThread(thread, abortSignal?) rejects an empty thread, checks the abort signal, formats the thread's messages for the API, and sums their character counts, throwing when the total exceeds maxPromptCharacters (unless the limit was explicitly disabled). It then assembles the request: model, messages, and temperature; max_tokens when configured; every provider option copied through along with any passthrough keys; a response_format of type "json_schema" ({ name: "response", schema, strict: true }) when a responseSchema is set; and the thread's tools with tool_choice "auto" when any are registered. The first choice of the response becomes a new assistant Message with its tool calls parsed from tool_calls, and that message is pushed onto the thread. When the API returns usage data, the driver stores an OpenAIEnhancedTokenUsage on the thread via updateTokenUsage: prompt, completion, and total tokens, cached and reasoning token counts from the usage detail fields, newContentTokens computed as input minus cached tokens, and a per-tool breakdown generated from the parsed tool calls. Errors are logged and rethrown. A structured-output sketch follows.
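A hedged sketch of structured output, using the OpenAIResponseSchema shape and the response_format wiring described above; export paths are assumed:

```ts
import { OpenAIThreadDriver } from "@flatfile/improv";        // export path assumed
import type { OpenAIResponseSchema } from "@flatfile/improv"; // export path assumed

// sendThread forwards this as response_format: { type: "json_schema",
// json_schema: { name: "response", schema, strict: true } }.
const schema: OpenAIResponseSchema = {
  type: "object",
  properties: {
    sentiment: { type: "string", enum: ["positive", "neutral", "negative"] },
    confidence: { type: "number" },
  },
  required: ["sentiment", "confidence"],
};

const driver = new OpenAIThreadDriver({ model: "gpt-4o", responseSchema: schema });
```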
streamThread(thread) runs the same validation, character-limit check, and parameter construction, but sets stream: true and stream_options: { include_usage: true } so usage arrives in the final chunk. It creates an empty assistant Message, starts the streaming completion, wraps the raw stream with the private createStreamGenerator helper (which fills the message in as chunks arrive), yields { stream, message } to the caller, and, once the stream has been consumed, pushes the message onto the thread and returns the updated thread; errors are logged and rethrown. The embedded source breaks off partway through createStreamGenerator(stream, message, thread, ...), so the remainder of the driver is not visible in this excerpt.
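Finally, a sketch of consuming streamThread based on its signature; how the Thread itself is constructed is not shown in this excerpt, so it is passed in as an argument:

```ts
import { OpenAIThreadDriver } from "@flatfile/improv"; // export path assumed
import type { Thread } from "@flatfile/improv";        // export path assumed

async function streamReply(driver: OpenAIThreadDriver, thread: Thread): Promise<Thread> {
  const generator = driver.streamThread(thread);

  let result = await generator.next();
  while (!result.done) {
    // result.value.message is the assistant Message the driver fills in
    // while the text stream below is consumed.
    for await (const chunk of result.value.stream) {
      process.stdout.write(chunk);
    }
    result = await generator.next();
  }

  // The generator's return value is the thread with the assistant reply appended.
  return result.value;
}
```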