/*
 * @tanstack/ai — Core TanStack AI library (open source AI SDK).
 * Build artifact: 1,038 lines (1,037 loc), 34.1 kB, JavaScript.
 */
import { generateMessageId, uiMessageToModelMessages } from "../messages.js";
import { defaultJSONParser } from "./json-parser.js";
import { updateToolCallWithOutput, updateToolResultPart, updateToolCallApprovalResponse, updateToolCallPart, updateThinkingPart, updateToolCallApproval, updateTextPart } from "./message-updaters.js";
import { ImmediateStrategy } from "./strategies.js";
class StreamProcessor {
// Consumes an AG-UI event stream and materializes it into UIMessage objects.
// Persistent conversation state (this.messages) is kept separate from
// per-stream transient state (messageStates), which is cleared between runs.
constructor(options = {}) {
// Persistent conversation history (UIMessage[]); survives resetStreamState().
this.messages = [];
// messageId -> transient per-message streaming state (text segments, tool calls).
this.messageStates = /* @__PURE__ */ new Map();
// IDs of messages currently receiving stream events.
this.activeMessageIds = /* @__PURE__ */ new Set();
// toolCallId -> owning messageId, for events that omit a message reference.
this.toolCallToMessage = /* @__PURE__ */ new Map();
// Set by startAssistantMessage(); lets TEXT_MESSAGE_START re-key the
// eagerly-created message to the server-assigned ID.
this.pendingManualMessageId = null;
// Run IDs seen in RUN_STARTED and not yet finished/errored.
this.activeRuns = /* @__PURE__ */ new Set();
this.finishReason = null;
this.hasError = false;
this.isDone = false;
// Chunk recording for replay/debugging; null unless recording is enabled.
this.recording = null;
this.recordingStartTime = 0;
this.chunkStrategy = options.chunkStrategy || new ImmediateStrategy();
this.events = options.events || {};
this.jsonParser = options.jsonParser || defaultJSONParser;
this.recordingEnabled = options.recording ?? false;
if (options.initialMessages) {
this.messages = [...options.initialMessages];
}
}
// ============================================
// Message Management Methods
// ============================================
/**
* Set the messages array (e.g., from persisted state)
*/
setMessages(messages) {
// Copy defensively so later reassignments don't alias the caller's array.
this.messages = [...messages];
this.emitMessagesChange();
}
/**
* Add a user message to the conversation.
* Supports both simple string content and multimodal content arrays.
*
* @param content - The message content (string or array of content parts)
* @param id - Optional custom message ID (generated if not provided)
* @returns The created UIMessage
*
* @example
* ```ts
* // Simple text message
* processor.addUserMessage('Hello!')
*
* // Multimodal message with image
* processor.addUserMessage([
* { type: 'text', content: 'What is in this image?' },
* { type: 'image', source: { type: 'url', value: 'https://example.com/photo.jpg' } }
* ])
*
* // With custom ID
* processor.addUserMessage('Hello!', 'custom-id-123')
* ```
*/
addUserMessage(content, id) {
const parts = typeof content === "string" ? [{ type: "text", content }] : content.map((part) => {
return part;
});
const userMessage = {
id: id ?? generateMessageId(),
role: "user",
parts,
createdAt: /* @__PURE__ */ new Date()
};
this.messages = [...this.messages, userMessage];
this.emitMessagesChange();
return userMessage;
}
/**
* Prepare for a new assistant message stream.
* Does NOT create the message immediately -- the message is created lazily
* when the first content-bearing chunk arrives via ensureAssistantMessage().
* This prevents empty assistant messages from flickering in the UI when
* auto-continuation produces no content.
*/
prepareAssistantMessage() {
this.resetStreamState();
}
/**
* @deprecated Use prepareAssistantMessage() instead. This eagerly creates
* an assistant message which can cause empty message flicker.
*/
startAssistantMessage(messageId) {
this.prepareAssistantMessage();
// Eagerly create the message; remember its ID so a later
// TEXT_MESSAGE_START can re-key it to the server-assigned messageId.
const { messageId: id } = this.ensureAssistantMessage(messageId);
this.pendingManualMessageId = id;
return id;
}
/**
* Get the current assistant message ID (if one has been created).
* Returns null if prepareAssistantMessage() was called but no content
* has arrived yet.
*/
getCurrentAssistantMessageId() {
// Relies on Map preserving insertion order: the last assistant entry
// encountered is the most recently created one.
let lastId = null;
for (const [id, state] of this.messageStates) {
if (state.role === "assistant") {
lastId = id;
}
}
return lastId;
}
/**
* Add a tool result (called by client after handling onToolCall)
*/
addToolResult(toolCallId, output, error) {
// Locate the message owning this tool call by scanning parts; bail with a
// warning rather than throwing if the ID is unknown.
const messageWithToolCall = this.messages.find(
(msg) => msg.parts.some(
(p) => p.type === "tool-call" && p.id === toolCallId
)
);
if (!messageWithToolCall) {
console.warn(
`[StreamProcessor] Could not find message with tool call ${toolCallId}`
);
return;
}
// Two-step update: first attach the output to the tool-call part...
let updatedMessages = updateToolCallWithOutput(
this.messages,
toolCallId,
output,
error ? "input-complete" : void 0,
error
);
// Tool-result content is always persisted as a string.
const content = typeof output === "string" ? output : JSON.stringify(output);
const toolResultState = error ? "error" : "complete";
// ...then create/update the matching tool-result part on the same message.
updatedMessages = updateToolResultPart(
updatedMessages,
messageWithToolCall.id,
toolCallId,
content,
toolResultState,
error
);
this.messages = updatedMessages;
this.emitMessagesChange();
}
/**
* Add an approval response (called by client after handling onApprovalRequest)
*/
addToolApprovalResponse(approvalId, approved) {
this.messages = updateToolCallApprovalResponse(
this.messages,
approvalId,
approved
);
this.emitMessagesChange();
}
/**
* Get the conversation as ModelMessages (for sending to LLM)
*/
toModelMessages() {
const modelMessages = [];
for (const msg of this.messages) {
modelMessages.push(...uiMessageToModelMessages(msg));
}
return modelMessages;
}
/**
* Get current messages
*/
getMessages() {
// Returns the live array reference; treat as read-only — message updates
// always replace this.messages wholesale rather than mutating in place.
return this.messages;
}
/**
* Check if all tool calls in the last assistant message are complete
* Useful for auto-continue logic
*/
areAllToolsComplete() {
const lastAssistant = this.messages.findLast(
(m) => m.role === "assistant"
);
if (!lastAssistant) return true;
const toolParts = lastAssistant.parts.filter(
(p) => p.type === "tool-call"
);
if (toolParts.length === 0) return true;
const toolResultIds = new Set(
lastAssistant.parts.filter((p) => p.type === "tool-result").map((p) => p.toolCallId)
);
return toolParts.every(
(part) => part.state === "approval-responded" || part.output !== void 0 && !part.approval || toolResultIds.has(part.id)
);
}
/**
* Remove messages after a certain index (for reload/retry)
*/
removeMessagesAfter(index) {
this.messages = this.messages.slice(0, index + 1);
this.emitMessagesChange();
}
/**
* Clear all messages
*/
clearMessages() {
// Wipes both the persistent conversation and all transient stream state.
this.messages = [];
this.messageStates.clear();
this.activeMessageIds.clear();
this.toolCallToMessage.clear();
this.pendingManualMessageId = null;
this.emitMessagesChange();
}
// ============================================
// Stream Processing Methods
// ============================================
/**
* Process a stream and emit events through handlers
*/
async process(stream) {
this.resetStreamState();
if (this.recordingEnabled) {
this.startRecording();
}
// NOTE(review): if the stream throws mid-iteration, finalizeStream() is
// never reached and transient state is left dangling — confirm callers
// handle that (e.g. by calling prepareAssistantMessage on retry).
for await (const chunk of stream) {
this.processChunk(chunk);
}
this.finalizeStream();
if (this.recording) {
this.recording.result = this.getResult();
}
return this.getResult();
}
/**
* Process a single chunk from the stream.
*
* Central dispatch for all AG-UI events. Each event type maps to a specific
* handler. Events not listed in the switch are intentionally ignored
* (RUN_STARTED, STEP_STARTED, STATE_DELTA).
*
* @see docs/chat-architecture.md#adapter-contract — Expected event types and ordering
*/
processChunk(chunk) {
// Record every chunk (including unhandled types) before dispatch so a
// replay reproduces the exact input sequence.
if (this.recording) {
this.recording.chunks.push({
chunk,
timestamp: Date.now(),
index: this.recording.chunks.length
});
}
switch (chunk.type) {
// AG-UI Events
case "TEXT_MESSAGE_START":
this.handleTextMessageStartEvent(chunk);
break;
case "TEXT_MESSAGE_CONTENT":
this.handleTextMessageContentEvent(chunk);
break;
case "TEXT_MESSAGE_END":
this.handleTextMessageEndEvent(chunk);
break;
case "TOOL_CALL_START":
this.handleToolCallStartEvent(chunk);
break;
case "TOOL_CALL_ARGS":
this.handleToolCallArgsEvent(chunk);
break;
case "TOOL_CALL_END":
this.handleToolCallEndEvent(chunk);
break;
case "RUN_FINISHED":
this.handleRunFinishedEvent(chunk);
break;
case "RUN_ERROR":
this.handleRunErrorEvent(chunk);
break;
case "STEP_FINISHED":
this.handleStepFinishedEvent(chunk);
break;
case "MESSAGES_SNAPSHOT":
this.handleMessagesSnapshotEvent(chunk);
break;
case "CUSTOM":
this.handleCustomEvent(chunk);
break;
// NOTE(review): RUN_STARTED *is* handled here despite the docblock above
// saying it is ignored — the docblock looks stale; confirm and update.
case "RUN_STARTED":
this.handleRunStartedEvent(chunk);
break;
}
}
// ============================================
// Per-Message State Helpers
// ============================================
/**
* Create a new MessageStreamState for a message
*/
createMessageState(messageId, role) {
const state = {
id: messageId,
role,
// All text accumulated across every segment of this message.
totalTextContent: "",
// Text of the segment currently streaming (reset after tool calls).
currentSegmentText: "",
// Last segment text actually flushed to the UIMessage/events.
lastEmittedText: "",
thinkingContent: "",
// toolCallId -> InternalToolCallState; order tracked separately below.
toolCalls: /* @__PURE__ */ new Map(),
toolCallOrder: [],
// True once a tool call arrives after text started; triggers new-segment logic.
hasToolCallsSinceTextStart: false,
isComplete: false
};
this.messageStates.set(messageId, state);
return state;
}
/**
* Get the MessageStreamState for a message
*/
getMessageState(messageId) {
return this.messageStates.get(messageId);
}
/**
* Get the most recent active assistant message ID.
* Used as fallback for events that don't include a messageId.
*/
getActiveAssistantMessageId() {
// Walk the Set in reverse insertion order to find the newest assistant.
const ids = Array.from(this.activeMessageIds);
for (let i = ids.length - 1; i >= 0; i--) {
const id = ids[i];
const state = this.messageStates.get(id);
if (state && state.role === "assistant") {
return id;
}
}
return null;
}
/**
* Ensure an active assistant message exists, creating one if needed.
* Used for backward compat when events arrive without prior TEXT_MESSAGE_START.
*
* On reconnect/resume, a TEXT_MESSAGE_CONTENT may arrive for a message that
* already exists in this.messages (e.g. from initialMessages or a prior
* MESSAGES_SNAPSHOT) but whose transient state was cleared. In that case we
* hydrate state from the existing message rather than creating a duplicate.
*/
ensureAssistantMessage(preferredId) {
// 1) Preferred ID already has transient state — reuse it.
if (preferredId) {
const state2 = this.getMessageState(preferredId);
if (state2) return { messageId: preferredId, state: state2 };
}
// 2) Some assistant message is still active — attach to it.
const activeId = this.getActiveAssistantMessageId();
if (activeId) {
const state2 = this.getMessageState(activeId);
return { messageId: activeId, state: state2 };
}
// 3) Message exists in history but has no transient state (resume case):
// rebuild state, seeding text trackers from the trailing text part.
if (preferredId) {
const existingMsg = this.messages.find((m) => m.id === preferredId);
if (existingMsg) {
const state2 = this.createMessageState(preferredId, existingMsg.role);
this.activeMessageIds.add(preferredId);
const lastPart = existingMsg.parts.length > 0 ? existingMsg.parts[existingMsg.parts.length - 1] : null;
if (lastPart && lastPart.type === "text") {
state2.currentSegmentText = lastPart.content;
state2.lastEmittedText = lastPart.content;
state2.totalTextContent = lastPart.content;
}
return { messageId: preferredId, state: state2 };
}
}
// 4) Nothing exists — create a fresh assistant message.
const id = preferredId || generateMessageId();
const assistantMessage = {
id,
role: "assistant",
parts: [],
createdAt: /* @__PURE__ */ new Date()
};
this.messages = [...this.messages, assistantMessage];
const state = this.createMessageState(id, "assistant");
this.activeMessageIds.add(id);
this.pendingManualMessageId = id;
this.events.onStreamStart?.();
this.emitMessagesChange();
return { messageId: id, state };
}
// ============================================
// Event Handlers
// ============================================
/**
* Handle TEXT_MESSAGE_START event.
*
* Three paths: (a) a manually started message is pending — re-key it to the
* incoming messageId; (b) the message already exists — reactivate it and
* reset segment tracking if tool calls intervened; (c) otherwise create a
* brand-new message.
*/
handleTextMessageStartEvent(chunk) {
const { messageId, role } = chunk;
// UIMessages have no "tool" role; fold tool output into the assistant.
const uiRole = role === "tool" ? "assistant" : role;
if (this.pendingManualMessageId) {
const pendingId = this.pendingManualMessageId;
this.pendingManualMessageId = null;
if (pendingId !== messageId) {
// Re-key the eagerly-created message (and its state) to the
// server-assigned ID so subsequent events find it.
this.messages = this.messages.map(
(msg) => msg.id === pendingId ? { ...msg, id: messageId } : msg
);
const existingState = this.messageStates.get(pendingId);
if (existingState) {
existingState.id = messageId;
this.messageStates.delete(pendingId);
this.messageStates.set(messageId, existingState);
}
this.activeMessageIds.delete(pendingId);
this.activeMessageIds.add(messageId);
}
if (!this.messageStates.has(messageId)) {
this.createMessageState(messageId, uiRole);
this.activeMessageIds.add(messageId);
}
this.emitMessagesChange();
return;
}
const existingMsg = this.messages.find((m) => m.id === messageId);
if (existingMsg) {
this.activeMessageIds.add(messageId);
if (!this.messageStates.has(messageId)) {
this.createMessageState(messageId, uiRole);
} else {
const existingState = this.messageStates.get(messageId);
if (existingState.hasToolCallsSinceTextStart) {
// Tool calls ended the previous segment: flush any un-emitted
// text, then start a fresh segment.
if (existingState.currentSegmentText !== existingState.lastEmittedText) {
this.emitTextUpdateForMessage(messageId);
}
existingState.currentSegmentText = "";
existingState.lastEmittedText = "";
existingState.hasToolCallsSinceTextStart = false;
}
}
return;
}
const newMessage = {
id: messageId,
role: uiRole,
parts: [],
createdAt: /* @__PURE__ */ new Date()
};
this.messages = [...this.messages, newMessage];
this.createMessageState(messageId, uiRole);
this.activeMessageIds.add(messageId);
this.events.onStreamStart?.();
this.emitMessagesChange();
}
/**
* Handle TEXT_MESSAGE_END event
*/
handleTextMessageEndEvent(chunk) {
const { messageId } = chunk;
const state = this.getMessageState(messageId);
if (!state) return;
if (state.isComplete) return;
// Flush any text not yet pushed to the UIMessage.
if (state.currentSegmentText !== state.lastEmittedText) {
this.emitTextUpdateForMessage(messageId);
}
this.completeAllToolCallsForMessage(messageId);
}
/**
* Handle MESSAGES_SNAPSHOT event.
* Replaces the entire conversation with the snapshot and discards all
* transient stream state.
*/
handleMessagesSnapshotEvent(chunk) {
this.resetStreamState();
this.messages = [...chunk.messages];
this.emitMessagesChange();
}
/**
* Handle TEXT_MESSAGE_CONTENT event.
*
* Accumulates delta into both currentSegmentText (for UI emission) and
* totalTextContent (for ProcessorResult). Lazily creates the assistant
* UIMessage on first content. Uses updateTextPart() which replaces the
* last TextPart or creates a new one depending on part ordering.
*
* @see docs/chat-architecture.md#single-shot-text-response — Text accumulation step-by-step
* @see docs/chat-architecture.md#uimessage-part-ordering-invariants — Replace vs. push logic
*/
handleTextMessageContentEvent(chunk) {
const { messageId, state } = this.ensureAssistantMessage(chunk.messageId);
// Text after tool calls means the tool inputs are finalized.
this.completeAllToolCallsForMessage(messageId);
const previousSegment = state.currentSegmentText;
const isNewSegment = state.hasToolCallsSinceTextStart && previousSegment.length > 0 && this.isNewTextSegment(chunk, previousSegment);
if (isNewSegment) {
// Flush the old segment before starting the new one.
if (previousSegment !== state.lastEmittedText) {
this.emitTextUpdateForMessage(messageId);
}
state.currentSegmentText = "";
state.lastEmittedText = "";
state.hasToolCallsSinceTextStart = false;
}
const currentText = state.currentSegmentText;
let nextText = currentText;
const delta = chunk.delta || "";
if (delta !== "") {
// Delta-style chunk: simple append.
nextText = currentText + delta;
} else if (chunk.content !== void 0 && chunk.content !== "") {
// Snapshot-style chunk: reconcile with what we already have —
// adopt a superset snapshot, keep a superset accumulator, else append.
if (chunk.content.startsWith(currentText)) {
nextText = chunk.content;
} else if (currentText.startsWith(chunk.content)) {
nextText = currentText;
} else {
nextText = currentText + chunk.content;
}
}
// Only the genuinely-new suffix counts toward the aggregate total.
const textDelta = nextText.slice(currentText.length);
state.currentSegmentText = nextText;
state.totalTextContent += textDelta;
const chunkPortion = chunk.delta || chunk.content || "";
const shouldEmit = this.chunkStrategy.shouldEmit(
chunkPortion,
state.currentSegmentText
);
if (shouldEmit && state.currentSegmentText !== state.lastEmittedText) {
this.emitTextUpdateForMessage(messageId);
}
}
/**
* Handle TOOL_CALL_START event.
*
* Creates a new InternalToolCallState entry in the toolCalls Map and appends
* a ToolCallPart to the UIMessage. Duplicate toolCallId is a no-op.
*
* CRITICAL: This MUST be received before any TOOL_CALL_ARGS for the same
* toolCallId. Args for unknown IDs are silently dropped.
*
* @see docs/chat-architecture.md#single-shot-tool-call-response — Tool call state transitions
* @see docs/chat-architecture.md#parallel-tool-calls-single-shot — Parallel tracking by ID
* @see docs/chat-architecture.md#adapter-contract — Ordering requirements
*/
handleToolCallStartEvent(chunk) {
const targetMessageId = chunk.parentMessageId ?? this.getActiveAssistantMessageId();
const { messageId, state } = this.ensureAssistantMessage(
targetMessageId ?? void 0
);
state.hasToolCallsSinceTextStart = true;
const toolCallId = chunk.toolCallId;
const existingToolCall = state.toolCalls.get(toolCallId);
if (!existingToolCall) {
const initialState = "awaiting-input";
const newToolCall = {
id: chunk.toolCallId,
name: chunk.toolName,
arguments: "",
state: initialState,
parsedArguments: void 0,
index: chunk.index ?? state.toolCalls.size
};
state.toolCalls.set(toolCallId, newToolCall);
state.toolCallOrder.push(toolCallId);
this.toolCallToMessage.set(toolCallId, messageId);
this.messages = updateToolCallPart(this.messages, messageId, {
id: chunk.toolCallId,
name: chunk.toolName,
arguments: "",
state: initialState
});
this.emitMessagesChange();
this.events.onToolCallStateChange?.(
messageId,
chunk.toolCallId,
initialState,
""
);
}
}
/**
* Handle TOOL_CALL_ARGS event.
*
* Appends the delta to the tool call's accumulated arguments string.
* Transitions state from awaiting-input → input-streaming on first non-empty delta.
* Attempts partial JSON parse on each update for UI preview.
*
* If toolCallId is not found in the Map (no preceding TOOL_CALL_START),
* this event is silently dropped.
*
* @see docs/chat-architecture.md#single-shot-tool-call-response — Step-by-step tool call processing
*/
handleToolCallArgsEvent(chunk) {
const toolCallId = chunk.toolCallId;
const messageId = this.toolCallToMessage.get(toolCallId);
if (!messageId) return;
const state = this.getMessageState(messageId);
if (!state) return;
const existingToolCall = state.toolCalls.get(toolCallId);
if (!existingToolCall) return;
const wasAwaitingInput = existingToolCall.state === "awaiting-input";
existingToolCall.arguments += chunk.delta || "";
if (wasAwaitingInput && chunk.delta) {
existingToolCall.state = "input-streaming";
}
// Partial-JSON parse of the (possibly incomplete) args for live preview.
existingToolCall.parsedArguments = this.jsonParser.parse(
existingToolCall.arguments
);
this.messages = updateToolCallPart(this.messages, messageId, {
id: existingToolCall.id,
name: existingToolCall.name,
arguments: existingToolCall.arguments,
state: existingToolCall.state
});
this.emitMessagesChange();
this.events.onToolCallStateChange?.(
messageId,
existingToolCall.id,
existingToolCall.state,
existingToolCall.arguments
);
}
/**
* Handle TOOL_CALL_END event — authoritative signal that a tool call's input is finalized.
*
* This event has a DUAL ROLE:
* - Without `result`: Signals arguments are done (from adapter). Transitions to input-complete.
* - With `result`: Signals tool was executed and result is available (from TextEngine).
*   Creates both output on the tool-call part AND a tool-result part.
*
* If `input` is provided, it overrides the accumulated string parse as the
* canonical parsed arguments.
*
* @see docs/chat-architecture.md#tool-results-and-the-tool_call_end-dual-role — Full explanation
* @see docs/chat-architecture.md#single-shot-tool-call-response — End-to-end flow
*/
handleToolCallEndEvent(chunk) {
const messageId = this.toolCallToMessage.get(chunk.toolCallId);
if (!messageId) return;
const msgState = this.getMessageState(messageId);
if (!msgState) return;
const existingToolCall = msgState.toolCalls.get(chunk.toolCallId);
if (existingToolCall && existingToolCall.state !== "input-complete") {
// If no args streamed but a canonical input is attached, backfill the
// raw argument string from it before completing.
if (chunk.input !== void 0 && !existingToolCall.arguments) {
existingToolCall.arguments = JSON.stringify(chunk.input);
}
const index = msgState.toolCallOrder.indexOf(chunk.toolCallId);
this.completeToolCall(messageId, index, existingToolCall);
// Canonical input wins over the string-parse done by completeToolCall.
if (chunk.input !== void 0) {
existingToolCall.parsedArguments = chunk.input;
}
}
if (chunk.result) {
// Result arrives as a string; prefer its JSON value when it parses.
let output;
try {
output = JSON.parse(chunk.result);
} catch {
output = chunk.result;
}
this.messages = updateToolCallWithOutput(
this.messages,
chunk.toolCallId,
output
);
const resultState = "complete";
this.messages = updateToolResultPart(
this.messages,
messageId,
chunk.toolCallId,
chunk.result,
resultState
);
this.emitMessagesChange();
}
}
/**
* Handle RUN_STARTED event.
*
* Registers the run so that RUN_FINISHED can determine whether other
* runs are still active before finalizing.
*/
handleRunStartedEvent(chunk) {
this.activeRuns.add(chunk.runId);
}
/**
* Handle RUN_FINISHED event.
*
* Records the finishReason and removes the run from activeRuns.
* Only finalizes when no more runs are active, so that concurrent
* runs don't interfere with each other.
*
* @see docs/chat-architecture.md#single-shot-tool-call-response — finishReason semantics
* @see docs/chat-architecture.md#adapter-contract — Why RUN_FINISHED is mandatory
*/
handleRunFinishedEvent(chunk) {
// NOTE(review): the last RUN_FINISHED's finishReason wins even when other
// runs are still active — confirm that is the intended aggregation.
this.finishReason = chunk.finishReason;
this.activeRuns.delete(chunk.runId);
if (this.activeRuns.size === 0) {
this.isDone = true;
this.completeAllToolCalls();
this.finalizeStream();
}
}
/**
* Handle RUN_ERROR event
*/
handleRunErrorEvent(chunk) {
this.hasError = true;
if (chunk.runId) {
this.activeRuns.delete(chunk.runId);
} else {
this.activeRuns.clear();
}
this.ensureAssistantMessage();
this.events.onError?.(new Error(chunk.error.message || "An error occurred"));
}
/**
* Handle STEP_FINISHED event (for thinking/reasoning content).
*
* Accumulates delta into thinkingContent and updates a single ThinkingPart
* in the UIMessage (replaced in-place, not appended).
*
* @see docs/chat-architecture.md#thinkingreasoning-content — Thinking flow
*/
handleStepFinishedEvent(chunk) {
const { messageId, state } = this.ensureAssistantMessage(
this.getActiveAssistantMessageId() ?? void 0
);
const previous = state.thinkingContent;
let nextThinking = previous;
if (chunk.delta && chunk.delta !== "") {
// Delta-style: append.
nextThinking = previous + chunk.delta;
} else if (chunk.content && chunk.content !== "") {
// Snapshot-style: same prefix reconciliation as text content.
if (chunk.content.startsWith(previous)) {
nextThinking = chunk.content;
} else if (previous.startsWith(chunk.content)) {
nextThinking = previous;
} else {
nextThinking = previous + chunk.content;
}
}
state.thinkingContent = nextThinking;
this.messages = updateThinkingPart(
this.messages,
messageId,
state.thinkingContent
);
this.emitMessagesChange();
this.events.onThinkingUpdate?.(messageId, state.thinkingContent);
}
/**
* Handle CUSTOM event.
*
* Handles special custom events emitted by the TextEngine (not adapters):
* - 'tool-input-available': Client tool needs execution. Fires onToolCall.
* - 'approval-requested': Tool needs user approval. Updates tool-call part
*   state and fires onApprovalRequest.
*
* @see docs/chat-architecture.md#client-tools-and-approval-flows — Full flow details
*/
handleCustomEvent(chunk) {
const messageId = this.getActiveAssistantMessageId();
if (chunk.name === "tool-input-available" && chunk.value) {
const { toolCallId, toolName, input } = chunk.value;
this.events.onToolCall?.({
toolCallId,
toolName,
input
});
return;
}
if (chunk.name === "approval-requested" && chunk.value) {
const { toolCallId, toolName, input, approval } = chunk.value;
// Fall back to the tool-call ownership map if no message is active.
const resolvedMessageId = messageId ?? this.toolCallToMessage.get(toolCallId);
if (resolvedMessageId) {
this.messages = updateToolCallApproval(
this.messages,
resolvedMessageId,
toolCallId,
approval.id
);
this.emitMessagesChange();
}
this.events.onApprovalRequest?.({
toolCallId,
toolName,
input,
approvalId: approval.id
});
return;
}
// Unrecognized custom events are forwarded verbatim to the app.
if (this.events.onCustomEvent) {
const toolCallId = chunk.value && typeof chunk.value === "object" ? chunk.value.toolCallId : void 0;
this.events.onCustomEvent(chunk.name, chunk.value, { toolCallId });
}
}
// ============================================
// Internal Helpers
// ============================================
/**
* Detect if an incoming content chunk represents a NEW text segment
*/
isNewTextSegment(chunk, previous) {
if (chunk.content !== void 0) {
if (chunk.content.length < previous.length) {
return true;
}
if (!chunk.content.startsWith(previous) && !previous.startsWith(chunk.content)) {
return true;
}
}
return false;
}
/**
* Complete all tool calls across all active messages — safety net for stream termination.
*
* Called by RUN_FINISHED and finalizeStream(). Force-transitions any tool call
* not yet in input-complete state. Handles cases where TOOL_CALL_END was
* missed (adapter bug, network error, aborted stream).
*
* @see docs/chat-architecture.md#single-shot-tool-call-response — Safety net behavior
*/
completeAllToolCalls() {
for (const messageId of this.activeMessageIds) {
this.completeAllToolCallsForMessage(messageId);
}
}
/**
* Complete all tool calls for a specific message
*/
completeAllToolCallsForMessage(messageId) {
const state = this.getMessageState(messageId);
if (!state) return;
state.toolCalls.forEach((toolCall, id) => {
if (toolCall.state !== "input-complete") {
const index = state.toolCallOrder.indexOf(id);
this.completeToolCall(messageId, index, toolCall);
}
});
}
/**
* Mark a tool call as complete and emit event
*/
completeToolCall(messageId, _index, toolCall) {
toolCall.state = "input-complete";
// Final parse of the fully-accumulated argument string.
toolCall.parsedArguments = this.jsonParser.parse(toolCall.arguments);
this.messages = updateToolCallPart(this.messages, messageId, {
id: toolCall.id,
name: toolCall.name,
arguments: toolCall.arguments,
state: "input-complete"
});
this.emitMessagesChange();
this.events.onToolCallStateChange?.(
messageId,
toolCall.id,
"input-complete",
toolCall.arguments
);
}
/**
* Emit pending text update for a specific message.
*
* Calls updateTextPart() which has critical append-vs-replace logic:
* - If last UIMessage part is TextPart → replaces its content (same segment).
* - If last part is anything else → pushes new TextPart (new segment after tools).
*
* @see docs/chat-architecture.md#uimessage-part-ordering-invariants — Replace vs. push logic
*/
emitTextUpdateForMessage(messageId) {
const state = this.getMessageState(messageId);
if (!state) return;
state.lastEmittedText = state.currentSegmentText;
this.messages = updateTextPart(
this.messages,
messageId,
state.currentSegmentText
);
this.emitMessagesChange();
this.events.onTextUpdate?.(messageId, state.currentSegmentText);
}
/**
* Emit messages change event
*/
emitMessagesChange() {
// Shallow copy so subscribers can't mutate the internal array.
this.events.onMessagesChange?.([...this.messages]);
}
/**
* Finalize the stream — complete all pending operations.
*
* Called when the async iterable ends (stream closed). Acts as the final
* safety net: completes any remaining tool calls, flushes un-emitted text,
* and fires onStreamEnd.
*
* @see docs/chat-architecture.md#single-shot-text-response — Finalization step
*/
finalizeStream() {
let lastAssistantMessage;
for (const messageId of this.activeMessageIds) {
const state = this.getMessageState(messageId);
if (!state) continue;
this.completeAllToolCallsForMessage(messageId);
if (state.currentSegmentText !== state.lastEmittedText) {
this.emitTextUpdateForMessage(messageId);
}
state.isComplete = true;
const msg = this.messages.find((m) => m.id === messageId);
if (msg && msg.role === "assistant") {
lastAssistantMessage = msg;
}
}
this.activeMessageIds.clear();
// Drop an assistant message that ended up whitespace-only (unless the
// stream errored, in which case it is kept as an error anchor).
// NOTE(review): this early return also skips onStreamEnd for the
// discarded message — confirm that is intentional.
if (lastAssistantMessage && !this.hasError) {
if (this.isWhitespaceOnlyMessage(lastAssistantMessage)) {
this.messages = this.messages.filter(
(m) => m.id !== lastAssistantMessage.id
);
this.emitMessagesChange();
return;
}
}
if (lastAssistantMessage) {
this.events.onStreamEnd?.(lastAssistantMessage);
}
}
/**
* Get completed tool calls in API format (aggregated across all messages)
*/
getCompletedToolCalls() {
const result = [];
for (const state of this.messageStates.values()) {
for (const tc of state.toolCalls.values()) {
if (tc.state === "input-complete") {
result.push({
id: tc.id,
type: "function",
function: {
name: tc.name,
arguments: tc.arguments
}
});
}
}
}
return result;
}
/**
* Get current result (aggregated across all messages)
*/
getResult() {
const toolCalls = this.getCompletedToolCalls();
let content = "";
let thinking = "";
for (const state of this.messageStates.values()) {
content += state.totalTextContent;
thinking += state.thinkingContent;
}
return {
content,
thinking: thinking || void 0,
toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
finishReason: this.finishReason
};
}
/**
* Get current processor state (aggregated across all messages)
*/
getState() {
let content = "";
let thinking = "";
// Merge all per-message tool-call maps into one view (later messages
// overwrite on ID collision, which should not happen in practice).
const toolCalls = /* @__PURE__ */ new Map();
const toolCallOrder = [];
for (const state of this.messageStates.values()) {
content += state.totalTextContent;
thinking += state.thinkingContent;
for (const [id, tc] of state.toolCalls) {
toolCalls.set(id, tc);
}
toolCallOrder.push(...state.toolCallOrder);
}
return {
content,
thinking,
toolCalls,
toolCallOrder,
finishReason: this.finishReason,
done: this.isDone
};
}
/**
* Start recording chunks
*/
startRecording() {
this.recordingEnabled = true;
this.recordingStartTime = Date.now();
this.recording = {
version: "1.0",
timestamp: this.recordingStartTime,
chunks: []
};
}
/**
* Get the current recording
*/
getRecording() {
return this.recording;
}
/**
* Reset stream state (but keep messages)
*/
resetStreamState() {
this.messageStates.clear();
this.activeMessageIds.clear();
this.activeRuns.clear();
this.toolCallToMessage.clear();
this.pendingManualMessageId = null;
this.finishReason = null;
this.hasError = false;
this.isDone = false;
this.chunkStrategy.reset?.();
}
/**
* Full reset (including messages)
*/
reset() {
this.resetStreamState();
this.messages = [];
}
/**
* Check if a message contains only whitespace text and no other meaningful parts
* (no tool calls, tool results, thinking, etc.)
*/
isWhitespaceOnlyMessage(message) {
if (message.parts.length === 0) return false;
return message.parts.every(
(part) => part.type === "text" && part.content.trim() === ""
);
}
/**
* Replay a recording through the processor.
* Builds a fresh StreamProcessor and feeds it the recorded chunk sequence.
*/
static async replay(recording, options) {
const processor = new StreamProcessor(options);
return processor.process(createReplayStream(recording));
}
}
/**
* Wrap a recording in an async-iterable that yields its chunks in order.
* Timestamps/indices are ignored; only the raw chunk values are replayed.
*/
function createReplayStream(recording) {
const entries = recording.chunks;
return {
// eslint-disable-next-line @typescript-eslint/require-await
async *[Symbol.asyncIterator]() {
for (let i = 0; i < entries.length; i++) {
yield entries[i].chunk;
}
}
};
}
export {
StreamProcessor,
createReplayStream
};
//# sourceMappingURL=processor.js.map