@mariozechner/pi-agent
Version:
General-purpose agent with transport abstraction, state management, and attachment support
311 lines • 11.6 kB
JavaScript
import { getModel } from "@mariozechner/pi-ai";
/**
* Default message transformer: Keep only LLM-compatible messages, strip app-specific fields.
* Converts attachments to proper content blocks (images → ImageContent, documents → TextContent).
*/
/**
 * Default message transformer: keep only LLM-compatible messages and strip
 * app-specific fields. User-message attachments are converted into content
 * blocks (images → image blocks, documents with extracted text → text blocks).
 *
 * @param {Array<object>} messages - App-level conversation messages.
 * @returns {Array<object>} LLM-ready messages; non-LLM roles are dropped.
 */
function defaultMessageTransformer(messages) {
    // Only these roles are forwarded to the LLM.
    const llmRoles = new Set(["user", "assistant", "toolResult"]);
    const transformed = [];
    for (const message of messages) {
        if (!llmRoles.has(message.role)) {
            continue;
        }
        // Assistant / toolResult messages pass through untouched.
        if (message.role !== "user") {
            transformed.push(message);
            continue;
        }
        // User message: drop the app-specific `attachments` field.
        const { attachments, ...stripped } = message;
        if (!attachments?.length) {
            transformed.push(stripped);
            continue;
        }
        // Normalize content to a block array before appending attachment blocks.
        const blocks = Array.isArray(stripped.content)
            ? [...stripped.content]
            : [{ type: "text", text: stripped.content }];
        for (const attachment of attachments) {
            if (attachment.type === "image") {
                blocks.push({
                    type: "image",
                    data: attachment.content,
                    mimeType: attachment.mimeType,
                });
            }
            else if (attachment.type === "document" && attachment.extractedText) {
                // Documents contribute their extracted text, labeled by file name.
                blocks.push({
                    type: "text",
                    text: `\n\n[Document: ${attachment.fileName}]\n${attachment.extractedText}`,
                    isDocument: true,
                });
            }
            // Other attachment types (or documents without text) are silently dropped.
        }
        transformed.push({ ...stripped, content: blocks });
    }
    return transformed;
}
/**
 * General-purpose agent that drives an LLM conversation through a pluggable
 * transport. All mutable conversation state lives in `_state` (exposed via the
 * `state` getter); transport events are forwarded to subscribers via `emit`.
 * Supports mid-turn message queueing, aborting, and awaiting completion of an
 * in-flight prompt.
 */
export class Agent {
// Internal mutable state. The mutators below update it without emitting
// events; listeners only ever see transport events.
_state = {
systemPrompt: "",
// Default model; can be overridden via `opts.initialState` or `setModel`.
model: getModel("google", "gemini-2.5-flash-lite-preview-06-17"),
thinkingLevel: "off",
tools: [],
messages: [],
isStreaming: false,
streamMessage: null,
pendingToolCalls: new Set(),
error: undefined,
};
// Listener callbacks registered via `subscribe`; each receives every event.
listeners = new Set();
// AbortController for the in-flight prompt; undefined when idle.
abortController;
// Transport implementing `run(llmMessages, userMessage, cfg, signal)` as an
// async iterable of events (see the switch in `prompt` for the event types).
transport;
// Maps app-level messages to LLM-compatible ones; may be async.
messageTransformer;
// Messages queued via `queueMessage`, drained by `cfg.getQueuedMessages`.
messageQueue = [];
// "one-at-a-time" (default) or drain-all; see `getQueuedMessages` in `prompt`.
queueMode;
// Promise resolved when the current `prompt` call finishes (see waitForIdle).
runningPrompt;
resolveRunningPrompt;
/**
 * @param {object} opts
 * @param {object} opts.transport - Required event-stream transport.
 * @param {object} [opts.initialState] - Partial state merged over defaults.
 * @param {Function} [opts.messageTransformer] - Defaults to defaultMessageTransformer.
 * @param {string} [opts.queueMode] - Defaults to "one-at-a-time".
 */
constructor(opts) {
this._state = { ...this._state, ...opts.initialState };
this.transport = opts.transport;
this.messageTransformer = opts.messageTransformer || defaultMessageTransformer;
this.queueMode = opts.queueMode || "one-at-a-time";
}
// NOTE: returns the live internal object, not a copy — callers must not mutate it.
get state() {
return this._state;
}
/**
 * Register an event listener. Returns an unsubscribe function.
 */
subscribe(fn) {
this.listeners.add(fn);
return () => this.listeners.delete(fn);
}
// State mutators - update internal state without emitting events
setSystemPrompt(v) {
this._state.systemPrompt = v;
}
setModel(m) {
this._state.model = m;
}
setThinkingLevel(l) {
this._state.thinkingLevel = l;
}
setQueueMode(mode) {
this.queueMode = mode;
}
getQueueMode() {
return this.queueMode;
}
setTools(t) {
this._state.tools = t;
}
// Replace the message list with a defensive copy of `ms`.
replaceMessages(ms) {
this._state.messages = ms.slice();
}
// Append immutably so previous snapshots of `messages` stay valid.
appendMessage(m) {
this._state.messages = [...this._state.messages, m];
}
/**
 * Queue a message for injection at the next turn boundary. The message is
 * transformed eagerly; `llm` is undefined if the transformer filtered it out.
 */
async queueMessage(m) {
// Transform message and queue it for injection at next turn
const transformed = await this.messageTransformer([m]);
this.messageQueue.push({
original: m,
llm: transformed[0], // undefined if filtered out
});
}
clearMessageQueue() {
this.messageQueue = [];
}
clearMessages() {
this._state.messages = [];
}
// Abort the in-flight prompt, if any (no-op when idle).
abort() {
this.abortController?.abort();
}
/**
 * Returns a promise that resolves when the current prompt completes.
 * Returns immediately resolved promise if no prompt is running.
 */
waitForIdle() {
return this.runningPrompt ?? Promise.resolve();
}
/**
 * Clear all messages and state. Call abort() first if a prompt is in flight.
 */
reset() {
this._state.messages = [];
this._state.isStreaming = false;
this._state.streamMessage = null;
this._state.pendingToolCalls = new Set();
this._state.error = undefined;
this.messageQueue = [];
}
/**
 * Run one prompt through the transport, streaming events to listeners and
 * folding completed messages into state. Resolves when the agent run ends
 * (including on error/abort — errors are captured as an assistant message
 * with `errorMessage` rather than rethrown).
 *
 * NOTE(review): `userMessage` is handed to the transport but never appended
 * to `_state.messages` here — presumably the transport echoes it back via a
 * `message_end` event; verify against the transport implementation.
 *
 * @param {string} input - User text.
 * @param {Array<object>} [attachments] - Optional image/document attachments.
 * @throws {Error} "No model configured" if state has no model.
 */
async prompt(input, attachments) {
const model = this._state.model;
if (!model) {
throw new Error("No model configured");
}
// Set up running prompt tracking
this.runningPrompt = new Promise((resolve) => {
this.resolveRunningPrompt = resolve;
});
// Build user message with attachments
// Same attachment→block conversion as defaultMessageTransformer.
const content = [{ type: "text", text: input }];
if (attachments?.length) {
for (const a of attachments) {
if (a.type === "image") {
content.push({ type: "image", data: a.content, mimeType: a.mimeType });
}
else if (a.type === "document" && a.extractedText) {
content.push({
type: "text",
text: `\n\n[Document: ${a.fileName}]\n${a.extractedText}`,
isDocument: true,
});
}
}
}
const userMessage = {
role: "user",
content,
// Raw attachments kept for app-level consumers; stripped by the transformer.
attachments: attachments?.length ? attachments : undefined,
timestamp: Date.now(),
};
this.abortController = new AbortController();
this._state.isStreaming = true;
this._state.streamMessage = null;
this._state.error = undefined;
// Map thinkingLevel → transport reasoning: "off" → none, "minimal" → "low",
// everything else passes through unchanged.
const reasoning = this._state.thinkingLevel === "off"
? undefined
: this._state.thinkingLevel === "minimal"
? "low"
: this._state.thinkingLevel;
const cfg = {
systemPrompt: this._state.systemPrompt,
tools: this._state.tools,
model,
reasoning,
// Called by the transport at turn boundaries to drain queued messages.
getQueuedMessages: async () => {
// Return queued messages based on queue mode
if (this.queueMode === "one-at-a-time") {
// Return only first message
if (this.messageQueue.length > 0) {
const first = this.messageQueue[0];
this.messageQueue = this.messageQueue.slice(1);
return [first];
}
return [];
}
else {
// Return all queued messages at once
const queued = this.messageQueue.slice();
this.messageQueue = [];
return queued;
}
},
};
// Track all messages generated in this prompt
const generatedMessages = [];
try {
// Last streamed-but-unfinished message; flushed after the loop if the
// stream ended without a matching message_end.
let partial = null;
// Transform app messages to LLM-compatible messages (initial set)
const llmMessages = await this.messageTransformer(this._state.messages);
for await (const ev of this.transport.run(llmMessages, userMessage, cfg, this.abortController.signal)) {
// Pass through all events directly
this.emit(ev);
// Update internal state as needed
switch (ev.type) {
case "message_start": {
// Track streaming message
partial = ev.message;
this._state.streamMessage = ev.message;
break;
}
case "message_update": {
// Update streaming message
partial = ev.message;
this._state.streamMessage = ev.message;
break;
}
case "message_end": {
// Add completed message to state
partial = null;
this._state.streamMessage = null;
this.appendMessage(ev.message);
generatedMessages.push(ev.message);
break;
}
case "tool_execution_start": {
// Copy-on-write so prior Set snapshots remain stable.
const s = new Set(this._state.pendingToolCalls);
s.add(ev.toolCallId);
this._state.pendingToolCalls = s;
break;
}
case "tool_execution_end": {
const s = new Set(this._state.pendingToolCalls);
s.delete(ev.toolCallId);
this._state.pendingToolCalls = s;
break;
}
case "turn_end": {
// Capture error from turn_end event
if (ev.message.role === "assistant" && ev.message.errorMessage) {
this._state.error = ev.message.errorMessage;
}
break;
}
case "agent_end": {
this._state.streamMessage = null;
break;
}
}
}
// Handle any remaining partial message
if (partial && partial.role === "assistant" && partial.content.length > 0) {
// Keep the partial only if it contains at least one non-empty block.
const onlyEmpty = !partial.content.some((c) => (c.type === "thinking" && c.thinking.trim().length > 0) ||
(c.type === "text" && c.text.trim().length > 0) ||
(c.type === "toolCall" && c.name.trim().length > 0));
if (!onlyEmpty) {
this.appendMessage(partial);
generatedMessages.push(partial);
}
else {
// Empty partial after an abort: surface it as an error (handled by
// the catch below, which records an "aborted" assistant message).
if (this.abortController?.signal.aborted) {
throw new Error("Request was aborted");
}
}
}
}
catch (err) {
// Record the failure as a synthetic assistant message (zeroed usage) so the
// conversation history shows what happened; stopReason distinguishes
// user aborts from genuine errors. Errors are not rethrown.
const msg = {
role: "assistant",
content: [{ type: "text", text: "" }],
api: model.api,
provider: model.provider,
model: model.id,
usage: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
},
stopReason: this.abortController?.signal.aborted ? "aborted" : "error",
errorMessage: err?.message || String(err),
timestamp: Date.now(),
};
this.appendMessage(msg);
generatedMessages.push(msg);
this._state.error = err?.message || String(err);
}
finally {
// Always return to the idle state and release waiters, even on throw.
this._state.isStreaming = false;
this._state.streamMessage = null;
this._state.pendingToolCalls = new Set();
this.abortController = undefined;
this.resolveRunningPrompt?.();
this.runningPrompt = undefined;
this.resolveRunningPrompt = undefined;
}
}
// Fan an event out to all subscribers (synchronously, in insertion order).
emit(e) {
for (const listener of this.listeners) {
listener(e);
}
}
}
//# sourceMappingURL=agent.js.map