mnemos-coder
CLI-based coding agent with graph-based execution loop and terminal UI
/**
 * Subagent Response Format
 * Structured response format for subagent-to-main-agent communication
 */
export interface SubagentStructuredResponse {
    /**
     * Response metadata
     */
    metadata: {
        subagentName: string;
        subagentVersion: string;
        taskId: string;
        sessionId: string;
        timestamp: string;
        executionTime: number;
    };
    /**
     * Task execution status
     */
    status: {
        success: boolean;
        code: 'completed' | 'partial' | 'failed' | 'timeout' | 'error';
        message: string;
        confidence: number;
    };
    /**
     * Actual work performed
     */
    execution: {
        /**
         * What the subagent planned to do
         */
        planned: {
            description: string;
            steps: string[];
            estimatedActions: number;
        };
        /**
         * What was actually executed
         */
        performed: {
            steps: ExecutedStep[];
            toolCallsCount: number;
            filesCreated: string[];
            filesModified: string[];
            filesDeleted: string[];
            commandsExecuted: string[];
        };
        /**
         * Verification of work
         */
        verification: {
            checked: boolean;
            method: 'file_exists' | 'test_passed' | 'output_validated' | 'none';
            details?: string;
            evidence?: any;
        };
    };
    /**
     * Deliverables produced
     */
    outputs: {
        /**
         * Primary output/result
         */
        primary: {
            type: 'file' | 'text' | 'data' | 'action' | 'none';
            value: any;
            location?: string;
        };
        /**
         * Secondary outputs
         */
        secondary?: Array<{
            name: string;
            type: string;
            value: any;
            location?: string;
        }>;
        /**
         * Artifacts created (files, configs, etc.)
         */
        artifacts?: Array<{
            type: 'file' | 'directory' | 'config' | 'database' | 'other';
            path: string;
            size?: number;
            checksum?: string;
        }>;
    };
    /**
     * Problems encountered
     */
    issues?: {
        errors: ErrorDetail[];
        warnings: string[];
        limitations: string[];
    };
    /**
     * Recommendations for next steps
     */
    recommendations?: {
        immediate: string[];
        followUp: string[];
        requirements?: string[];
    };
    /**
     * Debug information
     */
    debug?: {
        llmCalls: number;
        toolCalls: ToolCallSummary[];
        contextSize: number;
        memoryUsage?: number;
        logs?: string[];
    };
}
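// Illustrative sketch (not part of the declaration file): a minimal response a
// subagent might return for a small file-creation task. All names and values
// below are hypothetical placeholders, chosen only to satisfy the required fields.
const exampleResponse: SubagentStructuredResponse = {
    metadata: {
        subagentName: 'file-writer',
        subagentVersion: '1.0.0',
        taskId: 'task-001',
        sessionId: 'session-abc',
        timestamp: new Date().toISOString(),
        executionTime: 1250,
    },
    status: {
        success: true,
        code: 'completed',
        message: 'Created requested config file',
        confidence: 0.9,
    },
    execution: {
        planned: {
            description: 'Write a default config file',
            steps: ['generate content', 'write file', 'verify file exists'],
            estimatedActions: 3,
        },
        performed: {
            steps: [
                {
                    order: 1,
                    description: 'Wrote config.json',
                    action: 'tool_call',
                    tool: 'write_file',
                    status: 'success',
                },
            ],
            toolCallsCount: 1,
            filesCreated: ['config.json'],
            filesModified: [],
            filesDeleted: [],
            commandsExecuted: [],
        },
        verification: {
            checked: true,
            method: 'file_exists',
            details: 'Confirmed config.json is present on disk',
        },
    },
    outputs: {
        primary: {
            type: 'file',
            value: 'config.json',
            location: 'config.json',
        },
    },
};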
export interface ExecutedStep {
    order: number;
    description: string;
    action: 'tool_call' | 'llm_query' | 'validation' | 'other';
    tool?: string;
    status: 'success' | 'failed' | 'skipped';
    duration?: number;
    result?: any;
    error?: string;
}
export interface ErrorDetail {
    code: string;
    message: string;
    severity: 'critical' | 'error' | 'warning';
    context?: string;
    stackTrace?: string;
    recoverable: boolean;
}
export interface ToolCallSummary {
    name: string;
    count: number;
    successCount: number;
    failureCount: number;
    avgDuration?: number;
}
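// Illustrative sketch (not part of the declaration file): one plausible way to
// roll ExecutedStep records up into the ToolCallSummary entries used by the
// debug block above. The aggregation mnemos-coder actually uses is not shown here.
function summarizeToolCalls(steps: ExecutedStep[]): ToolCallSummary[] {
    const byTool = new Map<string, { count: number; success: number; failure: number; durations: number[] }>();
    for (const step of steps) {
        // Only tool invocations contribute to the per-tool summary.
        if (step.action !== 'tool_call' || !step.tool) continue;
        const entry = byTool.get(step.tool) ?? { count: 0, success: 0, failure: 0, durations: [] };
        entry.count++;
        if (step.status === 'success') entry.success++;
        if (step.status === 'failed') entry.failure++;
        if (typeof step.duration === 'number') entry.durations.push(step.duration);
        byTool.set(step.tool, entry);
    }
    return Array.from(byTool.entries()).map(([name, e]) => ({
        name,
        count: e.count,
        successCount: e.success,
        failureCount: e.failure,
        avgDuration: e.durations.length
            ? e.durations.reduce((a, b) => a + b, 0) / e.durations.length
            : undefined,
    }));
}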
/**
 * Response formatter for subagents
 */
export declare class SubagentResponseFormatter {
    /**
     * Create a success response
     */
    static success(subagentName: string, taskId: string, execution: SubagentStructuredResponse['execution'], outputs: SubagentStructuredResponse['outputs'], options?: {
        sessionId?: string;
        confidence?: number;
        recommendations?: SubagentStructuredResponse['recommendations'];
        debug?: SubagentStructuredResponse['debug'];
    }): SubagentStructuredResponse;
    /**
     * Create a failure response
     */
    static failure(subagentName: string, taskId: string, error: string, execution?: Partial<SubagentStructuredResponse['execution']>, issues?: SubagentStructuredResponse['issues']): SubagentStructuredResponse;
    /**
     * Validate response format
     */
    static validate(response: any): response is SubagentStructuredResponse;
    /**
     * Extract key information for logging
     */
    static summarize(response: SubagentStructuredResponse): string;
}
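// Illustrative sketch (not part of the declaration file): consuming code can build
// and inspect responses through the static helpers declared above. Only the
// signatures in this file are assumed; the concrete formatting logic lives in the
// implementation, not here. The names and messages below are hypothetical.
const failed = SubagentResponseFormatter.failure(
    'file-writer',                    // hypothetical subagent name
    'task-002',                       // hypothetical task id
    'Target directory is read-only',  // error message
);
if (SubagentResponseFormatter.validate(failed)) {
    // summarize() is declared to return a short string suitable for logging.
    console.log(SubagentResponseFormatter.summarize(failed));
}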
/**
 * Response template for subagents to use
 */
export declare const SUBAGENT_RESPONSE_TEMPLATE = "\nWhen completing a task, format your response as follows:\n\n<tool_response>\n{\n \"metadata\": {\n \"subagentName\": \"[your_name]\",\n \"taskId\": \"[task_id]\",\n \"timestamp\": \"[ISO_timestamp]\"\n },\n \"status\": {\n \"success\": true/false,\n \"code\": \"completed|partial|failed|timeout|error\",\n \"message\": \"Clear status message\",\n \"confidence\": 0.0-1.0\n },\n \"execution\": {\n \"planned\": {\n \"description\": \"What you intended to do\",\n \"steps\": [\"step1\", \"step2\", ...],\n \"estimatedActions\": number\n },\n \"performed\": {\n \"steps\": [\n {\n \"order\": 1,\n \"description\": \"What you did\",\n \"action\": \"tool_call\",\n \"tool\": \"tool_name\",\n \"status\": \"success|failed|skipped\",\n \"result\": \"...\"\n }\n ],\n \"toolCallsCount\": number,\n \"filesCreated\": [\"file1.py\", \"file2.js\"],\n \"filesModified\": [],\n \"filesDeleted\": [],\n \"commandsExecuted\": [\"npm install\", \"python test.py\"]\n },\n \"verification\": {\n \"checked\": true/false,\n \"method\": \"file_exists|test_passed|output_validated|none\",\n \"details\": \"How you verified the work\",\n \"evidence\": \"Actual proof of completion\"\n }\n },\n \"outputs\": {\n \"primary\": {\n \"type\": \"file|text|data|action|none\",\n \"value\": \"...\",\n \"location\": \"/path/to/output\"\n },\n \"artifacts\": [\n {\n \"type\": \"file\",\n \"path\": \"/path/to/artifact\",\n \"size\": 1234\n }\n ]\n },\n \"issues\": {\n \"errors\": [],\n \"warnings\": [],\n \"limitations\": []\n },\n \"recommendations\": {\n \"immediate\": [\"Things to do now\"],\n \"followUp\": [\"Things to consider later\"]\n }\n}\n</tool_response>\n\nIMPORTANT: \n- Always verify that your work was actually completed\n- Include evidence of completion (file paths, test results, etc.)\n- Be honest about confidence level\n- List all files/artifacts created\n- Report any issues encountered\n";
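// Illustrative sketch (not part of the declaration file): the template above asks
// subagents to wrap their JSON payload in <tool_response> tags. One plausible way
// for the main agent to recover and validate that payload is shown below; this is
// not necessarily how mnemos-coder itself parses subagent replies.
function extractToolResponse(reply: string): SubagentStructuredResponse | null {
    const match = reply.match(/<tool_response>([\s\S]*?)<\/tool_response>/);
    if (!match) return null;
    try {
        const parsed = JSON.parse(match[1]);
        return SubagentResponseFormatter.validate(parsed) ? parsed : null;
    } catch {
        // Malformed JSON inside the envelope is treated as no structured response.
        return null;
    }
}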
//# sourceMappingURL=SubagentResponseFormat.d.ts.map