orchestry-mcp
Version:
Orchestry MCP Server for multi-session task management
324 lines (287 loc) • 11.7 kB
text/typescript
/**
* LLM Collaboration Tools for Orchestry
*
* These tools are designed to be automatically invoked by LLMs when they detect
* work that needs to be tracked or coordinated with other sessions.
*/
import { DatabaseManager } from '../database-manager.js';
import { Database } from '../database.js';
/**
 * Declares the MCP tool schemas that LLM sessions invoke automatically to
 * track work (epics/stories), report progress, and coordinate with other
 * concurrent sessions.
 *
 * Each descriptor carries a `name`, a `description` written as instructions
 * to the LLM on *when* to call the tool, and a JSON-Schema `inputSchema`.
 */
export class LLMCollaborationTools {
  // NOTE(review): dbManager and sessionId are not read anywhere in this
  // class — presumably the tool handlers elsewhere use them; confirm.
  constructor(
    private readonly dbManager: DatabaseManager,
    private readonly sessionId: string
  ) {}

  /**
   * Returns the descriptors for the eight collaboration tools:
   * track_epic, track_story, update_progress, check_team_status,
   * handoff_work, request_collaboration, log_decision, sync_context.
   */
  getCollaborationTools() {
    return [
      {
        name: 'track_epic',
        description: `Automatically track high-level epics when discussing major features or initiatives.
Use this when the user mentions building a significant feature or system.
Example: When user says "Let's build an authentication system", track it as an epic.`,
        inputSchema: {
          type: 'object',
          properties: {
            title: { type: 'string', description: 'Epic title (e.g., "Authentication System")' },
            context: { type: 'string', description: 'What the user is trying to achieve' },
            estimatedScope: {
              type: 'string',
              enum: ['small', 'medium', 'large', 'epic'],
              description: 'Estimated scope of work'
            },
          },
          required: ['title', 'context'],
        },
      },
      {
        name: 'track_story',
        description: `Track user stories or tasks within an epic.
Use this when breaking down work into smaller, actionable pieces.
Example: When implementing "user login", track it as a story under "Authentication System" epic.`,
        inputSchema: {
          type: 'object',
          properties: {
            epicTitle: { type: 'string', description: 'Parent epic title' },
            storyTitle: { type: 'string', description: 'Story title' },
            acceptanceCriteria: {
              type: 'array',
              items: { type: 'string' },
              description: 'What needs to be done for this story to be complete'
            },
            currentProgress: { type: 'string', description: 'What has been done so far' },
          },
          required: ['epicTitle', 'storyTitle'],
        },
      },
      {
        name: 'update_progress',
        description: `Update progress on a story or epic.
Use this when completing work or making significant progress.
Example: After implementing a feature, update its progress.`,
        inputSchema: {
          type: 'object',
          properties: {
            storyTitle: { type: 'string', description: 'Story or epic title' },
            progress: {
              type: 'string',
              enum: ['started', 'in_progress', 'blocked', 'review', 'completed'],
              description: 'Current status'
            },
            notes: { type: 'string', description: 'What was accomplished or what is blocking' },
            completedItems: {
              type: 'array',
              items: { type: 'string' },
              description: 'List of completed items'
            },
          },
          required: ['storyTitle', 'progress'],
        },
      },
      {
        name: 'check_team_status',
        description: `Check what other LLM sessions are working on.
Use this to avoid duplicate work and coordinate efforts.
Example: Before starting major work, check if another session is already working on it.`,
        inputSchema: {
          type: 'object',
          properties: {
            scope: {
              type: 'string',
              enum: ['current_project', 'all_active'],
              description: 'Scope of status check'
            },
          },
          // No required fields: a bare status check defaults server-side.
        },
      },
      {
        name: 'handoff_work',
        description: `Hand off work to another session or mark work as ready for pickup.
Use this when you've prepared work that another session should continue.
Example: After setting up the backend API, hand off frontend work to another session.`,
        inputSchema: {
          type: 'object',
          properties: {
            storyTitle: { type: 'string', description: 'Story being handed off' },
            handoffNotes: { type: 'string', description: 'Context and instructions for the next session' },
            suggestedNextSteps: {
              type: 'array',
              items: { type: 'string' },
              description: 'Suggested next steps'
            },
            targetSession: { type: 'string', description: 'Optional: specific session to hand off to' },
          },
          required: ['storyTitle', 'handoffNotes'],
        },
      },
      {
        name: 'request_collaboration',
        description: `Request help or collaboration from other sessions.
Use this when you need expertise or help that another session might provide.
Example: "Need help with database schema design for the user management system"`,
        inputSchema: {
          type: 'object',
          properties: {
            requestType: {
              type: 'string',
              enum: ['help', 'review', 'pair_work', 'expertise'],
              description: 'Type of collaboration needed'
            },
            topic: { type: 'string', description: 'What you need help with' },
            context: { type: 'string', description: 'Current situation and what you\'ve tried' },
            priority: {
              type: 'string',
              enum: ['low', 'medium', 'high', 'blocking'],
              description: 'How urgent is this request'
            },
          },
          required: ['requestType', 'topic', 'context'],
        },
      },
      {
        name: 'log_decision',
        description: `Log important technical or architectural decisions.
Use this when making decisions that other sessions should know about.
Example: "Decided to use PostgreSQL instead of MongoDB for better relational data handling"`,
        inputSchema: {
          type: 'object',
          properties: {
            decision: { type: 'string', description: 'The decision made' },
            reasoning: { type: 'string', description: 'Why this decision was made' },
            alternatives: {
              type: 'array',
              items: { type: 'string' },
              description: 'Alternatives that were considered'
            },
            impact: { type: 'string', description: 'How this affects the project' },
          },
          required: ['decision', 'reasoning'],
        },
      },
      {
        name: 'sync_context',
        description: `Get or share context about the current work session.
Use this at the start of a session to understand what's been done, or at the end to summarize.
Example: At session start, sync context to understand the project state.`,
        inputSchema: {
          type: 'object',
          properties: {
            action: {
              type: 'string',
              enum: ['get', 'share'],
              description: 'Get context from others or share your context'
            },
            summary: { type: 'string', description: 'If sharing, summarize what you worked on' },
            nextSession: { type: 'string', description: 'If sharing, what the next session should focus on' },
          },
          required: ['action'],
        },
      },
    ];
  }
}
/**
 * System-prompt snippet to inject into an LLM session so it calls the
 * collaboration tools automatically (track_epic, track_story,
 * update_progress, sync_context, request_collaboration, log_decision,
 * check_team_status). The text is a runtime string sent to the model;
 * the escaped backticks (\`...\`) render as literal backticks in Markdown.
 */
export const LLM_COLLABORATION_PROMPT = `
# Automatic Task Tracking with Orchestry
You have access to Orchestry MCP for automatic task tracking and collaboration with other LLM sessions.
## When to automatically use Orchestry tools:
1. **Starting a new feature/epic** → Use \`track_epic\`
- User says: "Let's build a chat feature"
- You: Track it as an epic automatically
2. **Breaking down work** → Use \`track_story\`
- When you identify specific tasks within a larger feature
- Track them as stories under the relevant epic
3. **Making progress** → Use \`update_progress\`
- After completing any significant work
- When encountering blockers
4. **Starting a session** → Use \`sync_context\` with action: "get"
- Understand what's been done before
- Check for any handoffs or requests
5. **Ending a session** → Use \`sync_context\` with action: "share"
- Summarize what you accomplished
- Note what should be done next
6. **Need help** → Use \`request_collaboration\`
- When stuck on something
- When you need specific expertise
7. **Important decisions** → Use \`log_decision\`
- Architecture choices
- Technology selections
- Design patterns
## Collaboration Guidelines:
- **Be concise**: Other LLMs need quick context, not novels
- **Be specific**: Clear titles and descriptions help coordination
- **Check before starting**: Use \`check_team_status\` to avoid duplicate work
- **Hand off cleanly**: Leave clear notes for the next session
- **Update regularly**: Keep progress current so others know the state
## Examples:
User: "I need to add user authentication to the app"
You should:
1. Use \`track_epic\` to create "User Authentication" epic
2. Use \`track_story\` for individual stories like "Login Form", "JWT Implementation", etc.
3. Use \`update_progress\` as you complete each part
Remember: You're part of a team of LLM agents. Coordinate effectively!
`;
/**
 * Session-aware wrapper that auto-detects trackable work from conversation
 * text and rate-limits context synchronization to one sync per 10 minutes.
 */
export class SessionAwareOrchestry {
  // Keyword lists hoisted to statics so they are not rebuilt on every
  // autoTrack() call. Matching is substring-based on the lowercased message.
  private static readonly EPIC_KEYWORDS = [
    'build', 'implement', 'create system', 'develop feature', 'add support for',
  ];
  private static readonly STORY_KEYWORDS = [
    'add', 'fix', 'update', 'modify', 'create', 'implement',
  ];

  private readonly sessionId: string;
  // Set to the active project once one is resolved; unused in the visible
  // code — NOTE(review): confirm a later chunk/subclass populates it.
  private projectId: string | null = null;
  private lastSync: Date = new Date();

  /**
   * @param dbManager Storage backend (held for later use; not read here).
   * @param sessionName Optional explicit session id; when omitted, a
   *                    timestamp-derived id is generated.
   */
  constructor(
    private readonly dbManager: DatabaseManager,
    sessionName?: string
  ) {
    this.sessionId = this.generateSessionId(sessionName);
  }

  /**
   * Returns `name` unchanged when given, otherwise `session-<YYYYMMDDhhmm>`.
   * NOTE(review): minute resolution means two unnamed sessions started in
   * the same minute collide — confirm whether ids must be unique.
   */
  private generateSessionId(name?: string): string {
    const timestamp = new Date().toISOString().substring(0, 16).replace(/[^0-9]/g, '');
    return name || `session-${timestamp}`;
  }

  /**
   * Auto-detect when to create epics/stories based on conversation.
   * The checks are independent, so a word in both lists (e.g. "implement")
   * triggers both an epic and a story — NOTE(review): confirm intended.
   */
  async autoTrack(userMessage: string, llmResponse: string): Promise<void> {
    const lowerMessage = userMessage.toLowerCase();
    if (SessionAwareOrchestry.EPIC_KEYWORDS.some((kw) => lowerMessage.includes(kw))) {
      await this.createEpicFromContext(userMessage, llmResponse);
    }
    if (SessionAwareOrchestry.STORY_KEYWORDS.some((kw) => lowerMessage.includes(kw))) {
      await this.createStoryFromContext(userMessage, llmResponse);
    }
  }

  // Placeholder: AI-powered epic extraction would go here.
  private async createEpicFromContext(userMessage: string, context: string): Promise<void> {
    console.log('Auto-tracking epic from:', userMessage);
  }

  // Placeholder: AI-powered story extraction would go here.
  private async createStoryFromContext(userMessage: string, context: string): Promise<void> {
    console.log('Auto-tracking story from:', userMessage);
  }

  /**
   * Returns true when more than 10 minutes have passed since the last sync.
   * Side effect: a `true` result also resets the sync timer, so callers are
   * expected to actually sync when this returns true.
   */
  shouldSyncContext(): boolean {
    const now = new Date();
    const elapsedMs = now.getTime() - this.lastSync.getTime();
    const tenMinutesMs = 10 * 60 * 1000;
    if (elapsedMs > tenMinutesMs) {
      this.lastSync = now;
      return true;
    }
    return false;
  }
}