@gork-labs/secondbrain-mcp
Second Brain MCP Server - Agent team orchestration with dynamic tool discovery
/**
* Simple Integration Example for OpenAI Function Calling
*
* This shows how to integrate the OpenAI function calling executor into
* the existing SecondBrain MCP server to replace regex-based tool parsing.
*
* Created: 2025-07-25T17:17:13+02:00
* Author: Staff Software Engineer - Gorka
*/
import { OpenAIFunctionCallingExecutor, FunctionCallingConfig } from './openai-function-calling.js';
/**
* Example integration in server.ts executeWithOpenAI method
*/
export class SimplifiedServerIntegration {
mcpClientManager;
openaiExecutor;
constructor(mcpClientManager) {
this.mcpClientManager = mcpClientManager;
// Initialize OpenAI executor with API key from environment
const apiKey = FunctionCallingConfig.getOpenAIApiKey();
this.openaiExecutor = new OpenAIFunctionCallingExecutor(
    apiKey,
    this.mcpClientManager.getSafeTools(),
    20 // maxIterations
);
}
/**
* Enhanced executeWithOpenAI method using OpenAI function calling
*/
async executeWithOpenAI(instructions, chatmodeDefinition, sessionId) {
// Check if this chatmode should use OpenAI function calling
if (FunctionCallingConfig.shouldUseOpenAIFunctionCalling(chatmodeDefinition.name)) {
// Use OpenAI native function calling
return await this.openaiExecutor.executeSubAgent(
    instructions,
    chatmodeDefinition.name,
    sessionId,
    (toolName, args) => this.mcpClientManager.callTool(toolName, args)
);
}
else {
// Fallback to existing regex-based implementation
return await this.executeWithOpenAILegacy(instructions, chatmodeDefinition, sessionId);
}
}
/**
* Legacy implementation (existing regex-based approach)
* Keep this as fallback during transition
*/
async executeWithOpenAILegacy(instructions, chatmodeDefinition, sessionId) {
// This would be the existing implementation
// from server.ts lines 1850-2100
throw new Error('Legacy implementation placeholder');
}
}
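/**
 * Illustrative usage sketch (an assumption, not part of the package API):
 * how server.ts might construct the integration next to its existing
 * McpClientManager instance and delegate one sub-agent run to it. The
 * chatmode shape `{ name: ... }` is inferred from the example class above,
 * which only reads `chatmodeDefinition.name`.
 */
export async function exampleServerUsage(mcpClientManager) {
  const integration = new SimplifiedServerIntegration(mcpClientManager);
  // Chatmodes not configured for OpenAI function calling are routed to the
  // legacy path, which in this example file is only a placeholder.
  return await integration.executeWithOpenAI(
    'Review the authentication module for potential injection issues',
    { name: 'Security Engineer' },
    'session-example-001'
  );
}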
/**
* Environment Configuration Guide
*
* To use OpenAI function calling, set these environment variables:
*
* # Required: OpenAI API key
* OPENAI_API_KEY=your-openai-api-key
*
* # Optional: comma-separated list of chatmodes that should use OpenAI function calling (if not set, all chatmodes use it)
* SECONDBRAIN_FUNCTION_CALLING_CHATMODES=Software Engineer,Security Engineer,Database Architect
*/
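/**
 * Minimal sketch (an assumption, for illustration only) of how the optional
 * allow-list variable could be evaluated. The real check lives in
 * FunctionCallingConfig.shouldUseOpenAIFunctionCalling and may differ.
 */
export function isChatmodeAllowedSketch(chatmodeName) {
  const raw = process.env.SECONDBRAIN_FUNCTION_CALLING_CHATMODES;
  // No allow-list configured: every chatmode uses OpenAI function calling.
  if (!raw) {
    return true;
  }
  return raw
    .split(',')
    .map((name) => name.trim())
    .includes(chatmodeName);
}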
/**
* Quick Setup Steps:
*
* 1. Set OPENAI_API_KEY in your .env file
* 2. Import OpenAIFunctionCallingExecutor and FunctionCallingConfig in server.ts
* 3. Replace the executeWithOpenAI method logic with the example above
* 4. Test with one chatmode first (e.g., Software Engineer; see the smoke-test sketch at the end of this file)
* 5. Gradually expand to other chatmodes
*
* Benefits:
* - No more regex parsing errors
* - Built-in JSON validation
* - Better error handling
* - Automatic retry logic
* - Performance monitoring
* - No feature flags needed - just works with OpenAI SDK
*/
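/**
 * Smoke-test sketch for step 4 above (illustrative assumption, not package
 * API): run a single Software Engineer sub-agent through the new path and
 * log the result. The mcpClientManager argument is whatever manager
 * instance server.ts already owns.
 */
export async function smokeTestSoftwareEngineer(mcpClientManager) {
  const integration = new SimplifiedServerIntegration(mcpClientManager);
  const result = await integration.executeWithOpenAI(
    'Summarize the purpose of each tool exposed by the MCP client manager',
    { name: 'Software Engineer' },
    `smoke-${Date.now()}`
  );
  console.log('Software Engineer smoke test:', result);
  return result;
}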