vibe-coder-mcp
Version:
Production-ready MCP server with complete agent integration, multi-transport support, and comprehensive development automation tools for AI-assisted workflows.
275 lines (272 loc) • 15.7 kB
JavaScript
import axios from 'axios';
import https from 'https';
import logger from '../logger.js';
import { sequentialThoughtSchema } from '../types/sequentialThought.js';
import { ApiError, ParsingError, ValidationError, AppError, FallbackError } from '../utils/errors.js';
import { selectModelForTask } from '../utils/configLoader.js';
// Hardened HTTPS agent shared by all outbound OpenRouter calls in this module
// (passed to axios as `httpsAgent`): strict certificate validation, TLS pinned
// to the 1.2-1.3 range, an explicit modern ECDHE cipher list, and keep-alive
// so repeated completions requests can reuse sockets.
const httpsAgent = new https.Agent({
  rejectUnauthorized: true, // never accept invalid/self-signed certificates
  maxVersion: 'TLSv1.3',
  minVersion: 'TLSv1.2', // refuse legacy TLS 1.0/1.1
  ciphers: 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384',
  honorCipherOrder: true, // prefer our cipher ordering over the server's
  keepAlive: true, // reuse TCP connections across requests
  timeout: 30000 // socket idle timeout in ms
});
/**
 * Base system prompt for the sequential-thinking loop.
 *
 * Instructs the model to reason step by step, revise earlier thoughts, and
 * reply on EVERY turn with a strict JSON object containing exactly the fields
 * `thought`, `next_thought_needed`, `thought_number`, and `total_thoughts`
 * (the shape enforced by `sequentialThoughtSchema`). Consumed by
 * `processWithSequentialThinking`, which may append a task-specific system
 * prompt after it. The template-literal content below is a runtime string and
 * must not be edited casually — the parser/validator depends on the JSON
 * contract it describes.
 */
export const SEQUENTIAL_THINKING_SYSTEM_PROMPT = `
You are a dynamic and reflective problem-solver that analyzes problems through a flexible thinking process that can adapt and evolve.
Each thought can build on, question, or revise previous insights as understanding deepens.
Follow these guidelines:
1. Start with an initial estimate of needed thoughts, but be ready to adjust.
2. Feel free to question or revise previous thoughts within the 'thought' text itself.
3. Don't hesitate to add more thoughts if needed, even if it exceeds the initial 'total_thoughts' estimate.
4. Express uncertainty when present.
5. Ignore information that is irrelevant to the current step.
6. Generate a solution hypothesis when appropriate.
7. Verify the hypothesis based on the Chain of Thought steps.
8. Repeat the process until satisfied with the solution.
9. Provide a single, correct answer or the final generated content within the 'thought' field of the last step.
10. Only set next_thought_needed to false when truly done and a satisfactory answer is reached.
Your response MUST be a valid JSON object with ONLY these fields:
- thought: (string) Your current thinking step, analysis, or generated content for this step.
- next_thought_needed: (boolean) True if you need more thinking steps to complete the task, False otherwise.
- thought_number: (integer) Current step number in the sequence (must be positive).
- total_thoughts: (integer) Current estimate of the total thoughts needed (must be positive, can be adjusted).
`;
/**
 * Drives an iterative "sequential thinking" loop against the configured LLM.
 *
 * Starting from an empty seed thought, repeatedly asks `getNextThought` for
 * the next reasoning step — feeding all prior thoughts back as context — until
 * the model reports `next_thought_needed === false` or a hard cap of 10
 * thoughts is reached. Each step gets up to 3 attempts: format errors
 * (ValidationError/ParsingError) trigger a corrective re-prompt, while
 * FallbackError, ApiError, and unexpected errors abort immediately.
 *
 * @param {string} userPrompt - The task the model should reason about.
 * @param {object} config - LLM configuration (baseUrl, apiKey, llm_mapping, ...).
 * @param {string} [systemPrompt] - Optional task-specific system prompt appended
 *   after the built-in sequential-thinking instructions.
 * @returns {Promise<string>} The `thought` text of the final step.
 * @throws {FallbackError|ApiError|AppError|Error} Propagated from
 *   `getNextThought`, or raised when all retries for one step are exhausted.
 */
export async function processWithSequentialThinking(userPrompt, config, systemPrompt) {
  const MAX_SEQUENTIAL_THOUGHTS = 10;
  logger.debug({
    configReceived: true,
    hasLlmMapping: Boolean(config.llm_mapping),
    mappingKeys: config.llm_mapping ? Object.keys(config.llm_mapping) : []
  }, 'processWithSequentialThinking received config');
  const history = [];
  // Seed state: the loop runs until the model clears next_thought_needed.
  let activeThought = {
    thought: '',
    next_thought_needed: true,
    thought_number: 1,
    total_thoughts: 5
  };
  const fullSystemPrompt = systemPrompt
    ? `${SEQUENTIAL_THINKING_SYSTEM_PROMPT}\n\n${systemPrompt}`
    : SEQUENTIAL_THINKING_SYSTEM_PROMPT;
  // Render every prior thought as context for the next LLM call.
  const renderHistory = () => {
    if (history.length === 0) {
      return '';
    }
    const rendered = history.map(t => `[Thought ${t.thought_number}/${t.total_thoughts}]: ${t.thought}`);
    return 'Previous thoughts:\n' + rendered.join('\n\n');
  };
  while (activeThought.next_thought_needed && history.length < MAX_SEQUENTIAL_THOUGHTS) {
    const context = renderHistory();
    const initialPrompt = context
      ? `${context}\n\nTask: ${userPrompt}\n\nContinue with the next thought:`
      : `Task: ${userPrompt}\n\nProvide your first thought:`;
    logger.debug(`Processing thought ${activeThought.thought_number} (total estimate: ${activeThought.total_thoughts})...`);
    const maxRetries = 3;
    let lastError = null;
    let promptForLLM = initialPrompt;
    let nextThought = null;
    for (let attempt = 1; attempt <= maxRetries; attempt++) {
      try {
        nextThought = await getNextThought(promptForLLM, fullSystemPrompt, config, activeThought.thought_number);
        lastError = null;
        logger.debug(`Attempt ${attempt} to get thought ${activeThought.thought_number} succeeded.`);
        break;
      }
      catch (error) {
        lastError = error;
        logger.warn({ err: lastError, attempt, maxRetries }, `Attempt ${attempt} to get thought ${activeThought.thought_number} failed.`);
        // A FallbackError means getNextThought already exhausted its own
        // retries on a formatting problem — abort the whole process.
        if (error instanceof FallbackError) {
          logger.error({ err: error, rawContent: error.rawContent }, "Sequential thinking aborted due to persistent LLM formatting error (FallbackError). Not retrying.");
          throw error;
        }
        // API-level failures (auth, rate limit, 5xx) are not recoverable here.
        if (error instanceof ApiError) {
          logger.error({ err: error }, "API error occurred - not retrying");
          throw error;
        }
        const formatError = error instanceof ValidationError || error instanceof ParsingError;
        if (formatError && attempt < maxRetries) {
          // Re-prompt with the failure message so the model can self-correct.
          promptForLLM = `${initialPrompt}\n\nYour previous attempt (attempt ${attempt}) failed with this error: ${lastError.message}\nPlease carefully review the required JSON format and schema described in the system prompt, then provide a valid JSON object.\nRetry thought generation:`;
          logger.info(`Retrying thought generation (attempt ${attempt + 1})...`);
        }
        else if (attempt === maxRetries) {
          // Exhausted: fall through so lastError is thrown after the loop.
          logger.error(`All ${maxRetries} attempts failed for thought ${activeThought.thought_number}. Final error will be thrown.`);
        }
        else {
          logger.error({ err: error }, "Unexpected error occurred during thought generation - not retrying.");
          throw error;
        }
      }
    }
    if (lastError !== null) {
      throw lastError;
    }
    if (!nextThought) {
      logger.error("Internal error: nextThought is null after retry loop without throwing an error.");
      throw new Error("Internal error: Failed to retrieve thought after retries.");
    }
    history.push(nextThought);
    activeThought = nextThought;
  }
  if (history.length >= MAX_SEQUENTIAL_THOUGHTS && activeThought.next_thought_needed) {
    const message = `Sequential thinking process terminated after reaching the maximum limit of ${MAX_SEQUENTIAL_THOUGHTS} thoughts. The final thought may be incomplete or represent a fallback state.`;
    logger.error({
      finalThoughtNumber: activeThought.thought_number,
      maxThoughts: MAX_SEQUENTIAL_THOUGHTS,
    }, message);
  }
  return activeThought.thought;
}
/**
 * Performs one LLM call that produces the next sequential-thought step.
 *
 * Posts to the OpenRouter-compatible `/chat/completions` endpoint with a
 * JSON-object response format, strips markdown fences/garbage around the
 * returned JSON, parses it, and validates it against `sequentialThoughtSchema`.
 * Transient network failures (ECONNRESET and a known TLS bad-record-mac reset)
 * are retried with linear backoff; persistent formatting failures are wrapped
 * in FallbackError so the caller can abort instead of re-prompting.
 *
 * @param {string} prompt - User-role content (task plus prior-thought context).
 * @param {string} systemPrompt - Full system prompt including the JSON contract.
 * @param {object} config - { baseUrl, apiKey, geminiModel?, llm_mapping? }.
 * @param {number} currentThoughtNumber - Step number, used only for log context.
 * @returns {Promise<object>} The validated sequential-thought object.
 * @throws {ApiError} For HTTP failures, including 4xx statuses (see fix below).
 * @throws {FallbackError} When the model persistently returns malformed JSON.
 * @throws {ParsingError|ValidationError|AppError} For other content failures.
 */
export async function getNextThought(prompt, systemPrompt, config, currentThoughtNumber) {
  const logicalTaskName = 'sequential_thought_generation';
  const maxRetries = 2;
  let lastError = null;
  const defaultModel = config.geminiModel || "google/gemini-2.5-flash-preview-05-20";
  const modelToUse = selectModelForTask(config, logicalTaskName, defaultModel);
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      logger.debug(`Attempt ${attempt}/${maxRetries} to call OpenRouter for sequential thought using model ${modelToUse}...`);
      const response = await axios.post(`${config.baseUrl}/chat/completions`, {
        model: modelToUse,
        messages: [
          {
            role: "system",
            content: systemPrompt
          },
          {
            role: "user",
            content: prompt
          }
        ],
        response_format: { type: "json_object" },
        max_tokens: 2000,
        temperature: 0.5
      }, {
        headers: {
          "Content-Type": "application/json",
          "Authorization": `Bearer ${config.apiKey}`,
          "HTTP-Referer": "https://vibe-coder-mcp.local"
        },
        httpsAgent: httpsAgent,
        timeout: 90000,
        maxRedirects: 5,
        // Resolve (don't throw) on 4xx so we can classify those ourselves.
        validateStatus: (status) => status < 500
      });
      // FIX: because validateStatus accepts any status < 500, a 4xx response
      // (invalid API key, rate limit, unknown model, ...) previously fell
      // through to the "No choices found" branch and surfaced as a misleading
      // ParsingError. Classify it as an ApiError instead — callers already
      // treat ApiError as non-retryable.
      if (response.status >= 400) {
        const detail = response.data && typeof response.data === 'object'
          ? JSON.stringify(response.data).slice(0, 500)
          : String(response.data ?? '');
        throw new ApiError(`OpenRouter API Error: Status ${response.status}. ${detail}`, response.status, { model: modelToUse, logicalTaskName });
      }
      if (response.data.choices && response.data.choices.length > 0) {
        const rawContent = response.data.choices[0].message.content;
        let jsonContent = rawContent.trim();
        let cleaned = false;
        // Models sometimes wrap the JSON in markdown fences or surrounding
        // prose; extract the outermost {...} span when one exists.
        const firstBrace = rawContent.indexOf('{');
        const lastBrace = rawContent.lastIndexOf('}');
        if (firstBrace !== -1 && lastBrace !== -1 && lastBrace > firstBrace) {
          const extracted = rawContent.substring(firstBrace, lastBrace + 1);
          // Defensive check (always true by construction of the substring).
          if (extracted.startsWith('{') && extracted.endsWith('}')) {
            jsonContent = extracted;
            if (jsonContent !== rawContent.trim()) {
              logger.debug({ raw: rawContent, cleaned: jsonContent }, "Stripped potential garbage/fences from LLM JSON response.");
              cleaned = true;
            }
          }
          else {
            logger.warn({ raw: rawContent }, "Found braces but extracted content doesn't look like JSON, using trimmed raw content for parsing.");
            jsonContent = rawContent.trim();
          }
        }
        else {
          logger.warn({ raw: rawContent }, "Could not find expected JSON object braces, attempting to parse trimmed raw content.");
          jsonContent = rawContent.trim();
        }
        let parsedContent;
        let validationResult;
        try {
          parsedContent = JSON.parse(jsonContent);
          validationResult = sequentialThoughtSchema.safeParse(parsedContent);
          if (validationResult.success) {
            logger.debug('Sequential thought successfully parsed and validated.');
            return validationResult.data;
          }
          else {
            logger.error({
              message: 'Zod validation failed for SequentialThought',
              errors: validationResult.error.issues,
              rawContent: rawContent,
            }, 'Sequential thought schema validation failed');
            throw new ValidationError(`Sequential thought validation failed: ${validationResult.error.message}`, validationResult.error.issues, { rawContent: rawContent, cleanedContent: cleaned ? jsonContent : undefined });
          }
        }
        catch (parseOrValidationError) {
          logger.warn({
            message: 'JSON parsing or validation failed in getNextThought attempt',
            error: parseOrValidationError instanceof Error ? parseOrValidationError.message : String(parseOrValidationError),
            rawContent: rawContent,
            cleanedContent: cleaned ? jsonContent : undefined,
            attempt: attempt,
          });
          // Preserve raw/cleaned content so the final FallbackError check below
          // can decide whether this was a persistent formatting problem.
          const errorContext = { rawContent: rawContent, cleanedContent: cleaned ? jsonContent : undefined };
          if (parseOrValidationError instanceof SyntaxError) {
            throw new ParsingError(`LLM output was not valid JSON: ${parseOrValidationError.message}`, errorContext, parseOrValidationError);
          }
          else if (parseOrValidationError instanceof ValidationError) {
            throw new ValidationError(parseOrValidationError.message, parseOrValidationError.validationIssues, { ...parseOrValidationError.context, ...errorContext });
          }
          else {
            throw new AppError(`Unexpected error during parsing/validation: ${parseOrValidationError instanceof Error ? parseOrValidationError.message : String(parseOrValidationError)}`, errorContext, parseOrValidationError instanceof Error ? parseOrValidationError : undefined);
          }
        }
      }
      else {
        logger.warn({ responseData: response.data }, "No choices found in LLM response for sequential thought.");
        throw new ParsingError("No response choices received from model for sequential thought", { responseData: response.data });
      }
    }
    catch (error) {
      lastError = error;
      // Only socket resets and a known OpenSSL bad-record-mac alert are
      // considered transient; everything else breaks out immediately.
      const isRetryableNetworkError = (axios.isAxiosError(error) &&
        (error.code === 'ECONNRESET' || (error.message && error.message.includes('SSL routines:ssl3_read_bytes:sslv3 alert bad record mac'))));
      if (isRetryableNetworkError && attempt < maxRetries) {
        const delay = 500 * attempt; // linear backoff
        logger.warn({ err: lastError, attempt, maxRetries, delay }, `Attempt ${attempt} failed with retryable network error. Retrying in ${delay}ms...`);
        await new Promise(resolve => setTimeout(resolve, delay));
      }
      else {
        logger.warn({ err: lastError, attempt, maxRetries, isRetryable: isRetryableNetworkError }, `Not retrying after attempt ${attempt}.`);
        break;
      }
    }
  }
  if (lastError) {
    // If the last failure was a formatting problem and we still have the raw
    // model output, escalate to FallbackError so the caller stops re-prompting.
    let errorRawContent;
    if (lastError instanceof AppError && lastError.context?.rawContent && typeof lastError.context.rawContent === 'string') {
      errorRawContent = lastError.context.rawContent;
    }
    if ((lastError instanceof ParsingError || lastError instanceof ValidationError) && errorRawContent !== undefined) {
      logger.error({
        message: 'Persistent LLM formatting error after retries and cleaning. Throwing FallbackError.',
        originalError: lastError.message,
        rawContent: errorRawContent,
        cleanedContent: (lastError instanceof AppError && lastError.context?.cleanedContent) ? lastError.context.cleanedContent : undefined,
        thoughtNumber: currentThoughtNumber,
      });
      throw new FallbackError(`Sequential thinking aborted after ${maxRetries} attempts due to persistent LLM formatting error: ${lastError.message}`, errorRawContent, lastError);
    }
    logger.error({ err: lastError, modelUsed: modelToUse, thoughtNumber: currentThoughtNumber }, `Sequential thought generation failed after ${maxRetries} attempts.`);
    if (axios.isAxiosError(lastError)) {
      const axiosError = lastError;
      const status = axiosError.response?.status;
      const apiMessage = `OpenRouter API Error: Status ${status || 'N/A'}. ${axiosError.message}`;
      const apiError = new ApiError(apiMessage, status, { model: modelToUse, logicalTaskName }, axiosError);
      throw apiError;
    }
    else if (lastError instanceof AppError) {
      throw lastError;
    }
    else if (lastError instanceof Error) {
      throw new AppError(`Failed to get next thought after retries: ${lastError.message}`, undefined, lastError);
    }
    else {
      throw new AppError("Unknown failure while getting next thought after retries.");
    }
  }
  throw new Error("Internal error: Reached end of getNextThought without success or error throw.");
}