@skyramp/mcp
Skyramp MCP (Model Context Protocol) Server - AI-powered test generation and execution
import { logger } from "../utils/logger.js";
import { getModularizationPrompt } from "../utils/modularization-prompts.js";
// Wraps an incoming prompt with modularization context and returns an
// MCP-style { content, isError } response for the calling LLM.
export class ModularizationService {
    constructor() {
        // Initialize any required dependencies
    }
    // Validates the request, builds the modularization response, and returns it
    // in the MCP content format; failures are reported as isError results.
    async processModularizationRequest(params) {
        try {
            const validationResult = this.validateInputs(params);
            if (validationResult.isError) {
                return validationResult;
            }
            const processedResponse = this.buildModularizationResponse(params);
            const finalResponse = this.applyModularization(processedResponse, params);
            logger.info("Modularization request processed successfully", {
                promptLength: params.prompt.length,
                contextType: params.contextType,
                includeModularization: params.includeModularization,
            });
            return {
                content: [
                    {
                        type: "text",
                        text: finalResponse,
                    },
                ],
                isError: false,
            };
        }
        catch (error) {
            logger.error("Error in modularization service", {
                error: error instanceof Error ? error.message : String(error),
            });
            return {
                content: [
                    {
                        type: "text",
                        text: `Modularization service failed: ${error instanceof Error ? error.message : String(error)}`,
                    },
                ],
                isError: true,
            };
        }
    }
    // Checks that the prompt is non-empty and within the 10,000 character limit;
    // returns an isError response listing any violations.
    validateInputs(params) {
        const errors = [];
        if (!params.prompt || params.prompt.trim().length === 0) {
            errors.push("Prompt cannot be empty");
        }
        if (params.prompt && params.prompt.length > 10000) {
            errors.push("Prompt is too long (maximum 10,000 characters)");
        }
        if (errors.length > 0) {
            return {
                content: [
                    {
                        type: "text",
                        text: `Validation errors:\n${errors
                            .map((err) => `- ${err}`)
                            .join("\n")}`,
                    },
                ],
                isError: true,
            };
        }
        return { content: [], isError: false };
    }
    // Prefixes the processed prompt with a context header and embeds instructions
    // for the calling LLM to produce a modularized answer.
    buildModularizationResponse(params) {
        const contextPrefix = this.getContextPrefix(params.contextType || "general");
        const processedPrompt = this.processPrompt(params.prompt);
        return `${contextPrefix}
**Processed Prompt:** "${processedPrompt}"
**MCP Modularization Context:**
This tool has received and processed your prompt within the MCP (Model Context Protocol) framework with modularization principles applied. Since this MCP tool is being called by an LLM (like Claude), the LLM should now respond to the original prompt with structured, modularized output.
**Original Prompt:**
"${params.prompt}"
**Instructions for LLM:**
The LLM should provide a comprehensive, modularized response based on the prompt content, applying modularization principles to structure the output with logical grouping, clear separation of concerns, and maintainable organization.`;
    }
    // Returns a header for the supported context types, falling back to the
    // general header for unknown values.
    getContextPrefix(contextType) {
        switch (contextType) {
            case "technical":
                return "**Technical Context Analysis:**\nProcessing technical prompt with specialized context handling.";
            case "creative":
                return "**Creative Context Analysis:**\nProcessing creative prompt with enhanced imaginative context.";
            default:
                return "**General Context Analysis:**\nProcessing prompt with standard context handling.";
        }
    }
    processPrompt(prompt) {
        // Apply basic prompt processing (trim, normalize whitespace)
        return prompt.trim().replace(/\s+/g, " ");
    }
    // Appends the shared modularization prompt when includeModularization is set;
    // otherwise returns the response unchanged.
    applyModularization(response, params) {
        if (params.includeModularization) {
            return `${response}
${getModularizationPrompt()}
**Modularization Applied:**
The response has been structured using modularization principles to improve readability and maintainability.`;
        }
        return response;
    }
}
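
A minimal usage sketch, not part of the published file: the import path and the surrounding wiring are assumptions for illustration, while the argument fields (prompt, contextType, includeModularization) and the { content, isError } result shape come from the service above.

// Illustrative only: import path and call site are assumed, not taken from the package.
import { ModularizationService } from "./modularization-service.js";

const service = new ModularizationService();
const result = await service.processModularizationRequest({
    prompt: "Break the checkout flow into smaller, testable modules",
    contextType: "technical",
    includeModularization: true,
});

if (result.isError) {
    console.error(result.content[0].text);
} else {
    console.log(result.content[0].text);
}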