@simonecoelhosfo/optimizely-mcp-server
Version:
Optimizely MCP Server for AI assistants with integrated CLI tools
989 lines (933 loc) • 272 kB
JavaScript
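Because the server communicates over stdio, any MCP-capable client can spawn it as a subprocess and call the tools listed in the source below. The following sketch is a hypothetical client session using the same @modelcontextprotocol/sdk that the server itself imports; the entry path passed to args, the token value, and the project ID are placeholders and assumptions, not part of this package — adjust them to your installation. OPTIMIZELY_API_TOKEN and OPTIMIZELY_PROJECT_IDS are the environment variables the server checks during initialization.

// client-example.mjs — hypothetical MCP client session (sketch only; path and credentials are placeholders)
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

// Spawn the server as a child process; stdout carries MCP JSON-RPC, logs stay in files/stderr.
const transport = new StdioClientTransport({
  command: "node",
  args: ["node_modules/@simonecoelhosfo/optimizely-mcp-server/dist/index.js"], // assumed entry path
  env: {
    OPTIMIZELY_API_TOKEN: "your-token-here",
    OPTIMIZELY_PROJECT_IDS: "123456",
  },
});

const client = new Client({ name: "example-client", version: "0.0.1" }, { capabilities: {} });
await client.connect(transport);

// Discover the tools defined in setupServerHandlers(), then call one of them.
const { tools } = await client.listTools();
console.log(tools.map((t) => t.name)); // e.g. analyze_data, list_projects, manage_cache, ...
const projects = await client.callTool({ name: "list_projects", arguments: {} });
console.log(projects.content);

await client.close();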
/**
* Main entry point for Optimizely MCP Server
* @description Initializes and starts the MCP server with StdioServerTransport
*
* CRITICAL: This entry point is designed for MCP StdioServerTransport compatibility.
* It uses file-based logging and stderr for debugging to prevent stdout contamination
* which would break the MCP JSON-RPC 2.0 communication protocol.
*
* @author Optimizely MCP Server
* @version 1.0.0
*/
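// Minimal illustration of the constraint described above (an added sketch, not part of this package's source):
// with StdioServerTransport, everything written to stdout is parsed by the client as a JSON-RPC frame,
// so ad-hoc output must go to stderr or a log file instead.
//
//   console.log("starting...");        // UNSAFE: the MCP client would try to parse this as protocol data
//   console.error("starting...");      // safe: stderr is not part of the JSON-RPC stream
//   getLogger().info("starting...");   // safe: this server's logger writes to a file (see Step 2 in initialize())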
// Load environment variables from .env file
import * as dotenv from "dotenv";
// Only load dotenv if not in MCP mode (no MCP environment variable set)
if (!process.env.MCP_MODE) {
dotenv.config();
}
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { CallToolRequestSchema, ListToolsRequestSchema, ListResourcesRequestSchema, ReadResourceRequestSchema, ErrorCode, McpError, } from "@modelcontextprotocol/sdk/types.js";
import { MCPErrorMapper, MCPErrorUtils, getMCPErrorContext, } from "./errors/MCPErrorMapping.js";
import { ConfigManager } from "./config/ConfigManager.js";
import { ProjectFilter } from "./config/ProjectFilter.js";
import { OptimizelyAPIHelper } from "./api/OptimizelyAPIHelper.js";
import { SQLiteEngine } from "./storage/SQLiteEngine.js";
import { CacheManager } from "./cache/CacheManager.js";
import { OptimizelyMCPTools } from "./tools/OptimizelyMCPTools.js";
import { getToolReferenceTool } from "./tools/GetToolReference.js";
import { SyncScheduler } from "./sync/SyncScheduler.js";
import { createLogger, getLogger, shutdownLogger } from "./logging/Logger.js";
import { HardStopError, HardStopErrorType } from "./errors/HardStopError.js";
import { ResponseWrapper, } from "./utils/ResponseWrapper.js";
import { DatabaseCleanupManager } from "./utils/DatabaseCleanupManager.js";
import path from "path";
import { promises as fs } from "fs";
import os from "os";
import { fileURLToPath } from "url";
// Get project root directory
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const PROJECT_ROOT = path.resolve(__dirname, "../");
/**
* Main MCP Server class for Optimizely integration
* @description Orchestrates the MCP server initialization, configuration loading,
* and component integration while maintaining separation between business logic
* and MCP protocol concerns.
*/
export class OptimizelyMCPServer {
server;
config;
apiHelper = null;
cacheManager = null;
optimizelyTools = null;
syncScheduler = null;
isInitialized = false;
databaseResetRequired = null;
cacheEmpty = false;
/**
* Creates a new OptimizelyMCPServer instance
* @description Sets up the MCP server with appropriate capabilities
*/
constructor() {
this.server = new Server({
name: "optimizely-mcp-server",
version: "1.0.0",
}, {
capabilities: {
tools: {},
resources: {
list: true,
},
},
});
this.config = new ConfigManager();
this.setupServerHandlers();
}
/**
* Initializes the server components in the correct order
* @description Loads configuration, sets up logging, initializes Optimizely components
* @throws {Error} When initialization fails
* @private
*/
async initialize() {
if (this.isInitialized) {
return;
}
try {
// Step 1: Load and validate configuration
await this.config.loadConfig();
const serverConfig = this.config.getConfig();
// Step 2: Initialize logging with MCP-safe configuration
createLogger({
logLevel: serverConfig.logging.level,
logFile: serverConfig.logging.logFile,
consoleLogging: serverConfig.logging.consoleLogging,
prettyPrint: serverConfig.logging.prettyPrint,
maxFileSize: serverConfig.logging.maxFileSize,
maxFiles: serverConfig.logging.maxFiles,
});
// Initialize MCP Debug Logger for STDIO communication debugging
// MCPDebugLogger.initialize(undefined, PROJECT_ROOT);
// MCPDebugLogger.logRawMessage('OUT', `MCP Server Starting from: ${PROJECT_ROOT}`);
getLogger().info("OPTIMIZELY MCP SERVER - STARTUP DEBUG INFO");
getLogger().info(`PROJECT_ROOT: ${PROJECT_ROOT}`);
getLogger().info(`process.cwd(): ${process.cwd()}`);
getLogger().info(`__dirname: ${__dirname}`);
getLogger().info("Server starting initialization...");
getLogger().info({
serverName: "optimizely-mcp-server",
version: "1.0.0",
}, "Optimizely MCP Server starting initialization");
// Step 3: Validate critical configuration
if (!serverConfig.optimizely.apiToken) {
throw new Error("Optimizely API token is required. Set OPTIMIZELY_API_TOKEN environment variable.");
}
// Log token info for debugging (mask most of it for security)
const tokenPreview = serverConfig.optimizely.apiToken.substring(0, 10) +
"..." +
serverConfig.optimizely.apiToken.substring(serverConfig.optimizely.apiToken.length - 4);
getLogger().info({ tokenPreview }, "Using Optimizely API token");
// Step 4: Validate project configuration
const projects = serverConfig.optimizely.projects;
const hasProjectIds = projects.allowedIds && projects.allowedIds.length > 0;
const hasProjectNames = projects.allowedNames && projects.allowedNames.length > 0;
const autoDiscover = projects.autoDiscoverAll;
if (!hasProjectIds && !hasProjectNames && !autoDiscover) {
throw new Error("Project filtering must be configured. Set OPTIMIZELY_PROJECT_IDS, OPTIMIZELY_PROJECT_NAMES, or OPTIMIZELY_AUTO_DISCOVER_ALL");
}
// Step 5: Initialize Optimizely API helper
this.apiHelper = new OptimizelyAPIHelper(serverConfig.optimizely.apiToken, {
baseUrl: serverConfig.optimizely.baseUrl,
flagsUrl: serverConfig.optimizely.flagsUrl,
requestsPerMinute: serverConfig.optimizely.rateLimits?.requestsPerMinute,
requestsPerSecond: serverConfig.optimizely.rateLimits?.requestsPerSecond,
retryAttempts: serverConfig.optimizely.retries?.maxAttempts,
retryDelay: serverConfig.optimizely.retries?.baseDelay,
});
// Step 6: Test API connectivity
const healthCheck = await this.apiHelper.healthCheck();
if (healthCheck.status !== "healthy") {
throw new Error(`Optimizely API health check failed: ${healthCheck.error}`);
}
getLogger().info("Optimizely API connectivity verified");
// Step 7: Initialize storage engine
const defaultDbPath = "./data/optimizely-cache.db";
// CRITICAL FIX: Force absolute path for WSL environment
// Use environment variable if set, otherwise use sensible default in home directory
const finalDbPath = process.env.STORAGE_DATABASE_PATH
? path.resolve(process.env.STORAGE_DATABASE_PATH) // Use exactly what user specified
: path.join(os.homedir(), ".optimizely-mcp", "cache.db"); // Sensible default in home directory
getLogger().info({ dbPath: finalDbPath }, "Using database path");
// CRITICAL: Clean up orphaned database connections before opening
const cleanupManager = new DatabaseCleanupManager({
dbPath: finalDbPath,
timeoutMs: 10000,
forceCleanup: false,
verbose: false,
});
const cleanupSuccess = await cleanupManager.cleanup();
if (!cleanupSuccess) {
getLogger().warn({ dbPath: finalDbPath }, "Database cleanup failed, proceeding with caution");
}
const storageEngine = new SQLiteEngine({
path: finalDbPath,
backupDir: serverConfig.storage.backupDir,
verbose: serverConfig.storage.verbose
? getLogger().debug.bind(getLogger())
: undefined,
});
await storageEngine.init();
// Step 8: Initialize project filter
const projectFilter = new ProjectFilter(serverConfig.optimizely.projects);
// Step 9: Initialize cache manager with project filter and config
this.cacheManager = new CacheManager(storageEngine, this.apiHelper, projectFilter, serverConfig);
// Try to initialize cache manager - may require database reset confirmation
try {
await this.cacheManager.init();
}
catch (error) {
if (error.code === "DATABASE_RESET_REQUIRED") {
// Store the error details for later use in tool handlers
this.databaseResetRequired = error.details;
getLogger().warn("Database reset required - tools will prompt user for confirmation", error.details);
// Continue initialization without the cache for now
// Tool handlers will check this flag and return appropriate messages
}
else {
getLogger().error({
error: error.message,
code: error.code,
dbPath: finalDbPath,
}, "Cache manager initialization failed");
throw error;
}
}
// Step 10: Check cache status but DO NOT BLOCK on sync
let cacheEmpty = false;
if (!this.databaseResetRequired && this.cacheManager) {
const syncStatus = await this.cacheManager.getSyncStatus();
if (!syncStatus.last_full_sync || syncStatus.project_count === 0) {
cacheEmpty = true;
getLogger().warn("Cache is empty - will need initialization before use");
// DO NOT perform blocking sync here! Let the server start responding to MCP
}
else {
getLogger().info({
lastSync: syncStatus.last_full_sync,
projectCount: syncStatus.project_count,
flagCount: syncStatus.flag_count,
}, "Cache already populated");
}
}
// Store cache state for tool handlers to check
this.cacheEmpty = cacheEmpty;
// Step 11: Initialize MCP tools (business logic layer - UNCHANGED)
this.optimizelyTools = new OptimizelyMCPTools(this.cacheManager, this.config);
// Step 12: Initialize and start sync scheduler if auto-sync is enabled (only if cache is initialized)
const cacheConfig = serverConfig.cache;
getLogger().info({
databaseResetRequired: this.databaseResetRequired,
autoSyncEnabled: cacheConfig.autoSync,
cacheManagerExists: !!this.cacheManager,
syncIntervalMinutes: cacheConfig.syncIntervalMinutes,
}, "Checking auto-sync configuration");
if (!this.databaseResetRequired &&
cacheConfig.autoSync &&
this.cacheManager) {
getLogger().info({
intervalMinutes: cacheConfig.syncIntervalMinutes,
}, "Auto-sync enabled, starting scheduler");
this.syncScheduler = new SyncScheduler(this.cacheManager, {
intervalMinutes: cacheConfig.syncIntervalMinutes || 60,
useIncremental: true, // Always use incremental for auto-sync
maxRetries: 3,
backoffMultiplier: 2,
});
await this.syncScheduler.start();
getLogger().info("SyncScheduler successfully started");
// Pass sync scheduler reference to tools for diagnostics
this.optimizelyTools.setSyncScheduler(this.syncScheduler);
}
else if (this.databaseResetRequired) {
getLogger().error({
databaseResetRequired: this.databaseResetRequired,
}, "Auto-sync disabled due to pending database reset");
}
else {
getLogger().warn({
autoSync: cacheConfig.autoSync,
cacheManager: !!this.cacheManager,
}, "Auto-sync disabled - check configuration");
}
this.isInitialized = true;
getLogger().info("Optimizely MCP Server initialization completed successfully");
}
catch (error) {
getLogger().error({
error: error.message,
stack: error.stack,
}, "Optimizely MCP Server initialization failed");
throw error;
}
}
/**
* Sets up MCP server request handlers
* @description Registers handlers for tools and resources according to MCP protocol
* @private
*/
setupServerHandlers() {
// Tool listing handler
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
// Tool listing doesn't require full initialization
return {
tools: [
// š PRIMARY ANALYTICS TOOL - Use this FIRST for all data analysis and queries
{
name: "analyze_data",
description: `ANALYTICS QUERY ENGINE - Template-based data analysis for all your data needs
⚠️ ALWAYS call get_entity_documentation(entity_type="view_name") FIRST to get available fields!
BASIC QUERY STRUCTURE:
{
from: "view_name", // REQUIRED - what data to query
where: { ... }, // OPTIONAL - filter conditions
select: ["field"], // OPTIONAL - specific fields (default: all)
limit: 20 // OPTIONAL - max rows (default: 10)
}
AGGREGATION QUERIES (Count, Sum, etc.):
When using aggregate functions, you MUST follow this pattern:
{
from: "view_name",
select: ["field1", "field2"], // Fields to show in results
group_by: ["field1", "field2"], // MUST include ALL fields from select
aggregate: { count: "*" }, // The aggregation function
order_by: { field: "count", direction: "desc" } // Use function name as field
}
⚠️ CRITICAL AGGREGATION RULES:
1. When using aggregate, ALWAYS include group_by
2. group_by MUST list ALL fields from select
3. To order by aggregate result, use the function name (e.g., "count", "sum")
COMMON PATTERNS:
1) Count items by category:
{
from: "experiments",
select: ["status"], // What to group by
group_by: ["status"], // Must match select
aggregate: { count: "*" }, // Count rows
order_by: { field: "count", direction: "desc" }
}
2) Count by multiple fields:
{
from: "pages_flat",
select: ["project_id", "project_name"], // Two fields
group_by: ["project_id", "project_name"], // Must list both
aggregate: { count: "*" },
order_by: { field: "count", direction: "desc" }
}
3) Sum numeric values:
{
from: "flags",
select: ["environment_key"],
group_by: ["environment_key"],
aggregate: { sum: "rollout_percentage" },
order_by: { field: "sum", direction: "desc" }
}
WHERE FILTERS:
• Equality: { flag_key: "my-flag" }
• Multiple values: { status: ["running", "paused"] }
• Text search: { name: "contains:checkout" }
• Numeric: { rollout_percentage: { ">" : 50 } }
• Boolean: { enabled: true } (converts to 1/0)
MAIN VIEWS:
• flags_unified_view - Feature flags with A/B test rules (rule_type='a/b')
• experiments_unified_view - ALL experiments from both platforms:
- Web Experimentation: Traditional standalone experiments
- Feature Experimentation: A/B test rules within flags (rule_type='a/b')
⢠audiences_flat/pages_flat - Audiences and pages
⢠change_history_flat - Audit log
š PLATFORM-SPECIFIC QUERY EXAMPLES:
Find Feature Experimentation A/B tests:
{
from: "flags_unified_view",
where: { rule_type: "a/b", enabled: true },
select: ["flag_key", "flag_name", "environment_key", "rule_name"]
}
Find Web Experimentation experiments:
{
from: "experiments_unified_view",
where: { platform: "web", status: "running" },
select: ["experiment_name", "campaign_name", "variation_count"]
}
Find ALL experiments from both platforms:
{
from: "experiments_unified_view",
select: ["experiment_name", "platform", "status_category"],
order_by: { field: "experiment_name", direction: "asc" }
}
TIP: Use get_entity_documentation(entity_type="view_name") to see all available fields for any view.
PERFORMANCE ANALYSIS BEST PRACTICE:
For performance analysis queries, ALWAYS use this sequence:
1. get_insights FIRST - AI-powered overview and recommendations
2. get_analytics SECOND - Detailed health scores and environment metrics
3. analyze_data LAST - Drill into specific data points identified above
Example: "Analyze flag performance in development environment"
→ Call get_insights → review recommendations → get_analytics for health scores → analyze_data to query specific flags/metrics mentioned in insights
PERFORMANCE QUERIES REQUIRE ALL THREE TOOLS IN THIS ORDER!`,
inputSchema: {
type: "object",
properties: {
structured_query: {
type: "object",
description: "Template-based query system. Use get_entity_documentation with the exact view name for field discovery.",
properties: {
from: {
type: "string",
enum: [
"flags_unified_view",
"flag_variations_flat",
"flag_variation_variables",
"flag_variables_summary",
"experiments_unified_view",
"audiences_flat",
"pages_flat",
"entity_usage_view",
"flag_state_history_view",
"analytics_summary_view",
"experiment_audiences_flat",
"experiment_events_flat",
"experiment_pages_flat",
"change_history_flat",
"experiment_code_analysis_view",
"experiment_code_snippets_flat",
"project_code_security_view",
"code_search_patterns_view",
],
description: "Database view to query",
},
select: {
type: "array",
items: { type: "string" },
description: "Fields to return. Use get_entity_documentation for available fields.",
},
where: {
type: "object",
description: `Filter conditions with multiple operator support:
EQUALITY (most common):
{ flag_key: "my-flag" } - Exact match
{ project_id: "123456" } - Always use strings for IDs
MULTIPLE VALUES (IN clause):
{ status: ["running", "paused"] } - Any of these values
{ flag_key: ["flag1", "flag2", "flag3"] } - Multiple flags
TEXT SEARCH (contains):
{ flag_name: "contains:checkout" } - Case-insensitive substring
{ description: "contains:experiment" } - Works on any text field
NUMERIC COMPARISONS:
{ rollout_percentage: { ">": 50 } } - Greater than 50
{ days_since_creation: { "<": 30 } } - Less than 30 days
{ count: { ">=": 100 } } - Greater or equal to 100
Operators: ">", "<", ">=", "<="
NULL CHECKS:
{ description: null } - Find records with NULL description
BOOLEAN VALUES:
{ enabled: true } - Automatically converts to 1
{ archived: false } - Automatically converts to 0
COMBINED CONDITIONS (AND logic):
{
project_id: "123",
enabled: true,
rollout_percentage: { ">": 0 },
environment_key: "production"
}
SPECIAL FIELD BEHAVIORS:
⢠key/name fields: Case-insensitive comparison
⢠JSON fields: Use "contains:" for searching within
⢠Date fields: Use days_since_* fields for relative dates`,
additionalProperties: true,
},
group_by: {
type: "array",
items: { type: "string" },
description: "Group by fields for aggregations",
},
order_by: {
type: "object",
properties: {
field: { type: "string" },
direction: { type: "string", enum: ["asc", "desc"] },
},
description: "Sort order",
},
aggregate: {
type: "object",
description: `Aggregation functions (use with group_by):
⢠count: { count: "*" } - Count all rows
⢠count: { count: "flag_id" } - Count non-null values
⢠sum: { sum: "rollout_percentage" } - Sum numeric field
⢠avg: { avg: "days_since_creation" } - Average numeric field
⢠min: { min: "created_time" } - Minimum value
⢠max: { max: "updated_time" } - Maximum value
IMPORTANT: When using aggregate, you MUST include group_by with all non-aggregated fields from select.
Example: select: ["project_id"], aggregate: { count: "*" }, group_by: ["project_id"]`,
properties: {
count: {
type: "string",
description: "COUNT(field) or COUNT(*)",
},
sum: {
type: "string",
description: "SUM(numeric_field)",
},
avg: {
type: "string",
description: "AVG(numeric_field)",
},
min: { type: "string", description: "MIN(field)" },
max: { type: "string", description: "MAX(field)" },
},
},
limit: {
type: "integer",
description: "Result limit",
},
},
required: ["from"],
},
options: {
type: "object",
properties: {
simplified: {
type: "boolean",
description: "Reduce JSON complexity for agents",
},
pagination: {
type: "object",
description: "Pagination settings",
},
},
},
},
required: ["structured_query"],
examples: [
{
title: "Find all enabled flags in production",
value: {
structured_query: {
from: "flags",
where: {
project_id: "YOUR_PROJECT_ID",
environment_key: "production",
enabled: true,
},
select: ["flag_key", "flag_name", "rollout_percentage"],
order_by: { field: "flag_key", direction: "asc" },
},
options: { simplified: true },
},
description: "Basic query with filters. No aggregation, so no group_by needed.",
},
{
title: "Count pages by project",
value: {
structured_query: {
from: "pages_flat",
select: ["project_id", "project_name"],
group_by: ["project_id", "project_name"],
aggregate: { count: "*" },
order_by: { field: "count", direction: "desc" },
},
},
description: "IMPORTANT: When grouping by multiple fields, ALL must be in both select AND group_by",
},
{
title: "Count items with WHERE filter",
value: {
structured_query: {
from: "pages_flat",
select: ["project_id"],
where: {
project_id: ["12345", "67890"], // Filter by multiple project IDs
},
group_by: ["project_id"],
aggregate: { count: "*" },
order_by: { field: "count", direction: "desc" },
},
},
description: "Count with WHERE clause - great for analyzing specific projects",
},
{
title: "Count experiments by status",
value: {
structured_query: {
from: "experiments",
where: { project_id: "YOUR_PROJECT_ID" },
select: ["status"],
group_by: ["status"],
aggregate: { count: "*" },
order_by: { field: "count", direction: "desc" },
},
},
description: "Groups experiments by status and counts how many are in each status. Note how group_by matches select exactly.",
},
{
title: "Search flags containing 'checkout'",
value: {
structured_query: {
from: "flags",
where: {
project_id: "YOUR_PROJECT_ID",
flag_name: "contains:checkout",
},
},
},
},
{
title: "Flags with partial rollout",
value: {
structured_query: {
from: "flags",
where: {
project_id: "YOUR_PROJECT_ID",
rollout_percentage: { ">": 0, "<": 100 },
},
select: [
"flag_key",
"rollout_percentage",
"environment_key",
],
},
},
},
{
title: "Recent flag changes (last 7 days)",
value: {
structured_query: {
from: "flags",
where: {
project_id: "YOUR_PROJECT_ID",
days_since_creation: { "<": 7 },
},
order_by: { field: "created_time", direction: "desc" },
},
},
},
{
title: "Flag performance drill-down (STEP 3 after get_insights & get_analytics)",
value: {
structured_query: {
from: "flags_unified_view",
where: {
project_id: "YOUR_PROJECT_ID",
environment_key: "development",
},
select: [
"flag_key",
"enabled",
"rollout_percentage",
"days_since_creation",
],
order_by: {
field: "rollout_percentage",
direction: "desc",
},
},
},
description: "IMPORTANT: Run get_insights FIRST, then get_analytics SECOND, then use this query to investigate specific flags mentioned in those insights",
},
],
},
},
{
name: "list_projects",
documentation: "List all projects in your Optimizely account. Returns project metadata including ID, name, platform type, and counts of flags/experiments.",
description: `DISCOVER all available projects and their platform types
PROJECT TYPES:
• Feature Experimentation → is_flags_enabled: true (flags, A/B tests, rollouts)
• Web Experimentation → is_flags_enabled: false (page experiments, campaigns)
DECISION PATTERN:
• "I want to work with flags" → Look for is_flags_enabled: true
• "I want page experiments" → Look for is_flags_enabled: false
• "Show me all projects" → Display both types with counts
RETURNED DATA:
• Project ID, name, platform type
• Entity counts (flags, experiments, audiences)
• Account and status information
• Platform-specific capabilities
EXAMPLES:
• Feature project: {id: "123", name: "Mobile App", is_flags_enabled: true}
• Web project: {id: "456", name: "Homepage Tests", is_flags_enabled: false}`,
inputSchema: {
type: "object",
properties: {},
additionalProperties: false,
},
},
{
name: "get_entity_details",
documentation: "PREFERRED METHOD for checking entity existence or getting single entity details. Much faster than list_entities. Use this to check if a flag/experiment/etc exists by ID or key. Returns error if not found. See resource://entity-rules for guidelines.",
warnings: "For creating Flags with A/B tests or Experiments with dependencies, template mode will automatically orchestrate the entire process. | Do not enable/disable/archive/delete entities without explicit user approval | When a flag is created, default variations, on and off, are automatically generated.",
description: `GET single entity details by ID or key (fastest existence check)
LOOKUP METHODS:
• entity_id → Direct ID lookup (fastest)
• entity_key → Key-based search
• Returns error if not found (clear validation)
DECISION PATTERN:
• "Does entity X exist?" → Use this tool
• "Get details for one entity" → Use this tool
• "List multiple entities" → Use list_entities instead
• "Before updating entity" → Check existence first
ENTITY TYPES:
• flag, experiment, audience, page, event, attribute
• campaign, project, environment, webhook, group
• extension, feature, rule, ruleset, variable
PERFORMANCE: Much faster than list_entities for single lookups
OPTIONS:
• include_templates: Model-friendly format (default: true)
• template_complexity: Detail level 1-3
• enhanced_details: Special flag/experiment formatting
EXAMPLES:
• Check flag: {"entity_type": "flag", "entity_id": "my_flag_key", "project_id": "123"}
• Get experiment: {"entity_type": "experiment", "entity_id": "987654", "project_id": "123"}`,
inputSchema: {
type: "object",
properties: {
entity_type: {
type: "string",
description: "Type of entity to retrieve",
enum: [
"account",
"attribute",
"audience",
"campaign",
"collaborator",
"environment",
"event",
"experiment",
"extension",
"feature",
"flag",
"group",
"list_attribute",
"page",
"project",
"results",
"rule",
"ruleset",
"segment",
"variable",
"variable_definition",
"variation",
"webhook",
],
},
entity_id: {
type: "string",
description: "ID or key of the entity (project ID, flag key, experiment ID, etc.)",
},
project_id: {
type: "string",
description: "Project ID (required for most entities except projects)",
},
options: {
type: "object",
description: "Entity-specific options. Set include_templates=false to disable model-friendly templates. Set template_complexity (1-3) to control template detail level.",
properties: {
include_templates: {
type: "boolean",
description: "Include model-friendly templates (default: true)",
},
template_complexity: {
type: "number",
description: "Template complexity level: 1=simple, 2=medium, 3=comprehensive (default: 2)",
minimum: 1,
maximum: 3,
},
include_results: {
type: "boolean",
description: "Include experiment results if available",
},
enhanced_details: {
type: "boolean",
description: "For flags/experiments: Use specialized formatting like get_flag_details/get_experiment_details (default: false)",
},
},
additionalProperties: true,
},
},
required: ["entity_type", "entity_id"],
additionalProperties: false,
},
},
{
name: "get_optimization_analysis",
description: `UNIFIED project performance analysis
ANALYSIS TYPES:
• analytics → Health scores, metrics, performance data
• insights → AI recommendations, optimization opportunities
• comprehensive → Both analytics + insights combined
DECISION PATTERN:
• "How is my project performing?" → analytics
• "What should I optimize/improve?" → insights
• "Give me everything about this project" → comprehensive
INSIGHTS MODE OPTIONS:
• timeframe_days: Recent data focus (default: 30)
• entity_type: Specific entity analysis
• include_experiment_results: Live experiment data
⚠️ NOTE: This is PROJECT-LEVEL analysis, use analyze_data for entity queries
EXAMPLES:
• Health metrics: {"analysis_type": "analytics", "project_id": "12345"}
• Recent issues: {"analysis_type": "insights", "project_id": "12345", "timeframe_days": 7}
• Full assessment: {"analysis_type": "comprehensive", "project_id": "12345"}`,
documentation: 'UNIFIED PERFORMANCE ANALYSIS: This tool combines analytics data (health scores, metrics) with AI-powered insights for comprehensive project analysis. Use analysis_type to get specific data or "comprehensive" for everything.',
inputSchema: {
type: "object",
properties: {
analysis_type: {
type: "string",
enum: ["analytics", "insights", "comprehensive"],
description: "Type of analysis to perform",
},
project_id: {
type: "string",
description: "Project ID to analyze",
},
include_experiment_results: {
type: "boolean",
description: "Include live experiment results (for insights mode)",
},
timeframe_days: {
type: "number",
description: "Number of days to analyze (for insights mode, default: 30)",
},
entity_type: {
type: "string",
description: "Focus on specific entity type (for insights mode)",
},
},
required: ["analysis_type", "project_id"],
additionalProperties: false,
},
},
{
name: "get_recommendations",
description: `RULE-BASED optimization recommendations and opportunities
FOCUS AREAS:
• experiments → A/B test optimization suggestions
• flags → Feature flag best practices
• audiences → Targeting improvements
• performance → Speed and efficiency gains
• all → Comprehensive analysis across all areas
DECISION PATTERN:
• "How can I improve my experiments?" → experiments
• "Flag management advice?" → flags
• "Audience targeting tips?" → audiences
• "Overall project optimization?" → all
ANALYSIS FEATURES:
• Pattern detection in historical data
• Common optimization opportunities
• Best practice compliance checks
• Performance bottleneck identification
⚠️ NOTE: Uses rule-based analysis, not generative AI
EXAMPLES:
• Experiment focus: {"project_id": "12345", "focus_area": "experiments"}
• Full analysis: {"project_id": "12345", "focus_area": "all", "timeframe_days": 60}`,
inputSchema: {
type: "object",
properties: {
project_id: {
type: "string",
description: "Project ID to analyze",
},
focus_area: {
type: "string",
enum: [
"experiments",
"flags",
"audiences",
"performance",
"all",
],
description: "Area to focus recommendations on (default: all)",
},
include_historical: {
type: "boolean",
description: "Include historical analysis (default: true)",
},
timeframe_days: {
type: "number",
description: "Days to look back for historical analysis (default: 30)",
},
limit: {
type: "number",
description: "Maximum number of recommendations to return",
},
},
required: ["project_id"],
},
},
{
name: "compare_environments",
description: `COMPARE flag configurations across environments
COMPARISON SCOPES:
• Single flag → Specify flag_key for targeted comparison
• All flags → Omit flag_key for project-wide analysis
• Specific environments → List in environments array
• All environments → Omit environments for complete comparison
DECISION PATTERN:
• "How does flag X differ across environments?" → flag_key + environments
• "What's different between prod and staging?" → environments: ["prod", "staging"]
• "Show all configuration drift" → project_id only
COMPARISON DATA:
• Flag enable/disable states
• Variation traffic allocations
• Audience targeting differences
• Variable value mismatches
EXAMPLES:
• Single flag: {"project_id": "12345", "flag_key": "new-checkout"}
• Environment pair: {"project_id": "12345", "environments": ["production", "staging"]}
• Full audit: {"project_id": "12345"}`,
inputSchema: {
type: "object",
properties: {
project_id: {
type: "string",
description: "Project ID to compare",
},
flag_key: {
type: "string",
description: "Specific flag to compare (optional)",
},
environments: {
type: "array",
items: { type: "string" },
description: "List of environments to compare (optional)",
},
},
required: ["project_id"],
},
},
{
name: "manage_cache",
description: `UNIFIED cache management for all data operations
OPERATIONS:
• initialize → First-time setup (7-10 min, empty cache only)
• refresh → Update existing cache (fast with incremental option)
• clear → Remove all cached data (requires re-initialization)
DECISION PATTERN:
• "Cache is empty" → initialize
• "Data seems outdated" → refresh with incremental
• "Cache is corrupted" → clear then initialize
• "Views not working" → refresh with views_only
REFRESH OPTIONS:
• incremental: Only fetch changed data (FASTEST)
• force: Clear before refresh (slower but thorough)
• views_only: Fix SQL views without data sync
• wait_for_completion: Block until finished
⚠️ CRITICAL: Other tools return HARD_STOP_REQUIRED when cache empty
EXAMPLES:
• First setup: {"operation": "initialize"}
• Quick update: {"operation": "refresh", "options": {"incremental": true}}
• Full rebuild: {"operation": "clear"} → {"operation": "initialize"}`,
inputSchema: {
type: "object",
properties: {
operation: {
type: "string",
enum: ["initialize", "refresh", "clear"],
description: "Cache operation to perform",
},
project_id: {
type: "string",
description: "Specific project to refresh (only valid for refresh operation)",
},
options: {
type: