// @simonecoelhosfo/optimizely-mcp-server
// Version:
// Optimizely MCP Server for AI assistants with integrated CLI tools
// 475 lines • 24.7 kB
// JavaScript
/**
* Get Optimization Analysis Tool - Individual Module
* @description Provides detailed analysis of experiments and optimization results
* @since 2025-08-04
* @author Tool Modularization Team
*
* Migration Status: COMPLETED
* Original Method: OptimizelyMCPTools.getOptimizationAnalysis
* Complexity: MEDIUM
* Dependencies: storage.query, logger, errorMapper, apiClient, generateInsightRecommendations
*/
import { MCPErrorUtils } from '../../errors/MCPErrorMapping.js';
import { safeIdToString } from '../../utils/SafeIdConverter.js';
import { generateInsightRecommendations } from './GetRecommendations.js';
/**
* Generates actionable insights from project analytics data
*/
function generateInsights(analyticsData) {
const insights = [];
const overview = analyticsData.project_overview;
// Low Flag Utilization (< 50%)
if (overview.health_scores.flag_utilization_percentage < 50 && overview.total_active_flags > 0) {
insights.push({
type: 'optimization',
severity: 'high',
message: `Low flag utilization (${overview.health_scores.flag_utilization_percentage}%) - Many flags are disabled across environments. Consider removing unused flags or enabling flags in more environments.`,
affected_entities: overview.total_active_flags - overview.distinct_flags_enabled_somewhere,
category: 'flag_lifecycle'
});
}
// Low experiment activity (< 30%)
if (overview.health_scores.experiment_activity_percentage < 30 && overview.total_active_experiments > 0) {
insights.push({
type: 'experimentation',
severity: 'high',
message: `Low experiment activity (${overview.health_scores.experiment_activity_percentage}%) - Most experiments are not running. Consider activating paused experiments or creating new ones.`,
affected_entities: overview.paused_experiments,
category: 'experiment_velocity'
});
}
// Environment consistency check
const envUtilization = analyticsData.environment_breakdown.map(env => env.utilization_rate_percentage);
const envVariance = Math.max(...envUtilization) - Math.min(...envUtilization);
if (envVariance > 50 && analyticsData.environment_breakdown.length > 1) {
insights.push({
type: 'configuration',
severity: 'medium',
message: `High variance in environment utilization (${envVariance.toFixed(1)}% difference) - Some environments have significantly different flag configurations. Review for consistency.`,
affected_entities: analyticsData.environment_breakdown.length,
category: 'environment_consistency'
});
}
// Stale flags
const thirtyDaysAgo = new Date();
thirtyDaysAgo.setDate(thirtyDaysAgo.getDate() - 30);
const staleFlags = analyticsData.top_flags_by_usage.filter(flag => {
const lastUpdated = new Date(flag.last_updated);
return lastUpdated < thirtyDaysAgo && flag.enabled_in_environments_count === 0;
});
if (staleFlags.length > 0) {
insights.push({
type: 'optimization',
severity: 'medium',
message: `Found ${staleFlags.length} flags that haven't been updated in 30+ days and are disabled everywhere. Consider archiving these flags.`,
affected_entities: staleFlags.length,
category: 'flag_lifecycle'
});
}
return insights;
}
/**
* Helper function to get analytics data
*/
async function getAnalytics(deps, params) {
// DEPRECATED: This method is deprecated. Use getOptimizationAnalysis with analysis_type: 'analytics' instead.
deps.logger.warn('Tool get_analytics is deprecated. Use get_optimization_analysis with analysis_type: "analytics" instead.');
const { project_id } = params;
if (!project_id) {
throw MCPErrorUtils.invalidParameters('get_analytics', ['project_id'], Object.keys(params));
}
try {
// First verify the project exists
const projectExists = await deps.storage.get('SELECT id FROM projects WHERE id = ?', [safeIdToString(project_id)]);
if (!projectExists) {
throw MCPErrorUtils.projectNotFound(project_id);
}
const projectStatsResults = await deps.storage.query(`
SELECT
(SELECT COUNT(*) FROM flags WHERE project_id = ? AND archived = 0) as total_active_flags,
(SELECT COUNT(*) FROM flags WHERE project_id = ? AND archived = 1) as total_archived_flags,
(SELECT COUNT(DISTINCT fe.flag_key) FROM flag_environments fe WHERE fe.project_id = ? AND fe.enabled = 1) as distinct_flags_enabled_somewhere,
(SELECT COUNT(*) FROM flag_environments WHERE project_id = ? AND enabled = 1) as total_enabled_flag_env_instances,
(SELECT COUNT(*) FROM experiments WHERE project_id = ? AND archived = 0) as total_active_experiments,
(SELECT COUNT(*) FROM experiments WHERE project_id = ? AND status = 'running' AND archived = 0) as running_experiments,
(SELECT COUNT(*) FROM experiments WHERE project_id = ? AND status = 'paused' AND archived = 0) as paused_experiments,
(SELECT COUNT(*) FROM audiences WHERE project_id = ? AND archived = 0) as total_active_audiences,
(SELECT COUNT(*) FROM attributes WHERE project_id = ? AND archived = 0) as total_active_attributes,
(SELECT COUNT(*) FROM events WHERE project_id = ? AND archived = 0) as total_active_events,
(SELECT COUNT(DISTINCT key) FROM environments WHERE project_id = ?) as total_environments
`, Array(11).fill(safeIdToString(project_id)));
const projectStats = projectStatsResults[0] || {};
const envBreakdownResults = await deps.storage.query(`
SELECT
fe.environment_key,
COUNT(fe.flag_key) as total_flags_configured_in_env,
SUM(CASE WHEN fe.enabled = 1 THEN 1 ELSE 0 END) as enabled_flags_in_env
FROM flag_environments fe
WHERE fe.project_id = ?
GROUP BY fe.environment_key
ORDER BY fe.environment_key
`, [safeIdToString(project_id)]);
const topFlagsUsageResults = await deps.storage.query(`
SELECT
f.key as flag_key, f.name as flag_name, f.updated_time,
COUNT(DISTINCT e.id) as experiment_count,
(SELECT COUNT(*) FROM flag_environments fe_sub WHERE fe_sub.project_id = f.project_id AND fe_sub.flag_key = f.key AND fe_sub.enabled = 1) as enabled_environments_count
FROM flags f
LEFT JOIN experiments e ON f.project_id = e.project_id AND f.key = e.flag_key AND e.archived = 0
WHERE f.project_id = ? AND f.archived = 0
GROUP BY f.project_id, f.key, f.name, f.updated_time
ORDER BY experiment_count DESC, enabled_environments_count DESC, f.updated_time DESC
LIMIT 10
`, [safeIdToString(project_id)]);
const activityTrendResults = await deps.storage.query(`
SELECT
strftime('%Y-%m-%d', timestamp) as date,
COUNT(*) as changes_count,
COUNT(DISTINCT entity_type) as distinct_entity_types_changed
FROM change_history
WHERE project_id = ? AND timestamp >= strftime('%Y-%m-%dT%H:%M:%fZ', 'now', '-30 days')
GROUP BY date
ORDER BY date DESC
`, [safeIdToString(project_id)]);
const totalEnvs = projectStats.total_environments || envBreakdownResults.length || 1;
const flagHealthScore = projectStats.total_active_flags > 0 && totalEnvs > 0
? (projectStats.total_enabled_flag_env_instances / (projectStats.total_active_flags * totalEnvs)) * 100
: (projectStats.total_active_flags === 0 ? 100 : 0);
const experimentHealthScore = projectStats.total_active_experiments > 0
? (projectStats.running_experiments / projectStats.total_active_experiments) * 100
: 0;
const analyticsData = {
project_overview: {
total_active_flags: projectStats.total_active_flags || 0,
total_archived_flags: projectStats.total_archived_flags || 0,
distinct_flags_enabled_somewhere: projectStats.distinct_flags_enabled_somewhere || 0,
total_enabled_flag_env_instances: projectStats.total_enabled_flag_env_instances || 0,
total_active_experiments: projectStats.total_active_experiments || 0,
running_experiments: projectStats.running_experiments || 0,
paused_experiments: projectStats.paused_experiments || 0,
total_active_audiences: projectStats.total_active_audiences || 0,
total_active_attributes: projectStats.total_active_attributes || 0,
total_active_events: projectStats.total_active_events || 0,
total_environments: totalEnvs,
health_scores: {
flag_utilization_percentage: parseFloat(flagHealthScore.toFixed(1)),
experiment_activity_percentage: parseFloat(experimentHealthScore.toFixed(1))
}
},
environment_breakdown: envBreakdownResults.map((env) => ({
environment: env.environment_key,
total_flags_configured: env.total_flags_configured_in_env || 0,
enabled_flags: env.enabled_flags_in_env || 0,
disabled_flags: (env.total_flags_configured_in_env || 0) - (env.enabled_flags_in_env || 0),
utilization_rate_percentage: (env.total_flags_configured_in_env || 0) > 0
? parseFloat((((env.enabled_flags_in_env || 0) / (env.total_flags_configured_in_env || 0)) * 100).toFixed(1))
: 0
})),
top_flags_by_usage: topFlagsUsageResults.map((flag) => ({
key: flag.flag_key,
name: flag.flag_name,
last_updated: flag.updated_time,
associated_active_experiment_count: flag.experiment_count || 0,
enabled_in_environments_count: flag.enabled_environments_count || 0
})),
activity_trend_last_30d: activityTrendResults.map((day) => ({
date: day.date,
changes_count: day.changes_count || 0,
distinct_entity_types_changed: day.distinct_entity_types_changed || 0
})),
insights: [],
generated_at: new Date().toISOString()
};
analyticsData.insights = generateInsights(analyticsData);
return analyticsData;
}
catch (error) {
deps.logger.error({ projectId: project_id, error: error.message, stack: error.stack }, 'OptimizelyMCPTools.getAnalytics failed');
throw deps.errorMapper.toMCPError(error, 'Failed to get analytics');
}
}
/**
* Helper function to get insights data
*/
async function getInsights(deps, params) {
// DEPRECATED: This method is deprecated. Use getOptimizationAnalysis with analysis_type: 'insights' instead.
deps.logger.warn('Tool get_insights is deprecated. Use get_optimization_analysis with analysis_type: "insights" instead.');
const { project_id, include_experiment_results = true, timeframe_days = 30, entity_type } = params;
if (!project_id) {
throw MCPErrorUtils.invalidParameters('get_insights', ['project_id'], Object.keys(params));
}
try {
const insights = {
project_id,
generated_at: new Date().toISOString(),
timeframe_days,
summary: {},
experiments: {},
flags: {},
audiences: {},
recommendations: []
};
// Get project details
const projectResult = await deps.storage.get('SELECT * FROM projects WHERE id = ?', [project_id]);
if (!projectResult) {
throw MCPErrorUtils.projectNotFound(project_id);
}
insights.project = {
id: projectResult.id,
name: projectResult.name,
platform: projectResult.platform,
is_flags_enabled: projectResult.is_flags_enabled
};
const isFeatureExperimentation = projectResult.is_flags_enabled;
// Get experiment insights - handle platform differences
if (!entity_type || entity_type === 'experiment') {
if (isFeatureExperimentation) {
// Feature Experimentation: Count rulesets within flags as experiments
const flagRulesetStats = await deps.storage.query(`
SELECT
COUNT(CASE WHEN fe.data_json LIKE '%"type":"experiment"%' THEN 1 END) as ab_test_rulesets,
COUNT(CASE WHEN fe.data_json LIKE '%"type":"rollout"%' THEN 1 END) as rollout_rulesets,
COUNT(CASE WHEN fe.data_json LIKE '%"type":"multi_armed_bandit"%' THEN 1 END) as mab_rulesets,
COUNT(CASE WHEN fe.enabled = 1 THEN 1 END) as enabled_rulesets,
COUNT(*) as total_rulesets
FROM flag_environments fe
WHERE fe.project_id = ?
`, [project_id]);
insights.experiments = {
platform_type: 'feature_experimentation',
explanation: 'In Feature Experimentation, experiments exist as rulesets within flags',
...flagRulesetStats[0],
recent_flag_changes: []
};
// Get recent flag changes instead of experiments
const recentFlags = await deps.storage.query(`
SELECT key, name, description, updated_time
FROM flags
WHERE project_id = ? AND archived = 0
ORDER BY updated_time DESC
LIMIT 10
`, [project_id]);
insights.experiments.recent_flag_changes = recentFlags;
}
else {
// Web Experimentation: Count traditional experiments
const experimentStats = await deps.storage.query(`
SELECT
COUNT(CASE WHEN status = 'running' THEN 1 END) as running_count,
COUNT(CASE WHEN status = 'paused' THEN 1 END) as paused_count,
COUNT(CASE WHEN status = 'completed' THEN 1 END) as completed_count,
COUNT(CASE WHEN status = 'not_started' THEN 1 END) as not_started_count,
COUNT(*) as total_count
FROM experiments
WHERE project_id = ? AND archived = 0
`, [project_id]);
insights.experiments = {
platform_type: 'web_experimentation',
explanation: 'In Web Experimentation, experiments are standalone entities',
...experimentStats[0],
recent_experiments: []
};
// Get recent experiments
const recentExperiments = await deps.storage.query(`
SELECT id, name, status, flag_key, environment, created_time, updated_time
FROM experiments
WHERE project_id = ? AND archived = 0
ORDER BY updated_time DESC
LIMIT 10
`, [project_id]);
// Fetch results for running experiments if requested
if (include_experiment_results && deps.apiClient) {
for (const exp of recentExperiments) {
if (exp.status === 'running') {
try {
const results = await deps.apiClient.getExperimentResults(exp.id);
insights.experiments.recent_experiments.push({
...exp,
results: results
});
}
catch (error) {
deps.logger.warn('Failed to fetch experiment results', {
experimentId: exp.id,
error: error.message
});
insights.experiments.recent_experiments.push({
...exp,
results: { error: 'Failed to fetch results' }
});
}
}
else {
insights.experiments.recent_experiments.push(exp);
}
}
}
else {
insights.experiments.recent_experiments = recentExperiments;
}
}
}
// Get flag insights - only for Feature Experimentation
if (!entity_type || entity_type === 'flag') {
if (isFeatureExperimentation) {
const flagStats = await deps.storage.query(`
SELECT
COUNT(DISTINCT f.key) as total_flags,
COUNT(DISTINCT CASE WHEN fe.enabled = 1 THEN f.key END) as enabled_flags,
COUNT(DISTINCT fe.environment_key) as environments_used
FROM flags f
LEFT JOIN flag_environments fe ON f.project_id = fe.project_id AND f.key = fe.flag_key
WHERE f.project_id = ? AND f.archived = 0
`, [project_id]);
insights.flags = {
platform_type: 'feature_experimentation',
explanation: 'Feature flags are only available in Feature Experimentation projects',
...flagStats[0]
};
// Get flag adoption by environment
const envAdoption = await deps.storage.query(`
SELECT
environment_key,
COUNT(CASE WHEN enabled = 1 THEN 1 END) as enabled_count,
COUNT(*) as total_count
FROM flag_environments
WHERE project_id = ?
GROUP BY environment_key
`, [project_id]);
insights.flags.environment_adoption = envAdoption;
}
else {
insights.flags = {
platform_type: 'web_experimentation',
explanation: 'Web Experimentation projects do not have feature flags',
total_flags: 0,
enabled_flags: 0,
environments_used: 0,
environment_adoption: []
};
}
}
// Get audience insights
if (!entity_type || entity_type === 'audience') {
const audienceStats = await deps.storage.query(`
SELECT
COUNT(*) as total_audiences,
COUNT(CASE WHEN archived = 0 THEN 1 END) as active_audiences
FROM audiences
WHERE project_id = ?
`, [project_id]);
insights.audiences = audienceStats[0];
}
// Get recent changes
const recentChanges = await deps.storage.query(`
SELECT
entity_type,
action,
COUNT(*) as change_count
FROM change_history
WHERE project_id = ?
AND datetime(timestamp) > datetime('now', '-' || ? || ' days')
GROUP BY entity_type, action
ORDER BY change_count DESC
`, [project_id, timeframe_days]);
insights.recent_activity = recentChanges;
// Generate recommendations based on insights
insights.recommendations = generateInsightRecommendations(insights);
return insights;
}
catch (error) {
deps.logger.error({
projectId: project_id,
error: error.message,
stack: error.stack
}, 'OptimizelyMCPTools.getInsights: Failed to generate insights');
throw deps.errorMapper.toMCPError(error, 'Failed to generate insights');
}
}
/**
* Creates the Get Optimization Analysis tool with injected dependencies
* @param deps - Injected dependencies (storage, logger, errorMapper, etc.)
* @returns Tool definition with handler
*/
export function createGetOptimizationAnalysisTool(deps) {
return {
name: 'get_optimization_analysis',
requiresCache: true,
category: 'analytics',
description: 'Provides detailed analysis of experiments and optimization results',
handler: async (params) => {
const { analysis_type, project_id } = params;
if (!project_id) {
throw new Error('project_id is required');
}
switch (analysis_type) {
case 'analytics':
// Return just the analytics data (health scores, metrics)
return getAnalytics(deps, { project_id });
case 'insights':
// Return just the AI-powered insights
return getInsights(deps, {
project_id,
include_experiment_results: params.include_experiment_results,
timeframe_days: params.timeframe_days,
entity_type: params.entity_type
});
case 'comprehensive':
// Return both analytics and insights combined
const [analytics, insights] = await Promise.all([
getAnalytics(deps, { project_id }),
getInsights(deps, {
project_id,
include_experiment_results: params.include_experiment_results ?? true,
timeframe_days: params.timeframe_days ?? 30,
entity_type: params.entity_type
})
]);
// Extract the health scores from project_overview
const healthScores = analytics.project_overview?.health_scores || {
flag_utilization_percentage: 0,
experiment_activity_percentage: 0
};
return {
project_id,
timestamp: new Date().toISOString(),
analytics: {
health_scores: healthScores,
entity_counts: {
flags: analytics.project_overview.total_active_flags,
experiments: analytics.project_overview.total_active_experiments,
audiences: analytics.project_overview.total_active_audiences,
events: analytics.project_overview.total_active_events,
attributes: analytics.project_overview.total_active_attributes,
environments: analytics.project_overview.total_environments
},
environment_summary: analytics.environment_breakdown,
quality_metrics: {
flag_utilization_percentage: healthScores.flag_utilization_percentage,
experiment_activity_percentage: healthScores.experiment_activity_percentage,
active_vs_total_flags: `${analytics.project_overview.distinct_flags_enabled_somewhere}/${analytics.project_overview.total_active_flags}`,
running_vs_total_experiments: `${analytics.project_overview.running_experiments}/${analytics.project_overview.total_active_experiments}`
}
},
insights: insights,
recommendations: {
from_analytics: analytics.insights,
from_insights: insights.recommendations || [],
combined_score: Math.round((healthScores.flag_utilization_percentage + healthScores.experiment_activity_percentage) / 2)
},
summary: {
total_health_score: Math.round((healthScores.flag_utilization_percentage + healthScores.experiment_activity_percentage) / 2),
optimization_opportunities: insights.optimization_opportunities || [],
key_findings: [
...analytics.insights.map((i) => i.message),
...(insights.key_findings || [])
]
}
};
default:
throw new Error(`Invalid analysis_type: ${analysis_type}. Must be "analytics", "insights", or "comprehensive"`);
}
}
};
}
//# sourceMappingURL=GetOptimizationAnalysis.js.map