/*
 * @simonecoelhosfo/optimizely-mcp-server
 * Version:
 * Optimizely MCP Server for AI assistants with integrated CLI tools
 * 126 lines • 7.42 kB
 * JavaScript
 */
/**
* List Projects Tool - Individual Module
* @description Lists all projects with metadata and platform information
* @since 2025-08-04
* @author Tool Modularization Team
*
* Migration Status: COMPLETED
* Original Method: OptimizelyMCPTools.listProjects
* Complexity: LOW
* Dependencies: storage.query, logger, errorMapper, cacheManager
*/
/**
* Creates the List Projects tool with injected dependencies
* @param deps - Injected dependencies (storage, logger, errorMapper, etc.)
* @returns Tool definition with handler
*/
export function createListProjectsTool(deps) {
    return {
        name: 'list_projects',
        requiresCache: true,
        category: 'discovery',
        description: 'Lists all projects with their metadata and statistics',
        /**
         * Lists every project in local storage with per-project flag,
         * experiment, and environment counts, plus aggregate summary stats
         * broken down by platform (Feature vs Web Experimentation).
         * @param {object} args - Tool arguments (unused; listing takes no filters)
         * @returns {Promise<object>} Success payload: `projects`, `summary`,
         *   `metadata`, and `_platform_education` guidance for follow-up queries
         * @throws MCP error (via deps.errorMapper) when the cache manager is
         *   missing or the storage query fails
         */
        handler: async (args) => {
            deps.logger.info('OptimizelyMCPTools.listProjects: Getting project overview');
            try {
                if (!deps.cacheManager) {
                    throw deps.errorMapper.toMCPError(new Error("Cache manager not initialized"), { operation: 'List projects overview' });
                }
                // Get ALL projects with counts using SQL aggregation
                // CRITICAL: Query all projects first, then apply filter in memory (like EntityRouter does)
                // This ensures newly created projects added via addAllowedProjectId() are included
                const allProjects = await deps.storage.query(`
                SELECT
                  p.id, p.name, p.platform, p.is_flags_enabled, p.account_id, p.last_modified,
                  (SELECT COUNT(*) FROM flags WHERE project_id = p.id) as flag_count,
                  -- Platform-aware experiment counting
                  CASE
                    WHEN p.platform = 'custom' OR p.is_flags_enabled = 1 THEN
                      -- Feature Experimentation: Count A/B test rulesets in flag_environments
                      (SELECT COUNT(*) FROM flag_environments WHERE project_id = p.id AND (data_json LIKE '%"type":"a/b_test"%' OR data_json LIKE '%"type":"experiment"%'))
                    ELSE
                      -- Web Experimentation: Count standalone experiments
                      (SELECT COUNT(*) FROM experiments WHERE project_id = p.id)
                  END as experiment_count,
                  (SELECT COUNT(*) FROM environments WHERE project_id = p.id) as environment_count
                FROM projects p
                ORDER BY p.name
                `, []);
                // DO NOT apply project filter here - we want to show ALL projects in the database
                // The filter should only be applied during sync operations, not when querying existing data
                const projectSummaries = allProjects.map((p) => {
                    // 'custom' platform and the is_flags_enabled bit both mark
                    // Feature Experimentation projects (SQLite stores the flag as 0/1).
                    const isFeatureExperimentation = p.platform === 'custom' || Boolean(p.is_flags_enabled);
                    return {
                        id: p.id,
                        name: p.name,
                        platform: p.platform || 'unknown',
                        is_flags_enabled: Boolean(p.is_flags_enabled),
                        account_id: p.account_id,
                        // ?? instead of ||: COUNT(*) yields numbers, and a real 0 must survive
                        flag_count: p.flag_count ?? 0,
                        experiment_count: p.experiment_count ?? 0,
                        environment_count: p.environment_count ?? 0,
                        last_modified: p.last_modified,
                        platform_type: isFeatureExperimentation ? 'Feature Experimentation' : 'Web Experimentation',
                        experiment_explanation: isFeatureExperimentation
                            ? 'A/B tests are RULESETS with type="a/b_test" within flags. Query with: analyze_data from flags_unified_view WHERE rule_type="a/b" OR list_entities with entity_type="ruleset"'
                            : 'Experiments are standalone entities. Query with: analyze_data from experiments_unified_view WHERE platform="web" OR list_entities with entity_type="experiment"'
                    };
                });
                // Calculate summary statistics
                const featureExperimentationProjects = projectSummaries.filter(p => p.platform_type === 'Feature Experimentation');
                const webExperimentationProjects = projectSummaries.filter(p => p.platform_type === 'Web Experimentation');
                const summary = {
                    flags_enabled_projects: projectSummaries.filter(p => p.is_flags_enabled).length,
                    total_flags: projectSummaries.reduce((sum, p) => sum + p.flag_count, 0),
                    total_experiments: projectSummaries.reduce((sum, p) => sum + p.experiment_count, 0),
                    // Histogram of raw platform values ('custom', 'web', 'unknown', ...)
                    platforms: projectSummaries.reduce((acc, p) => {
                        acc[p.platform] = (acc[p.platform] || 0) + 1;
                        return acc;
                    }, {}),
                    platform_breakdown: {
                        feature_experimentation: {
                            count: featureExperimentationProjects.length,
                            experiment_note: "Experiment counts represent A/B test rulesets (type='a/b_test') within flags",
                            total_experiments: featureExperimentationProjects.reduce((sum, p) => sum + p.experiment_count, 0)
                        },
                        web_experimentation: {
                            count: webExperimentationProjects.length,
                            experiment_note: "Experiment counts represent standalone experiment entities",
                            total_experiments: webExperimentationProjects.reduce((sum, p) => sum + p.experiment_count, 0)
                        }
                    }
                };
                return {
                    result: "success",
                    metadata: {
                        operation: "list_projects",
                        total_count: projectSummaries.length,
                        operation_successful: true,
                        timestamp: new Date().toISOString()
                    },
                    total_projects: projectSummaries.length,
                    projects: projectSummaries,
                    summary,
                    _platform_education: {
                        key_understanding: "Experiment counts are platform-specific",
                        feature_experimentation: "A/B tests are rulesets within flags, not standalone experiments",
                        web_experimentation: "Experiments are standalone entities with their own variations",
                        how_to_find_experiments: {
                            feature_experimentation: "Use analyze_data with flags_unified_view WHERE rule_type='a/b' OR use list_entities with entity_type='ruleset' and filter type='a/b_test'",
                            web_experimentation: "Use analyze_data with experiments_unified_view WHERE platform='web' OR use list_entities with entity_type='experiment'"
                        }
                    }
                };
            }
            catch (error) {
                deps.logger.error({
                    error: error.message,
                    stack: error.stack
                }, 'OptimizelyMCPTools.listProjects: Failed to get project overview');
                // FIX: pass a { operation } context object, matching the shape used by
                // the cache-manager guard above — previously a bare string was passed,
                // leaving any mapper reading context.operation with undefined.
                throw deps.errorMapper.toMCPError(error, { operation: 'Failed to get project overview' });
            }
        }
    };
}
//# sourceMappingURL=ListProjects.js.map