@intellectronica/ruler
Version:
Ruler — apply the same rules to all coding agents
683 lines (682 loc) • 30.3 kB
JavaScript
;
// TypeScript-generated helper: re-exports property `k` of module `m` on
// object `o` under the name `k2` (defaults to `k`). When Object.create is
// available it installs a live getter so the binding tracks later mutations
// of m[k]; otherwise it falls back to a one-time plain copy.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    // Re-wrap as a getter unless the source descriptor is already a
    // non-writable, non-configurable accessor coming from a real ES module.
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
// TypeScript-generated helper: attaches `v` as the `default` property of a
// synthesized namespace object `o` (plain assignment when Object.create is
// unavailable).
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
// TypeScript-generated helper: emulates `import * as ns from 'mod'` for
// CommonJS modules. A genuine ES module (mod.__esModule) is returned as-is;
// anything else is wrapped in a fresh namespace object whose own properties
// (except "default") are re-bound via __createBinding, with the original
// module attached as the `default` export.
var __importStar = (this && this.__importStar) || (function () {
    // Lazily resolves to Object.getOwnPropertyNames (or a manual
    // hasOwnProperty scan as fallback) and caches the choice on first call.
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.loadNestedConfigurations = loadNestedConfigurations;
exports.loadSingleConfiguration = loadSingleConfiguration;
exports.processHierarchicalConfigurations = processHierarchicalConfigurations;
exports.processSingleConfiguration = processSingleConfiguration;
exports.applyConfigurationsToAgents = applyConfigurationsToAgents;
exports.updateGitignore = updateGitignore;
const path = __importStar(require("path"));
const fs_1 = require("fs");
const toml_1 = require("@iarna/toml");
const FileSystemUtils = __importStar(require("./FileSystemUtils"));
const RuleProcessor_1 = require("./RuleProcessor");
const ConfigLoader_1 = require("./ConfigLoader");
const GitignoreUtils_1 = require("./GitignoreUtils");
const merge_1 = require("../mcp/merge");
const mcp_1 = require("../paths/mcp");
const propagateOpenHandsMcp_1 = require("../mcp/propagateOpenHandsMcp");
const propagateOpenCodeMcp_1 = require("../mcp/propagateOpenCodeMcp");
const agent_utils_1 = require("../agents/agent-utils");
const capabilities_1 = require("../mcp/capabilities");
const constants_1 = require("../constants");
/**
 * Loads a hierarchical configuration entry for every discovered .ruler
 * directory (including the global one unless localOnly is set).
 *
 * @param projectRoot Root directory to search from
 * @param configPath Config path supplied on the command line (may be undefined)
 * @param localOnly When true, the global (home) .ruler directory is excluded
 * @param resolvedNested Whether nested mode was resolved as enabled globally
 * @returns Promise resolving to one hierarchical configuration per directory
 */
async function loadNestedConfigurations(projectRoot, configPath, localOnly, resolvedNested) {
    const discovered = await findRulerDirectories(projectRoot, localOnly, true);
    const dirEntries = await processIndependentRulerDirs(discovered.dirs);
    const configurations = [];
    // Sequential on purpose: per-directory config loading may emit warnings,
    // and processing in order keeps the log output deterministic.
    for (const entry of dirEntries) {
        const dirConfig = await loadConfigForRulerDir(entry.rulerDir, configPath, resolvedNested);
        const hierarchical = await createHierarchicalConfiguration(entry.rulerDir, entry.files, dirConfig, configPath);
        configurations.push(hierarchical);
    }
    return configurations;
}
/**
 * Processes each .ruler directory independently, returning the markdown rule
 * files found in each. Each .ruler directory keeps its own rules (they are
 * not merged with other directories').
 *
 * @param rulerDirs Absolute paths of the .ruler directories to scan
 * @returns Promise resolving to one { rulerDir, files } entry per directory,
 *          in the same order as the input
 */
async function processIndependentRulerDirs(rulerDirs) {
    // The directory reads are independent of each other, so run them
    // concurrently instead of awaiting each one sequentially; map()
    // preserves the input order in the resolved array.
    return Promise.all(rulerDirs.map(async (rulerDir) => {
        const files = await FileSystemUtils.readMarkdownFiles(rulerDir);
        return { rulerDir, files };
    }));
}
/**
 * Builds the per-directory hierarchical configuration for a single .ruler dir:
 * concatenated rules, the loaded config, and a legacy-shaped MCP JSON.
 *
 * Config-path resolution: a `<rulerDir>/ruler.toml` that exists on disk wins
 * over the CLI-provided path; otherwise the CLI path (or loadUnifiedConfig's
 * default resolution) is used.
 *
 * @param rulerDir Absolute path of the .ruler directory
 * @param files Markdown rule files read from rulerDir
 * @param config Loaded configuration for this directory
 * @param cliConfigPath Config path supplied on the command line (may be undefined)
 * @returns Object with rulerDir, config, concatenatedRules, and rulerMcpJson
 *          ({ mcpServers } when the unified config has servers, else null)
 */
async function createHierarchicalConfiguration(rulerDir, files, config, cliConfigPath) {
    // Emit a deprecation warning if a legacy .ruler/mcp.json is present.
    await warnAboutLegacyMcpJson(rulerDir);
    const concatenatedRules = (0, RuleProcessor_1.concatenateRules)(files, path.dirname(rulerDir));
    const directoryRoot = path.dirname(rulerDir);
    const localConfigPath = path.join(rulerDir, 'ruler.toml');
    let configPathToUse = cliConfigPath;
    try {
        // Prefer a ruler.toml that lives next to the rules in this directory.
        await fs_1.promises.access(localConfigPath);
        configPathToUse = localConfigPath;
    }
    catch {
        // fall back to CLI config or default resolution
    }
    // Dynamic import (TypeScript-emitted `import()`); presumably avoids a
    // load-time cycle with UnifiedConfigLoader — confirm before changing.
    const { loadUnifiedConfig } = await Promise.resolve().then(() => __importStar(require('./UnifiedConfigLoader')));
    const unifiedConfig = await loadUnifiedConfig({
        projectRoot: directoryRoot,
        configPath: configPathToUse,
    });
    // Synthesize the legacy mcp.json shape ({ mcpServers }) from the unified
    // MCP bundle so downstream agent code keeps working unchanged.
    let rulerMcpJson = null;
    if (unifiedConfig.mcp && Object.keys(unifiedConfig.mcp.servers).length > 0) {
        rulerMcpJson = {
            mcpServers: unifiedConfig.mcp.servers,
        };
    }
    return {
        rulerDir,
        config,
        concatenatedRules,
        rulerMcpJson,
    };
}
/**
 * Loads and clones the ruler configuration for one .ruler directory,
 * preferring a directory-local ruler.toml over the CLI-provided path.
 *
 * @param rulerDir Absolute path of the .ruler directory
 * @param cliConfigPath Config path supplied on the command line (may be undefined)
 * @param resolvedNested Whether nested mode was resolved as enabled globally
 * @returns A defensive clone of the loaded configuration; when resolvedNested
 *          is set, nested/nestedDefined are forced on in the clone
 */
async function loadConfigForRulerDir(rulerDir, cliConfigPath, resolvedNested) {
    const localConfigPath = path.join(rulerDir, 'ruler.toml');
    const hasLocalConfig = await fs_1.promises
        .access(localConfigPath)
        .then(() => true)
        .catch(() => false);
    const loaded = await (0, ConfigLoader_1.loadConfig)({
        projectRoot: path.dirname(rulerDir),
        configPath: hasLocalConfig ? localConfigPath : cliConfigPath,
    });
    const cloned = cloneLoadedConfig(loaded);
    if (!resolvedNested) {
        return cloned;
    }
    // Nested processing was already resolved globally; a local
    // `nested = false` cannot opt out, so warn and force the flag on.
    if (hasLocalConfig && loaded.nestedDefined && loaded.nested === false) {
        (0, constants_1.logWarn)(`Nested mode is enabled but ${localConfigPath} sets nested = false. Continuing with nested processing.`);
    }
    cloned.nested = true;
    cloned.nestedDefined = true;
    return cloned;
}
/**
 * Produces a defensive copy of a loaded configuration so later mutation
 * (e.g. forcing nested mode) cannot leak back into shared state. Arrays and
 * the nested mcp/gitignore objects are copied one level deep; scalar flags
 * are carried over as-is.
 *
 * @param config Loaded configuration to clone
 * @returns A new configuration object with no shared mutable sub-objects
 */
function cloneLoadedConfig(config) {
    const copyIfSet = (value) => (value ? { ...value } : undefined);
    const agentConfigs = {};
    for (const agent of Object.keys(config.agentConfigs)) {
        const agentConfig = config.agentConfigs[agent];
        agentConfigs[agent] = { ...agentConfig, mcp: copyIfSet(agentConfig.mcp) };
    }
    return {
        defaultAgents: config.defaultAgents ? [...config.defaultAgents] : undefined,
        agentConfigs,
        cliAgents: config.cliAgents ? [...config.cliAgents] : undefined,
        mcp: copyIfSet(config.mcp),
        gitignore: copyIfSet(config.gitignore),
        nested: config.nested,
        nestedDefined: config.nestedDefined,
    };
}
/**
 * Locates the .ruler directories to process.
 *
 * @param projectRoot Directory to start searching from
 * @param localOnly When true, the global (home-directory) config is excluded
 * @param hierarchical When true, collect every .ruler dir under projectRoot;
 *                     otherwise find the single nearest one
 * @returns { dirs, primaryDir } where primaryDir is the first entry
 * @throws RulerError when no .ruler directory can be found
 */
async function findRulerDirectories(projectRoot, localOnly, hierarchical) {
    if (!hierarchical) {
        const dir = await FileSystemUtils.findRulerDir(projectRoot, !localOnly);
        if (!dir) {
            throw (0, constants_1.createRulerError)(`.ruler directory not found`, `Searched from: ${projectRoot}`);
        }
        return { dirs: [dir], primaryDir: dir };
    }
    const allDirs = [...(await FileSystemUtils.findAllRulerDirs(projectRoot))];
    // Append the global (home-directory) config unless restricted to local.
    if (!localOnly) {
        const globalDir = await FileSystemUtils.findGlobalRulerDir();
        if (globalDir) {
            allDirs.push(globalDir);
        }
    }
    if (allDirs.length === 0) {
        throw (0, constants_1.createRulerError)(`.ruler directory not found`, `Searched from: ${projectRoot}`);
    }
    return { dirs: allDirs, primaryDir: allDirs[0] };
}
/**
 * Emits a deprecation warning when a legacy .ruler/mcp.json file exists.
 * A missing file (or any access error) is silently ignored.
 *
 * @param rulerDir Absolute path of the .ruler directory to check
 */
async function warnAboutLegacyMcpJson(rulerDir) {
    try {
        await fs_1.promises.access(path.join(rulerDir, 'mcp.json'));
    }
    catch {
        // No legacy file (or it is unreadable) - nothing to warn about.
        return;
    }
    (0, constants_1.logWarn)('Warning: Using legacy .ruler/mcp.json. Please migrate to ruler.toml. This fallback will be removed in a future release.');
}
/**
 * Loads configuration for single-directory mode (the non-nested behavior):
 * finds the nearest .ruler directory, loads its config, concatenates its
 * markdown rules, and synthesizes a legacy-shaped MCP JSON from the unified
 * config bundle.
 *
 * @param projectRoot Root directory of the project
 * @param configPath Config path supplied on the command line (may be undefined)
 * @param localOnly When true, the global .ruler directory is not considered
 * @returns { config, concatenatedRules, rulerMcpJson }
 */
async function loadSingleConfiguration(projectRoot, configPath, localOnly) {
    const { primaryDir } = await findRulerDirectories(projectRoot, localOnly, false);
    await warnAboutLegacyMcpJson(primaryDir);
    const config = await (0, ConfigLoader_1.loadConfig)({ projectRoot, configPath });
    const ruleFiles = await FileSystemUtils.readMarkdownFiles(primaryDir);
    const concatenatedRules = (0, RuleProcessor_1.concatenateRules)(ruleFiles, path.dirname(primaryDir));
    // Dynamic import keeps UnifiedConfigLoader out of the static require graph.
    const { loadUnifiedConfig } = await Promise.resolve().then(() => __importStar(require('./UnifiedConfigLoader')));
    const unifiedConfig = await loadUnifiedConfig({ projectRoot, configPath });
    // Synthesize the legacy mcp.json shape for backward compatibility.
    let rulerMcpJson = null;
    if (unifiedConfig.mcp && Object.keys(unifiedConfig.mcp.servers).length > 0) {
        rulerMcpJson = { mcpServers: unifiedConfig.mcp.servers };
    }
    return { config, concatenatedRules, rulerMcpJson };
}
/**
 * Processes hierarchical configurations by applying rules for each .ruler
 * directory independently. Each directory gets its own rules and generates
 * its own agent files rooted at the directory containing the .ruler folder.
 *
 * @param agents Array of agents to process
 * @param configurations Hierarchical configurations, one per .ruler directory
 * @param verbose Whether to enable verbose logging
 * @param dryRun Whether to perform a dry run
 * @param cliMcpEnabled Whether MCP is enabled via CLI
 * @param cliMcpStrategy MCP strategy from CLI
 * @param backup Whether .bak backups are created
 * @returns Promise resolving to all generated file paths, made absolute
 *          against each configuration's root when necessary
 */
async function processHierarchicalConfigurations(agents, configurations, verbose, dryRun, cliMcpEnabled, cliMcpStrategy, backup = true) {
    const allGeneratedPaths = [];
    for (const configuration of configurations) {
        (0, constants_1.logVerboseInfo)(`Processing .ruler directory: ${configuration.rulerDir}`, verbose, dryRun);
        const rulerRoot = path.dirname(configuration.rulerDir);
        const generated = await applyConfigurationsToAgents(agents, configuration.concatenatedRules, configuration.rulerMcpJson, configuration.config, rulerRoot, verbose, dryRun, cliMcpEnabled, cliMcpStrategy, backup);
        // Relative paths are anchored at this configuration's root directory.
        for (const generatedPath of generated) {
            allGeneratedPaths.push(path.isAbsolute(generatedPath) ? generatedPath : path.join(rulerRoot, generatedPath));
        }
    }
    return allGeneratedPaths;
}
/**
 * Processes a single configuration by applying the concatenated rules to all
 * selected agents, generating agent files under the project root.
 *
 * @param agents Array of agents to process
 * @param configuration Single ruler configuration with concatenated rules
 * @param projectRoot Root directory of the project
 * @param verbose Whether to enable verbose logging
 * @param dryRun Whether to perform a dry run
 * @param cliMcpEnabled Whether MCP is enabled via CLI
 * @param cliMcpStrategy MCP strategy from CLI
 * @param backup Whether .bak backups are created
 * @returns Promise resolving to array of generated file paths
 */
async function processSingleConfiguration(agents, configuration, projectRoot, verbose, dryRun, cliMcpEnabled, cliMcpStrategy, backup = true) {
    const { concatenatedRules, rulerMcpJson, config } = configuration;
    return applyConfigurationsToAgents(agents, concatenatedRules, rulerMcpJson, config, projectRoot, verbose, dryRun, cliMcpEnabled, cliMcpStrategy, backup);
}
/**
 * Applies concatenated rules and MCP configuration to each selected agent
 * (internal workhorse shared by single and hierarchical processing).
 *
 * @param agents Array of agents to process
 * @param concatenatedRules Concatenated rule content
 * @param rulerMcpJson MCP configuration JSON ({ mcpServers } or null)
 * @param config Loaded configuration
 * @param projectRoot Root directory the agent files are generated under
 * @param verbose Whether to enable verbose logging
 * @param dryRun Whether to perform a dry run (log instead of write)
 * @param cliMcpEnabled Whether MCP is enabled via CLI (default true)
 * @param cliMcpStrategy MCP strategy from CLI (overrides config strategy)
 * @param backup Whether .bak backups are created (also controls whether
 *               .bak entries are added to the gitignore list)
 * @returns Promise resolving to the collected output (and backup) paths
 */
async function applyConfigurationsToAgents(agents, concatenatedRules, rulerMcpJson, config, projectRoot, verbose, dryRun, cliMcpEnabled = true, cliMcpStrategy, backup = true) {
    const generatedPaths = [];
    // 'jules' and 'agentsmd' both target AGENTS.md; track whether it has
    // already been written so the second of the two does not rewrite it.
    let agentsMdWritten = false;
    for (const agent of agents) {
        (0, constants_1.logInfo)(`Applying rules for ${agent.getName()}...`, dryRun);
        (0, constants_1.logVerbose)(`Processing agent: ${agent.getName()}`, verbose);
        const agentConfig = config.agentConfigs[agent.getIdentifier()];
        const agentRulerMcpJson = rulerMcpJson;
        // Collect output paths for .gitignore
        const outputPaths = (0, agent_utils_1.getAgentOutputPaths)(agent, projectRoot, agentConfig);
        (0, constants_1.logVerbose)(`Agent ${agent.getName()} output paths: ${outputPaths.join(', ')}`, verbose);
        generatedPaths.push(...outputPaths);
        // Only add the backup file paths to the gitignore list if backups are enabled
        if (backup) {
            const backupPaths = outputPaths.map((p) => `${p}.bak`);
            generatedPaths.push(...backupPaths);
        }
        if (dryRun) {
            (0, constants_1.logVerbose)(`DRY RUN: Would write rules to: ${outputPaths.join(', ')}`, verbose);
        }
        else {
            let skipApplyForThisAgent = false;
            if (agent.getIdentifier() === 'jules' ||
                agent.getIdentifier() === 'agentsmd') {
                if (agentsMdWritten) {
                    // Skip rewriting AGENTS.md, but still allow MCP handling below
                    skipApplyForThisAgent = true;
                }
                else {
                    agentsMdWritten = true;
                }
            }
            let finalAgentConfig = agentConfig;
            // augmentcode gets the resolved MCP strategy injected into its
            // agent config (CLI > per-agent TOML > global TOML > 'merge').
            // NOTE(review): this assumes its applyRulerConfig reads
            // mcp.strategy — confirm against the agent implementation.
            if (agent.getIdentifier() === 'augmentcode' && agentRulerMcpJson) {
                const resolvedStrategy = cliMcpStrategy ??
                    agentConfig?.mcp?.strategy ??
                    config.mcp?.strategy ??
                    'merge';
                finalAgentConfig = {
                    ...agentConfig,
                    mcp: {
                        ...agentConfig?.mcp,
                        strategy: resolvedStrategy,
                    },
                };
            }
            if (!skipApplyForThisAgent) {
                await agent.applyRulerConfig(concatenatedRules, projectRoot, agentRulerMcpJson, finalAgentConfig, backup);
            }
        }
        // Handle MCP configuration (runs even on dry runs and for skipped
        // AGENTS.md writers; it appends to generatedPaths as a side effect).
        await handleMcpConfiguration(agent, agentConfig, config, agentRulerMcpJson, projectRoot, generatedPaths, verbose, dryRun, cliMcpEnabled, cliMcpStrategy, backup);
    }
    return generatedPaths;
}
/**
 * Applies MCP server configuration for a single agent when the agent
 * supports MCP, a native destination path exists, and MCP is enabled
 * (CLI flag AND per-agent/global config, defaulting to enabled).
 *
 * Side effects: appends the destination (and optional .bak) to
 * generatedPaths and delegates the actual write to applyMcpConfiguration.
 */
async function handleMcpConfiguration(agent, agentConfig, config, rulerMcpJson, projectRoot, generatedPaths, verbose, dryRun, cliMcpEnabled = true, cliMcpStrategy, backup = true) {
    if (!(0, capabilities_1.agentSupportsMcp)(agent)) {
        (0, constants_1.logVerbose)(`Agent ${agent.getName()} does not support MCP - skipping MCP configuration`, verbose);
        return;
    }
    const dest = await (0, mcp_1.getNativeMcpPath)(agent.getName(), projectRoot);
    // CLI must allow MCP, and the agent-level setting wins over the global
    // one; both default to enabled when unset.
    const mcpEnabledForAgent = cliMcpEnabled && (agentConfig?.mcp?.enabled ?? config.mcp?.enabled ?? true);
    if (!dest || !mcpEnabledForAgent) {
        return;
    }
    // Presumably filters the server set down to what this agent can use —
    // see the capabilities module for the exact filtering rules.
    const filteredMcpJson = rulerMcpJson
        ? (0, capabilities_1.filterMcpConfigForAgent)(rulerMcpJson, agent)
        : null;
    if (!filteredMcpJson) {
        (0, constants_1.logVerbose)(`No compatible MCP servers found for ${agent.getName()} - skipping MCP configuration`, verbose);
        return;
    }
    await updateGitignoreForMcpFile(dest, projectRoot, generatedPaths, backup);
    await applyMcpConfiguration(agent, filteredMcpJson, dest, agentConfig, config, projectRoot, cliMcpStrategy, dryRun, verbose, backup);
}
/**
 * Records an MCP destination file (and optionally its .bak backup) in the
 * list of generated paths so it can be added to .gitignore, but only when
 * the destination actually lives inside the project root.
 *
 * @param dest Absolute path of the native MCP config file
 * @param projectRoot Root directory of the project
 * @param generatedPaths Accumulator of project-relative paths (mutated)
 * @param backup Whether a .bak backup will be written alongside dest
 */
async function updateGitignoreForMcpFile(dest, projectRoot, generatedPaths, backup = true) {
    // Use path.relative for the containment check: a raw startsWith prefix
    // test would wrongly accept sibling directories that merely share a
    // textual prefix (e.g. /proj/app-extra vs /proj/app).
    const relativeDest = path.relative(projectRoot, dest);
    const isInsideProject = relativeDest !== '' &&
        !relativeDest.startsWith('..') &&
        !path.isAbsolute(relativeDest);
    if (isInsideProject) {
        generatedPaths.push(relativeDest);
        if (backup) {
            generatedPaths.push(`${relativeDest}.bak`);
        }
    }
}
/**
 * Removes per-server `timeout` entries from an MCP config for agents that do
 * not support MCP timeouts, warning once listing every affected server.
 * Returns the input object unchanged when the agent supports timeouts or
 * when there is no mcpServers object to sanitize.
 *
 * @param agent Agent being configured (optional supportsMcpTimeout() probe)
 * @param mcpJson MCP configuration to sanitize
 * @param dryRun Forwarded to the warning logger
 * @returns The original object, or a copy with timeouts stripped
 */
function sanitizeMcpTimeoutsForAgent(agent, mcpJson, dryRun) {
    if (agent.supportsMcpTimeout?.()) {
        return mcpJson;
    }
    const servers = mcpJson.mcpServers;
    if (!servers || typeof servers !== 'object') {
        return mcpJson;
    }
    const sanitizedServers = {};
    const strippedTimeouts = [];
    for (const [name, serverDef] of Object.entries(servers)) {
        if (!serverDef || typeof serverDef !== 'object') {
            sanitizedServers[name] = serverDef;
            continue;
        }
        const cleaned = { ...serverDef };
        if (Object.hasOwn(cleaned, 'timeout')) {
            delete cleaned.timeout;
            strippedTimeouts.push(name);
        }
        sanitizedServers[name] = cleaned;
    }
    if (strippedTimeouts.length > 0) {
        (0, constants_1.logWarn)(`${agent.getName()} does not support MCP server timeout configuration; ignoring timeout for: ${strippedTimeouts.join(', ')}`, dryRun);
    }
    return { ...mcpJson, mcpServers: sanitizedServers };
}
/**
 * Routes the (already filtered) MCP configuration to the right writer for
 * the agent: OpenHands TOML, OpenCode config, nothing for agents that manage
 * MCP internally, or the standard JSON merge path.
 *
 * @param agent Agent being configured
 * @param filteredMcpJson MCP config already filtered for this agent
 * @param dest Native MCP config destination path for the agent
 * @param agentConfig Per-agent section of the loaded configuration
 * @param config Full loaded configuration
 * @param projectRoot Root directory of the project
 * @param cliMcpStrategy MCP strategy from CLI
 * @param dryRun Whether to perform a dry run
 * @param verbose Whether to enable verbose logging
 * @param backup Whether to back up dest before overwriting it
 */
async function applyMcpConfiguration(agent, filteredMcpJson, dest, agentConfig, config, projectRoot, cliMcpStrategy, dryRun, verbose, backup = true) {
    // Prevent writing MCP configs outside the project root (e.g., legacy
    // home-directory targets). path.relative avoids the false positives a
    // plain prefix comparison gives for sibling directories that share a
    // textual prefix ("/proj/app" vs "/proj/app-extra").
    const relativeDest = path.relative(projectRoot, dest);
    if (relativeDest.startsWith('..') || path.isAbsolute(relativeDest)) {
        (0, constants_1.logVerbose)(`Skipping MCP config for ${agent.getName()} because target path is outside project: ${dest}`, verbose);
        return;
    }
    const agentMcpJson = sanitizeMcpTimeoutsForAgent(agent, filteredMcpJson, dryRun);
    const identifier = agent.getIdentifier();
    if (identifier === 'openhands') {
        return await applyOpenHandsMcpConfiguration(agentMcpJson, dest, dryRun, verbose, backup);
    }
    if (identifier === 'opencode') {
        return await applyOpenCodeMcpConfiguration(agentMcpJson, dest, dryRun, verbose, backup);
    }
    // Agents that handle MCP configuration internally (during
    // applyRulerConfig) should not have external MCP handling.
    const internallyHandled = ['zed', 'gemini-cli', 'amazon-q-cli', 'crush'];
    if (internallyHandled.includes(identifier)) {
        (0, constants_1.logVerbose)(`Skipping external MCP config for ${agent.getName()} - handled internally by agent`, verbose);
        return;
    }
    return await applyStandardMcpConfiguration(agent, agentMcpJson, dest, agentConfig, config, cliMcpStrategy, dryRun, verbose, backup);
}
/**
 * Writes the MCP servers into the OpenHands TOML config file, or logs the
 * intended update when dry-running.
 */
async function applyOpenHandsMcpConfiguration(filteredMcpJson, dest, dryRun, verbose, backup = true) {
    if (!dryRun) {
        await (0, propagateOpenHandsMcp_1.propagateMcpToOpenHands)(filteredMcpJson, dest, backup);
        return;
    }
    (0, constants_1.logVerbose)(`DRY RUN: Would apply MCP config by updating TOML file: ${dest}`, verbose);
}
/**
 * Writes the MCP servers into the OpenCode config file, or logs the intended
 * update when dry-running.
 */
async function applyOpenCodeMcpConfiguration(filteredMcpJson, dest, dryRun, verbose, backup = true) {
    if (!dryRun) {
        await (0, propagateOpenCodeMcp_1.propagateMcpToOpenCode)(filteredMcpJson, dest, backup);
        return;
    }
    (0, constants_1.logVerbose)(`DRY RUN: Would apply MCP config by updating OpenCode config file: ${dest}`, verbose);
}
/**
 * Transform MCP server types for Claude Code compatibility.
 * Claude expects "http" for HTTP servers and "sse" for SSE servers, not
 * "remote"; the SSE/HTTP choice is inferred from the URL path.
 *
 * @param mcpJson MCP configuration (returned untouched when it has no
 *                mcpServers object)
 * @returns A new config with server types rewritten; input is not mutated
 */
function transformMcpForClaude(mcpJson) {
    const servers = mcpJson.mcpServers;
    if (!servers || typeof servers !== 'object') {
        return mcpJson;
    }
    const rewritten = Object.fromEntries(Object.entries(servers).map(([name, serverDef]) => {
        if (!serverDef || typeof serverDef !== 'object') {
            return [name, serverDef];
        }
        const server = { ...serverDef };
        if (server.type === 'remote' && typeof server.url === 'string' && server.url !== '') {
            // A "/sse" path segment marks the endpoint as server-sent events.
            server.type = /\/sse(\/|$)/i.test(server.url) ? 'sse' : 'http';
        }
        return [name, server];
    }));
    return { ...mcpJson, mcpServers: rewritten };
}
/**
 * Transform MCP server types for Kilo Code compatibility.
 * Kilo Code expects "streamable-http" for remote HTTP servers, not "remote".
 *
 * @param mcpJson MCP configuration (returned untouched when it has no
 *                mcpServers object)
 * @returns A new config with server types rewritten; input is not mutated
 */
function transformMcpForKiloCode(mcpJson) {
    if (!mcpJson.mcpServers || typeof mcpJson.mcpServers !== 'object') {
        return mcpJson;
    }
    const rewrittenServers = {};
    for (const [name, serverDef] of Object.entries(mcpJson.mcpServers)) {
        if (!serverDef || typeof serverDef !== 'object') {
            rewrittenServers[name] = serverDef;
            continue;
        }
        const copy = { ...serverDef };
        // Remote servers with a URL are rewritten to "streamable-http".
        if (copy.type === 'remote' && typeof copy.url === 'string' && copy.url !== '') {
            copy.type = 'streamable-http';
        }
        rewrittenServers[name] = copy;
    }
    return { ...mcpJson, mcpServers: rewrittenServers };
}
/**
 * Transform MCP server types for Factory Droid compatibility.
 * Factory Droid expects "http" for remote HTTP servers, not "remote".
 *
 * @param mcpJson MCP configuration (returned untouched when it has no
 *                mcpServers object)
 * @returns A new config with server types rewritten; input is not mutated
 */
function transformMcpForFactoryDroid(mcpJson) {
    if (!mcpJson.mcpServers || typeof mcpJson.mcpServers !== 'object') {
        return mcpJson;
    }
    const rewrittenServers = {};
    for (const [name, serverDef] of Object.entries(mcpJson.mcpServers)) {
        if (!serverDef || typeof serverDef !== 'object') {
            rewrittenServers[name] = serverDef;
            continue;
        }
        const copy = { ...serverDef };
        // Remote servers with a URL are rewritten to plain "http".
        if (copy.type === 'remote' && typeof copy.url === 'string' && copy.url !== '') {
            copy.type = 'http';
        }
        rewrittenServers[name] = copy;
    }
    return { ...mcpJson, mcpServers: rewrittenServers };
}
/**
 * Applies MCP configuration via the standard merge-and-write path, including
 * the Codex config.toml special case and per-agent sanitization.
 *
 * Strategy precedence: CLI > per-agent TOML > global TOML > 'merge'.
 * The write is idempotent: nothing is backed up or written when the merged
 * content equals what is already on disk.
 *
 * @param agent Agent being configured
 * @param filteredMcpJson MCP config already filtered/sanitized for the agent
 * @param dest Native MCP config path for the agent
 * @param agentConfig Per-agent section of the loaded configuration
 * @param config Full loaded configuration
 * @param cliMcpStrategy Strategy override from the CLI
 * @param dryRun When true, only log what would be written
 * @param verbose Verbose-logging flag
 * @param backup Whether to back up dest before overwriting it
 */
async function applyStandardMcpConfiguration(agent, filteredMcpJson, dest, agentConfig, config, cliMcpStrategy, dryRun, verbose, backup = true) {
    const strategy = cliMcpStrategy ??
        agentConfig?.mcp?.strategy ??
        config.mcp?.strategy ??
        'merge';
    const serverKey = agent.getMcpServerKey?.() ?? 'mcpServers';
    // Skip agents with empty server keys (e.g., AgentsMdAgent, GooseAgent)
    if (serverKey === '') {
        (0, constants_1.logVerbose)(`Skipping MCP config for ${agent.getName()} - agent has empty server key`, verbose);
        return;
    }
    (0, constants_1.logVerbose)(`Applying filtered MCP config for ${agent.getName()} with strategy: ${strategy} and key: ${serverKey}`, verbose);
    if (dryRun) {
        (0, constants_1.logVerbose)(`DRY RUN: Would apply MCP config to: ${dest}`, verbose);
    }
    else {
        // Transform MCP config for agent-specific compatibility
        let mcpToMerge = filteredMcpJson;
        if (agent.getIdentifier() === 'claude') {
            mcpToMerge = transformMcpForClaude(filteredMcpJson);
        }
        else if (agent.getIdentifier() === 'kilocode') {
            mcpToMerge = transformMcpForKiloCode(filteredMcpJson);
        }
        else if (agent.getIdentifier() === 'factory') {
            mcpToMerge = transformMcpForFactoryDroid(filteredMcpJson);
        }
        const CODEX_AGENT_ID = 'codex';
        const isCodexToml = agent.getIdentifier() === CODEX_AGENT_ID && dest.endsWith('.toml');
        let existing = await (0, mcp_1.readNativeMcp)(dest);
        if (isCodexToml) {
            // Codex stores MCP config inside a TOML file, so re-read the
            // destination as TOML instead of relying on the JSON reader.
            try {
                const tomlContent = await fs_1.promises.readFile(dest, 'utf8');
                existing = (0, toml_1.parse)(tomlContent);
            }
            catch (error) {
                (0, constants_1.logVerbose)(`Failed to read Codex MCP TOML at ${dest}: ${error.message}`, verbose);
                // ignore missing or invalid TOML, fall back to previously read value
            }
        }
        let merged = (0, merge_1.mergeMcp)(existing, mcpToMerge, strategy, serverKey);
        if (isCodexToml) {
            // Rename the server section: lift it off serverKey, keep the rest.
            const { [serverKey]: servers, ...rest } = merged;
            merged = {
                ...rest,
                // Codex CLI expects MCP servers under mcp_servers in config.toml.
                mcp_servers: servers ?? {},
            };
        }
        // Firebase Studio (IDX) expects no "type" fields in .idx/mcp.json server entries.
        // Sanitize merged config by stripping 'type' from each server when targeting Firebase.
        const sanitizeForFirebase = (obj) => {
            if (agent.getIdentifier() !== 'firebase')
                return obj;
            const out = { ...obj };
            const servers = out[serverKey] || {};
            const cleanedServers = {};
            for (const [name, def] of Object.entries(servers)) {
                if (def && typeof def === 'object') {
                    const copy = { ...def };
                    delete copy.type;
                    cleanedServers[name] = copy;
                }
                else {
                    cleanedServers[name] = def;
                }
            }
            out[serverKey] = cleanedServers;
            return out;
        };
        // Gemini CLI (since v0.21.0) no longer accepts the "type" field in MCP server entries.
        // Following the MCP spec update from Nov 25, 2025, the transport type is now inferred
        // from the presence of specific keys (command/args -> stdio, url -> sse/http).
        // Sanitize merged config by stripping 'type' from each server when targeting Gemini.
        const sanitizeForGemini = (obj) => {
            if (agent.getIdentifier() !== 'gemini-cli')
                return obj;
            const out = { ...obj };
            const servers = out[serverKey] || {};
            const cleanedServers = {};
            for (const [name, def] of Object.entries(servers)) {
                if (def && typeof def === 'object') {
                    const copy = { ...def };
                    delete copy.type;
                    cleanedServers[name] = copy;
                }
                else {
                    cleanedServers[name] = def;
                }
            }
            out[serverKey] = cleanedServers;
            return out;
        };
        let toWrite = sanitizeForFirebase(merged);
        toWrite = sanitizeForGemini(toWrite);
        // Only backup and write if content would actually change (idempotent)
        const currentContent = isCodexToml
            ? (0, toml_1.stringify)(existing)
            : JSON.stringify(existing, null, 2);
        const newContent = isCodexToml
            ? (0, toml_1.stringify)(toWrite)
            : JSON.stringify(toWrite, null, 2);
        if (currentContent !== newContent) {
            if (backup) {
                // NOTE(review): '../core/FileSystemUtils' presumably resolves to
                // the same module as the top-level './FileSystemUtils' import —
                // confirm; if so, the dynamic import could be dropped.
                const { backupFile } = await Promise.resolve().then(() => __importStar(require('../core/FileSystemUtils')));
                await backupFile(dest);
            }
            if (isCodexToml) {
                await FileSystemUtils.writeGeneratedFile(dest, (0, toml_1.stringify)(toWrite));
            }
            else {
                await (0, mcp_1.writeNativeMcp)(dest, toWrite);
            }
        }
        else {
            (0, constants_1.logVerbose)(`MCP config for ${agent.getName()} is already up to date - skipping backup and write`, verbose);
        }
    }
}
/**
 * Updates the .gitignore (or .git/info/exclude) file with generated paths.
 *
 * Enablement precedence: CLI flag > TOML `gitignore.enabled` > default (on).
 * Target precedence: CLI local flag > TOML `gitignore.local`.
 *
 * @param projectRoot Root directory of the project
 * @param generatedPaths Array of generated file paths
 * @param config Loaded configuration
 * @param cliGitignoreEnabled CLI gitignore setting
 * @param dryRun Whether to perform a dry run
 * @param cliGitignoreLocal CLI toggle for .git/info/exclude usage
 */
async function updateGitignore(projectRoot, generatedPaths, config, cliGitignoreEnabled, dryRun, cliGitignoreLocal) {
    let gitignoreEnabled = true; // default: enabled
    if (cliGitignoreEnabled !== undefined) {
        gitignoreEnabled = cliGitignoreEnabled; // CLI wins
    }
    else if (config.gitignore?.enabled !== undefined) {
        gitignoreEnabled = config.gitignore.enabled; // then TOML
    }
    const useLocalExclude = cliGitignoreLocal !== undefined
        ? cliGitignoreLocal
        : config.gitignore?.local;
    const gitignoreTarget = useLocalExclude ? '.git/info/exclude' : '.gitignore';
    if (!gitignoreEnabled || generatedPaths.length === 0) {
        return;
    }
    // Note: Individual backup patterns are added per-file in the collection
    // phase, so no broad *.bak pattern is needed here.
    const uniquePaths = [...new Set(generatedPaths)];
    if (uniquePaths.length === 0) {
        return;
    }
    if (dryRun) {
        (0, constants_1.logInfo)(`Would update ${gitignoreTarget} with ${uniquePaths.length} unique path(s): ${uniquePaths.join(', ')}`, dryRun);
        return;
    }
    await (0, GitignoreUtils_1.updateGitignore)(projectRoot, uniquePaths, gitignoreTarget);
    (0, constants_1.logInfo)(`Updated ${gitignoreTarget} with ${uniquePaths.length} unique path(s) in the Ruler block.`, dryRun);
}