UNPKG

@elsikora/commitizen-plugin-commitlint-ai

Version:
374 lines (370 loc) 23.1 kB
'use strict';
var numeric_constant = require('../../domain/constant/numeric.constant.js');
var llmConfiguration_entity = require('../../domain/entity/llm-configuration.entity.js');
var anthropicModel_enum = require('../../domain/enum/anthropic-model.enum.js');
var awsBedrockModel_enum = require('../../domain/enum/aws-bedrock-model.enum.js');
var azureOpenaiModel_enum = require('../../domain/enum/azure-openai-model.enum.js');
var commitMode_enum = require('../../domain/enum/commit-mode.enum.js');
var googleModel_enum = require('../../domain/enum/google-model.enum.js');
var llmProvider_enum = require('../../domain/enum/llm-provider.enum.js');
var ollamaModel_enum = require('../../domain/enum/ollama-model.enum.js');
var openaiModel_enum = require('../../domain/enum/openai-model.enum.js');
var apiKey_valueObject = require('../../domain/value-object/api-key.value-object.js');

/**
 * Environment variable holding the API key for each supported provider.
 * Previously this map was declared verbatim in three separate methods;
 * it is now defined once. Keys match the string values of ELLMProvider.
 */
const ENVIRONMENT_VARIABLE_NAMES = Object.freeze({
    anthropic: "ANTHROPIC_API_KEY",
    "aws-bedrock": "AWS_BEDROCK_API_KEY",
    "azure-openai": "AZURE_OPENAI_API_KEY",
    google: "GOOGLE_API_KEY",
    ollama: "OLLAMA_API_KEY",
    openai: "OPENAI_API_KEY",
});

/**
 * Deprecated model identifiers mapped to their current replacements.
 * Used by getCurrentConfiguration() to transparently migrate stored configs.
 */
const MODEL_MIGRATIONS = Object.freeze({
    // Anthropic migrations
    "claude-2.0": anthropicModel_enum.EAnthropicModel.CLAUDE_3_5_SONNET,
    "claude-2.1": anthropicModel_enum.EAnthropicModel.CLAUDE_3_5_SONNET,
    "claude-3-5-haiku-20241022": anthropicModel_enum.EAnthropicModel.CLAUDE_3_5_HAIKU,
    "claude-3-5-sonnet-20241022": anthropicModel_enum.EAnthropicModel.CLAUDE_3_5_SONNET,
    "claude-3-haiku-20240307": anthropicModel_enum.EAnthropicModel.CLAUDE_3_5_HAIKU,
    "claude-3-sonnet-20240229": anthropicModel_enum.EAnthropicModel.CLAUDE_3_5_SONNET,
    // OpenAI migrations (upgrade old GPT-4 references)
    "gpt-3.5-turbo": openaiModel_enum.EOpenAIModel.GPT_35_TURBO,
    "gpt-4": openaiModel_enum.EOpenAIModel.GPT_4,
    "gpt-4-0125-preview": openaiModel_enum.EOpenAIModel.GPT_4_TURBO,
    "gpt-4-0613": openaiModel_enum.EOpenAIModel.GPT_4,
    "gpt-4-1106-preview": openaiModel_enum.EOpenAIModel.GPT_4_TURBO,
    "gpt-4-32k": openaiModel_enum.EOpenAIModel.GPT_4_32K,
    "gpt-4-32k-0613": openaiModel_enum.EOpenAIModel.GPT_4_32K,
    "gpt-4o": openaiModel_enum.EOpenAIModel.GPT_4O_MAY,
    "gpt-4o-2024-05-13": openaiModel_enum.EOpenAIModel.GPT_4O_MAY,
    "gpt-4o-2024-08-06": openaiModel_enum.EOpenAIModel.GPT_4O_AUGUST,
    "gpt-4o-mini": openaiModel_enum.EOpenAIModel.GPT_4O_MINI,
});

/**
 * Validator shared by both retry prompts (previously duplicated closures).
 * Accepts integers between MIN_RETRY_COUNT and MAX_RETRY_COUNT.
 * @param {string} value - Raw user input
 * @returns {string | undefined} Error message, or undefined when the input is valid
 */
function validateRetryCount(value) {
    const parsedNumber = Number.parseInt(value, 10);
    if (Number.isNaN(parsedNumber) || parsedNumber < numeric_constant.MIN_RETRY_COUNT || parsedNumber > numeric_constant.MAX_RETRY_COUNT) {
        return "Please enter a number between 1 and 10";
    }
    return undefined;
}

/**
 * Return the human-readable API-key format hint appended to the prompt
 * message for providers whose credential is not a plain key string.
 * @param {ELLMProvider} provider - The selected LLM provider
 * @returns {string} Format hint (empty for plain-key providers)
 * @throws {Error} If the provider is not one of the supported values
 */
function getKeyFormatInfo(provider) {
    switch (provider) {
        case llmProvider_enum.ELLMProvider.ANTHROPIC: {
            // Standard API key format - no special format info needed
            return "";
        }
        case llmProvider_enum.ELLMProvider.AWS_BEDROCK: {
            return " (format: region|access-key-id|secret-access-key)";
        }
        case llmProvider_enum.ELLMProvider.AZURE_OPENAI: {
            return " (format: endpoint|api-key|deployment-name)";
        }
        case llmProvider_enum.ELLMProvider.GOOGLE: {
            // Standard API key format - no special format info needed
            return "";
        }
        case llmProvider_enum.ELLMProvider.OLLAMA: {
            return " (format: host:port or host:port|custom-model)";
        }
        case llmProvider_enum.ELLMProvider.OPENAI: {
            // Standard API key format - no special format info needed
            return "";
        }
        default: {
            // This ensures exhaustiveness - TypeScript will error if a case is missing
            const exhaustiveCheck = provider;
            throw new Error(`Unsupported provider: ${String(exhaustiveCheck)}`);
        }
    }
}

/**
 * Use case for configuring LLM settings
 */
class ConfigureLLMUseCase {
    CLI_INTERFACE;
    CONFIG_SERVICE;

    /**
     * @param {object} configService - Persistence service with get/set/setProperty
     * @param {object} cliInterface - Interactive CLI with select/text/confirm/info/success/warn
     */
    constructor(configService, cliInterface) {
        this.CONFIG_SERVICE = configService;
        this.CLI_INTERFACE = cliInterface;
    }

    /**
     * Configure LLM settings interactively
     * @returns {Promise<LLMConfiguration>} Promise resolving to the new configuration
     */
    async configureInteractively() {
        // First, select mode
        const mode = await this.CLI_INTERFACE.select("Select commit mode:", [
            { label: "Auto (AI-powered)", value: commitMode_enum.ECommitMode.AUTO },
            { label: "Manual", value: commitMode_enum.ECommitMode.MANUAL },
        ], commitMode_enum.ECommitMode.AUTO);
        // If manual mode, create a minimal configuration with placeholder values
        if (mode === commitMode_enum.ECommitMode.MANUAL) {
            const configuration = new llmConfiguration_entity.LLMConfiguration(
                "openai", // Default provider (won't be used)
                new apiKey_valueObject.ApiKey("manual-mode"), // Dummy API key
                mode,
                undefined, // No model needed for manual mode
                numeric_constant.DEFAULT_MAX_RETRIES,
                numeric_constant.DEFAULT_VALIDATION_MAX_RETRIES);
            await this.saveConfiguration(configuration);
            return configuration;
        }
        // Auto mode - ask for LLM details
        this.CLI_INTERFACE.info("Setting up AI-powered commit mode...");
        const provider = await this.CLI_INTERFACE.select("Select your LLM provider:", [
            { label: "OpenAI (GPT-4, GPT-3.5)", value: llmProvider_enum.ELLMProvider.OPENAI },
            { label: "Anthropic (Claude)", value: llmProvider_enum.ELLMProvider.ANTHROPIC },
            { label: "Google (Gemini)", value: llmProvider_enum.ELLMProvider.GOOGLE },
            { label: "Azure OpenAI", value: llmProvider_enum.ELLMProvider.AZURE_OPENAI },
            { label: "AWS Bedrock", value: llmProvider_enum.ELLMProvider.AWS_BEDROCK },
            { label: "Ollama (Local)", value: llmProvider_enum.ELLMProvider.OLLAMA },
        ]);
        const model = await this.selectModelForProvider(provider);
        const credentialValue = this.resolveCredential(provider);
        const { maxRetries, validationMaxRetries } = await this.promptRetrySettings();
        // Create configuration - will save without API key
        const configuration = new llmConfiguration_entity.LLMConfiguration(provider, new apiKey_valueObject.ApiKey(credentialValue), mode, model, maxRetries, validationMaxRetries);
        // Save configuration (without API key)
        await this.saveConfiguration(configuration);
        this.CLI_INTERFACE.success("Configuration saved successfully!");
        // If we have an environment API key, return config with it
        // Otherwise, return config with dummy key (will prompt later)
        return configuration;
    }

    /**
     * Prompt the user to pick a model for the chosen provider.
     * @param {ELLMProvider} provider - The selected LLM provider
     * @returns {Promise<string>} Promise resolving to the chosen model identifier
     * @throws {Error} If the provider is not one of the supported values
     */
    async selectModelForProvider(provider) {
        switch (provider) {
            case llmProvider_enum.ELLMProvider.ANTHROPIC: {
                return this.CLI_INTERFACE.select("Select Anthropic model:", [
                    { label: "Claude Opus 4 (Latest 2025, most capable)", value: anthropicModel_enum.EAnthropicModel.CLAUDE_OPUS_4 },
                    { label: "Claude Sonnet 4 (Latest 2025, high-performance)", value: anthropicModel_enum.EAnthropicModel.CLAUDE_SONNET_4 },
                    { label: "Claude 3.7 Sonnet (Extended thinking)", value: anthropicModel_enum.EAnthropicModel.CLAUDE_3_7_SONNET },
                    { label: "Claude 3.5 Sonnet (Previous flagship)", value: anthropicModel_enum.EAnthropicModel.CLAUDE_3_5_SONNET },
                    { label: "Claude 3.5 Haiku (Fastest)", value: anthropicModel_enum.EAnthropicModel.CLAUDE_3_5_HAIKU },
                    { label: "Claude 3 Opus (Complex tasks)", value: anthropicModel_enum.EAnthropicModel.CLAUDE_3_OPUS },
                ], anthropicModel_enum.EAnthropicModel.CLAUDE_SONNET_4);
            }
            case llmProvider_enum.ELLMProvider.AWS_BEDROCK: {
                return this.CLI_INTERFACE.select("Select AWS Bedrock model:", [
                    { label: "Claude Opus 4 (Latest 2025, most capable)", value: awsBedrockModel_enum.EAWSBedrockModel.CLAUDE_OPUS_4 },
                    { label: "Claude Sonnet 4 (Latest 2025, balanced performance)", value: awsBedrockModel_enum.EAWSBedrockModel.CLAUDE_SONNET_4 },
                    { label: "Claude 3.5 Sonnet v2 (Previous flagship)", value: awsBedrockModel_enum.EAWSBedrockModel.CLAUDE_3_5_SONNET_V2 },
                    { label: "Claude 3.5 Haiku (Fast)", value: awsBedrockModel_enum.EAWSBedrockModel.CLAUDE_3_5_HAIKU },
                    { label: "Claude 3.5 Sonnet", value: awsBedrockModel_enum.EAWSBedrockModel.CLAUDE_3_5_SONNET },
                    { label: "Amazon Nova Pro (Latest Amazon model)", value: awsBedrockModel_enum.EAWSBedrockModel.NOVA_PRO },
                    { label: "DeepSeek R1 (Advanced reasoning)", value: awsBedrockModel_enum.EAWSBedrockModel.DEEPSEEK_R1 },
                    { label: "Llama 3.2 90B (Open source)", value: awsBedrockModel_enum.EAWSBedrockModel.LLAMA_3_2_90B },
                    { label: "Mistral Large (Latest)", value: awsBedrockModel_enum.EAWSBedrockModel.MISTRAL_LARGE_2_24_11 },
                ], awsBedrockModel_enum.EAWSBedrockModel.CLAUDE_SONNET_4);
            }
            case llmProvider_enum.ELLMProvider.AZURE_OPENAI: {
                return this.CLI_INTERFACE.select("Select Azure OpenAI model:", [
                    { label: "GPT-4.1 Turbo (Latest 2025, most capable)", value: azureOpenaiModel_enum.EAzureOpenAIModel.GPT_4_1_TURBO_2024_12_17 },
                    { label: "GPT-4.1 Preview (Latest preview)", value: azureOpenaiModel_enum.EAzureOpenAIModel.GPT_4_1_PREVIEW_2024_12_17 },
                    { label: "GPT-4.1 Mini (Fast 4.1 model)", value: azureOpenaiModel_enum.EAzureOpenAIModel.GPT_4_1_MINI_2024_12_17 },
                    { label: "GPT-4o 2024-11 (Enhanced creative)", value: azureOpenaiModel_enum.EAzureOpenAIModel.GPT_4O_2024_11_20 },
                    { label: "GPT-4o Mini", value: azureOpenaiModel_enum.EAzureOpenAIModel.GPT_4O_MINI_2024_07_18 },
                    { label: "GPT-4 Turbo", value: azureOpenaiModel_enum.EAzureOpenAIModel.GPT_4_TURBO },
                    { label: "GPT-3.5 Turbo", value: azureOpenaiModel_enum.EAzureOpenAIModel.GPT_35_TURBO },
                    { label: "O3 (Enhanced reasoning)", value: azureOpenaiModel_enum.EAzureOpenAIModel.O3_2024_12_17 },
                    { label: "O4 Mini (Fast reasoning)", value: azureOpenaiModel_enum.EAzureOpenAIModel.O4_MINI_2024_12_17 },
                ], azureOpenaiModel_enum.EAzureOpenAIModel.GPT_4_1_TURBO_2024_12_17);
            }
            case llmProvider_enum.ELLMProvider.GOOGLE: {
                return this.CLI_INTERFACE.select("Select Google model:", [
                    { label: "Gemini 2.5 Pro (Latest 2025, most capable)", value: googleModel_enum.EGoogleModel.GEMINI_2_5_PRO },
                    { label: "Gemini 2.5 Flash (Latest 2025, fast)", value: googleModel_enum.EGoogleModel.GEMINI_2_5_FLASH },
                    { label: "Gemini 2.0 Flash (Experimental)", value: googleModel_enum.EGoogleModel.GEMINI_2_0_FLASH_EXP },
                    { label: "Gemini 1.5 Pro (Stable, capable)", value: googleModel_enum.EGoogleModel.GEMINI_1_5_PRO },
                    { label: "Gemini 1.5 Flash (Fast, stable)", value: googleModel_enum.EGoogleModel.GEMINI_1_5_FLASH },
                    { label: "Gemini 1.5 Flash 8B (Lightweight)", value: googleModel_enum.EGoogleModel.GEMINI_1_5_FLASH_8B },
                    { label: "Gemini 1.0 Pro", value: googleModel_enum.EGoogleModel.GEMINI_1_0_PRO },
                    { label: "Gemma 3 27B (Most capable open model)", value: googleModel_enum.EGoogleModel.GEMMA_3_27B },
                    { label: "Gemma 3 12B (Strong language model)", value: googleModel_enum.EGoogleModel.GEMMA_3_12B },
                    { label: "Gemma 3 4B (Balanced, multimodal)", value: googleModel_enum.EGoogleModel.GEMMA_3_4B },
                    { label: "Gemma 3 1B (Lightweight)", value: googleModel_enum.EGoogleModel.GEMMA_3_1B },
                ], googleModel_enum.EGoogleModel.GEMINI_2_5_FLASH);
            }
            case llmProvider_enum.ELLMProvider.OLLAMA: {
                return this.CLI_INTERFACE.select("Select Ollama model:", [
                    { label: "Llama 3.2 (Latest)", value: ollamaModel_enum.EOllamaModel.LLAMA3_2 },
                    { label: "Llama 3.1", value: ollamaModel_enum.EOllamaModel.LLAMA3_1 },
                    { label: "Llama 3", value: ollamaModel_enum.EOllamaModel.LLAMA3 },
                    { label: "Mistral", value: ollamaModel_enum.EOllamaModel.MISTRAL },
                    { label: "CodeLlama", value: ollamaModel_enum.EOllamaModel.CODELLAMA },
                    { label: "DeepSeek Coder", value: ollamaModel_enum.EOllamaModel.DEEPSEEK_CODER },
                    { label: "Custom Model", value: ollamaModel_enum.EOllamaModel.CUSTOM },
                ], ollamaModel_enum.EOllamaModel.LLAMA3_2);
            }
            case llmProvider_enum.ELLMProvider.OPENAI: {
                return this.CLI_INTERFACE.select("Select OpenAI model:", [
                    { label: "GPT-4.1 (Latest 2025, most capable)", value: openaiModel_enum.EOpenAIModel.GPT_4_1 },
                    { label: "GPT-4.1 Nano (Fastest 4.1 model)", value: openaiModel_enum.EOpenAIModel.GPT_4_1_NANO },
                    { label: "GPT-4.1 Mini", value: openaiModel_enum.EOpenAIModel.GPT_4_1_MINI },
                    { label: "GPT-4o (Latest, enhanced creative writing)", value: openaiModel_enum.EOpenAIModel.GPT_4O },
                    { label: "GPT-4o Mini (Faster, cheaper)", value: openaiModel_enum.EOpenAIModel.GPT_4O_MINI },
                    { label: "GPT-4 Turbo", value: openaiModel_enum.EOpenAIModel.GPT_4_TURBO },
                    { label: "GPT-4 (Original)", value: openaiModel_enum.EOpenAIModel.GPT_4 },
                    { label: "GPT-3.5 Turbo (Fastest, cheapest)", value: openaiModel_enum.EOpenAIModel.GPT_35_TURBO },
                    { label: "O1 (Enhanced reasoning)", value: openaiModel_enum.EOpenAIModel.O1 },
                    { label: "O1 Mini (Fast reasoning)", value: openaiModel_enum.EOpenAIModel.O1_MINI },
                ], openaiModel_enum.EOpenAIModel.GPT_4O);
            }
            default: {
                // This ensures exhaustiveness - TypeScript will error if a case is missing
                const exhaustiveCheck = provider;
                throw new Error(`Unsupported provider: ${String(exhaustiveCheck)}`);
            }
        }
    }

    /**
     * Resolve the credential value for a provider: prefer the environment
     * variable; otherwise inform the user and return a placeholder that causes
     * a prompt at use time.
     * @param {ELLMProvider} provider - The selected LLM provider
     * @returns {string} The environment API key or the "will-prompt-on-use" placeholder
     */
    resolveCredential(provider) {
        const environmentVariableName = ENVIRONMENT_VARIABLE_NAMES[provider] ?? "";
        const environmentApiKey = process.env[environmentVariableName];
        if (environmentApiKey && environmentApiKey.trim().length > 0) {
            this.CLI_INTERFACE.success(`Found API key in environment variable: ${environmentVariableName}`);
            return environmentApiKey;
        }
        // Inform user about environment variable and format requirements
        const keyFormatInfo = getKeyFormatInfo(provider);
        this.CLI_INTERFACE.info(`API key will be read from ${environmentVariableName} environment variable${keyFormatInfo} or prompted each time.`);
        // Use dummy value for configuration
        return "will-prompt-on-use";
    }

    /**
     * Optionally prompt for advanced retry settings; defaults are used when
     * the user declines.
     * @returns {Promise<{maxRetries: number, validationMaxRetries: number}>} The chosen retry counts
     */
    async promptRetrySettings() {
        let maxRetries = numeric_constant.DEFAULT_MAX_RETRIES;
        let validationMaxRetries = numeric_constant.DEFAULT_VALIDATION_MAX_RETRIES;
        const shouldConfigureAdvanced = await this.CLI_INTERFACE.confirm("Would you like to configure advanced settings (retry counts)?", false);
        if (shouldConfigureAdvanced) {
            const retriesString = await this.CLI_INTERFACE.text("Max retries for AI generation (default: 3):", "3", "3", validateRetryCount);
            maxRetries = Number.parseInt(retriesString, 10);
            const validationRetriesString = await this.CLI_INTERFACE.text("Max retries for validation fixes (default: 3):", "3", "3", validateRetryCount);
            validationMaxRetries = Number.parseInt(validationRetriesString, 10);
        }
        return { maxRetries, validationMaxRetries };
    }

    /**
     * Get the current LLM configuration
     * @returns {Promise<LLMConfiguration | null>} Promise resolving to the current configuration or null if not configured
     */
    async getCurrentConfiguration() {
        const config = await this.CONFIG_SERVICE.get();
        if (!config.provider || !config.mode) {
            return null;
        }
        // Add backward compatibility - set default retry values if missing
        let isConfigUpdated = false;
        if (config.maxRetries === undefined) {
            config.maxRetries = numeric_constant.DEFAULT_MAX_RETRIES;
            isConfigUpdated = true;
        }
        if (config.validationMaxRetries === undefined) {
            config.validationMaxRetries = numeric_constant.DEFAULT_VALIDATION_MAX_RETRIES;
            isConfigUpdated = true;
        }
        // Save updated config if we added defaults
        if (isConfigUpdated) {
            await this.CONFIG_SERVICE.set(config);
        }
        // Migrate deprecated models
        let migratedModel = config.model;
        if (migratedModel && MODEL_MIGRATIONS[migratedModel]) {
            const oldModel = migratedModel;
            migratedModel = MODEL_MIGRATIONS[migratedModel];
            // Save the migrated configuration
            await this.CONFIG_SERVICE.setProperty("model", migratedModel);
            this.CLI_INTERFACE.warn(`Migrated deprecated model ${oldModel} to ${migratedModel}`);
        }
        // For manual mode, return configuration with dummy API key
        if (config.mode === commitMode_enum.ECommitMode.MANUAL) {
            return new llmConfiguration_entity.LLMConfiguration(config.provider, new apiKey_valueObject.ApiKey("manual-mode"), config.mode, migratedModel, config.maxRetries ?? numeric_constant.DEFAULT_MAX_RETRIES, config.validationMaxRetries ?? numeric_constant.DEFAULT_VALIDATION_MAX_RETRIES);
        }
        // For auto mode, check environment variables
        const environmentVariableName = ENVIRONMENT_VARIABLE_NAMES[config.provider] ?? "";
        const environmentApiKey = process.env[environmentVariableName];
        // If no API key in environment, return null (will prompt later)
        if (!environmentApiKey || environmentApiKey.trim().length === 0) {
            return null;
        }
        return new llmConfiguration_entity.LLMConfiguration(config.provider, new apiKey_valueObject.ApiKey(environmentApiKey), config.mode, migratedModel, config.maxRetries ?? numeric_constant.DEFAULT_MAX_RETRIES, config.validationMaxRetries ?? numeric_constant.DEFAULT_VALIDATION_MAX_RETRIES);
    }

    /**
     * Check if the current configuration needs LLM details
     * @returns {Promise<boolean>} Promise resolving to true if LLM details are needed
     */
    async needsLLMDetails() {
        const config = await this.CONFIG_SERVICE.get();
        if (!config.mode || config.mode === commitMode_enum.ECommitMode.MANUAL) {
            return false;
        }
        // For auto mode, check if API key is in environment
        const environmentVariableName = ENVIRONMENT_VARIABLE_NAMES[config.provider] ?? "";
        const environmentApiKey = process.env[environmentVariableName];
        // Need details if no API key in environment
        return !environmentApiKey || environmentApiKey.trim().length === 0;
    }

    /**
     * Save LLM configuration
     * @param {LLMConfiguration} configuration - The configuration to save
     * @returns {Promise<void>} Promise that resolves when configuration is saved
     */
    async saveConfiguration(configuration) {
        const config = {
            maxRetries: configuration.getMaxRetries(),
            mode: configuration.getMode(),
            model: configuration.getModel(),
            provider: configuration.getProvider(),
            validationMaxRetries: configuration.getValidationMaxRetries(),
        };
        await this.CONFIG_SERVICE.set(config);
    }

    /**
     * Update the commit mode
     * @param {ECommitMode} mode - The new commit mode
     * @returns {Promise<LLMConfiguration | null>} Promise resolving to the updated configuration
     */
    async updateMode(mode) {
        const current = await this.getCurrentConfiguration();
        if (!current) {
            return null;
        }
        const updated = current.withMode(mode);
        await this.saveConfiguration(updated);
        return updated;
    }
}

exports.ConfigureLLMUseCase = ConfigureLLMUseCase;
//# sourceMappingURL=configure-llm.use-case.js.map