UNPKG

pr-commit-ai-agent

Version:

A CLI tool powered by AI to streamline Git workflows by generating commit messages, branch names, and pull requests.

1,338 lines (1,302 loc) 87.4 kB
var __create = Object.create; var __getProtoOf = Object.getPrototypeOf; var __defProp = Object.defineProperty; var __getOwnPropNames = Object.getOwnPropertyNames; var __hasOwnProp = Object.prototype.hasOwnProperty; var __toESM = (mod, isNodeMode, target) => { target = mod != null ? __create(__getProtoOf(mod)) : {}; const to = isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target; for (let key of __getOwnPropNames(mod)) if (!__hasOwnProp.call(to, key)) __defProp(to, key, { get: () => mod[key], enumerable: true }); return to; }; var __export = (target, all) => { for (var name in all) __defProp(target, name, { get: all[name], enumerable: true, configurable: true, set: (newValue) => all[name] = () => newValue }); }; // bin/run.ts var import_yargs = __toESM(require("yargs")); var import_config6 = require("dotenv/config"); var import_picocolors6 = require("picocolors"); var import_helpers = require("yargs/helpers"); // src/commands/config.ts var exports_config = {}; __export(exports_config, { handler: () => handler, describe: () => describe, command: () => command, aliases: () => aliases }); var import_picocolors = require("picocolors"); // src/config.ts var import_config = require("dotenv/config"); var import_conf = __toESM(require("conf")); var schema = { llmProvider: { type: "string", enum: ["openai", "anthropic", "deepseek", "ollama", "gemini"], default: "openai" }, model: { type: "string", default: "" }, openai: { type: "object", properties: { apiKey: { type: "string", default: "" }, baseURL: { type: "string", format: "uri", default: "https://api.openai.com/v1" } } }, anthropic: { type: "object", properties: { apiKey: { type: "string", default: "" } } }, deepseek: { type: "object", properties: { apiKey: { type: "string", default: "" }, baseURL: { type: "string", format: "uri", default: "https://api.deepseek.com/v1" } } }, ollama: { type: "object", properties: { baseURL: { type: "string", format: "uri", default: "http://localhost:11434/api/generate" }, apiKey: { type: "string", default: "" } } }, gemini: { type: "object", properties: { apiKey: { type: "string", default: "" } } } }; var configInstance = new import_conf.default({ projectName: "pr-commit-ai-agent", schema }); function initializeConfig() { if (process.env.LLM_PROVIDER) { configInstance.set("llmProvider", process.env.LLM_PROVIDER.toLowerCase()); } if (process.env.MODEL) { configInstance.set("model", process.env.MODEL); } if (process.env.OPENAI_API_KEY) { configInstance.set("openai.apiKey", process.env.OPENAI_API_KEY); } if (process.env.OPENAI_BASE_URL) { configInstance.set("openai.baseURL", process.env.OPENAI_BASE_URL); } if (process.env.ANTHROPIC_API_KEY) { configInstance.set("anthropic.apiKey", process.env.ANTHROPIC_API_KEY); } if (process.env.DEEPSEEK_API_KEY) { configInstance.set("deepseek.apiKey", process.env.DEEPSEEK_API_KEY); } if (process.env.DEEPSEEK_BASE_URL) { configInstance.set("deepseek.baseURL", process.env.DEEPSEEK_BASE_URL); } if (process.env.OLLAMA_API_KEY) { configInstance.set("ollama.apiKey", process.env.OLLAMA_API_KEY); } if (process.env.OLLAMA_BASE_URL) { configInstance.set("ollama.baseURL", process.env.OLLAMA_BASE_URL); } if (process.env.GEMINI_API_KEY) { configInstance.set("gemini.apiKey", process.env.GEMINI_API_KEY); } } initializeConfig(); var config = { llmProvider: configInstance.get("llmProvider"), model: configInstance.get("model"), openai: { apiKey: configInstance.get("openai.apiKey"), baseURL: configInstance.get("openai.baseURL") }, anthropic: { apiKey: configInstance.get("anthropic.apiKey") }, deepseek: { apiKey: configInstance.get("deepseek.apiKey"), baseURL: configInstance.get("deepseek.baseURL") }, ollama: { baseURL: configInstance.get("ollama.baseURL"), apiKey: configInstance.get("ollama.apiKey") }, gemini: { apiKey: configInstance.get("gemini.apiKey") } }; // src/logger.ts var import_consola = require("consola"); var logger = import_consola.createConsola({}); // src/commands/config.ts var command = "config"; var describe = "View and modify configuration settings"; var aliases = ["conf", "c"]; async function handler() { logger.info(import_picocolors.bold(import_picocolors.blue("PR-Agent Configuration Manager"))); logger.info(import_picocolors.yellow("Current configuration path: ") + import_picocolors.blue(configInstance.path)); logger.info(import_picocolors.yellow("Press Ctrl+C at any time to exit")); displayCurrentConfig(); await configurationLoop(); } function displayCurrentConfig() { logger.info(import_picocolors.bold(import_picocolors.green(` === Current Configuration ===`))); logger.info(import_picocolors.yellow("Global Settings:")); logger.info(` ${import_picocolors.bold("LLM Provider:")} ${config.llmProvider}`); logger.info(` ${import_picocolors.bold("Default Model:")} ${config.model || "(using provider default)"}`); const providers = ["openai", "anthropic", "deepseek", "ollama", "gemini"]; for (const provider of providers) { logger.info(import_picocolors.yellow(` ${provider.charAt(0).toUpperCase() + provider.slice(1)} Configuration:`)); Object.entries(config[provider]).forEach(([key, value]) => { const displayValue = key.toLowerCase().includes("apikey") ? value ? "********" : "(not set)" : value || "(not set)"; logger.info(`${import_picocolors.bold(key)}: ${displayValue}`); }); } } async function configurationLoop() { let exit = false; while (!exit) { logger.info(import_picocolors.bold(import_picocolors.green(` === Configuration Options ===`))); logger.info("1. Set LLM provider (openai, anthropic, deepseek, ollama, gemini)"); logger.info("2. Set default model"); logger.info("3. Configure OpenAI settings"); logger.info("4. Configure Anthropic settings"); logger.info("5. Configure DeepSeek settings"); logger.info("6. Configure Ollama settings"); logger.info("7. Configure Gemini settings"); logger.info("8. Reset all settings to defaults"); logger.info("9. Show current configuration values"); logger.info("10. Exit configuration"); const choice = await logger.prompt(import_picocolors.yellow("[CONFIG] Select an option:"), { type: "select", options: [ "1. Set LLM provider", "2. Set default model", "3. Configure OpenAI", "4. Configure Anthropic", "5. Configure DeepSeek", "6. Configure Ollama", "7. Configure Gemini", "8. Reset to defaults", "9. Show current configuration", "10. Exit" ] }); if (typeof choice === "undefined") { exit = true; return; } switch (choice) { case "1. Set LLM provider": await configureLLMProvider(); break; case "2. Set default model": await configureDefaultModel(); break; case "3. Configure OpenAI": await configureProvider("openai"); break; case "4. Configure Anthropic": await configureProvider("anthropic"); break; case "5. Configure DeepSeek": await configureProvider("deepseek"); break; case "6. Configure Ollama": await configureProvider("ollama"); break; case "7. Configure Gemini": await configureProvider("gemini"); break; case "8. Reset to defaults": await resetConfig(); break; case "9. Show current configuration": displayCurrentConfig(); break; case "10. Exit": exit = true; break; } } logger.info(import_picocolors.green("[CONFIG] Configuration saved. Exiting configuration manager.")); } async function configureLLMProvider() { const provider = await logger.prompt(import_picocolors.yellow("[CONFIG] Select LLM provider:"), { type: "select", options: ["openai", "anthropic", "deepseek", "ollama", "gemini"] }); await performAction("set", "llmProvider", provider); logger.info(import_picocolors.yellow("[CONFIG] Remember to set an appropriate model for this provider")); const setModel = await logger.prompt(import_picocolors.yellow("[CONFIG] Would you like to set a default model now?"), { type: "confirm" }); if (setModel) { await configureDefaultModel(provider); } } async function configureDefaultModel(provider) { const currentProvider = provider || config.llmProvider; let suggestedModels = []; switch (currentProvider) { case "openai": suggestedModels = ["gpt-5", "gpt-4o", "gpt-3.5-turbo"]; break; case "anthropic": suggestedModels = ["claude-opus-4.1", "claude-sonnet-4", "claude-3.5-haiku"]; break; case "gemini": suggestedModels = ["gemini-2.5-pro", "gemini-2.5-flash"]; break; case "ollama": suggestedModels = ["llama4", "llama3.2", "mistral", "codellama", "qwen3"]; break; case "deepseek": suggestedModels = ["deepseek-r1", "deepseek-v3"]; break; } let model; if (suggestedModels.length > 0) { const modelOptions = [...suggestedModels, "Enter custom model name"]; const selectedOption = await logger.prompt(import_picocolors.yellow("[CONFIG] Select or enter model name:"), { type: "select", options: modelOptions }); if (selectedOption === "Enter custom model name") { model = await logger.prompt(import_picocolors.yellow("[CONFIG] Enter custom model name:"), { type: "text", initial: config.model }); } else { model = selectedOption; } } else { model = await logger.prompt(import_picocolors.yellow("[CONFIG] Enter model name:"), { type: "text", initial: config.model }); } await performAction("set", "model", model); } async function configureProvider(provider) { const providerConfig = config[provider]; logger.info(import_picocolors.bold(import_picocolors.green(` [CONFIG] === ${provider.charAt(0).toUpperCase() + provider.slice(1)} Configuration ===`))); for (const [key, value] of Object.entries(providerConfig)) { const displayValue = key.toLowerCase().includes("apikey") ? value ? "********" : "" : value; if (key === "baseURL" && provider === "ollama") { const options = ["http://localhost:11434/api/generate", "http://localhost:11434/api/chat", "Enter custom URL"]; const selectedOption = await logger.prompt(import_picocolors.yellow(`[CONFIG] Select ${key}:`), { type: "select", options, initial: options.includes(value) ? value : options[0] }); const newValue = selectedOption === "Enter custom URL" ? await logger.prompt(import_picocolors.yellow("[CONFIG] Enter custom URL:"), { type: "text", initial: value }) : selectedOption; if (newValue !== value && newValue !== "") { await performAction("set", `${provider}.${key}`, newValue); } } else { const newValue = await logger.prompt(import_picocolors.yellow(`[CONFIG] Enter ${key} (leave empty to keep current):`), { type: "text", initial: displayValue }); if (newValue !== displayValue && newValue !== "") { await performAction("set", `${provider}.${key}`, newValue); } } } const makeDefault = await logger.prompt(import_picocolors.yellow(`[CONFIG] Set ${provider} as your default LLM provider?`), { type: "confirm" }); if (makeDefault) { await performAction("set", "llmProvider", provider); logger.info(import_picocolors.green(`[CONFIG] ${provider.charAt(0).toUpperCase() + provider.slice(1)} set as default provider.`)); } } async function resetConfig() { const confirm = await logger.prompt(import_picocolors.red("[CONFIG] Are you sure you want to reset all settings to defaults?"), { type: "confirm" }); if (confirm) { await performAction("reset"); logger.info(import_picocolors.green("[CONFIG] All settings have been reset to defaults.")); } } async function performAction(action, key, value) { switch (action) { case "list": displayCurrentConfig(); break; case "set": { if (!key) { logger.error(import_picocolors.red("Key must be specified for set action")); return; } const finalValue = value || await logger.prompt(import_picocolors.yellow(`Enter value for ${key}:`), { type: "text" }); try { configInstance.set(key, finalValue); logger.info(import_picocolors.green(`Updated ${key} = ${finalValue}`)); } catch (error) { logger.error(import_picocolors.red(`Failed to set ${key}: ${error.message}`)); } break; } case "get": { if (!key) { logger.error(import_picocolors.red("Key must be specified for get action")); return; } const val = configInstance.get(key); logger.info(`${key} = ${val}`); break; } case "reset": { configInstance.clear(); initializeConfig(); logger.info(import_picocolors.green("Configuration reset to defaults")); break; } case "exit": break; } } // src/commands/create.ts var exports_create = {}; __export(exports_create, { handler: () => handler2, describe: () => describe2, command: () => command2, builder: () => builder, aliases: () => aliases2 }); var process2 = __toESM(require("node:process")); var import_execa = require("execa"); var import_picocolors3 = require("picocolors"); var import_simple_git = require("simple-git"); // src/services/llm.ts var fs = __toESM(require("node:fs/promises")); var os = __toESM(require("node:os")); var path = __toESM(require("node:path")); var import_google = require("@ai-sdk/google"); var import_sdk = __toESM(require("@anthropic-ai/sdk")); var import_ai = require("ai"); var import_llm_cost = require("llm-cost"); var import_openai = require("openai"); var import_picocolors2 = require("picocolors"); var import_uuid = require("uuid"); // src/services/prompts.ts function getSystemPrompt() { return `You are a senior software architect and code reviewer. Analyze the provided git diff to generate the following structured analysis: Skip any sections or sub-section that are not relevant or not needed or not application or does not require changes to be mentioned. Make sure all content is concise, relevant, and actionable. Avoid jargon and unnecessary verbosity or repetition. ## 1. Commit Message - Format: type(scope): concise summary of main functionality - Example: "feat(logs): implement log viewer with options to delete and view logs" - First line: 50-120 characters, written in imperative mood - Follow with 3-5 bullet points that: - Each begin with a past tense action verb (Added, Implemented, Fixed, Updated, etc.) - Describe specific components or functionality added/changed - Highlight important implementation details or user-facing changes - Are ordered from most significant to least significant change - Example: \`\`\` - Added a command to view LLM request logs with user prompts. - Implemented functionality to delete all logs with confirmation. - Enhanced log file handling to display entries and their details. - Updated log entry structure to include token usage and cost estimates. \`\`\` ## 2. Pull Request Title - Create a precise title (60-100 characters) with appropriate type prefix - Clearly communicate the primary purpose of the changes - Example: "feat(user-profile): implement image upload with client-side compression" ## 3. Pull Request Description - Begin with a Short Technical Summary a Concise overview of the core functionality being changed - Changes Made: - Use bullet points with past tense action verbs (Added, Implemented, Fixed, etc.) - Describe each significant change or addition in detail - Order from most important to least important - Group related changes under sub-categories if needed - Example: \`\`\` - Added a command to view LLM request logs with user prompts - Implemented functionality to delete all logs with confirmation - Enhanced log file handling to display entries and their details - Updated log entry structure to include token usage and cost estimates \`\`\` ## 4. Change Classification Categorize using specific prefixes with detailed scope: - feat: New functionality or feature implementation - Example: "feat(auth): implement multi-factor authentication" - Include scope indicating the system area affected - Describe user-facing changes explicitly - Note dependencies on other features - fix: Bug correction with clear description of the issue - Example: "fix(checkout): prevent duplicate order submission" - Include root cause analysis - Reference issue tracker ID when applicable - Document verification steps - refactor: Code restructuring without behavioral changes - Example: "refactor(api): convert REST endpoints to use repository pattern" - Specify architectural patterns introduced/removed - Note test coverage to verify behavior preservation - Highlight technical debt addressed - docs: Documentation updates or improvements - Example: "docs(api): update user authentication API reference" - Specify documentation type (API, user guide, developer notes) - Note target audience - Include validation steps if applicable - perf: Performance optimizations (with measurable impact) - Example: "perf(search): reduce query latency by 40% with index optimization" - Include baseline performance metrics - Document improvement methodology - Note test environment details - security: Security vulnerability patches or hardening - Example: "security(auth): fix JWT validation to prevent token forgery" - Note vulnerability type (OWASP category) - Document attack vector being addressed - Include verification methodology - test: Test coverage improvements or testing framework changes - Example: "test(payment): add integration tests for refund workflows" - Specify test types added/modified - Note coverage percentage changes - Document test environment requirements - build: Build system or external dependency changes - Example: "build(deps): upgrade webpack to v5.75.0" - Document build performance impact - Note breaking changes in dependencies - Include verification steps - ci: Continuous integration configuration updates - Example: "ci(pipeline): add accessibility testing to PR checks" - Document pipeline performance impact - Note changes to development workflow - Include verification methodology - chore: Regular maintenance tasks or dependency updates - Example: "chore(deps): update non-critical dependencies" - Group related maintenance tasks - Note impact on development experience - Document follow-up tasks if applicable - style: Code formatting or style adjustments (no functional changes) - Example: "style(components): apply consistent naming convention" - Reference style guide being followed - Note automation tools used - Document scope of changes ## 5. Provide actionable feedback that addresses both immediate code quality and long-term maintainability. Use concrete examples when suggesting improvements. ## 6. Use correct sentence case Make sure the commit message and PR title are clear and concise, and follow the provided guidelines and follows @semantic-release/commit-analyzer angular convention. `; } // src/services/llm.ts var openaiClient = null; var anthropicClient = null; var geminiClient = null; var logDir = path.join(os.homedir(), ".llm-logs"); function initializeClients() { try { if (config.openai?.apiKey) { openaiClient = new import_openai.OpenAI({ apiKey: config.openai.apiKey, baseURL: config.openai.baseURL }); logger.debug("[LLM-INIT] OpenAI client initialized successfully"); } else { logger.debug("[LLM-INIT] Skipping OpenAI client initialization: No API key provided"); } if (config.anthropic?.apiKey) { anthropicClient = new import_sdk.default({ apiKey: config.anthropic.apiKey }); logger.debug("[LLM-INIT] Anthropic client initialized successfully"); } else { logger.debug("[LLM-INIT] Skipping Anthropic client initialization: No API key provided"); } if (config.gemini?.apiKey) { geminiClient = import_google.createGoogleGenerativeAI({ apiKey: config.gemini.apiKey }); logger.debug("[LLM-INIT] Google Gemini client initialized successfully"); } else { logger.debug("[LLM-INIT] Skipping Google Gemini client initialization: No API key provided"); } } catch (error) { logger.error(import_picocolors2.red(`[LLM-INIT] Failed to initialize LLM clients: ${error.message}`)); throw new Error(`LLM client initialization failed: ${error.message}`); } } async function ensureLogDirectory() { try { await fs.mkdir(logDir, { recursive: true }); logger.debug(`[LLM-LOGS] Log directory ensured at: ${logDir}`); } catch (error) { logger.error(import_picocolors2.red(`[LLM-LOGS] Failed to create log directory: ${error.message}`)); throw new Error(`Log directory creation failed: ${error.message}`); } } async function logRequest(logEntry) { try { const requestFileName = `request-${new Date().toISOString().split("T")[0]}-${logEntry.id}.json`; const requestFilePath = path.join(logDir, requestFileName); await fs.writeFile(requestFilePath, JSON.stringify(logEntry, null, 2), { encoding: "utf8" }); logger.info(import_picocolors2.green(`[LLM-LOGS] Request saved as ${requestFilePath} (ID: ${logEntry.id})`)); return logEntry.id; } catch (error) { logger.error(import_picocolors2.red(`[LLM-LOGS] Failed to log LLM request: ${error.message}`)); return logEntry.id; } } function getModelForProvider(provider, customModel) { if (customModel) { return customModel; } switch (provider) { case "openai": return config.model || "gpt-3.5-turbo"; case "anthropic": return config.model || "claude-3-sonnet-20240229"; case "deepseek": return config.model || "deepseek-chat"; case "ollama": return config.model || "llama2"; case "gemini": return config.model || "gemini-1.5-pro"; default: return "unknown"; } } async function logTokensAndCost(model, input, output) { try { if (output) { const inputOutputCost = await import_llm_cost.tokenizeAndEstimateCost({ model, input, output }); logger.info(import_picocolors2.yellow(`[LLM-TOKENS] Input tokens: ${inputOutputCost.inputTokens}, Output tokens: ${inputOutputCost.outputTokens}, Cost: ${inputOutputCost.cost}`)); return { inputTokens: inputOutputCost.inputTokens, outputTokens: inputOutputCost.outputTokens, totalTokens: inputOutputCost.inputTokens + inputOutputCost.outputTokens, cost: inputOutputCost.cost }; } const inputToken = await import_llm_cost.tokenizeAndEstimateCost({ model, input }); logger.info(import_picocolors2.yellow(`[LLM-TOKENS] Input tokens: ${inputToken.inputTokens}`)); return { inputTokens: inputToken.inputTokens, outputTokens: 0, totalTokens: inputToken.inputTokens, cost: inputToken.cost }; } catch (error) { logger.warn(import_picocolors2.yellow(`[LLM-TOKENS] Failed to calculate token usage: ${error.message}`)); return; } } async function openaiGenerate(options) { if (!openaiClient) { throw new Error("OpenAI client not initialized. Check your API key."); } const model = options.model || config.model || "gpt-3.5-turbo"; logger.info(import_picocolors2.yellow(`[OPENAI] Making completion request with model: ${model}`)); await logTokensAndCost(model, options.prompt); logger.debug(`[OPENAI] Request params: temperature=${options.temperature || 0.1}, maxTokens=${options.maxTokens || 1e6}`); const clientBaseUrl = openaiClient.baseURL; let response; let outputText = ""; try { if (!clientBaseUrl.includes("api.openai.com")) { response = await openaiClient.chat.completions.create({ model, messages: [ { role: "user", content: options.prompt } ], temperature: options.temperature || 0.1, response_format: { type: "json_object" } }); outputText = response?.choices?.[0]?.message?.content || ""; } else { response = await openaiClient.responses.create({ model, input: [ { role: "user", content: options.prompt, type: "message" } ], temperature: options.temperature || 0.1 }); outputText = response.output_text || ""; } const tokenUsage = await logTokensAndCost(model, options.prompt, outputText); if (outputText.startsWith("```json\n")) { outputText = outputText.substring(8); } if (outputText.endsWith("\n```")) { outputText = outputText.substring(0, outputText.length - 4); } return { text: outputText, response, tokenUsage }; } catch (error) { throw new Error(`OpenAI API error: ${error.message}`); } } async function anthropicGenerate(options) { if (!anthropicClient) { throw new Error("Anthropic client not initialized. Check your API key."); } const model = options.model || config.model || "claude-3-sonnet-20240229"; logger.info(import_picocolors2.yellow(`[ANTHROPIC] Making completion request with model: ${model}`)); await logTokensAndCost(model, options.prompt); logger.debug(`[ANTHROPIC] Request params: temperature=${options.temperature || 0.1}, maxTokens=${options.maxTokens || 1e6}`); try { const messages = [ { role: "user", content: options.prompt } ]; const response = await anthropicClient.messages.create({ model, messages, temperature: options.temperature || 0.1, max_tokens: options.maxTokens || 1e6, thinking: { type: "disabled" } }); const responseText = response.content[0]?.text || ""; const tokenUsage = await logTokensAndCost(model, options.prompt, responseText); return { text: responseText, response, tokenUsage }; } catch (error) { throw new Error(`Anthropic API error: ${error.message}`); } } async function deepseekGenerate(options) { if (!config.deepseek?.apiKey) { throw new Error("DeepSeek API key not configured."); } const model = options.model || config.model || "deepseek-chat"; const apiUrl = config.deepseek?.baseURL || "https://api.deepseek.com/v1/chat/completions"; logger.info(import_picocolors2.yellow(`[DEEPSEEK] Making completion request with model: ${model}`)); await logTokensAndCost(model, options.prompt); logger.debug(`[DEEPSEEK] Request params: temperature=${options.temperature || 0.1}, maxTokens=${options.maxTokens || 1e6}, url=${apiUrl}`); try { const response = await fetch(apiUrl, { method: "POST", headers: { "Content-Type": "application/json", Authorization: `Bearer ${config.deepseek.apiKey}` }, body: JSON.stringify({ model, messages: [ { role: "user", content: options.prompt } ], temperature: options.temperature || 0.1, max_tokens: options.maxTokens || 1e6 }) }); if (!response.ok) { throw new Error(`DeepSeek API error: ${response.status} ${response.statusText}`); } const data = await response.json(); if (!data || !data.choices || !data.choices[0]?.message) { throw new Error("Invalid response format from DeepSeek API"); } const responseText = data.choices[0].message.content || ""; const tokenUsage = await logTokensAndCost(model, options.prompt, responseText); return { text: responseText, response, tokenUsage }; } catch (error) { throw new Error(`DeepSeek API error: ${error.message}`); } } async function ollamaGenerate(options) { const model = options.model || config.model || "llama2"; const apiUrl = config.ollama?.baseURL || "http://localhost:11434/api/generate"; logger.info(import_picocolors2.yellow(`[OLLAMA] Making completion request with model: ${model}`)); await logTokensAndCost(model, options.prompt); logger.debug(`[OLLAMA] Request params: temperature=${options.temperature || 0.1}, url=${apiUrl}`); try { const body = JSON.stringify({ model, stream: false, prompt: options.prompt, format: "json", options: { temperature: options.temperature || 0.1, num_ctx: 32768, top_p: 0.8 } }); const headers = { "Content-Type": "application/json" }; if (config.ollama?.apiKey) { headers.Authorization = `Bearer ${config.ollama.apiKey}`; } const response = await fetch(apiUrl, { method: "POST", headers, body }); if (!response.ok) { throw new Error(`Ollama API error: ${response.status} ${response.statusText}`); } const data = await response.json(); if (!data || typeof data !== "object" || !("response" in data)) { throw new Error("Invalid response format from Ollama API"); } const responseText = data.response || ""; const tokenUsage = await logTokensAndCost(model, options.prompt, responseText); return { response: data, text: responseText, tokenUsage }; } catch (error) { throw new Error(`Ollama API error: ${error.message}`); } } async function geminiGenerate(options) { if (!geminiClient) { throw new Error("Gemini client not initialized. Check your API key."); } const model = options.model || config.model || "gemini-1.5-pro"; logger.info(import_picocolors2.yellow(`[GEMINI] Making completion request with model: ${model}`)); await logTokensAndCost(model, options.prompt); logger.debug(`[GEMINI] Request params: temperature=${options.temperature || 0.1}, model=${model}`); try { const response = await import_ai.generateObject({ model: geminiClient(model), schema: import_ai.jsonSchema({ type: "object" }), prompt: options.prompt, temperature: options.temperature || 0.1 }); const object = response.object ?? {}; const tokenUsage = await logTokensAndCost(model, options.prompt, JSON.stringify(object)); return { response, text: JSON.stringify(object), tokenUsage }; } catch (error) { throw new Error(`Gemini API error: ${error.message}`); } } var generateCompletion = async (provider, options) => { logger.info(import_picocolors2.yellow(`[LLM] Generating completion using ${provider}...`)); if (!openaiClient && !anthropicClient && !geminiClient) { initializeClients(); } await ensureLogDirectory().catch((err) => { logger.warn(import_picocolors2.yellow(`[LLM] Failed to ensure log directory, but will continue: ${err.message}`)); }); const startTime = Date.now(); const model = getModelForProvider(provider, options.model); const requestId = import_uuid.v4(); options.prompt = getSystemPrompt() + options.prompt; const logEntry = { id: requestId, timestamp: new Date().toISOString(), provider, model, options: { ...options } }; try { let response = {}; let text = ""; let tokenUsage; switch (provider) { case "openai": { const openaiResult = await openaiGenerate(options); response = openaiResult.response; text = openaiResult.text; tokenUsage = openaiResult.tokenUsage; break; } case "anthropic": { const anthropicResult = await anthropicGenerate(options); response = anthropicResult.response; text = anthropicResult.text; tokenUsage = anthropicResult.tokenUsage; break; } case "deepseek": { const deepseekResult = await deepseekGenerate(options); response = deepseekResult.response; text = deepseekResult.text; tokenUsage = deepseekResult.tokenUsage; break; } case "ollama": { const ollamaResult = await ollamaGenerate(options); response = ollamaResult.response; text = ollamaResult.text; tokenUsage = ollamaResult.tokenUsage; break; } case "gemini": { const geminiResult = await geminiGenerate(options); response = geminiResult.response; text = geminiResult.text; tokenUsage = geminiResult.tokenUsage; break; } default: throw new Error(`Unsupported LLM provider: ${provider}`); } logEntry.response = response; logEntry.text = text; logEntry.executionTimeMs = Date.now() - startTime; if (tokenUsage) { logEntry.tokenUsage = { inputTokens: tokenUsage.inputTokens, outputTokens: tokenUsage.outputTokens, totalTokens: tokenUsage.totalTokens, cost: tokenUsage.cost }; } if (options.logRequest) { await logRequest(logEntry).catch((err) => { logger.warn(import_picocolors2.yellow(`[LLM] Failed to log request, but will continue: ${err.message}`)); }); } return { text, response, requestId }; } catch (error) { logEntry.error = error.message; logEntry.executionTimeMs = Date.now() - startTime; if (options.logRequest) { await logRequest(logEntry).catch((err) => { logger.warn(import_picocolors2.yellow(`[LLM] Failed to log error request, but will continue: ${err.message}`)); }); } logger.error(import_picocolors2.red(`[LLM] Error generating completion with ${provider}: ${error.message}`)); throw error; } }; initializeClients(); ensureLogDirectory().catch((err) => logger.error(import_picocolors2.red(`[LLM-LOGS] Log directory initialization error: ${err.message}`))); // src/commands/create.ts var command2 = "create"; var describe2 = "Generate commit messages and create a PR using AI"; var aliases2 = ["c"]; function builder(yargs) { return yargs.option("yes", { type: "boolean", alias: "y", describe: "Automatically answer yes to all confirmations", default: false }).option("log-request", { type: "boolean", describe: "Log AI requests for debugging purposes", default: false }).option("pr", { type: "boolean", describe: "Create a branch and PR", default: false }).option("provider", { type: "string", describe: "LLM provider to use (e.g., openai, ollama)", default: config.llmProvider }).option("model", { type: "string", describe: "LLM model to use (e.g., gpt-3.5-turbo, gpt-4)", default: config.model }).option("draft", { type: "boolean", describe: "Create the PR as a draft", default: false }); } var globalConfirm; var globalLogRequest = false; var model; var provider; var commitsOptimizedInSession = false; var commitsCreatedInSession = false; var PR_AGENT_NOTE_NAMESPACE = "pr-agent"; var PR_AGENT_NOTE_MESSAGE = "created-by-pr-agent"; function initializeGlobals(argv) { globalConfirm = async (message, options = { type: "confirm" }) => { if (argv.yes) { logger.info(import_picocolors3.yellow(`[Auto-confirmed] ${message}`)); return true; } return await logger.prompt(import_picocolors3.green(message), options); }; globalLogRequest = argv["log-request"] ?? false; provider = argv.provider; model = argv.model; } async function performGitOperation(operation, errorMessage) { try { return await operation(); } catch (error) { logger.error(import_picocolors3.red(`${errorMessage}: ${error.message}`)); return null; } } async function checkForExistingPR(branchName) { if (!branchName) { logger.debug("[PR-CHECK] No branch name provided to checkForExistingPR."); return null; } try { const { exitCode: ghExitCode } = await import_execa.execa("gh", ["--version"], { reject: false }); if (ghExitCode !== 0) { logger.debug("[PR-CHECK] GitHub CLI not available for PR check"); return null; } const { stdout: prJson, exitCode } = await import_execa.execa("gh", ["pr", "list", "--head", branchName, "--state", "open", "--json", "url,number,title", "--limit", "1"], { reject: false }); if (exitCode !== 0 || !prJson) { logger.debug(`[PR-CHECK] No PR information available for branch ${branchName}`); return null; } const prs = JSON.parse(prJson); if (Array.isArray(prs) && prs.length > 0) { logger.debug(`[PR-CHECK] Found existing PR for branch ${branchName}: #${prs[0].number}`); return prs[0]; } return null; } catch (error) { logger.debug(`[PR-CHECK] Error checking for existing PR: ${error.message}`); return null; } } async function handler2(argv) { initializeGlobals(argv); const baseUrl = config?.[provider]?.baseURL; logger.info(import_picocolors3.green("[CONFIG] Current settings:")); logger.info(`[CONFIG] Provider: ${provider}`); logger.info(`[CONFIG] Model: ${model}`); if (baseUrl) { logger.info(`[CONFIG] API URL: ${baseUrl}`); } const ready = await globalConfirm("Are you ready to create an AI PR?"); if (!ready) { return; } try { const currentDir = process2.cwd(); if (!currentDir) { logger.error(import_picocolors3.red("[INIT] Failed to get current working directory.")); return; } const git = import_simple_git.simpleGit({ baseDir: currentDir, binary: "git", maxConcurrentProcesses: 6 }); const status = await performGitOperation(() => git.status(), "[GIT] Failed to get git status"); if (!status) { return; } logger.info(import_picocolors3.yellow("[WORKFLOW] Next step: Determine the target branch for your PR")); const proceedWithBranch = await globalConfirm("Would you like to proceed with determining the target branch?"); if (!proceedWithBranch) { logger.info(import_picocolors3.yellow("[WORKFLOW] Process cancelled by user")); return; } const upstreamBranch = await performGitOperation(() => getUpstreamBranch(git, globalConfirm), "[GIT] Failed to determine upstream branch"); if (!upstreamBranch) { return; } if (!status.isClean()) { logger.info(import_picocolors3.yellow("[WORKFLOW] Next step: Handle uncommitted changes in your working directory")); const proceedWithChanges = await globalConfirm("Would you like to commit your uncommitted changes?"); if (!proceedWithChanges) { logger.info(import_picocolors3.yellow("[WORKFLOW] Process cancelled by user")); return; } await performGitOperation(() => handleUncommittedChanges(git, status, globalConfirm), "[GIT] Failed to handle uncommitted changes"); } else { logger.info(import_picocolors3.green("[GIT] Working directory is clean")); } await performGitOperation(() => optimizeCommitMessages(git, upstreamBranch, globalConfirm), "[GIT] Failed to optimize commit messages"); if (!argv?.pr) { logger.info(import_picocolors3.yellow("[WORKFLOW] Skipping PR creation as --pr flag is not set")); return; } logger.info(import_picocolors3.yellow("[WORKFLOW] Final step: Create a new branch and push PR to remote")); const proceedWithPR = await globalConfirm("Would you like to proceed with creating a PR?"); if (!proceedWithPR) { logger.info(import_picocolors3.yellow("[WORKFLOW] PR creation cancelled by user")); return; } await performGitOperation(() => createAndPushPR(git, upstreamBranch, argv?.draft, globalConfirm), "[GIT] Failed to create and push PR"); } catch (e) { logger.error(import_picocolors3.red(`[ERROR] Unexpected error occurred: ${e.message}`)); } } var ignoredFiles = ["pnpm-lock.yaml", "bun.lockb", "bun.lock", "yarn.lock", "package-lock.json", "tsconfig.json"]; async function getUpstreamBranch(git, confirm) { try { logger.info(import_picocolors3.green("[BRANCH] Attempting to determine the upstream branch...")); let branchInfo; try { branchInfo = await git.branch(); if (!branchInfo || !branchInfo.current) { logger.error(import_picocolors3.red("[BRANCH] Branch information is undefined or missing current branch.")); throw new Error("Could not determine current branch"); } } catch (error) { logger.error(import_picocolors3.red(`[BRANCH] Failed to get branch information: ${error.message}`)); throw new Error("Could not determine current branch"); } const trackingBranch = await git.revparse(["--abbrev-ref", "--symbolic-full-name", "@{u}"]).catch(() => { logger.info(import_picocolors3.yellow("[BRANCH] No tracking branch found")); return null; }); if (branchInfo.current && trackingBranch) { logger.info(import_picocolors3.green(`[BRANCH] Found tracking branch: ${trackingBranch}`)); const confirmTracking = await confirm(`Use "${trackingBranch}" as the target branch?`); if (confirmTracking) { return trackingBranch; } logger.info(import_picocolors3.yellow("[BRANCH] You chose to select a different target branch")); } logger.info(import_picocolors3.yellow("[BRANCH] Fetching available remote branches...")); let remoteBranches; try { remoteBranches = await git.branch(["--remotes"]); if (!remoteBranches || !remoteBranches.all) { logger.error(import_picocolors3.red("[BRANCH] Remote branches information is undefined.")); throw new Error("Could not retrieve remote branches"); } } catch (error) { logger.error(import_picocolors3.red(`[BRANCH] Failed to fetch remote branches: ${error.message}`)); throw new Error("Could not retrieve remote branches"); } const branches = remoteBranches.all.filter((branch) => branch && !branch.includes("HEAD ->")).map((branch) => branch.trim()); if (!branches || branches.length === 0) { throw new Error("No remote branches found"); } const targetBranch = await logger.prompt(import_picocolors3.yellow("[BRANCH] Select target branch for PR:"), { type: "select", options: branches }); if (!targetBranch) { throw new Error("No branch selected"); } logger.info(import_picocolors3.green(`[BRANCH] Selected target branch: ${targetBranch}`)); const confirmSelected = await confirm(`Confirm "${targetBranch}" as your target branch?`); if (!confirmSelected) { logger.info(import_picocolors3.yellow("[BRANCH] Branch selection cancelled. Please start over.")); process2.exit(0); } return targetBranch; } catch (error) { logger.error(import_picocolors3.red(`[BRANCH] Failed to determine upstream branch: ${error.message}`)); throw error; } } async function handleUncommittedChanges(git, status, confirm) { logger.info(import_picocolors3.yellow("[COMMIT] Found uncommitted changes in the working directory")); const proceedWithAnalysis = await confirm("Analyze changes with AI to generate a commit message?"); if (!proceedWithAnalysis) { logger.info(import_picocolors3.yellow("[COMMIT] Commit creation cancelled")); process2.exit(0); } logger.info(import_picocolors3.yellow("[COMMIT] Collecting modified file details for analysis...")); const modifiedFiles = status && Array.isArray(status.modified) ? status.modified.filter((e) => e && !ignoredFiles.includes(e)) : []; if (!modifiedFiles || modifiedFiles.length === 0) { logger.info(import_picocolors3.yellow("[COMMIT] No modified files to analyze.")); return; } const tempModified = []; for (const file of modifiedFiles) { if (!file) { continue; } logger.info(import_picocolors3.yellow(`[COMMIT] Analyzing changes in: ${file}`)); try { const stagedDiff = await git.diff(["-U3", "--minimal", "--staged", file]); const unstagedDiff = await git.diff(["-U3", "--minimal", file]); const diff = stagedDiff + unstagedDiff; tempModified.push(` filename: ${file} diff changes: ${diff} `); } catch (error) { logger.warn(import_picocolors3.yellow(`[COMMIT] Failed to get diff for ${file}: ${error.message}`)); } } logger.info(import_picocolors3.yellow("[COMMIT] Generating commit message with AI...")); const confirmAiRequest = await confirm("Send changes to AI for commit message suggestion?"); if (!confirmAiRequest) { logger.info(import_picocolors3.yellow("[COMMIT] AI message generation cancelled")); process2.exit(0); } if (!tempModified || tempModified.length === 0) { logger.error(import_picocolors3.red("[COMMIT] No diffs available for AI analysis.")); return; } const commitPrompt = ` Provide a better multi-line commit message with summary and bullet points for all changes following the ## 1. Commit Message format in the prompt. Format your response as a JSON object with structure: { "commitMessage": "type(scope): summary of changes detailed explanation of changes... bullet points of changes" } Git diff changes are as follows: ${tempModified.join("")} `; logger.info(import_picocolors3.green("[COMMIT] Sending changes to LLM for commit suggestion...")); const res = await generateCompletion(provider, { model, logRequest: globalLogRequest, prompt: commitPrompt }); let commitData; try { commitData = JSON.parse(res.text); if (!commitData.commitMessage) { logger.error(import_picocolors3.red("[COMMIT] No commit message found in LLM response")); logger.debug("[COMMIT] Raw response:", res); throw new Error("Invalid LLM response format"); } logger.info(import_picocolors3.green("[COMMIT] Got commit suggestion:")); logger.info(` --------------------------- Commit Message: ${commitData.commitMessage} --------------------------- `); const commitConfirm = await confirm("Proceed with this commit?"); if (commitConfirm) { logger.info(import_picocolors3.yellow("[COMMIT] Adding all changes to git...")); try { await git.add("."); } catch (error) { logger.error(import_picocolors3.red(`[COMMIT] Failed to add changes to git staging area: ${error.message}`)); throw new Error("Failed to stage changes"); } logger.info(import_picocolors3.yellow("[COMMIT] Creating commit with the suggested message...")); try { const commitResult = await git.commit(commitData.commitMessage); if (commitResult.commit) { await markCommitAsCreatedByTool(git, commitResult.commit); commitsCreatedInSession = true; } } catch (error) { logger.error(import_picocolors3.red(`[COMMIT] Failed to create commit: ${error.message}`)); throw new Error("Commit creation failed"); } logger.success(import_picocolors3.green("[COMMIT] Changes committed successfully!")); } else { logger.info(import_picocolors3.yellow("[COMMIT] Commit cancelled")); process2.exit(0); } } catch (e) { logger.debug("[COMMIT] Raw response:", res); logger.error(import_picocolors3.red(`[COMMIT] Failed to parse LLM response as JSON: ${e.message}`)); process2.exit(0); } return commitData; } async function markCommitAsCreatedByTool(git, commitHash) { if (!commitHash) { logger.debug("No commit hash provided to markCommitAsCreatedByTool."); return; } try { logger.debug(`Marking commit ${commitHash} as created by PR Agent`); await git.raw(["notes", "--ref", PR_AGENT_NOTE_NAMESPACE, "add", "-m", PR_AGENT_NOTE_MESSAGE, commitHash]); logger.debug(`Successfully marked commit ${commitHash}`); } catch (error) { logger.debug(`Failed to mark commit with git notes: ${error.message}`); } } async function isCommitCreatedByTool(git, commitHash) { if (!commitHash) { logger.debug("No commit hash provided to isCommitCreatedByTool."); return false; } try { const notes = await git.raw(["notes", "--ref", PR_AGENT_NOTE_NAMESPACE, "show", commitHash]).catch(() => ""); return notes.includes(PR_AGENT_NOTE_MESSAGE); } catch (error) { logger.debug(`Failed to check git notes: ${error.message}`); return false; } } async function optimizeCommitMessages(git, upstreamBranch, confirm) { logger.info(import_picocolors3.yellow("[OPTIMIZE] Starting commit message optimization process...")); if (!upstreamBranch) { logger.error(import_picocolors3.red("[OPTIMIZE] Upstream branch is undefined.")); return; } logger.info(import_picocolors3.yellow("[OPTIMIZE] Fetching commits information...")); let commits; try { commits = await git.log({ from: upstreamBranch, to: "HEAD" }); } catch (error) { logger.error(import_picocolors3.red(`[OPTIMIZE] Failed to get commit logs: ${error.message}`)); throw new Error("Could not retrieve commit history"); } if (!commits || !Array.isArray(commits.all) || !commits.all.length) { logger.info(import_picocolors3.yellow("[OPTIMIZE] No commits to optimize")); return; } logger.info(import_picocolors3.green(`[OPTIMIZE] Found ${commits.all.length} commit(s) in the branch`)); logger.info(import_picocolors3.yellow("[OPTIMIZE] Next step: Optimize existing commit messages")); const proceedWithOptimize = await globalConfirm("Would you like to optimize your commit messages?"); if (!proceedWithOptimize) { logger.info(import_picocolors3.yellow("[OPTIMIZE] Skipping commit message optimization"));