llms-txt-generator

A CLI tool and MCP server for generating standardized llms.txt and llms-full.txt documentation files that help AI models better understand a project's structure.
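For quick reference, the built-in help text in this file (see showHelp() below) documents the CLI interface. A typical invocation and a minimal configuration file, taken from that help text (the file name and values are examples, not requirements):

    generate --config llms-txt-generator.yaml --output-dir ./output

    # llms-txt-generator.yaml
    model: gpt-3.5-turbo
    apiKey: your-api-key
    baseURL: https://api.openai.com/v1
    maxTokens: 16384
    outputDir: ./output

If no config file is found at the default path, the CLI falls back to environment variables (OPENAI_API_KEY, OPENAI_API_MODEL, OPENAI_API_BASE).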

#!/usr/bin/env node
"use strict";
/**
 * CLI entry point for LLMs TXT Generator
 * Supports configuration via YAML file and environment variables
 */
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.build = build;
const core_1 = require("../llm/core");
const utils_1 = require("../llm/utils");
const fs = __importStar(require("node:fs/promises"));
const path = __importStar(require("node:path"));
const dotenv = __importStar(require("dotenv"));
// Load environment variables
dotenv.config({
    path: ['.env.local', '.env'],
});
/**
 * Parse command line arguments
 */
function parseArgs() {
    const args = process.argv.slice(2);
    const result = {};
    for (let i = 0; i < args.length; i++) {
        const arg = args[i];
        if (arg === '--config' && i + 1 < args.length) {
            result.configPath = args[i + 1];
            i++;
        }
        else if (arg === '--output-dir' && i + 1 < args.length) {
            result.outputDir = args[i + 1];
            i++;
        }
        else if (arg === '--help' || arg === '-h') {
            result.help = true;
        }
    }
    return result;
}
/**
 * Simple YAML parser for basic key-value pairs
 * Note: This is a minimal implementation for basic YAML structures
 */
function parseYaml(content) {
    const config = {};
    const lines = content.split('\n');
    for (const line of lines) {
        const trimmed = line.trim();
        if (!trimmed || trimmed.startsWith('#'))
            continue;
        const colonIndex = trimmed.indexOf(':');
        if (colonIndex === -1)
            continue;
        const key = trimmed.substring(0, colonIndex).trim();
        const value = trimmed.substring(colonIndex + 1).trim();
        if (!value)
            continue;
        // Parse different value types
        if (value === 'true' || value === 'false') {
            config[key] = value === 'true';
        }
        else if (!isNaN(Number(value))) {
            config[key] = Number(value);
        }
        else if (value.startsWith('"') && value.endsWith('"')) {
            config[key] = value.slice(1, -1);
        }
        else if (value.startsWith("'") && value.endsWith("'")) {
            config[key] = value.slice(1, -1);
        }
        else {
            config[key] = value;
        }
    }
    return config;
}
/**
 * Load configuration from YAML file
 */
async function loadConfig(configPath) {
    try {
        const content = await fs.readFile(configPath, 'utf-8');
        return parseYaml(content);
    }
    catch (error) {
        throw new Error(`Failed to load config file '${configPath}': ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
}
/**
 * Create LLM configuration from YAML config and environment variables
 */
function createLLMConfig(yamlConfig) {
    return {
        model: yamlConfig.model || process.env.OPENAI_API_MODEL || 'gpt-3.5-turbo',
        apiKey: yamlConfig.apiKey || process.env.OPENAI_API_KEY,
        baseURL: yamlConfig.baseURL || process.env.OPENAI_API_BASE || 'https://api.openai.com/v1',
        maxTokens: yamlConfig.maxTokens || 1024 * 16,
        toolsType: yamlConfig.toolsType || 'function_call',
        azureOpenAI: yamlConfig.azureOpenAI || (process.env.AZURE_OPENAI_API_DEVELOPMENT ? {
            apiVersion: process.env.AZURE_OPENAI_API_VERSION || '2024-02-15-preview',
            deployment: process.env.AZURE_OPENAI_API_DEVELOPMENT,
        } : undefined),
    };
}
/**
 * Write content to file with proper directory creation
 */
async function writeFile(filePath, content) {
    const dir = path.dirname(filePath);
    await fs.mkdir(dir, { recursive: true });
    await fs.writeFile(filePath, content, 'utf-8');
}
/**
 * Generate llms.txt and llms-full.txt files
 */
async function generateFiles(llm, outputDir) {
    console.log('šŸš€ Starting LLMs TXT generation...');
    try {
        // Load the generation prompt
        const prompt = await (0, utils_1.loadPrompt)('generate');
        // Create the chat stream
        const emitter = await llm.chatStream([
            { role: 'system', content: prompt },
            { role: 'user', content: 'Create llms.txt and llms-full.txt of this project for me' }
        ]);
        let fullResponse = '';
        // Handle streaming response
        emitter.on('data', (chunk) => {
            process.stdout.write(chunk);
            fullResponse += chunk;
        });
        emitter.on('end', async () => {
            console.log('\n\nāœ… Generation completed!');
            try {
                // Copy generated files from .llms directory to output directory
                const sourceLlmsTxtPath = path.join(process.cwd(), '.llms', 'llms.txt');
                const sourceLlmsFullTxtPath = path.join(process.cwd(), '.llms', 'llms-full.txt');
                const targetLlmsTxtPath = path.join(outputDir, 'llms.txt');
                const targetLlmsFullTxtPath = path.join(outputDir, 'llms-full.txt');
                try {
                    // Check if source files exist and copy them
                    const fs = await Promise.resolve().then(() => __importStar(require('fs/promises')));
                    if (await fs.access(sourceLlmsTxtPath).then(() => true).catch(() => false)) {
                        const llmsTxtContent = await fs.readFile(sourceLlmsTxtPath, 'utf-8');
                        await writeFile(targetLlmsTxtPath, llmsTxtContent);
                        console.log(`šŸ“„ Created: ${targetLlmsTxtPath}`);
                    }
                    else {
                        console.warn('āš ļø Could not find llms.txt in .llms directory');
                    }
                    if (await fs.access(sourceLlmsFullTxtPath).then(() => true).catch(() => false)) {
                        const llmsFullTxtContent = await fs.readFile(sourceLlmsFullTxtPath, 'utf-8');
                        await writeFile(targetLlmsFullTxtPath, llmsFullTxtContent);
                        console.log(`šŸ“„ Created: ${targetLlmsFullTxtPath}`);
                    }
                    else {
                        console.warn('āš ļø Could not find llms-full.txt in .llms directory');
                    }
                }
                catch (copyError) {
                    console.error('āŒ Error copying files from .llms directory:', copyError instanceof Error ? copyError.message : 'Unknown error');
                }
                console.log('\nšŸŽ‰ All files generated successfully!');
                process.exit(0);
            }
            catch (error) {
                console.error('āŒ Error writing files:', error instanceof Error ? error.message : 'Unknown error');
                process.exit(1);
            }
        });
        emitter.on('error', (error) => {
            console.error('āŒ Generation error:', error);
            process.exit(1);
        });
    }
    catch (error) {
        console.error('āŒ Failed to start generation:', error instanceof Error ? error.message : 'Unknown error');
        process.exit(1);
    }
}
/**
 * Show help information
 */
function showHelp() {
    console.log(`
LLMs TXT Generator CLI

Usage: generate [options]

Options:
  --config <path>      Path to YAML configuration file (default: llms-txt-generator.yaml)
  --output-dir <dir>   Output directory for generated files (default: current directory)
  --help, -h           Show this help message

Example:
  generate --config llms-txt-generator.yaml --output-dir ./output

Configuration file format (YAML):
  model: gpt-3.5-turbo
  apiKey: your-api-key
  baseURL: https://api.openai.com/v1
  maxTokens: 16384
  temperature: 0.7
  outputDir: ./output
`);
}
/**
 * Main CLI function
 */
async function build() {
    const args = parseArgs();
    if (args.help) {
        showHelp();
        return;
    }
    const configPath = args.configPath || 'llms-txt-generator.yaml';
    const outputDir = args.outputDir || process.cwd();
    try {
        // Load configuration
        let yamlConfig = {};
        try {
            yamlConfig = await loadConfig(configPath);
            console.log(`šŸ“‹ Loaded configuration from: ${configPath}`);
        }
        catch (error) {
            if (args.configPath) {
                // If config path was explicitly provided, fail
                console.error(`āŒ ${error instanceof Error ? error.message : 'Unknown error'}`);
                process.exit(1);
            }
            else {
                // If using default config path, continue with environment variables
                console.log('šŸ“‹ No configuration file found, using environment variables');
            }
        }
        // Override output directory if specified in args
        if (args.outputDir) {
            yamlConfig.outputDir = args.outputDir;
        }
        const finalOutputDir = yamlConfig.outputDir || outputDir;
        // Create LLM configuration
        const llmConfig = createLLMConfig(yamlConfig);
        // Validate required configuration
        if (!llmConfig.apiKey) {
            console.error('āŒ API key is required. Set OPENAI_API_KEY environment variable or provide it in config file.');
            process.exit(1);
        }
        console.log(`šŸ¤– Using model: ${llmConfig.model}`);
        console.log(`šŸ“ Output directory: ${finalOutputDir}`);
        // Create LLM instance
        const llm = new core_1.LLM(llmConfig);
        // Generate files
        await generateFiles(llm, finalOutputDir);
    }
    catch (error) {
        console.error('āŒ CLI error:', error instanceof Error ? error.message : 'Unknown error');
        process.exit(1);
    }
}
//# sourceMappingURL=build.js.map
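The file's only export is build(). A minimal sketch of invoking it programmatically, assuming the package exposes this compiled module as its entry point (the require path below is a guess, not documented here):

    // Hypothetical: run the same flow as the CLI from another Node script.
    const { build } = require('llms-txt-generator'); // entry path is an assumption
    build().catch((err) => {
        console.error(err);
        process.exit(1);
    });

Note that build() reads process.argv directly and calls process.exit() on both success and failure, so it is designed for CLI use rather than embedding; the catch above only covers setup errors thrown before that.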