
contextcode-cli

AI-assisted CLI that indexes repositories, generates architecture documentation, and produces executable task plans for LLM agents

1,417 lines (1,392 loc) 112 kB
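Before the bundled source below, here is a minimal TypeScript sketch of how its core pieces fit together: `indexRepo` scans the repository, `renderContextMarkdown` turns the result into a deterministic context.md, and `loadProvider`/`generateContextWithAI` produce an AI-written variant. This is an illustration only: these helpers live inside the bundle (packages/core and packages/providers), the import specifier is an assumption rather than a known public entry point, and the built-in "stub" provider returns placeholder JSON rather than real model output.

```ts
// Sketch only: assumes the bundle's internal helpers are importable from the
// package root; in the CLI this flow is driven by `contextcode init`.
import { indexRepo, renderContextMarkdown, loadProvider, generateContextWithAI } from "contextcode-cli";

async function buildContextDocs(cwd: string) {
  // Scan package.json, workspace packages, scripts, and sample files.
  const index = await indexRepo(cwd);

  // Deterministic context.md content; no model call involved.
  const templateMarkdown = renderContextMarkdown(index, { repoName: index.packageJson?.name });

  // "stub" is the built-in offline provider; swap in "anthropic" or "gemini"
  // after configuring credentials with `contextcode auth login`.
  const provider = await loadProvider("stub");
  const ai = await generateContextWithAI(provider, index, { maxTokens: 2048, temperature: 0.3 });

  return { templateMarkdown, aiMarkdown: ai.markdown, usage: ai.usage };
}

buildContextDocs(process.cwd()).then(({ templateMarkdown }) => {
  console.log(templateMarkdown.slice(0, 400));
});
```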
// src/index.ts import process2 from "process"; import { createRequire } from "module"; // src/commands/init.ts import path11 from "path"; // src/utils/args.ts var ArgError = class extends Error { }; function toCamelCase(name) { return name.replace(/-([a-z])/g, (_, ch) => ch.toUpperCase()); } function parseArgs(argv, definitions) { const longLookup = /* @__PURE__ */ new Map(); const shortLookup = /* @__PURE__ */ new Map(); for (const def of definitions) { longLookup.set(`--${def.name}`, def); if (def.alias) { shortLookup.set(`-${def.alias}`, def); } } const flags = {}; const positionals = []; const assignValue = (def, raw) => { const key = toCamelCase(def.name); if (def.multiple) { const existing = flags[key] ?? []; if (typeof raw === "boolean") { throw new ArgError(`Flag --${def.name} expects a value.`); } flags[key] = [...existing, raw]; return; } flags[key] = raw; }; const getDefinition = (token) => { if (longLookup.has(token)) return longLookup.get(token); if (shortLookup.has(token)) return shortLookup.get(token); return null; }; for (let i = 0; i < argv.length; i++) { const token = argv[i]; if (token === "--") { positionals.push(...argv.slice(i + 1)); break; } if (!token.startsWith("-")) { positionals.push(token); continue; } const [flagToken, inlineValue] = token.includes("=") ? token.split(/=(.+)/) : [token, void 0]; const def = getDefinition(flagToken); if (!def) { throw new ArgError(`Unknown option: ${flagToken}`); } if (def.type === "boolean") { if (inlineValue !== void 0) { assignValue(def, !/^false$/i.test(inlineValue)); } else { assignValue(def, true); } continue; } const nextValue = inlineValue ?? argv[++i]; if (!nextValue) { throw new ArgError(`Option ${flagToken} expects a value.`); } assignValue(def, nextValue); } return { flags, positionals }; } // src/utils/git.ts import { spawnSync } from "child_process"; function isGitRepository(cwd) { try { const res = spawnSync("git", ["rev-parse", "--is-inside-work-tree"], { cwd, stdio: "ignore" }); return res.status === 0; } catch { return false; } } // src/shared/indexing.ts import fs5 from "fs/promises"; import path8 from "path"; import fsExtra2 from "fs-extra"; // packages/core/src/indexer.ts import fs from "fs/promises"; import path from "path"; import { globby } from "globby"; var DEFAULT_IGNORE = ["**/node_modules/**", "**/dist/**", "**/.git/**", "**/.next/**", "**/.turbo/**", "**/.cache/**"]; var WORKSPACE_PATTERNS = [ "packages/*/package.json", "apps/*/package.json", "services/*/package.json", "extensions/*/package.json", "cli/*/package.json" ]; var MAX_WORKSPACE_PACKAGES = 12; var MAX_SCRIPTS = 12; async function readJsonSafe(p) { try { const txt = await fs.readFile(p, "utf8"); return JSON.parse(txt); } catch { return null; } } async function indexRepo(cwd) { const pkgPath = path.join(cwd, "package.json"); const packageJson = await readJsonSafe(pkgPath); const patterns = [ "package.json", "tsconfig.json", "prisma/schema.prisma", "next.config.*", "Dockerfile", "src/**/*.{ts,tsx,js,jsx}", "README.md" ]; const paths = await globby(patterns, { cwd, gitignore: true, ignore: DEFAULT_IGNORE }); const sampleFiles = await Promise.all( paths.slice(0, 30).map(async (p) => { const full = path.join(cwd, p); try { const txt = await fs.readFile(full, "utf8"); const lines = txt.split("\n").slice(0, 80).join("\n"); return { path: p, excerpt: lines }; } catch { return { path: p, excerpt: "" }; } }) ); const detectedStack = []; const deps = packageJson?.dependencies ?? {}; const devDeps = packageJson?.devDependencies ?? 
{}; const allDeps = { ...deps, ...devDeps }; const heuristics = [ ["next", "Next.js"], ["react", "React"], ["express", "Express"], ["fastify", "Fastify"], ["prisma", "Prisma"], ["typeorm", "TypeORM"] ]; for (const [k, label] of heuristics) { if (allDeps[k]) detectedStack.push(label); } const workspacePackages = await discoverWorkspacePackages(cwd); const rootScripts = extractScripts(packageJson?.scripts ?? {}); const importantPaths = deriveImportantPaths(sampleFiles); return { packageJson, detectedStack, sampleFiles, workspacePackages, rootScripts, importantPaths, scannedFileCount: paths.length }; } async function discoverWorkspacePackages(cwd) { const packageGlobs = await globby(WORKSPACE_PATTERNS, { cwd, gitignore: true, ignore: DEFAULT_IGNORE }); const seen = /* @__PURE__ */ new Set(); const workspaces = []; for (const rel of packageGlobs) { const dir = path.dirname(rel); if (!dir || dir === "." || seen.has(dir)) continue; seen.add(dir); if (workspaces.length >= MAX_WORKSPACE_PACKAGES) break; const abs = path.join(cwd, rel); try { const data = await readJsonSafe(abs); if (!data?.name) continue; workspaces.push({ name: data.name, relativeDir: dir, description: data.description, scripts: extractScripts(data.scripts ?? {}), dependencies: Object.keys(data.dependencies ?? {}), devDependencies: Object.keys(data.devDependencies ?? {}), keywords: Array.isArray(data.keywords) ? data.keywords.slice(0, 12) : [] }); } catch { } } return workspaces; } function extractScripts(scripts) { return Object.entries(scripts).slice(0, MAX_SCRIPTS).map(([name, command]) => ({ name, command })); } function deriveImportantPaths(sampleFiles) { const paths = /* @__PURE__ */ new Set(); for (const file of sampleFiles) { const dir = path.dirname(file.path); if (dir && dir !== ".") paths.add(dir); if (paths.size >= 20) break; } return Array.from(paths); } // packages/core/src/context/context-template.ts import path2 from "path"; var MAX_PACKAGE_ROWS = 8; var MAX_COMMAND_ROWS = 8; var MAX_PACKAGE_COMMANDS = 3; var MAX_DIR_ROWS = 10; function renderContextMarkdown(index, options = {}) { const repoName = options.repoName ?? index.packageJson?.name ?? "this repository"; const lines = []; lines.push("# context.md", ""); lines.push( `This guide helps any AI assistant (Claude, GitHub Copilot, Cursor, etc.) work effectively inside **${repoName}** without scanning the entire codebase.` ); lines.push(""); lines.push("## Architecture", ""); const architectureRows = buildArchitectureRows(index.workspacePackages); if (architectureRows.length) { lines.push(...architectureRows); } else { lines.push("- Monorepo detected. Refer to packages/ for individual services and CLIs."); } lines.push(""); lines.push("## Detected Stack", ""); if (index.detectedStack.length) { lines.push(`- ${index.detectedStack.join(", ")}`); } else { lines.push("- Stack heuristics unavailable. Inspect package.json dependencies for details."); } lines.push(""); lines.push("## Development Commands", ""); const rootCommands = buildCommandBlock(index.rootScripts, "Root workspace (run from repo root)"); if (rootCommands.length) { lines.push(...rootCommands); lines.push(""); } const packageCommands = buildPackageCommandBlocks(index.workspacePackages); if (packageCommands.length) { lines.push(...packageCommands); } if (!rootCommands.length && !packageCommands.length) { lines.push("- No scripts found. 
Add npm/pnpm scripts to share common workflows."); } lines.push(""); lines.push("## Key Directories", ""); const dirRows = buildDirectoryRows(index.importantPaths); if (dirRows.length) { lines.push(...dirRows); } else { lines.push("- Refer to src/ and packages/ for source files."); } lines.push(""); lines.push("## Working With This Repo", ""); lines.push( "1. Use the commands above to set up dev servers, run tests, or generate builds.", "2. When asking an AI assistant for help, cite the package path (for example `packages/tui` or `src/commands/auth.ts`).", "3. If the assistant needs implementation details, open only the referenced files instead of the whole repo to stay within token limits.", "4. After large refactors, rerun `contextcode init` so this file stays accurate." ); lines.push(""); lines.push("## Search Tips", ""); lines.push( "- Start with `git grep '<keyword>'` or `rg '<keyword>'` scoped to the directories listed above.", "- Prefer smaller files or specific packages before loading monolithic files.", "- Mention file paths and line numbers when giving instructions to another AI agent." ); return lines.join("\n"); } function buildArchitectureRows(packages) { if (!packages.length) return []; return packages.slice(0, MAX_PACKAGE_ROWS).map((pkg2) => `- **${pkg2.name}** (${pkg2.relativeDir}) \u2014 ${describePackage(pkg2)}`); } function describePackage(pkg2) { const tags = detectPackageTags(pkg2); const snippets = []; if (pkg2.description) snippets.push(pkg2.description.trim()); if (tags.length) snippets.push(tags.join(", ")); if (!snippets.length) snippets.push("workspace package"); return clamp(snippets.join(" \u2014 "), 220); } function detectPackageTags(pkg2) { const deps = new Set([...pkg2.dependencies, ...pkg2.devDependencies].map((d) => d.toLowerCase())); const tags = []; const tagMap = [ ["next", "Next.js frontend"], ["react", "React UI"], ["ink", "Ink terminal UI"], ["nest", "NestJS backend"], ["express", "Express server"], ["fastify", "Fastify server"], ["prisma", "Prisma ORM"], ["typeorm", "TypeORM"], ["zod", "Zod validation"], ["tsup", "Bundled with tsup"], ["tsx", "TSX runtime"], ["vitest", "Vitest"], ["jest", "Jest tests"], ["redux", "Redux state"], ["tanstack", "TanStack Query"] ]; for (const [needle, label] of tagMap) { if (Array.from(deps).some((dep) => dep.includes(needle))) { tags.push(label); } } return tags.slice(0, 3); } function buildCommandBlock(scripts, heading) { if (!scripts.length) return []; const lines = []; lines.push(`### ${heading}`); lines.push("```bash"); scripts.slice(0, MAX_COMMAND_ROWS).forEach((script) => { lines.push(`# ${script.command}`); lines.push(`pnpm run ${script.name}`); lines.push(""); }); lines.push("```"); return lines; } function buildPackageCommandBlocks(packages) { const rows = []; packages.forEach((pkg2) => { if (!pkg2.scripts.length) return; rows.push(`### ${pkg2.name} (${pkg2.relativeDir})`); rows.push("```bash"); pkg2.scripts.slice(0, MAX_PACKAGE_COMMANDS).forEach((script) => { const suggestion = `pnpm --filter ${pkg2.name} ${script.name}`; rows.push(`# ${script.command}`); rows.push(suggestion); }); rows.push("```"); rows.push(""); }); return rows; } function buildDirectoryRows(paths) { if (!paths.length) return []; return paths.slice(0, MAX_DIR_ROWS).map((p) => `- \`${normalizePath(p)}\``); } function normalizePath(p) { return p.split(path2.sep).join("/"); } function clamp(text, max) { return text.length <= max ? 
text : `${text.slice(0, max - 3)}...`; } // packages/core/src/context/context-companion-docs.ts var MAX_FEATURE_PACKAGES = 6; var MAX_SAMPLE_FILES = 6; var MAX_ARCH_PACKAGES = 10; var MAX_DEP_ROWS = 12; var MAX_PACKAGE_SCRIPTS = 3; function renderFeaturesGuide(index, options = {}) { const repoName = resolveRepoName(index, options); const lines = []; lines.push(`# Feature Overview - ${repoName}`); lines.push("", "Use this document to quickly convey what the product offers and where each capability lives inside the repo.", ""); lines.push("## Workspace Highlights", ""); const featurePackages = index.workspacePackages.slice(0, MAX_FEATURE_PACKAGES); if (featurePackages.length) { featurePackages.forEach((pkg2) => { lines.push(`### ${pkg2.name} (${pkg2.relativeDir})`); if (pkg2.description) { lines.push(pkg2.description.trim()); } const focus = describePackageFocus(pkg2); if (focus) { lines.push(`- Focus: ${focus}`); } const scripts = summarizeScripts(pkg2.scripts, MAX_PACKAGE_SCRIPTS); if (scripts.length) { lines.push(`- Key scripts: ${scripts.join(", ")}`); } const deps = summarizeDependencies(pkg2.dependencies); if (deps.length) { lines.push(`- Dependencies: ${deps.join(", ")}`); } if (pkg2.keywords.length) { lines.push(`- Keywords: ${pkg2.keywords.slice(0, 6).join(", ")}`); } lines.push(""); }); } else { lines.push("- No workspace packages detected. Features live directly under src/.", ""); } lines.push("## Capabilities & Signals", ""); const capabilityLines = buildCapabilityLines(index); if (capabilityLines.length) { lines.push(...capabilityLines, ""); } else { lines.push("- Capabilities inferred after dependencies are installed.", ""); } lines.push("## Sample Files To Explore", ""); const sampleFiles = index.sampleFiles.slice(0, MAX_SAMPLE_FILES); if (sampleFiles.length) { sampleFiles.forEach((file) => { lines.push(`- \`${file.path}\``); }); lines.push(""); } else { lines.push("- Run `contextcode init` after adding representative files.", ""); } lines.push("## Suggested Next Experiments", ""); lines.push( "1. Pair an AI assistant with the package summaries above to draft feature briefs.", "2. Use the sample files list as seed context when exploring unfamiliar code.", "3. Keep this document updated whenever a new package or capability ships." ); return lines.join("\n"); } function renderArchitectureGuide(index, options = {}) { const repoName = resolveRepoName(index, options); const lines = []; lines.push(`# Architecture - ${repoName}`); lines.push("", "High-level map of packages, shared tooling, and directory conventions.", ""); lines.push("## Monorepo Layout", ""); const packages = index.workspacePackages.slice(0, MAX_ARCH_PACKAGES); if (packages.length) { packages.forEach((pkg2) => { const summary = describePackageFocus(pkg2) || "workspace package"; lines.push(`- **${pkg2.name}** (${pkg2.relativeDir}) - ${summary}`); }); lines.push(""); } else { lines.push("- Single-package repo. 
Inspect src/ for feature areas.", ""); } lines.push("## Shared Tooling", ""); const toolingLines = buildToolingLines(index); if (toolingLines.length) { lines.push(...toolingLines, ""); } else { lines.push("- Tooling inferred from package.json dependencies.", ""); } lines.push("## Directory Map", ""); if (index.importantPaths.length) { index.importantPaths.slice(0, 12).forEach((dir) => { lines.push(`- \`${dir}\``); }); lines.push(""); } else { lines.push("- Additional directory hints will appear after more files are indexed.", ""); } lines.push("## Execution Paths", ""); const commandLines = buildCommandLines(index.rootScripts); if (commandLines.length) { lines.push(...commandLines); } else { lines.push("- Define npm/pnpm scripts to document how to run the system."); } return lines.join("\n"); } function renderImplementationGuide(index, options = {}) { const repoName = resolveRepoName(index, options); const manager = detectPackageManager(index.packageJson?.packageManager); const installCmd = `${manager.install}`; const runScript = (script) => manager.run(script); const lines = []; lines.push(`# Implementation Guide - ${repoName}`); lines.push("", "Checklist for setting up, running, and verifying changes locally.", ""); lines.push("## Prerequisites", ""); if (index.packageJson?.engines?.node) { lines.push(`- Node.js ${index.packageJson.engines.node}`); } lines.push(`- Package manager: ${manager.label}`); lines.push("- Clone the repository and ensure dependencies are installed.", ""); lines.push("## Setup", ""); lines.push("1. Install dependencies:"); lines.push(" ```bash", ` ${installCmd}`, " ```"); const buildScript = findScript(index.rootScripts, "build"); if (buildScript) { lines.push("2. Build once to warm caches:"); lines.push(" ```bash", ` ${runScript(buildScript.name)}`, " ```"); } const devScript = findScript(index.rootScripts, "dev"); if (devScript) { lines.push("3. Start the development server:"); lines.push(" ```bash", ` ${runScript(devScript.name)}`, " ```"); } lines.push(""); lines.push("## Common Commands", ""); const commands = buildCommandLines(index.rootScripts); if (commands.length) { lines.push(...commands, ""); } else { lines.push("- Define scripts in package.json to document workflows.", ""); } lines.push("## Package Workflows", ""); if (index.workspacePackages.length) { index.workspacePackages.slice(0, 6).forEach((pkg2) => { const scripts = summarizeScripts(pkg2.scripts, 2); if (!scripts.length) return; lines.push(`- ${pkg2.name}: ${scripts.map((script) => manager.filter(pkg2.name, script)).join(", ")}`); }); lines.push(""); } else { lines.push("- Run commands from the repo root; no nested workspaces detected.", ""); } lines.push("## Verification", ""); const testScript = findScript(index.rootScripts, "test"); if (testScript) { lines.push("- Run the automated test suite before committing:"); lines.push(" ```bash", ` ${runScript(testScript.name)}`, " ```"); } else { lines.push("- Add a `test` script to codify verification steps."); } lines.push("\n## Collaboration Tips", ""); lines.push( "- Re-run `contextcode init` after major refactors to refresh all docs.", "- Reference the generated features and architecture guides when opening PRs.", "- Keep scripts and package descriptions up to date so AI assistants stay accurate." ); return lines.join("\n"); } function resolveRepoName(index, options) { return options.repoName ?? index.packageJson?.name ?? 
"this repository"; } function describePackageFocus(pkg2) { const tags = detectPackageTags2(pkg2); if (pkg2.description && tags.length) { return `${pkg2.description.trim()} - ${tags.join(", ")}`; } if (pkg2.description) return pkg2.description.trim(); if (tags.length) return tags.join(", "); return ""; } function detectPackageTags2(pkg2) { const deps = new Set([...pkg2.dependencies, ...pkg2.devDependencies].map((d) => d.toLowerCase())); const tags = []; const tagMap = [ ["next", "Next.js"], ["react", "React"], ["ink", "Ink TUI"], ["express", "Express"], ["fastify", "Fastify"], ["prisma", "Prisma"], ["typeorm", "TypeORM"], ["vitest", "Vitest"], ["jest", "Jest"], ["tsup", "tsup build"], ["tsx", "tsx runtime"], ["zod", "Zod validation"] ]; for (const [needle, label] of tagMap) { if (Array.from(deps).some((dep) => dep.includes(needle))) { tags.push(label); } } return tags.slice(0, 3); } function summarizeScripts(scripts, limit) { return scripts.slice(0, limit).map((script) => script.name); } function summarizeDependencies(deps, limit = 5) { return deps.slice(0, limit); } function buildCapabilityLines(index) { const lines = []; if (index.detectedStack.length) { lines.push(`- Stack: ${index.detectedStack.join(", ")}`); } const keywords = collectKeywords(index.workspacePackages); if (keywords.length) { lines.push(`- Keywords: ${keywords.join(", ")}`); } return lines; } function collectKeywords(packages) { const set = /* @__PURE__ */ new Set(); packages.forEach((pkg2) => { pkg2.keywords.slice(0, 8).forEach((kw) => set.add(kw)); }); return Array.from(set).slice(0, 12); } function buildToolingLines(index) { const lines = []; if (index.detectedStack.length) { lines.push(`- Application stack: ${index.detectedStack.join(", ")}`); } const deps = Object.keys(index.packageJson?.dependencies ?? {}); const devDeps = Object.keys(index.packageJson?.devDependencies ?? {}); const combined = [...deps, ...devDeps]; if (combined.length) { lines.push(`- Shared libraries: ${combined.slice(0, MAX_DEP_ROWS).join(", ")}`); } const scripts = summarizeScripts(index.rootScripts, 4); if (scripts.length) { lines.push(`- Root scripts: ${scripts.join(", ")}`); } return lines; } function buildCommandLines(scripts) { if (!scripts.length) return []; const lines = []; lines.push("```bash"); scripts.slice(0, MAX_DEP_ROWS).forEach((script) => { lines.push(`# ${script.command}`); lines.push(`pnpm run ${script.name}`); lines.push(""); }); lines.push("```"); return lines; } function detectPackageManager(value) { if (value?.startsWith("bun")) { return createManager("bun", "bun install", (script) => `bun run ${script}`, (pkg2, script) => `bun --filter ${pkg2} ${script}`); } if (value?.startsWith("yarn")) { return createManager("yarn", "yarn", (script) => `yarn ${script}`, (pkg2, script) => `yarn workspace ${pkg2} ${script}`); } if (value?.startsWith("npm")) { return createManager("npm", "npm install", (script) => `npm run ${script}`, (pkg2, script) => `npm run ${script} --workspace ${pkg2}`); } return createManager("pnpm", "pnpm install", (script) => `pnpm run ${script}`, (pkg2, script) => `pnpm --filter ${pkg2} ${script}`); } function createManager(label, install, run, filter) { return { label, install, run, filter }; } function findScript(scripts, name) { return scripts.find((script) => script.name === name); } // packages/core/src/context/context-ai-generator.ts var MAX_SAMPLE_EXCERPTS = 12; var MAX_WORKSPACE_DETAILS = 6; async function generateContextWithAI(provider, index, options = {}) { const repoName = options.repoName ?? 
index.packageJson?.name ?? "this repository"; const model = options.model ?? "claude-sonnet-4-5"; const systemPrompt = buildSystemPrompt(); const userPrompt = buildUserPrompt(repoName, index); const messages = [ { role: "system", content: systemPrompt }, { role: "user", content: userPrompt } ]; const response = await provider.request({ model, messages, max_tokens: options.maxTokens ?? 4096, temperature: options.temperature ?? 0.3 }); return { markdown: cleanMarkdownResponse(response.text), usage: response.usage }; } function buildSystemPrompt() { return `You are an expert technical writer specializing in developer documentation. Your task is to analyze repository metadata and create a clear, concise context.md file. The context.md serves as an AI-first guide that helps any coding assistant (Claude, GitHub Copilot, Cursor, etc.) quickly understand: - Project architecture and structure - Tech stack and frameworks - Development workflows and commands - Key directories and their purposes - How to work efficiently within the codebase Guidelines: - Be factual and precise. Don't speculate or add information not present in the metadata. - Keep descriptions concise but complete enough to orient any developer or AI assistant. - Focus on actionable information: commands, file paths, architectural patterns. - Use markdown formatting: headings, code blocks, bullet lists. - Target 150-300 lines for the complete guide. - Include a "Working With This Repo" section with best practices for AI assistants.`; } function buildUserPrompt(repoName, index) { const sections = []; sections.push(`# Repository Analysis Request`); sections.push(`Generate a comprehensive context.md for **${repoName}** based on the following metadata: `); sections.push(`## Detected Technologies`); if (index.detectedStack.length) { sections.push(index.detectedStack.map((tech) => `- ${tech}`).join("\n")); } else { sections.push("- Stack not automatically detected. Analyze dependencies below."); } sections.push(""); if (index.packageJson) { sections.push(`## Root Package`); sections.push(`Name: ${index.packageJson.name ?? "(unnamed)"}`); sections.push(`Version: ${index.packageJson.version ?? "0.0.0"}`); if (index.packageJson.description) { sections.push(`Description: ${index.packageJson.description}`); } const deps = Object.keys(index.packageJson.dependencies ?? {}); const devDeps = Object.keys(index.packageJson.devDependencies ?? {}); if (deps.length) { sections.push(` Key Dependencies: ${deps.slice(0, 12).join(", ")}`); } if (devDeps.length) { sections.push(`Dev Dependencies: ${devDeps.slice(0, 8).join(", ")}`); } const scripts = Object.entries(index.packageJson.scripts ?? 
{}).slice(0, 10); if (scripts.length) { sections.push(` Available Scripts:`); scripts.forEach(([name, cmd]) => { sections.push(`- ${name}: ${cmd}`); }); } sections.push(""); } if (index.workspacePackages.length) { sections.push(`## Workspace Packages (${index.workspacePackages.length} detected)`); index.workspacePackages.slice(0, MAX_WORKSPACE_DETAILS).forEach((pkg2) => { sections.push(` ### ${pkg2.name} (${pkg2.relativeDir})`); if (pkg2.description) sections.push(pkg2.description); const keyDeps = pkg2.dependencies.slice(0, 6); if (keyDeps.length) { sections.push(`Dependencies: ${keyDeps.join(", ")}`); } if (pkg2.scripts.length) { sections.push(`Scripts: ${pkg2.scripts.slice(0, 3).map((script) => script.name).join(", ")}`); } }); sections.push(""); } if (index.sampleFiles.length) { sections.push(`## Sample Files (${index.sampleFiles.length} discovered)`); sections.push("Below are excerpts from key configuration and source files:\n"); index.sampleFiles.slice(0, MAX_SAMPLE_EXCERPTS).forEach((file) => { sections.push(`### ${file.path}`); sections.push("```"); sections.push(file.excerpt.slice(0, 600)); sections.push("```\n"); }); } if (index.importantPaths.length) { sections.push(`## Key Directories`); sections.push(index.importantPaths.slice(0, 15).map((p) => `- ${p}`).join("\n")); sections.push(""); } sections.push(`## Output Format`); sections.push(`Generate a markdown file with these sections:`); sections.push(`1. **# context.md** - Title and introduction`); sections.push(`2. **## Architecture** - Describe the monorepo/package structure`); sections.push(`3. **## Detected Stack** - List technologies and frameworks`); sections.push(`4. **## Development Commands** - Organized by workspace (root + packages)`); sections.push(`5. **## Key Directories** - File structure overview`); sections.push(`6. **## Working With This Repo** - Best practices for developers and AI assistants`); sections.push(`7. **## Search Tips** - How to efficiently find code and navigate the codebase`); sections.push(""); sections.push(`Provide ONLY the markdown content. 
Do not include explanations, preambles, or commentary outside the markdown.`); return sections.join("\n"); } function cleanMarkdownResponse(text) { let cleaned = text.trim(); if (cleaned.startsWith("```markdown") || cleaned.startsWith("```md")) { cleaned = cleaned.replace(/^```(?:markdown|md)\n/, ""); cleaned = cleaned.replace(/\n```$/, ""); } else if (cleaned.startsWith("```")) { cleaned = cleaned.replace(/^```\n/, ""); cleaned = cleaned.replace(/\n```$/, ""); } return cleaned.trim(); } // packages/core/src/renderer.ts import path3 from "path"; // packages/core/src/scaffold.ts import fs2 from "fs/promises"; import path4 from "path"; import fsExtra from "fs-extra"; var CONTEXT_DIR = ".context"; async function ensureDotContextDir(cwd) { const dir = path4.join(cwd, CONTEXT_DIR); await fsExtra.mkdirp(dir); return dir; } async function createContextScaffold(cwd) { const contextDocsDir = path4.join(cwd, CONTEXT_DIR); const agentLogDir = path4.join(contextDocsDir, ".agent-log"); await fsExtra.mkdirp(agentLogDir); return { contextDocsDir, agentLogDir }; } async function writeJsonFileAtomic(filePath, payload) { const dir = path4.dirname(filePath); await fsExtra.mkdirp(dir); const tmp = path4.join(dir, `.tmp-${process.pid}-${Date.now()}-${Math.random().toString(16).slice(2)}.json`); await fs2.writeFile(tmp, JSON.stringify(payload, null, 2), "utf8"); await fs2.rename(tmp, filePath); } async function writeTextFileAtomic(filePath, contents) { const dir = path4.dirname(filePath); await fsExtra.mkdirp(dir); const tmp = path4.join(dir, `.tmp-${process.pid}-${Date.now()}-${Math.random().toString(16).slice(2)}.md`); await fs2.writeFile(tmp, contents, "utf8"); await fs2.rename(tmp, filePath); } // packages/core/src/tasks.ts import fs3 from "fs/promises"; import path5 from "path"; import { globby as globby2 } from "globby"; import { pathExists } from "fs-extra"; var DEFAULT_TASKS_DIR = path5.join(".context", "tasks"); var MARKDOWN_GLOBS = ["**/*.md", "**/*.mdx"]; var HEADING_REGEX = /^#\s+(.+)$/m; async function getTasks(baseDir = process.cwd()) { const tasksDir = path5.resolve(baseDir, DEFAULT_TASKS_DIR); const hasTasksDir = await pathExists(tasksDir); if (!hasTasksDir) return []; const taskFiles = await globby2(MARKDOWN_GLOBS, { cwd: tasksDir, absolute: true, followSymbolicLinks: false }); const tasks = []; for (const filePath of taskFiles.sort()) { const content = await fs3.readFile(filePath, "utf8"); const relativePath = path5.relative(baseDir, filePath); const headingMatch = content.match(HEADING_REGEX); const label = headingMatch?.[1]?.trim() || path5.relative(tasksDir, filePath); tasks.push({ label, relativePath, absolutePath: filePath, content }); } return tasks; } // packages/providers/src/anthropic.ts import crypto from "crypto"; import readline from "readline/promises"; import { stdin as input, stdout as output } from "process"; // packages/providers/src/provider.ts import path6 from "path"; import { pathToFileURL } from "url"; var providerFactories = /* @__PURE__ */ new Map(); var providerDescriptors = /* @__PURE__ */ new Map(); function registerProviderFactory(name, factory, metadata) { const key = name.trim().toLowerCase(); providerFactories.set(key, factory); const descriptor = { name: key, title: metadata?.title ?? metadata?.name ?? name, description: metadata?.description, supportsInteractiveLogin: metadata?.supportsInteractiveLogin ?? 
Boolean(metadata?.login), login: metadata?.login }; providerDescriptors.set(key, descriptor); } function listRegisteredProviders() { return Array.from(providerDescriptors.values()); } async function loadProvider(providerName, options = {}) { if (!providerName?.trim()) { throw new Error("[ERR_PROVIDER_NAME_MISSING] Provider name is required. Pass --provider or set CONTEXTCODE_PROVIDER."); } const key = providerName.trim().toLowerCase(); if (providerFactories.has(key)) { return providerFactories.get(key)(options); } if (key === "stub") { return createStubProvider(options); } const moduleId = options.modulePath ?? providerName; const specifier = resolveModuleSpecifier(moduleId, options.cwd); try { const imported = await import(specifier); const factory = imported.createProvider || imported.default; if (typeof factory !== "function") { throw new Error(`Module ${moduleId} does not export a createProvider() factory.`); } const provider = await factory(options); assertAiProvider(provider, providerName); return provider; } catch (err) { if (err?.code === "ERR_MODULE_NOT_FOUND" || /Cannot find module/.test(String(err?.message))) { throw new Error(`[ERR_PROVIDER_NOT_FOUND] Provider "${providerName}" could not be resolved. Supply --provider with a resolvable module or register it via registerProviderFactory.`); } throw err; } } function createStubProvider(options = {}) { const fallbackResponse = options.responseText?.trim(); return { name: options.name || "stub", async request({ messages }) { if (fallbackResponse) { return { text: fallbackResponse }; } const mode = detectScenario(messages); if (mode === "feature") { return { text: JSON.stringify(buildFeatureStub(messages)) }; } return { text: JSON.stringify(buildTaskStub(messages)) }; } }; } function detectScenario(messages) { const lastContent = messages[messages.length - 1]?.content || ""; if (/Feature metadata/i.test(lastContent) || /overview/i.test(lastContent) && /slug/i.test(lastContent)) { return "feature"; } return "tasks"; } function buildFeatureStub(messages) { const content = messages[messages.length - 1]?.content || ""; const match = content.match(/- name:\s*(.+)/i); const name = match?.[1]?.trim() || "Sample Feature"; const slug = slugify(name); return { slug, overview: `## Goal - Ship placeholder for ${name} ## Scope - Demonstrate the contextcode CLI stub provider ## Out-of-scope - Production ready planning ## Constraints - Replace stub with a live provider before shipping`, domain_context: `Stubbed context derived from CLI inputs for ${name}.`, requirements: "- Replace stub provider with a real LLM source\n- Validate outputs before committing" }; } function buildTaskStub(messages) { const content = messages[messages.length - 1]?.content || ""; const bulletLines = content.split(/\r?\n/).map((line) => line.trim()).filter((line) => line.startsWith("-")).slice(0, 3); const tasks = (bulletLines.length ? bulletLines : ["- Review PRD inputs", "- Implement contextcode command", "- Verify outputs"]).map((line, idx) => { const sentence = line.replace(/^[-*]\s*/, "").trim() || `Task ${idx + 1}`; const id = slugify(sentence || `task-${idx + 1}`) || `task-${idx + 1}`; return { id, title: sentence.charAt(0).toUpperCase() + sentence.slice(1), objective: `Deliver stub output: ${sentence}`, steps: ["Plan implementation", "Apply changes", "Verify result"], files_hint: ["context-docs/tasks.json"], acceptance_criteria: ["Output saved", "Team reviewed"] }; }); return { summary: "Stub response providing placeholder tasks. 
Configure a real provider for accurate planning.", tasks }; } function slugify(value) { return value.toLowerCase().replace(/[^a-z0-9]+/g, "-").replace(/(^-|-$)+/g, ""); } function resolveModuleSpecifier(moduleId, cwd) { if (moduleId.startsWith(".") || moduleId.startsWith("/")) { const abs = path6.isAbsolute(moduleId) ? moduleId : path6.resolve(cwd || process.cwd(), moduleId); return pathToFileURL(abs).href; } return moduleId; } function assertAiProvider(provider, label) { if (!provider || typeof provider.request !== "function") { throw new Error(`Module for provider ${label} did not return a valid AiProvider.`); } } // packages/providers/src/authMethods.ts var authMethodsRegistry = /* @__PURE__ */ new Map(); function registerAuthMethods(providerId, getMethodsFn) { authMethodsRegistry.set(providerId, getMethodsFn); } async function getProviderAuthMethods(providerId) { const fn = authMethodsRegistry.get(providerId); if (!fn) { return { methods: [] }; } return await fn(); } // packages/providers/src/credentials.ts import { promises as fs4 } from "fs"; import path7 from "path"; import os from "os"; function resolveContextcodeDir() { const override = process.env.CONTEXTCODE_HOME?.trim(); if (override) { return path7.resolve(override); } return path7.join(os.homedir(), ".contextcode"); } function getCredentialsDirectory() { return resolveContextcodeDir(); } function getCredentialsFilePath() { return path7.join(resolveContextcodeDir(), "credentials.json"); } async function readCredentialFile() { try { const raw = await fs4.readFile(getCredentialsFilePath(), "utf8"); const parsed = JSON.parse(raw); if (parsed && Array.isArray(parsed.credentials)) { return { credentials: parsed.credentials.filter( (entry) => typeof entry?.provider === "string" && (typeof entry?.key === "string" || entry?.oauth) ) }; } } catch (error) { if (error?.code === "ENOENT") { return { credentials: [] }; } console.warn(`[contextcode] Failed to read credentials file: ${error instanceof Error ? error.message : error}`); } return { credentials: [] }; } async function writeCredentialFile(file) { await fs4.mkdir(getCredentialsDirectory(), { recursive: true }); await fs4.writeFile(getCredentialsFilePath(), JSON.stringify(file, null, 2), "utf8"); } async function loadCredential(provider) { const file = await readCredentialFile(); return file.credentials.find((entry) => entry.provider === provider) ?? 
null; } async function saveCredential(provider, key) { const file = await readCredentialFile(); const filtered = file.credentials.filter((entry) => entry.provider !== provider); const updated = { credentials: [ ...filtered, { provider, key, date: (/* @__PURE__ */ new Date()).toISOString() } ] }; await writeCredentialFile(updated); } async function saveOAuthCredential(provider, accessToken, refreshToken2, expiresAt) { const file = await readCredentialFile(); const filtered = file.credentials.filter((entry) => entry.provider !== provider); const updated = { credentials: [ ...filtered, { provider, date: (/* @__PURE__ */ new Date()).toISOString(), oauth: { access_token: accessToken, refresh_token: refreshToken2, expires_at: expiresAt } } ] }; await writeCredentialFile(updated); } // packages/providers/src/anthropic.ts var CLIENT_ID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e"; var REDIRECT_URI = "https://console.anthropic.com/oauth/code/callback"; var PROVIDER_ID = "anthropic"; var TOKEN_ENDPOINT = "https://console.anthropic.com/v1/oauth/token"; registerProviderFactory( PROVIDER_ID, async (options) => { const oauth = await resolveAnthropicCredential(options); return createAnthropicProvider(oauth); }, { title: "Anthropic Claude", description: "Use Claude models via Anthropic's OAuth flow", supportsInteractiveLogin: true, login: async () => { await runInteractiveConsoleAuthorization(); } } ); registerAuthMethods(PROVIDER_ID, async () => { return { methods: [ { label: "Claude Pro/Max", authorize: async () => { const { url, verifier } = await authorize("max"); return { url, instructions: "", callback: async (code) => { const credentials = await exchange(code, verifier); if (credentials.type !== "success" || !credentials.access) { throw new Error("Anthropic OAuth authorization failed. Try again."); } await saveOAuthCredential( PROVIDER_ID, credentials.access, credentials.refresh, credentials.expires ); } }; } } ] }; }); async function resolveAnthropicCredential(options = {}) { const stored = await loadCredential(PROVIDER_ID); if (stored?.oauth) { return stored.oauth; } if (options.interactive === false || !process.stdin.isTTY) { throw new Error("[ERR_ANTHROPIC_AUTH] No Anthropic OAuth token found. Run 'contextcode auth login'."); } return await runInteractiveConsoleAuthorization(); } async function runInteractiveConsoleAuthorization() { console.log("Starting OAuth flow for Claude Pro/MAX..."); const { url, verifier } = await authorize("max"); console.log("1. Open the following URL in a browser and complete the login:"); console.log(` ${url}`); console.log("2. Copy the final redirect URL fragment (code#state) and paste it below when prompted.\n"); const code = await promptForInput("Paste authorization code (code#state): "); const credentials = await exchange(code, verifier); if (credentials.type !== "success" || !credentials.access) { throw new Error("Anthropic OAuth authorization failed. 
Try again."); } console.log("Successfully authenticated with Claude Pro/MAX!"); await saveOAuthCredential( PROVIDER_ID, credentials.access, credentials.refresh, credentials.expires ); console.log(`Saved OAuth tokens to ${getCredentialsFilePath()}`); return { access_token: credentials.access, refresh_token: credentials.refresh, expires_at: credentials.expires }; } function createAnthropicProvider(oauth) { let currentToken = oauth.access_token; let expiresAt = oauth.expires_at; async function ensureValidToken() { if (Date.now() < expiresAt - 5 * 60 * 1e3) { return currentToken; } console.log("[anthropic] Refreshing expired OAuth token..."); const refreshed = await refreshToken(oauth.refresh_token); if (refreshed.type !== "success") { throw new Error("Failed to refresh Anthropic OAuth token. Run 'contextcode auth login' again."); } currentToken = refreshed.access; expiresAt = refreshed.expires; await saveOAuthCredential(PROVIDER_ID, currentToken, refreshed.refresh, expiresAt); return currentToken; } return { name: "anthropic", async request({ model, messages, max_tokens, temperature }) { const token = await ensureValidToken(); const body = buildAnthropicBody(model, messages, { max_tokens: max_tokens ?? 1024, temperature: temperature ?? 0.2 }); const response = await fetch("https://api.anthropic.com/v1/messages", { method: "POST", headers: { "Content-Type": "application/json", "Authorization": `Bearer ${token}`, "anthropic-version": "2023-06-01", "anthropic-beta": "oauth-2025-04-20,claude-code-20250219,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14" }, body: JSON.stringify(body) }); if (!response.ok) { const text2 = await response.text(); throw new Error(`Anthropic request failed (${response.status}): ${text2}`); } const json = await response.json(); const text = Array.isArray(json.content) ? json.content.map((entry) => entry?.text ?? "").join("\n").trim() : String(json.content ?? ""); return { text, usage: mapUsage(json.usage) }; } }; } function buildAnthropicBody(model, messages, opts) { const userSystem = messages.filter((msg) => msg.role === "system").map((msg) => msg.content).join("\n\n").trim(); const systemArray = [ { type: "text", text: "You are Claude Code, Anthropic's official CLI for Claude." } ]; if (userSystem) { systemArray.push({ type: "text", text: userSystem }); } const conversation = messages.filter((msg) => msg.role !== "system").map((msg) => ({ role: msg.role === "assistant" ? "assistant" : "user", content: [{ type: "text", text: msg.content }] })); if (conversation.length === 0) { conversation.push({ role: "user", content: [{ type: "text", text: "" }] }); } return { model, system: systemArray, messages: conversation, max_tokens: opts.max_tokens, temperature: opts.temperature }; } function mapUsage(raw) { if (!raw) return void 0; const input4 = typeof raw.input_tokens === "number" ? raw.input_tokens : void 0; const output4 = typeof raw.output_tokens === "number" ? raw.output_tokens : void 0; const total = typeof raw.total_tokens === "number" ? raw.total_tokens : void 0; if (input4 == null && output4 == null && total == null) { return void 0; } return { inputTokens: input4, outputTokens: output4, totalTokens: total ?? ((input4 ?? 0) + (output4 ?? 0) || void 0) }; } async function authorize(mode) { const pkce = generatePkcePair(); const base = mode === "console" ? 
"https://console.anthropic.com" : "https://claude.ai"; const url = new URL("/oauth/authorize", base); url.searchParams.set("code", "true"); url.searchParams.set("client_id", CLIENT_ID); url.searchParams.set("response_type", "code"); url.searchParams.set("redirect_uri", REDIRECT_URI); url.searchParams.set("scope", "org:create_api_key user:profile user:inference"); url.searchParams.set("code_challenge", pkce.challenge); url.searchParams.set("code_challenge_method", "S256"); url.searchParams.set("state", pkce.verifier); return { url: url.toString(), verifier: pkce.verifier }; } async function exchange(code, verifier) { const [authCode, state] = code.split("#"); const result = await fetch(TOKEN_ENDPOINT, { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ code: authCode, state, grant_type: "authorization_code", client_id: CLIENT_ID, redirect_uri: REDIRECT_URI, code_verifier: verifier }) }); if (!result.ok) { return { type: "failed" }; } const json = await result.json(); return { type: "success", refresh: json.refresh_token, access: json.access_token, expires: Date.now() + json.expires_in * 1e3 }; } async function refreshToken(refreshToken2) { const result = await fetch(TOKEN_ENDPOINT, { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ grant_type: "refresh_token", client_id: CLIENT_ID, refresh_token: refreshToken2 }) }); if (!result.ok) { return { type: "failed" }; } const json = await result.json(); return { type: "success", refresh: json.refresh_token, access: json.access_token, expires: Date.now() + json.expires_in * 1e5 }; } async function promptForInput(question) { const rl = readline.createInterface({ input, output }); try { const answer = await rl.question(question); return answer.trim(); } finally { rl.close(); } } function generatePkcePair() { const verifier = base64Url(crypto.randomBytes(32)); const challenge = base64Url(crypto.createHash("sha256").update(verifier).digest()); return { verifier, challenge }; } function base64Url(data) { const buffer = typeof data === "string" ? 
Buffer.from(data) : Buffer.from(data); return buffer.toString("base64").replace(/\+/g, "-").replace(/\//g, "_").replace(/=+$/g, ""); } // packages/providers/src/gemini.ts import readline2 from "readline/promises"; import { stdin as input2, stdout as output2 } from "process"; // packages/types/src/index.ts import { z as z4 } from "zod"; // packages/types/src/gemini.ts import { z } from "zod"; var GeminiConfigSchema = z.object({ apiKey: z.string().min(10, "Gemini API key is required").regex(/^AI[a-zA-Z0-9_-]{20,}$/i, "Gemini API keys start with 'AI' and must be at least 20 characters."), model: z.string().min(1).default("gemini-3-pro-preview"), endpoint: z.string().url().optional().default("https://generativelanguage.googleapis.com/v1beta"), temperature: z.number().min(0).max(2).optional(), maxOutputTokens: z.number().int().positive().optional() }); // packages/types/src/providers.ts import { z as z2 } from "zod"; var ProviderModelSchema = z2.object({ id: z2.string().min(1), label: z2.string().min(1), description: z2.string().optional() }); var ProviderMetadataSchema = z2.object({ id: z2.string().min(1), title: z2.string().min(1), description: z2.string().optional(), defaultModel: z2.string().min(1), models: z2.array(ProviderModelSchema).min(1) }); var ProviderCatalog = [ { id: "anthropic", title: "Anthropic Claude", description: "Claude Sonnet 4.x and 4.5 models via OAuth.", defaultModel: "claude-sonnet-4-5", models: [ { id: "claude-sonnet-4-20250514", label: "Claude Sonnet 4 (May 14 '25)", description: "First Claude Sonnet 4 GA release." }, { id: "claude-sonnet-4-0", label: "Claude Sonnet 4 (latest)", description: "Tracks the rolling Claude Sonnet 4 channel." }, { id: "claude-sonnet-4-5-20250929", label: "Claude Sonnet 4.5 (Sep 29 '25)", description: "Pinned Claude Sonnet 4.5 release." }, { id: "claude-sonnet-4-5", label: "Claude Sonnet 4.5 (latest)", description: "Latest Claude Sonnet 4.5 channel (default)." } ] }, { id: "gemini", title: "Google Gemini", description: "Gemini 2.x and 3.x Flash/Pro models via API key.", defaultModel: "gemini-3-pro-preview", models: [ { id: "gemini-2.0-flash", label: "Gemini 2.0 Flash", description: "Fastest Gemini 2.0 Flash tier." }, { id: "gemini-2.0-flash-lite", label: "Gemini 2.0 Flash Lite", description: "Lower-cost Gemini 2.0 Flash Lite tier." }, { id: "gemini-2.5-flash", label: "Gemini 2.5 Flash", description: "Latest Flash tier on 2.5 stack." }, { id: "gemini-2.5-pro", label: "Gemini 2.5 Pro", description: "Production Gemini 2.5 Pro release." }, { id: "gemini-2.5-pro-preview-05-06", label: "Gemini 2.5 Pro Preview (2025-05-06)", description: "Preview build published May 6, 2025." }, { id: "gemini-2.5-pro-preview-06-05", label: "Gemini 2.5 Pro Preview (2025-06-05)", description: "Preview build published June 5, 2025." }, { id: "gemini-3-pro-preview", label: "Gemini 3 Pro Preview", description: "Latest Gemini 3 Pro preview (default)." } ] } ]; var providerIds = Provi
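The provider registry defined above (`registerProviderFactory` / `loadProvider`) is the bundle's extension point: a provider is any object exposing `request({ model, messages, max_tokens, temperature })` and resolving to `{ text, usage? }`, and unregistered names are treated as module specifiers that must export a `createProvider()` (or default) factory. A minimal sketch of a custom registration follows; the `"echo"` provider, its behaviour, and the import specifier are invented for illustration and are not part of the published package surface.

```ts
// Illustrative only: wires a fake local provider into the registry sketched
// above; a real integration would call an actual model API inside request().
import { registerProviderFactory, loadProvider } from "contextcode-cli";

registerProviderFactory(
  "echo",
  async () => ({
    name: "echo",
    async request({ messages }) {
      // Echo the last message back as if it were model output.
      const last = messages[messages.length - 1]?.content ?? "";
      return { text: `# context.md\n\n${last.slice(0, 200)}` };
    }
  }),
  { title: "Echo (local)", description: "Offline provider for dry runs", supportsInteractiveLogin: false }
);

async function demo() {
  const provider = await loadProvider("echo");
  const reply = await provider.request({
    model: "n/a",
    messages: [{ role: "user", content: "Summarize this repository." }],
    max_tokens: 256
  });
  console.log(reply.text);
}

demo().catch(console.error);
```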