@langwatch/better-agents
CLI for kicking off production-ready agent projects with LangWatch best practices
1,637 lines (1,520 loc) • 56 kB
JavaScript
#!/usr/bin/env node
import { Command } from 'commander';
import * as fs6 from 'fs/promises';
import * as path6 from 'path';
import { dirname, join } from 'path';
import { select, password, input, confirm } from '@inquirer/prompts';
import { exec, spawn, spawnSync } from 'child_process';
import pino from 'pino';
import chalk from 'chalk';
import ora from 'ora';
import { promisify } from 'util';
import { readFileSync } from 'fs';
import { fileURLToPath } from 'url';
var BaseLogger = class {
projectLogger;
timers = /* @__PURE__ */ new Map();
/**
* Creates a new base logger instance.
* @param projectPath - Optional path to project for debug log file creation
*/
constructor(projectPath) {
if (projectPath) {
this.setupProjectLogging(projectPath);
}
}
/**
* Sets up project-specific JSON logging to a timestamped debug log file.
*/
async setupProjectLogging(projectPath) {
try {
const logDir = path6.join(projectPath, ".better-agents");
const now = /* @__PURE__ */ new Date();
const timestamp = now.toISOString().replace(/[:.]/g, "-").slice(0, 19);
const logPath = path6.join(logDir, `debug-${timestamp}.log`);
await fs6.mkdir(logDir, { recursive: true });
this.projectLogger = pino(
{
level: "debug",
formatters: {
level: (label) => ({
level: label,
timestamp: (/* @__PURE__ */ new Date()).toISOString()
})
}
},
pino.destination(logPath)
);
} catch {
// File logging is best-effort; ignore setup failures so the CLI keeps working.
}
}
/**
* Logs debug information in structured JSON format.
* @param step - The operation or step being logged
* @param context - Additional structured data for debugging
*/
debug(step, context = {}) {
this.projectLogger?.debug({ step, ...context });
}
/**
* Logs informational debug data in structured JSON format.
* @param step - The operation or step being logged
* @param context - Additional structured data for analysis
*/
info(step, context = {}) {
this.projectLogger?.info({ step, ...context });
}
/**
* Logs error with stack trace in structured JSON format.
* @param error - The error that occurred
* @param context - Additional context about the error
*/
error(error, context = {}) {
this.projectLogger?.error({
step: "error",
error: error.message,
stack: error.stack,
...context
});
}
/**
* Starts a performance timer and returns a function to end it.
* @param label - Identifier for the timed operation
* @returns Function that ends the timer and logs duration
*/
startTimer(label) {
const start = Date.now();
this.timers.set(label, start);
this.debug("timer-start", { label });
return () => {
const duration = Date.now() - start;
this.timers.delete(label);
this.debug("timer-end", { label, duration });
return duration;
};
}
/**
* Checks if the logger has file logging capability.
* @returns True if project logging is set up
*/
hasFileLogging() {
return this.projectLogger !== void 0;
}
};
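// Usage sketch (illustrative; path and values are placeholders): BaseLogger is a
// no-op until a project path is provided, so these calls are safe either way.
//   const base = new BaseLogger("/path/to/project");
//   const endTimer = base.startTimer("scaffold");
//   base.debug("scaffold-start", { files: 3 });
//   const durationMs = endTimer(); // logs "timer-end" with { label, duration }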
var DisplayLogger = class _DisplayLogger {
spinner;
/**
* Logs user-facing informational message with cyan color.
* @param message - The message to display to the user
*/
userInfo(message) {
if (this.spinner) {
this.spinner.info(chalk.cyan(message));
} else {
console.log(chalk.cyan(message));
}
}
/**
* Logs user-facing success message with green color and checkmark.
* @param message - The success message to display
*/
userSuccess(message) {
if (this.spinner) {
this.spinner.succeed(chalk.green(message));
} else {
console.log(chalk.green(`\u2705 ${message}`));
}
}
/**
* Logs user-facing error message with red color and X mark.
* @param message - The error message to display
*/
userError(message) {
if (this.spinner) {
this.spinner.fail(chalk.red(message));
} else {
console.error(chalk.red(`\u274C ${message}`));
}
}
/**
* Logs user-facing warning message with yellow color and warning symbol.
* @param message - The warning message to display
*/
userWarning(message) {
if (this.spinner) {
this.spinner.warn(chalk.yellow(message));
} else {
console.warn(chalk.yellow(`\u26A0\uFE0F ${message}`));
}
}
/**
* Starts an ora spinner for long-running operations.
* @param text - Initial spinner text
* @returns The spinner instance
*/
startSpinner(text) {
if (!this.spinner) {
this.spinner = ora(text).start();
}
return this.spinner;
}
/**
* Gets the current spinner instance if one exists.
* @returns The current spinner or undefined
*/
getSpinner() {
return this.spinner;
}
/**
* Creates a child display logger with additional context.
* For display logger, this is mainly for API compatibility.
* @param _context - Context (ignored for display logger)
* @returns New display logger instance
*/
child(_context) {
return new _DisplayLogger();
}
};
var ConsoleLogger = class {
pinoLogger;
/**
* Creates a new console logger instance.
*/
constructor() {
if (this.detectDebugMode()) {
this.pinoLogger = pino({
level: "debug",
transport: {
target: "pino-pretty",
options: {
colorize: true,
translateTime: "HH:MM:ss",
ignore: "pid,hostname"
}
}
});
}
}
/**
* Detects if debug mode is enabled via environment variable or CLI flag.
*/
detectDebugMode() {
return Boolean(
process.env.SUPERAGENTS_DEBUG || process.argv.includes("--debug") || process.argv.includes("-d")
);
}
/**
* Logs debug information to console in pretty format.
* @param step - The operation or step being logged
* @param context - Additional structured data for debugging
*/
debug(step, context = {}) {
this.pinoLogger?.debug({ step, ...context });
}
/**
* Logs informational debug data to console in pretty format.
* @param step - The operation or step being logged
* @param context - Additional structured data for analysis
*/
info(step, context = {}) {
this.pinoLogger?.info({ step, ...context });
}
/**
* Logs error with stack trace to console in pretty format.
* @param error - The error that occurred
* @param context - Additional context about the error
*/
error(error, context = {}) {
this.pinoLogger?.error({
step: "error",
error: error.message,
stack: error.stack,
...context
});
}
/**
* Checks if console logging is enabled.
* @returns True if debug mode console logging is active
*/
isEnabled() {
return this.pinoLogger !== void 0;
}
};
// src/utils/logger/logger-facade.ts
var LoggerFacade = class _LoggerFacade {
baseLogger;
displayLogger;
consoleLogger;
/**
* Creates a new logger facade instance.
* @param projectPath - Optional path to project for debug log file creation
*/
constructor(projectPath) {
this.baseLogger = new BaseLogger(projectPath);
this.displayLogger = new DisplayLogger();
this.consoleLogger = new ConsoleLogger();
}
/**
* Logs user-facing informational message with cyan color.
* @param message - The message to display to the user
*/
userInfo(message) {
this.displayLogger.userInfo(message);
this.baseLogger.debug("user-info", { message, type: "info" });
this.consoleLogger.debug("user-info", { message, type: "info" });
}
/**
* Logs user-facing success message with green color and checkmark.
* @param message - The success message to display
*/
userSuccess(message) {
this.displayLogger.userSuccess(message);
this.baseLogger.debug("user-success", { message, type: "success" });
this.consoleLogger.debug("user-success", { message, type: "success" });
}
/**
* Logs user-facing error message with red color and X mark.
* @param message - The error message to display
*/
userError(message) {
this.displayLogger.userError(message);
this.baseLogger.error(new Error(message), { type: "user-error" });
this.consoleLogger.error(new Error(message), { type: "user-error" });
}
/**
* Logs user-facing warning message with yellow color and warning symbol.
* @param message - The warning message to display
*/
userWarning(message) {
this.displayLogger.userWarning(message);
this.baseLogger.debug("user-warning", { message, type: "warning" });
this.consoleLogger.debug("user-warning", { message, type: "warning" });
}
/**
* Logs debug information in structured JSON format.
* @param step - The operation or step being logged
* @param context - Additional structured data for debugging
*/
debug(step, context = {}) {
this.baseLogger.debug(step, context);
this.consoleLogger.debug(step, context);
}
/**
* Logs informational debug data in structured JSON format.
* @param step - The operation or step being logged
* @param context - Additional structured data for analysis
*/
info(step, context = {}) {
this.baseLogger.info(step, context);
this.consoleLogger.info(step, context);
}
/**
* Logs error with stack trace in structured JSON format.
* @param error - The error that occurred
* @param context - Additional context about the error
*/
error(error, context = {}) {
this.baseLogger.error(error, context);
this.consoleLogger.error(error, context);
}
/**
* Starts a performance timer and returns a function to end it.
* @param label - Identifier for the timed operation
* @returns Function that ends the timer and logs duration
*/
startTimer(label) {
return this.baseLogger.startTimer(label);
}
/**
* Creates a child logger with additional context.
* @param context - Context to add to all log entries from this child
* @returns New logger instance with inherited context
*/
child(context) {
const childFacade = new _LoggerFacade();
const currentSpinner = this.displayLogger.getSpinner();
if (currentSpinner) {
childFacade.displayLogger = this.displayLogger.child(context);
}
return childFacade;
}
/**
* Starts an ora spinner for long-running operations.
* @param text - Initial spinner text
* @returns The spinner instance
*/
startSpinner(text) {
return this.displayLogger.startSpinner(text);
}
/**
* Gets the current spinner instance if one exists.
* @returns The current spinner or undefined
*/
getSpinner() {
return this.displayLogger.getSpinner();
}
};
// src/utils/logger/index.ts
var logger = new LoggerFacade();
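// Usage sketch (illustrative): the facade routes user-facing output and structured
// debug records through the same call sites.
//   logger.userInfo("Creating project...");           // cyan console line + debug record
//   logger.debug("create-project", { name: "demo" }); // debug-only, file/console sinks
//   const end = logger.startTimer("setup");
//   end(); // returns elapsed ms and logs "timer-end"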
// src/config-collection/choice-builders/language-choices.ts
var buildLanguageChoices = () => {
return [
{ name: "TypeScript", value: "typescript" },
{ name: "Python", value: "python" }
];
};
// src/providers/frameworks/agno/knowledge.ts
var getKnowledge = () => ({
setupInstructions: "Python w/uv + pytest",
toolingInstructions: "Review the .cursorrules and llms.txt files for Agno best practices",
agentsGuideSection: `## Framework-Specific Guidelines
### Agno Framework
**Always follow Agno best practices:**
- Refer to the \`.cursorrules\` file for Agno-specific coding standards
- Consult \`llms.txt\` for comprehensive Agno documentation
- Use Agno's agent building patterns and conventions
- Follow Agno's recommended project structure
**Key Agno Resources:**
- Documentation: https://docs.agno.com/
- GitHub: https://github.com/agno-agi/agno
- Local files: \`.cursorrules\` and \`llms.txt\`
**When implementing agent features:**
1. Review Agno documentation for best practices
2. Use Agno's built-in tools and utilities
3. Follow Agno's patterns for agent state management
4. Leverage Agno's testing utilities
---
`
});
var AGNO_CURSORRULES_URL = "https://raw.githubusercontent.com/agno-agi/agno/main/.cursorrules";
var AGNO_LLMS_TXT_URL = "https://docs.agno.com/llms.txt";
var setup = async ({
projectPath
}) => {
await Promise.all([
fetchFile({
url: AGNO_CURSORRULES_URL,
targetPath: path6.join(projectPath, ".cursorrules"),
fallback: "# Agno cursor rules\n# Please manually download from: " + AGNO_CURSORRULES_URL
}),
fetchFile({
url: AGNO_LLMS_TXT_URL,
targetPath: path6.join(projectPath, "llms.txt"),
fallback: "# Agno LLMs documentation\n# Please manually download from: " + AGNO_LLMS_TXT_URL
})
]);
};
var fetchFile = async ({
url,
targetPath,
fallback
}) => {
try {
const response = await fetch(url);
if (!response.ok) {
throw new Error(`Failed to fetch: ${response.statusText}`);
}
const content = await response.text();
await fs6.writeFile(targetPath, content);
} catch {
logger.userWarning(`Could not fetch ${url}, using fallback`);
await fs6.writeFile(targetPath, fallback);
}
};
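// Usage sketch (illustrative; target path is a placeholder): fetchFile degrades
// gracefully, writing the fallback content and warning the user when the request fails.
//   await fetchFile({
//     url: AGNO_LLMS_TXT_URL,
//     targetPath: path6.join("/tmp/my-agent", "llms.txt"),
//     fallback: "# Agno LLMs documentation\n# Please manually download from: " + AGNO_LLMS_TXT_URL
//   });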
// src/providers/frameworks/agno/index.ts
var AgnoFrameworkProvider = {
id: "agno",
displayName: "Agno",
language: "python",
getKnowledge,
getMCPConfig: () => null,
setup
};
// src/providers/frameworks/mastra/knowledge.ts
var getKnowledge2 = () => ({
setupInstructions: "TypeScript w/pnpm + vitest",
toolingInstructions: "Use the Mastra MCP to learn about Mastra and how to build agents",
agentsGuideSection: `## Framework-Specific Guidelines
### Mastra Framework
**Always use the Mastra MCP for learning:**
- The Mastra MCP server provides real-time documentation
- Ask it questions about Mastra APIs and best practices
- Follow Mastra's recommended patterns for agent development
**When implementing agent features:**
1. Consult the Mastra MCP: "How do I [do X] in Mastra?"
2. Use Mastra's built-in agent capabilities
3. Follow Mastra's TypeScript patterns and conventions
4. Leverage Mastra's integration ecosystem
**Initial setup:**
1. Use \`pnpx mastra init --default\` to create a new Mastra project; do this before setting up the rest of the project, right after running \`pnpm init\`.
2. Then explore the setup it created, including the folders, and remove what is not needed
3. Proceed with the user's request to implement the agent and test it out
4. Open the UI for the user to see using \`pnpx mastra dev\`
---
`
});
// src/providers/frameworks/mastra/mcp-config.ts
var getMCPConfig = () => ({
type: "stdio",
command: "npx",
args: ["-y", "@mastra/mcp-docs-server"]
});
// src/providers/frameworks/mastra/index.ts
var MastraFrameworkProvider = {
id: "mastra",
displayName: "Mastra",
language: "typescript",
getKnowledge: getKnowledge2,
getMCPConfig,
setup: async () => {
}
};
// src/providers/frameworks/index.ts
var PROVIDERS = {
agno: AgnoFrameworkProvider,
mastra: MastraFrameworkProvider
};
var getFrameworkProvider = ({
framework
}) => {
const provider = PROVIDERS[framework];
if (!provider) {
throw new Error(`Framework provider not found: ${framework}`);
}
return provider;
};
var getFrameworksByLanguage = ({
language
}) => {
return Object.values(PROVIDERS).filter((p) => p.language === language);
};
// src/config-collection/choice-builders/framework-choices.ts
var buildFrameworkChoices = ({
language
}) => {
return getFrameworksByLanguage({ language }).map((provider) => ({
name: provider.displayName,
value: provider.id
}));
};
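// Shape sketch (illustrative): given the PROVIDERS registry above, language "python"
// currently yields [{ name: "Agno", value: "agno" }] and "typescript" yields
// [{ name: "Mastra", value: "mastra" }].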
var ProcessUtils = class {
/**
* Launches a command with full terminal control using spawnSync.
* Blocks until the command completes.
*
* @param command - The command to execute
* @param args - Arguments for the command
* @param options - Execution options
*
* @example
* ```ts
* ProcessUtils.launchWithTerminalControl('cursor-agent', ['prompt text'], { cwd: '/path' });
* // Blocks until cursor-agent exits
* ```
*/
static launchWithTerminalControl(command, args, options) {
const result = spawnSync(command, args, {
cwd: options.cwd,
stdio: "inherit"
});
if (result.error) {
throw new Error(`Failed to execute ${command}: ${result.error.message}`);
}
if (result.status !== null) {
process.exit(result.status);
}
}
};
var execAsync = promisify(exec);
var CliUtils = class {
/**
* Checks if a command is available in the system PATH.
*
* @param command - The command to check (e.g., 'claude', 'cursor-agent')
* @returns Promise resolving to true if command exists, false otherwise
*
* @example
* ```ts
* const hasCommand = await CliUtils.isCommandAvailable('claude');
* // Returns: true if claude is installed
* ```
*/
static async isCommandAvailable(command) {
try {
await execAsync(`which ${command}`);
return true;
} catch {
return false;
}
}
};
// src/providers/coding-assistants/claude/index.ts
var ClaudeCodingAssistantProvider = {
id: "claude-code",
displayName: "Claude Code",
command: "claude",
async isAvailable() {
const installed = await CliUtils.isCommandAvailable("claude");
return {
installed,
installCommand: installed ? void 0 : "npm install -g @anthropic-ai/claude-code"
};
},
async writeMCPConfig({ projectPath, config }) {
const mcpConfigPath = path6.join(projectPath, ".mcp.json");
await fs6.writeFile(mcpConfigPath, JSON.stringify(config, null, 2));
const claudeMdPath = path6.join(projectPath, "CLAUDE.md");
const claudeMdContent = `@AGENTS.md
`;
await fs6.writeFile(claudeMdPath, claudeMdContent);
},
async launch({
projectPath,
prompt
}) {
try {
logger.userInfo(`\u{1F916} Launching ${this.displayName}...`);
ProcessUtils.launchWithTerminalControl("claude", [prompt], {
cwd: projectPath
});
logger.userSuccess("Session complete!");
} catch (error) {
if (error instanceof Error) {
logger.userError(`Failed to launch ${this.displayName}: ${error.message}`);
}
throw error;
}
}
};
var CursorCodingAssistantProvider = {
id: "cursor",
displayName: "Cursor",
command: "",
async isAvailable() {
return { installed: true };
},
async writeMCPConfig({ projectPath, config }) {
const cursorDir = path6.join(projectPath, ".cursor");
await fs6.mkdir(cursorDir, { recursive: true });
const mcpConfigPath = path6.join(cursorDir, "mcp.json");
await fs6.writeFile(mcpConfigPath, JSON.stringify(config, null, 2));
},
async launch({
projectPath
}) {
logger.userWarning("To start with Cursor:");
logger.userInfo(" 1. Open Cursor");
logger.userInfo(` 2. Open the folder: ${projectPath}`);
logger.userInfo(" 3. Use the initial prompt above with Cursor Composer");
}
};
var KilocodeCodingAssistantProvider = {
id: "kilocode",
displayName: "Kilocode CLI",
command: "kilocode",
async isAvailable() {
const installed = await CliUtils.isCommandAvailable("kilocode");
return {
installed,
installCommand: installed ? void 0 : "npm install -g @kilocode/cli"
};
},
async writeMCPConfig({ projectPath, config }) {
const mcpConfigPath = path6.join(projectPath, ".mcp.json");
await fs6.writeFile(mcpConfigPath, JSON.stringify(config, null, 2));
},
async launch({
projectPath,
prompt
}) {
try {
logger.userInfo(`\u{1F916} Launching ${this.displayName}...`);
ProcessUtils.launchWithTerminalControl("kilocode", ["-a", prompt], {
cwd: projectPath
});
logger.userSuccess("Session complete!");
} catch (error) {
if (error instanceof Error) {
logger.userError(
`Failed to launch ${this.displayName}: ${error.message}`
);
}
throw error;
}
}
};
var NoneCodingAssistantProvider = {
id: "none",
displayName: "None - I will prompt it myself",
command: "",
async isAvailable() {
return { installed: true };
},
async writeMCPConfig({ projectPath, config }) {
const mcpConfigPath = path6.join(projectPath, ".mcp.json");
await fs6.writeFile(mcpConfigPath, JSON.stringify(config, null, 2));
const cursorDir = path6.join(projectPath, ".cursor");
await fs6.mkdir(cursorDir, { recursive: true });
const cursorMcpPath = path6.join(cursorDir, "mcp.json");
await fs6.writeFile(cursorMcpPath, JSON.stringify(config, null, 2));
const claudeMdPath = path6.join(projectPath, "CLAUDE.md");
const claudeMdContent = `@AGENTS.md
`;
await fs6.writeFile(claudeMdPath, claudeMdContent);
},
async launch(_params) {
logger.userInfo(
"When you're ready, use the initial prompt above with your coding assistant."
);
}
};
// src/providers/coding-assistants/index.ts
var PROVIDERS2 = {
kilocode: KilocodeCodingAssistantProvider,
"claude-code": ClaudeCodingAssistantProvider,
cursor: CursorCodingAssistantProvider,
none: NoneCodingAssistantProvider
};
var getCodingAssistantProvider = ({
assistant
}) => {
const provider = PROVIDERS2[assistant];
if (!provider) {
throw new Error(`Coding assistant provider not found: ${assistant}`);
}
return provider;
};
var getAllCodingAssistants = () => {
return Object.values(PROVIDERS2);
};
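// Usage sketch (illustrative; path is a placeholder): look up a provider by id;
// unknown ids throw early.
//   const provider = getCodingAssistantProvider({ assistant: "claude-code" });
//   await provider.writeMCPConfig({
//     projectPath: "/tmp/my-agent",
//     config: { mcpServers: {} }
//   });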
// src/utils/coding-assistant.util.ts
var CodingAssistantUtils = class {
/**
* Detects which coding assistants are installed on the system.
*
* @returns Promise resolving to a map of assistant IDs to installation status
*
* @example
* ```ts
* const installed = await CodingAssistantUtils.detectInstalledAgents();
* // Returns: { 'claude-code': true, 'cursor': false, 'kilocode': true }
* ```
*/
static async detectInstalledAgents() {
const [hasClaude, hasCursor, hasKilocode] = await Promise.all([
CliUtils.isCommandAvailable("claude"),
CliUtils.isCommandAvailable("cursor-agent"),
CliUtils.isCommandAvailable("kilocode")
]);
return {
"claude-code": hasClaude,
cursor: hasCursor,
kilocode: hasKilocode,
none: true
// Always available since it doesn't require installation
};
}
};
// src/config-collection/choice-builders/coding-assistant-choices.ts
var buildCodingAssistantChoices = async () => {
const assistants = getAllCodingAssistants();
const installedMap = await CodingAssistantUtils.detectInstalledAgents();
const installed = [];
const notInstalled = [];
for (const assistant of assistants) {
const isInstalled = installedMap[assistant.id];
const choice = {
name: isInstalled ? assistant.displayName : chalk.gray(`${assistant.displayName} (not installed)`),
value: assistant.id
};
if (isInstalled) {
installed.push(choice);
} else {
notInstalled.push(choice);
}
}
return [...installed, ...notInstalled];
};
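// Shape sketch (illustrative): installed assistants are listed first, missing ones
// are appended with a grayed-out label, e.g.
//   [
//     { name: "Claude Code", value: "claude-code" },
//     { name: "None - I will prompt it myself", value: "none" },
//     { name: "Cursor (not installed)" /* gray */, value: "cursor" }
//   ]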
// src/providers/llm-providers/openai/index.ts
var OpenAIProvider = {
id: "openai",
displayName: "OpenAI",
apiKeyUrl: "https://platform.openai.com/api-keys",
getEnvVariables: ({ apiKey }) => [
{ key: "OPENAI_API_KEY", value: apiKey }
]
};
// src/providers/llm-providers/anthropic/index.ts
var AnthropicProvider = {
id: "anthropic",
displayName: "Anthropic (Claude)",
apiKeyUrl: "https://console.anthropic.com/settings/keys",
getEnvVariables: ({ apiKey }) => [
{ key: "ANTHROPIC_API_KEY", value: apiKey }
]
};
// src/providers/llm-providers/gemini/index.ts
var GeminiProvider = {
id: "gemini",
displayName: "Google Gemini",
apiKeyUrl: "https://aistudio.google.com/app/apikey",
getEnvVariables: ({ apiKey }) => [
{ key: "GOOGLE_API_KEY", value: apiKey },
{ key: "GEMINI_API_KEY", value: apiKey }
]
};
// src/providers/llm-providers/bedrock/index.ts
var BedrockProvider = {
id: "bedrock",
displayName: "AWS Bedrock",
apiKeyUrl: "https://console.aws.amazon.com/iam/home#/security_credentials",
additionalCredentials: [
{
key: "awsSecretKey",
label: "AWS Secret Access Key",
type: "password",
validate: (value) => {
if (!value || value.length < 10) {
return "AWS Secret Access Key is required";
}
return true;
}
},
{
key: "awsRegion",
label: "AWS Region (e.g., us-east-1)",
type: "text",
defaultValue: "us-east-1",
validate: (value) => {
if (!value || value.length < 5) {
return "AWS Region is required";
}
return true;
}
}
],
getEnvVariables: ({ apiKey, additionalInputs }) => {
const envVars = [
{ key: "AWS_ACCESS_KEY_ID", value: apiKey }
];
if (additionalInputs?.awsSecretKey) {
envVars.push({
key: "AWS_SECRET_ACCESS_KEY",
value: additionalInputs.awsSecretKey
});
}
if (additionalInputs?.awsRegion) {
envVars.push({
key: "AWS_REGION",
value: additionalInputs.awsRegion
});
}
return envVars;
}
};
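// Output sketch (illustrative; key values are placeholders): with the secret key and
// region collected, BedrockProvider.getEnvVariables returns
//   [
//     { key: "AWS_ACCESS_KEY_ID", value: "<apiKey>" },
//     { key: "AWS_SECRET_ACCESS_KEY", value: "<awsSecretKey>" },
//     { key: "AWS_REGION", value: "us-east-1" }
//   ]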
// src/providers/llm-providers/openrouter/index.ts
var OpenRouterProvider = {
id: "openrouter",
displayName: "OpenRouter",
apiKeyUrl: "https://openrouter.ai/keys",
getEnvVariables: ({ apiKey }) => [
{ key: "OPENROUTER_API_KEY", value: apiKey }
]
};
// src/providers/llm-providers/grok/index.ts
var GrokProvider = {
id: "grok",
displayName: "xAI (Grok)",
apiKeyUrl: "https://console.x.ai/team",
getEnvVariables: ({ apiKey }) => [
{ key: "XAI_API_KEY", value: apiKey }
]
};
// src/providers/llm-providers/index.ts
var PROVIDERS3 = {
openai: OpenAIProvider,
anthropic: AnthropicProvider,
gemini: GeminiProvider,
bedrock: BedrockProvider,
openrouter: OpenRouterProvider,
grok: GrokProvider
};
var getLLMProvider = ({
provider
}) => {
const llmProvider = PROVIDERS3[provider];
if (!llmProvider) {
throw new Error(`LLM provider not found: ${provider}`);
}
return llmProvider;
};
var getAllLLMProviders = () => {
return Object.values(PROVIDERS3);
};
// src/config-collection/validators/openai-key.ts
var validateOpenAIKey = (value) => {
if (!value || value.trim().length === 0) {
return "API key is required";
}
if (!value.startsWith("sk-")) {
return 'OpenAI API key should start with "sk-"';
}
return true;
};
// src/config-collection/validators/langwatch-key.ts
var validateLangWatchKey = (value) => {
if (!value || value.trim().length === 0) {
return "LangWatch API key is required";
}
if (!value.startsWith("sk-lw-")) {
return 'LangWatch API key should start with "sk-lw-"';
}
return true;
};
// src/config-collection/validators/project-goal.ts
var validateProjectGoal = (value) => {
if (!value || value.trim().length === 0) {
return "Please describe what you want to build";
}
return true;
};
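// Behavior sketch (illustrative): validators return true on success or an error
// message string for the prompt library to display.
//   validateOpenAIKey("sk-abc123");    // => true
//   validateOpenAIKey("abc123");       // => 'OpenAI API key should start with "sk-"'
//   validateLangWatchKey("sk-lw-xyz"); // => true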
// src/config-collection/collect-config.ts
var collectConfig = async () => {
try {
logger.userInfo(
"Setting up your agent project following the Better Agent Structure.\n"
);
const language = await select({
message: "What programming language do you want to use?",
choices: buildLanguageChoices()
});
const framework = await select({
message: "What agent framework do you want to use?",
choices: buildFrameworkChoices({ language })
});
const allProviders = getAllLLMProviders();
const llmProvider = await select({
message: "What LLM provider is your agent going to use?",
choices: allProviders.map((p) => ({
name: p.displayName,
value: p.id
}))
});
const selectedProvider = allProviders.find((p) => p.id === llmProvider);
const providerDisplayName = selectedProvider?.displayName || llmProvider;
if (selectedProvider?.apiKeyUrl) {
logger.userInfo(`To get your ${providerDisplayName} API key, visit:`);
logger.userInfo(`${selectedProvider.apiKeyUrl}`);
}
const llmApiKey = await password({
message: `Enter your ${providerDisplayName} API key:`,
mask: "*",
validate: llmProvider === "openai" ? validateOpenAIKey : (value) => {
if (!value || value.length < 5) {
return "API key is required and must be at least 5 characters";
}
return true;
}
});
let llmAdditionalInputs;
if (selectedProvider?.additionalCredentials && selectedProvider.additionalCredentials.length > 0) {
llmAdditionalInputs = {};
for (const credential of selectedProvider.additionalCredentials) {
if (credential.type === "password") {
llmAdditionalInputs[credential.key] = await password({
message: `Enter your ${credential.label}:`,
mask: "*",
validate: credential.validate
});
} else {
llmAdditionalInputs[credential.key] = await input({
message: `Enter your ${credential.label}:`,
default: credential.defaultValue,
validate: credential.validate
});
}
}
}
const codingAssistant = await select({
message: "What is your preferred coding assistant for building the agent?",
choices: await buildCodingAssistantChoices()
});
const codingAssistantProviders = getAllCodingAssistants();
const selectedCodingProvider = codingAssistantProviders.find(
(p) => p.id === codingAssistant
);
if (selectedCodingProvider) {
let availability = await selectedCodingProvider.isAvailable();
if (!availability.installed && availability.installCommand) {
logger.userWarning(
`${selectedCodingProvider.displayName} is not installed.`
);
logger.userInfo(`To install it, run:`);
logger.userInfo(`${availability.installCommand}`);
const shouldInstall = await confirm({
message: "Would you like me to install it for you?",
default: true
});
if (shouldInstall) {
logger.userInfo("Installing...");
try {
await new Promise((resolve2, reject) => {
const [cmd, ...args] = availability.installCommand.split(" ");
const child = spawn(cmd, args, { stdio: "inherit" });
child.on("close", (code) => {
if (code === 0) {
resolve2();
} else {
reject(
new Error(`Installation failed with exit code ${code}`)
);
}
});
child.on("error", reject);
});
availability = await selectedCodingProvider.isAvailable();
if (availability.installed) {
logger.userSuccess(
`${selectedCodingProvider.displayName} installed successfully!`
);
} else {
logger.userError(
"Installation may have failed. Please try installing manually."
);
}
} catch (error) {
logger.userError(
`Installation failed: ${error instanceof Error ? error.message : "Unknown error"}`
);
logger.userInfo("Please try installing manually.");
}
}
}
}
logger.userInfo("\u2714\uFE0E Your coding assistant will finish setup later if needed\n");
logger.userInfo("To get your LangWatch API key, visit:");
logger.userInfo("https://app.langwatch.ai/authorize");
const langwatchApiKey = await password({
message: "Enter your LangWatch API key (for prompt management, scenarios, evaluations and observability):",
mask: "*",
validate: validateLangWatchKey
});
logger.userInfo("To get your Smithery API key (optional), visit:");
logger.userInfo("https://smithery.ai/account/api-keys");
logger.userInfo(
"Smithery enables your coding agent to auto-discover MCP tools to integrate with your agent."
);
const smitheryApiKey = await password({
message: "Enter your Smithery API key (Optional - press Enter to skip):",
mask: "*",
validate: (value) => {
if (!value || value.trim() === "") {
return true;
}
if (value.length < 10) {
return "Smithery API key must be at least 10 characters";
}
return true;
}
});
const projectGoal = await input({
message: "What is your agent going to do?",
validate: validateProjectGoal
});
return {
language,
framework,
codingAssistant,
llmProvider,
llmApiKey,
llmAdditionalInputs,
langwatchApiKey,
smitheryApiKey: smitheryApiKey && smitheryApiKey.trim() !== "" ? smitheryApiKey : void 0,
projectGoal
};
} catch (error) {
if (error instanceof Error && error.message.includes("User force closed")) {
logger.userWarning("Setup cancelled by user");
process.exit(0);
}
throw error;
}
};
var createDirectories = async ({
projectPath,
config
}) => {
const srcDir = config.framework === "mastra" ? "src" : "app";
const directories = [
srcDir,
"prompts",
"tests",
"tests/evaluations",
"tests/scenarios"
];
for (const dir of directories) {
await fs6.mkdir(path6.join(projectPath, dir), { recursive: true });
}
};
var generateEnvFiles = async ({
projectPath,
config
}) => {
const provider = getLLMProvider({ provider: config.llmProvider });
const envVars = provider.getEnvVariables({
apiKey: config.llmApiKey,
additionalInputs: config.llmAdditionalInputs
});
const envExampleLines = [
"# LLM Provider API Keys",
...envVars.map((v) => `${v.key}=your_${v.key.toLowerCase()}_here`),
"",
"# LangWatch",
"LANGWATCH_API_KEY=your_langwatch_api_key_here"
];
const envExample = envExampleLines.join("\n") + "\n";
await fs6.writeFile(path6.join(projectPath, ".env.example"), envExample);
const envContentLines = [
"# LLM Provider API Keys",
...envVars.map((v) => `${v.key}=${v.value}`),
"",
"# LangWatch",
`LANGWATCH_API_KEY=${config.langwatchApiKey}`
];
const envContent = envContentLines.join("\n") + "\n";
await fs6.writeFile(path6.join(projectPath, ".env"), envContent);
};
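// Output sketch (illustrative; key values are placeholders): for the OpenAI provider
// the generated .env would contain
//   # LLM Provider API Keys
//   OPENAI_API_KEY=sk-...
//
//   # LangWatch
//   LANGWATCH_API_KEY=sk-lw-...
// while .env.example uses your_openai_api_key_here-style placeholders instead.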
var generateGitignore = async ({
projectPath
}) => {
const gitignoreContent = `# Environment variables
.env
# Dependencies
node_modules/
__pycache__/
*.pyc
venv/
.venv/
# IDE
.vscode/
.idea/
*.swp
*.swo
# OS
.DS_Store
Thumbs.db
# Build outputs
dist/
build/
*.egg-info/
`;
await fs6.writeFile(path6.join(projectPath, ".gitignore"), gitignoreContent);
};
var generateSamplePrompt = async ({
projectPath
}) => {
const samplePromptYaml = `# Sample prompt for your agent
model: gpt-4o
temperature: 0.7
messages:
- role: system
content: |
You are a helpful AI assistant.
`;
await fs6.writeFile(
path6.join(projectPath, "prompts", "sample_prompt.yaml"),
samplePromptYaml
);
await fs6.writeFile(
path6.join(projectPath, "prompts.json"),
JSON.stringify({ prompts: [] }, null, 2)
);
};
var generateSampleEvaluation = async ({
projectPath,
language
}) => {
const sampleEvalNotebook = {
cells: [
{
cell_type: "markdown",
metadata: {},
source: [
"# Sample Evaluation\n",
"\n",
"This notebook demonstrates how to evaluate your agent using LangWatch."
]
},
{
cell_type: "code",
execution_count: null,
metadata: {},
outputs: [],
source: [
"# TODO: Add your evaluation code here using LangWatch Evaluations API\n",
"# Refer to LangWatch MCP for documentation on how to use evaluations"
]
}
],
metadata: {
kernelspec: {
display_name: language === "python" ? "Python 3" : "TypeScript",
language,
name: language === "python" ? "python3" : "tslab"
}
},
nbformat: 4,
nbformat_minor: 4
};
await fs6.writeFile(
path6.join(projectPath, "tests", "evaluations", "example_eval.ipynb"),
JSON.stringify(sampleEvalNotebook, null, 2)
);
};
var generateSampleScenario = async ({
projectPath,
language
}) => {
const ext = language === "python" ? "py" : "ts";
const sampleScenarioContent = language === "python" ? `"""
Sample scenario test for your agent.
Follow the Agent Testing Pyramid: use Scenario for end-to-end agentic tests.
"""
# TODO: Add your scenario tests here
# Refer to https://scenario.langwatch.ai/ for documentation
` : `/**
* Sample scenario test for your agent.
* Follow the Agent Testing Pyramid: use Scenario for end-to-end agentic tests.
*/
// TODO: Add your scenario tests here
// Refer to https://scenario.langwatch.ai/ for documentation
`;
await fs6.writeFile(
path6.join(
projectPath,
"tests",
"scenarios",
`example_scenario.test.${ext}`
),
sampleScenarioContent
);
};
var generateMainEntryPoint = async ({
projectPath,
config
}) => {
const srcDir = config.framework === "mastra" ? "src" : "app";
const mainFileContent = config.language === "python" ? `"""
Main entry point for your agent.
"""
def main():
print("Welcome to your agent!")
# TODO: Implement your agent logic here
if __name__ == "__main__":
main()
` : `/**
* Main entry point for your agent.
*/
const main = () => {
console.log("Welcome to your agent!");
// TODO: Implement your agent logic here
};
main();
`;
const mainFileName = config.language === "python" ? "main.py" : "index.ts";
await fs6.writeFile(
path6.join(projectPath, srcDir, mainFileName),
mainFileContent
);
};
// src/project-scaffolding/create-project-structure.ts
var createProjectStructure = async ({
projectPath,
config
}) => {
await createDirectories({ projectPath, config });
await generateEnvFiles({
projectPath,
config
});
await generateGitignore({ projectPath });
await generateSamplePrompt({ projectPath });
await generateSampleEvaluation({ projectPath, language: config.language });
await generateSampleScenario({ projectPath, language: config.language });
await generateMainEntryPoint({ projectPath, config });
};
// src/documentation/sections/overview-section.ts
var buildOverviewSection = ({ config }) => {
const { projectGoal, framework, language } = config;
return `# Agent Development Guidelines
## Project Overview
**Goal:** ${projectGoal}
**Framework:** ${framework === "agno" ? "Agno" : "Mastra"}
**Language:** ${language === "python" ? "Python" : "TypeScript"}
This project follows LangWatch best practices for building production-ready AI agents.
---
`;
};
// src/documentation/sections/principles-section.ts
var buildPrinciplesSection = () => {
return `## Core Principles
### 1. Scenario Agent Testing
Scenario allows for end-to-end validation of multi-turn conversations and real-world scenarios; most agent functionality should be tested with scenarios.
**CRITICAL**: Every new agent feature MUST be tested with Scenario tests before considering it complete.
- Write simulation tests for multi-turn conversations
- Validate edge cases
- Ensure business value is delivered
- Test different conversation paths
Best practices:
- NEVER check for regex or word matches in the agent's response; use judge criteria instead
- Use functions in the Scenario scripts for things that can be checked deterministically (tool calls, database entries, etc.) instead of relying on the judge
- For the rest, use the judge criteria to check whether the agent is reaching the desired goal
- When something breaks, run a single scenario at a time to debug and iterate faster, not the whole suite
- Write as few scenarios as possible and cover more ground with each one, as they are heavy to run
- If the user made a single request, one scenario might be enough; run it at the end of the implementation to check that it works
- ALWAYS consult the Scenario docs on how to write scenarios; do not assume the syntax
### 2. Prompt Management
**ALWAYS** use LangWatch Prompt CLI for managing prompts:
- Use the LangWatch MCP to learn about prompt management, search for Prompt CLI docs
- Never hardcode prompts in your application code
- Store all prompts in the \`prompts/\` directory as YAML files, use "langwatch prompt create <name>" to create a new prompt
- Run \`langwatch prompt sync\` after changing a prompt to update the registry
Example prompt structure:
\`\`\`yaml
# prompts/my_prompt.yaml
model: gpt-4o
temperature: 0.7
messages:
- role: system
content: |
Your system prompt here
- role: user
content: |
{{ user_input }}
\`\`\`
DO NOT hardcode prompts in your application code. For example:
BAD:
\`\`\`
Agent(prompt="You are a helpful assistant.")
\`\`\`
GOOD:
\`\`\`python
import langwatch
prompt = langwatch.prompts.get("my_prompt")
Agent(prompt=prompt.prompt)
\`\`\`
\`\`\`typescript
import { LangWatch } from "langwatch";
const langwatch = new LangWatch({
apiKey: process.env.LANGWATCH_API_KEY
});
const prompt = await langwatch.prompts.get("my_prompt")
Agent({ prompt: prompt!.prompt })
\`\`\`
Prompt fetching is very reliable when using the prompt CLI because the files are local (double check that they were created with the CLI and are listed in the prompts.json file).
DO NOT add try/catch around it and DO NOT duplicate the prompt here as a fallback.
Explore the prompt management getting-started and data model docs if you need more advanced usage such as compiled prompts with variables or message lists.
### 3. Evaluations for specific cases
Only write evaluations for specific cases:
- When a RAG is implemented, so we can evaluate the accuracy given many sample queries (using an LLM to compare expected with generated outputs)
- For classification tasks, e.g. categorization, routing, simple true/false detection, etc
- When the user asks and you are sure an agent scenario wouldn't test the behaviour better
This is because evaluations work best when you have a lot of examples with a very clear
definition of what is correct and what is not (that is, you can simply compare expected with generated outputs)
and you are looking at single input/output pairs. This is not the case for multi-turn agent flows.
Create evaluations in Jupyter notebooks under \`tests/evaluations/\`:
- Generate CSV example datasets yourself, with plenty of examples, to be read by pandas
- Use the LangWatch Evaluations API to create evaluation notebooks and track the evaluation results
- Use either a simple == comparison or a direct LLM call (e.g. OpenAI) to compare expected with generated outputs when possible, unless requested otherwise
### 4. General good practices
- ALWAYS use the package manager CLI commands to init, add, and install new dependencies; DO NOT guess package versions, and DO NOT add them to the dependencies file by hand.
- When setting up, remember to load dotenv for the tests so env vars are available
- Double-check the guidelines in AGENTS.md at the end of the implementation.
---
`;
};
// src/documentation/sections/workflow-section.ts
var buildWorkflowSection = ({ config }) => {
const srcDir = config.framework === "mastra" ? "src" : "app";
const ext = config.language === "python" ? "py" : "ts";
return `## Project Structure
This project follows a standardized structure for production-ready agents:
\`\`\`
|__ ${srcDir}/ # Main application code
|__ prompts/ # Versioned prompt files (YAML)
|_____ *.yaml
|__ tests/
|_____ evaluations/ # Jupyter notebooks for component evaluation
|________ *.ipynb
|_____ scenarios/ # End-to-end scenario tests
|________ *.test.${ext}
|__ prompts.json # Prompt registry
|__ .env # Environment variables (never commit!)
\`\`\`
---
## Development Workflow
### When Starting a New Feature:
1. **Understand Requirements**: Clarify what the agent should do
2. **Design the Approach**: Plan which components you'll need
3. **Implement with Prompts**: Use LangWatch Prompt CLI to create/manage prompts
4. **Write Unit Tests**: Test deterministic components
5. **Create Evaluations**: Build evaluation notebooks for probabilistic components
6. **Write Scenario Tests**: Create end-to-end tests using Scenario
7. **Run Tests**: Verify everything works before moving on
### Always:
- \u2705 Version control your prompts
- \u2705 Write tests for new features
- \u2705 Use LangWatch MCP to learn best practices
- \u2705 Follow the Agent Testing Pyramid
- \u2705 Document your agent's capabilities
### Never:
- \u274C Hardcode prompts in application code
- \u274C Skip testing new features
- \u274C Commit API keys or sensitive data
- \u274C Optimize without measuring (use evaluations first)
---
## Using LangWatch MCP
The LangWatch MCP server provides expert guidance on:
- Prompt management with Prompt CLI
- Writing Scenario tests
- Creating evaluations
- Best practices for agent development
**How to use it:**
Simply ask your coding assistant questions like:
- "How do I use the LangWatch Prompt CLI?"
- "Show me how to write a Scenario test"
- "How do I create an evaluation for my RAG system?"
The MCP will provide up-to-date documentation and examples.
---
## Getting Started
1. **Set up your environment**: Copy \`.env.example\` to \`.env\` and fill in your API keys
2. **Learn the tools**: Ask the LangWatch MCP about prompt management and testing
3. **Start building**: Implement your agent in the \`${srcDir}/\` directory
4. **Write tests**: Create scenario tests for your agent's capabilities
5. **Iterate**: Use evaluations to improve your agent's performance
---
## Resources
- **Scenario Documentation**: https://scenario.langwatch.ai/
- **Agent Testing Pyramid**: https://scenario.langwatch.ai/best-practices/the-agent-testing-pyramid
- **LangWatch Dashboard**: https://app.langwatch.ai/
${config.framework === "agno" ? "- **Agno Documentation**: https://docs.agno.com/" : "- **Mastra Documentation**: Use the Mastra MCP for up-to-date docs"}
---
Remember: Building production-ready agents means combining great AI capabilities with solid software engineering practices. Follow these guidelines to create agents that are reliable, testable, and maintainable.
`;
};
// src/builders/agents-guide-builder.ts
var buildAgentsGuide = async ({
projectPath,
config
}) => {
const frameworkProvider = getFrameworkProvider({
framework: config.framework
});
const frameworkKnowledge = frameworkProvider.getKnowledge();
const content = [
buildOverviewSection({ config }),
buildPrinciplesSection(),
frameworkKnowledge.agentsGuideSection,
buildWorkflowSection({ config })
].join("\n");
await fs6.writeFile(path6.join(projectPath, "AGENTS.md"), content);
};
// src/builders/mcp-config-builder.ts
var buildMCPConfig = async ({
projectPath,
config
}) => {
const mcpConfig = {
mcpServers: {}
};
mcpConfig.mcpServers.langwatch = {
command: "npx",
args: ["-y", "@langwatch/mcp-server"]
};
if (config.smitheryApiKey) {
mcpConfig.mcpServers.toolbox = {
command: "npx",
args: [
"-y",
"@smithery/cli@latest",
"run",
"@smithery/toolbox",
"--key",
config.smitheryApiKey
]
};
}
const frameworkProvider = getFrameworkProvider({
framework: config.framework
});
const frameworkMCP = frameworkProvider.getMCPConfig?.();
if (frameworkMCP) {
mcpConfig.mcpServers[frameworkProvider.id] = frameworkMCP;
}
const assistantProvider = getCodingAssistantProvider({
assistant: config.codingAssistant
});
await assistantProvider.writeMCPConfig({ projectPath, config: mcpConfig });
};
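// Output sketch (illustrative): with a Smithery key and the Mastra framework selected,
// the written MCP config would contain
//   {
//     "mcpServers": {
//       "langwatch": { "command": "npx", "args": ["-y", "@langwatch/mcp-server"] },
//       "toolbox": { "command": "npx", "args": ["-y", "@smithery/cli@latest", "run", "@smithery/toolbox", "--key", "..."] },
//       "mastra": { "type": "stdio", "command": "npx", "args": ["-y", "@mastra/mcp-docs-server"] }
//     }
//   }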
// src/providers/languages/python/knowledge.ts
var getKnowledge3 = () => ({
setupInstructions: "Python with uv + pytest (install uv for them if they don't have it)",
sourceExtensions: [".py"],
testFramework: "pytest"
});
// src/providers/languages/python/index.ts
var PythonLanguageProvider = {
id: "python",
displayName: "Python",
getKnowledge: getKnowledge3
};
// src/providers/languages/typescript/knowledge.ts
var getKnowledge4 = () => ({
setupInstructions: "TypeScript with pnpm + vitest (install pnpm for them if they don't have it)",
sourceExtensions: [".ts", ".tsx"],
testFramework: "vitest"
});
// src/providers/languages/typescript/index.ts
var TypeScriptLanguageProvider = {
id: "typescript",
displayName: "TypeScript",
getKnowledge: getKnowledge4
};
// src/providers/languages/index.ts
var PROVIDERS4 = {
typescript: TypeScriptLanguageProvider,
python: PythonLanguageProvider
};
var getLanguageProvider = ({
language
}) => {
const provider = PROVIDERS4[language];
if (!provider) {
throw new Error(`Language provider not found: ${language}`);
}
return provider;
};
// src/assistant-kickoff/build-initial-prompt.ts
var buildInitialPrompt = ({
config
}) => {
const frameworkProvider = getFrameworkProvider({
framework: config.framework
});
const languageProvider = getLanguageProvider({ language: config.language });
const frameworkKnowledge = frameworkProvider.getKnowledge();
const languageKnowledge = languageProvider.getKnowledge();
const instructions = `You are an expert AI agent developer. This project has been set up with Better Agents best practices.
First steps:
1. Read and understand the AGENTS.md file - it contains all the guidelines for this project
2. Update the AGENTS.md with specific details about what this project does
3. Create a comprehensive README.md explaining the project, setup, and usage
4. Set up the ${languageKnowledge.setupInstructions}
5. ${frameworkKnowledge.toolingInstructions}
6. Execute any needed installation steps yourself: library dependencies, CLI tools, etc.
7. Use the LangWatch MCP to learn about prompt management and testing
8. Start implementing the core agent functionality
9. Instrument the agent with LangWatch
10. Use Scenario tests to ensure the agent is working as expected; integrate them with the agent and consider it done only when all scenarios pass (check the Scenario docs on how to implement them)
11. If available from the framework, tell the