// autollama — Modern JavaScript-first RAG framework with contextual
// embeddings, professional CLI, and one-command deployment.
/**
* AutoLlama v3.0 - NPX Installer
* ๐ฆ The cuddliest context-aware RAG framework
* Usage: npx create-autollama [project-name]
*/
const fs = require('fs-extra');
const path = require('path');
const inquirer = require('inquirer');
const chalk = require('chalk');
const ora = require('ora');
const { execSync, spawn } = require('child_process');
// Llama ASCII Art Collection
const LLAMA_ART = {
welcome: `
ใ โฉ___โฉ
ใ |ใใใใใฝ
ใ/ใใโใใโ | ${chalk.cyan.bold('AutoLlama v3.0')}
ใ|ใใ( _โ_)ใใ ${chalk.gray('Your Context-Aware RAG Companion')}
ใๅฝก๏ฝคใใ|โช|ใ๏ฝค\`
/ใใใ ใฝใใ/ยด ${chalk.yellow('The fluffiest way to build RAG apps!')}
(ใ๏ผผ๏ผฟ๏ผฟ๏ผฟ,ใ/
ใ๏ผผ๏ผฟ,)ใใ/
ใใใใใ /`,
happy: `
ใ โฉ___โฉ
ใ |ใใใใใฝ
ใ/ใใโใใโ |
ใ|ใใ( โก )ใใ
ใๅฝก๏ฝคใใ|โช|ใ๏ฝค\`
/ใใใ ใฝใใ/ยด`,
thinking: `
ใ โฉ___โฉ
ใ |ใใใใใฝ
ใ/ใใโใใโ |
ใ|ใใ( _โ_)ใ๐ญ
ใๅฝก๏ฝคใใ|โช|ใ๏ฝค\`
/ใใใ ใฝใใ/ยด`,
mini: '๐ฆ'
};
// Llama-themed messages
const LLAMA_MESSAGES = {
dependencies: [
"๐ฆ Gathering the finest npm packages from the digital Andes...",
"๐ฟ Your llama is carefully selecting each dependency...",
"๐ฆ Packing supplies for your RAG adventure..."
],
database: [
"๐๏ธ Setting up a cozy database pasture...",
"๐พ Teaching your llama to remember everything...",
"๐๏ธ Organizing the document grazing grounds..."
],
configuration: [
"โจ Sprinkling some llama magic on your config...",
"๐จ Customizing your llama's outfit...",
"๐ง Adjusting the saddle for perfect fit..."
],
completion: [
"๐ Your llama herd is ready for action!",
"โจ Setup complete! Time to start your RAG adventure!",
"๐ฆ *Happy llama humming* Everything is configured!"
]
};
// Llama facts to display during long operations
const LLAMA_FACTS = [
"๐ฆ Fun fact: Llamas hum when they're happy or curious!",
"๐ฆ Did you know? Llamas are excellent guards for sheep and goats!",
"๐ฆ Llama wisdom: They can carry 25-30% of their body weight!",
"๐ฆ Cool fact: Llamas have excellent memories and can learn tasks quickly!",
"๐ฆ Trivia: Baby llamas are called 'crias' - how cute is that?",
"๐ฆ Fun fact: Llamas communicate through ear positions and tail movements!",
"๐ฆ Did you know? Llamas rarely spit at humans - only at other llamas!",
"๐ฆ Llama fact: They're environmentally friendly with soft, padded feet!"
];
/**
 * AutoLlamaInstaller — interactive project scaffolder behind
 * `npx create-autollama [project-name]`.
 *
 * Pipeline: project name → configuration wizard → environment detection →
 * dependency installation → environment config generation → database
 * initialization → final touches. Any failure is routed to handleError(),
 * which prints llama-flavored tips and exits with code 1.
 *
 * NOTE(review): many emoji/art strings in this file appear mojibake-encoded —
 * confirm the file is stored as UTF-8 so they render correctly in terminals.
 */
class AutoLlamaInstaller {
  constructor() {
    this.projectName = process.argv[2]; // optional name from the CLI invocation
    this.targetDir = '';                // absolute project path, set in getProjectName()
    this.config = {};                   // wizard answers + detected capabilities
    this.startTime = Date.now();        // used to report total setup time
    this.currentFactIndex = 0;          // rotating cursor into LLAMA_FACTS
  }

  /**
   * Runs the complete installation pipeline in order.
   * @returns {Promise<void>} resolves after celebration, or never (process
   *   exits inside handleError on failure).
   */
  async install() {
    // Clear console and show welcome
    console.clear();
    this.showWelcome();
    try {
      // Step 1: Get project name with llama flair
      await this.getProjectName();
      // Step 2: Configuration wizard
      await this.configurationWizard();
      // Step 3: Environment detection
      await this.detectEnvironment();
      // Step 4: Install dependencies
      await this.installDependencies();
      // Step 5: Write .env and autollama.config.js.
      // FIX: generateEnvironmentConfig() was defined but never invoked, so the
      // wizard's answers (API key, personality, database choice) were never
      // persisted into the generated project.
      await this.generateEnvironmentConfig();
      // Step 6: Initialize database
      await this.initializeDatabase();
      // Step 7: Final setup
      await this.finalizeSetup();
      // Success celebration!
      this.celebrateSuccess();
    } catch (error) {
      this.handleError(error);
    }
  }

  /** Prints the welcome banner art and tagline. */
  showWelcome() {
    console.log(LLAMA_ART.welcome);
    console.log();
    const tagline = "๐ฆ Welcome to the fluffiest RAG framework installation!";
    console.log(chalk.cyan(tagline));
    console.log();
  }

  /**
   * Resolves the project name (CLI arg or interactive prompt), validates it,
   * and creates the target directory — asking before clobbering an existing
   * one. Exits the process (code 0) if the user declines to overwrite.
   */
  async getProjectName() {
    if (!this.projectName) {
      const { projectName } = await inquirer.prompt([{
        type: 'input',
        name: 'projectName',
        message: `${LLAMA_ART.mini} What shall we name your llama project?`,
        default: 'my-fluffy-rag',
        // npm-safe names only: lowercase letters, digits, hyphens
        validate: (input) => {
          if (/^[a-z0-9-]+$/.test(input)) return true;
          return 'Please use lowercase letters, numbers, and hyphens only';
        }
      }]);
      this.projectName = projectName;
    }
    this.targetDir = path.join(process.cwd(), this.projectName);
    // Check if directory exists; offer to wipe it for a fresh start
    if (await fs.pathExists(this.targetDir)) {
      console.log(chalk.yellow(`\n${LLAMA_ART.mini} Hmm, there's already a ${this.projectName} pasture here...`));
      const { overwrite } = await inquirer.prompt([{
        type: 'confirm',
        name: 'overwrite',
        message: 'Should we clear it for a fresh start?',
        default: false
      }]);
      if (!overwrite) {
        console.log(chalk.cyan(`\n${LLAMA_ART.mini} No worries! Try again with a different name.`));
        console.log(chalk.gray('Your llama will wait patiently for you! ๐'));
        process.exit(0);
      }
      await fs.remove(this.targetDir);
    }
    await fs.ensureDir(this.targetDir);
    console.log(chalk.green(`โ Created ${chalk.bold(this.projectName)} directory`));
  }

  /**
   * Collects configuration via template selection, the template's own wizard
   * (when it declares questions), and a set of standard questions.
   * Populates this.config.{template,customization,aiProvider,apiKey,
   * installSampleData,llamaPersonality,deploymentType}.
   */
  async configurationWizard() {
    console.log(chalk.cyan(`\n${LLAMA_ART.mini} Let's customize your llama setup!`));
    console.log(chalk.gray('I\'ll ask a few questions to get everything just right.\n'));
    // First, choose template
    const TemplateManager = require('../lib/templates/manager');
    const templateManager = new TemplateManager();
    const template = await templateManager.selectTemplate();
    this.config.template = template;
    // Run template-specific wizard if available
    if (template.wizardQuestions && template.wizardQuestions.length > 0) {
      console.log(chalk.cyan(`\n๐ง Customizing ${template.name}...`));
      this.config.customization = await templateManager.runTemplateWizard(template);
    }
    // Standard questions
    const standardAnswers = await inquirer.prompt([
      {
        type: 'list',
        name: 'aiProvider',
        message: '๐ง Which AI treats shall we feed your llama?',
        choices: [
          { name: '๐ OpenAI GPT (Recommended - Premium hay!)', value: 'openai' },
          { name: '๐ญ Anthropic Claude (Gourmet grass!)', value: 'anthropic' },
          { name: '๐พ I\'ll bring my own treats later', value: 'later' }
        ],
        default: 'openai'
      },
      {
        type: 'password',
        name: 'apiKey',
        message: '๐ Your API key (or press Enter to add it later):',
        when: (answers) => answers.aiProvider !== 'later', // skip if no provider chosen
        mask: '๐ฆ',
        validate: (input) => {
          if (!input) return true; // Allow empty — key can be added later in .env
          if (input.length < 20) return 'That seems a bit short for an API key...';
          return true;
        }
      },
      {
        type: 'confirm',
        name: 'installSampleData',
        message: '๐ Would you like some sample documents to play with?',
        default: true
      },
      {
        type: 'list',
        name: 'llamaPersonality',
        message: '๐จ Choose your llama\'s personality:',
        choices: [
          { name: '๐ฉ Professional Llama (Serious business mode)', value: 'professional' },
          { name: '๐ Friendly Llama (Balanced and approachable)', value: 'friendly' },
          { name: '๐ช Party Llama (Maximum fun and emojis!)', value: 'party' }
        ],
        // Follow the template's UI theme when it asks for "professional"
        default: template.configuration?.ui?.theme === 'professional' ? 'professional' : 'friendly'
      }
    ]);
    // Merge all answers; standard answers win over earlier keys
    this.config = { ...this.config, ...standardAnswers };
    this.config.deploymentType = template.deploymentMode || 'local';
    console.log(chalk.green(`\n${LLAMA_ART.mini} Excellent choices! Your llama approves!`));
  }

  /**
   * Probes the host for Docker, PostgreSQL, and Git, records the results in
   * this.config.capabilities, and downgrades a docker deployment to 'local'
   * when Docker is missing.
   */
  async detectEnvironment() {
    const spinner = ora({
      text: 'Sniffing around your environment...',
      spinner: {
        interval: 80,
        frames: ['๐ฆ ', ' ๐ฆ ', ' ๐ฆ', ' ๐ฆ ', '๐ฆ ']
      }
    }).start();
    const capabilities = {
      hasDocker: await this.checkDocker(),
      hasPostgres: await this.checkPostgreSQL(),
      hasGit: await this.checkGit(),
      nodeVersion: process.version
    };
    this.config.capabilities = capabilities;
    await this.sleep(1500); // Give time to see the animation
    const capList = [];
    capList.push(`Node ${capabilities.nodeVersion}`);
    if (capabilities.hasDocker) capList.push('Docker');
    if (capabilities.hasPostgres) capList.push('PostgreSQL');
    if (capabilities.hasGit) capList.push('Git');
    spinner.succeed(`Found: ${capList.join(', ')}`);
    // Provide friendly feedback based on what's found
    if (this.config.deploymentType === 'docker' && !capabilities.hasDocker) {
      console.log(chalk.yellow(`\n${LLAMA_ART.mini} Heads up! Docker isn't installed.`));
      console.log(chalk.gray('No worries - I\'ll adjust the setup for you!'));
      this.config.deploymentType = 'local'; // silent fallback to local mode
    }
  }

  /**
   * Applies the chosen template to the target directory, then runs
   * `npm install` inside it. Rotates llama facts on the spinner every 3s.
   * NOTE: changes the process cwd to the target directory.
   * @throws rethrows any template/npm failure after stopping the spinner.
   */
  async installDependencies() {
    console.log(chalk.cyan(`\n${LLAMA_ART.mini} Time to gather supplies!`));
    const spinner = ora({
      text: this.getRandomMessage('dependencies'),
      spinner: {
        interval: 100,
        frames: ['๐ฟ', '๐พ', '๐ฑ', '๐']
      }
    }).start();
    // Show llama facts during installation
    const factInterval = setInterval(() => {
      spinner.text = this.getNextLlamaFact();
    }, 3000);
    try {
      // Apply template to project
      const TemplateManager = require('../lib/templates/manager');
      const templateManager = new TemplateManager();
      spinner.text = '๐จ Applying template configuration...';
      await templateManager.applyTemplate(
        this.config.template,
        this.targetDir,
        this.projectName,
        this.config.customization
      );
      // Install npm dependencies
      process.chdir(this.targetDir);
      spinner.text = '๐ฆ Installing npm packages (this might take a minute)...';
      execSync('npm install', { stdio: 'pipe' });
      spinner.succeed('All dependencies installed! Your llama is well-equipped!');
    } catch (error) {
      spinner.fail('Had trouble gathering dependencies');
      throw error;
    } finally {
      // Guarantee the fact rotation stops on every exit path
      clearInterval(factInterval);
    }
  }

  /**
   * Writes the project's .env (from generateEnvContent) and an
   * autollama.config.js reflecting deployment type and personality.
   */
  async generateEnvironmentConfig() {
    const envContent = this.generateEnvContent();
    await fs.writeFile(path.join(this.targetDir, '.env'), envContent);
    // Create personality-based configuration
    const config = {
      deployment: this.config.deploymentType,
      personality: this.config.llamaPersonality,
      welcomeMessage: this.getPersonalityWelcome(),
      // local mode gets SQLite; anything else gets the PostgreSQL default URL
      database: this.config.deploymentType === 'local' ? {
        type: 'sqlite',
        path: './data/autollama.db'
      } : {
        type: 'postgresql',
        url: 'postgresql://autollama:autollama@localhost:5432/autollama'
      }
    };
    await fs.writeFile(
      path.join(this.targetDir, 'autollama.config.js'),
      `// ๐ฆ AutoLlama Configuration\nmodule.exports = ${JSON.stringify(config, null, 2)};`
    );
  }

  /**
   * Builds the .env file content from the wizard answers.
   * @returns {string} newline-joined env file body
   */
  generateEnvContent() {
    // Header comment varies with the chosen personality
    const personality = {
      professional: '# AutoLlama v3.0 Configuration',
      friendly: '# ๐ฆ AutoLlama v3.0 Configuration - Happy grazing!',
      party: '# ๐ฆ๐ AutoLlama v3.0 - Let\'s party! ๐๐ฆ'
    };
    const lines = [
      personality[this.config.llamaPersonality],
      '# Generated by your friendly setup llama',
      `# Created: ${new Date().toLocaleDateString()}`,
      '',
      '# Deployment Configuration',
      `DEPLOYMENT_MODE=${this.config.deploymentType}`,
      `LLAMA_PERSONALITY=${this.config.llamaPersonality}`,
      '',
      '# AI Configuration'
    ];
    // Placeholder keys are written when the user skipped entering one
    if (this.config.aiProvider === 'openai') {
      lines.push(`OPENAI_API_KEY=${this.config.apiKey || 'your_openai_api_key_here'}`);
      lines.push('AI_PROVIDER=openai');
      lines.push('# ๐ฆ Tip: Get your key at https://platform.openai.com/api-keys');
    } else if (this.config.aiProvider === 'anthropic') {
      lines.push(`ANTHROPIC_API_KEY=${this.config.apiKey || 'your_anthropic_api_key_here'}`);
      lines.push('AI_PROVIDER=anthropic');
      lines.push('# ๐ฆ Tip: Get your key at https://console.anthropic.com/');
    }
    if (this.config.deploymentType === 'local') {
      lines.push('', '# Local Development Database');
      lines.push('DATABASE_TYPE=sqlite');
      lines.push('DATABASE_PATH=./data/autollama.db');
      lines.push('# ๐ฆ Your data lives in a cozy local pasture!');
    } else {
      lines.push('', '# PostgreSQL Configuration');
      lines.push('DATABASE_URL=postgresql://autollama:autollama@localhost:5432/autollama');
      lines.push('# ๐ฆ Professional-grade database for serious llamas');
    }
    lines.push('', '# Vector Database');
    lines.push('QDRANT_URL=http://localhost:6333');
    if (this.config.deploymentType === 'local') {
      lines.push('# ๐ฆ Qdrant will auto-start in development mode');
    }
    lines.push('', '# RAG Configuration');
    lines.push('ENABLE_CONTEXTUAL_EMBEDDINGS=true');
    lines.push('CONTEXT_GENERATION_BATCH_SIZE=5');
    lines.push('# ๐ฆ 60% better accuracy with Anthropic\'s contextual retrieval!');
    if (this.config.llamaPersonality === 'party') {
      lines.push('', '# ๐ Party Mode Settings');
      lines.push('ENABLE_CONFETTI=true');
      lines.push('MAX_EMOJI_LEVEL=11');
    }
    return lines.join('\n');
  }

  /**
   * Runs the project's auto-setup to initialize the database. Best-effort:
   * a failure only warns (setup completes on first run) and never aborts
   * the installation.
   */
  async initializeDatabase() {
    const spinner = ora({
      text: this.getRandomMessage('database'),
      spinner: {
        interval: 120,
        frames: ['๐๏ธ ', 'โฐ๏ธ ', '๐ป ', '๐๏ธ ']
      }
    }).start();
    try {
      // Use the auto-setup system
      const { AutoSetup } = require('../lib/startup/auto-setup');
      const autoSetup = new AutoSetup({
        projectRoot: this.targetDir,
        skipPrompts: true
      });
      spinner.text = '๐พ Setting up your database pasture...';
      const result = await autoSetup.runAutoSetup();
      if (result.setup) {
        spinner.succeed('Database initialized! Your llama has a great memory now!');
        if (this.config.installSampleData) {
          console.log(chalk.gray(` ๐ Sample documents added for testing`));
        }
      } else {
        spinner.succeed('Database setup completed');
      }
    } catch (error) {
      spinner.warn('Database setup will complete on first run');
      // Deliberately swallowed: don't fail installation if database setup fails
    }
  }

  /**
   * Creates runtime directories (data/uploads/logs) and marks the bin
   * script executable when present.
   */
  async finalizeSetup() {
    const spinner = ora({
      text: 'Adding finishing touches...',
      spinner: {
        interval: 80,
        frames: ['โจ', '๐ซ', 'โญ', '๐']
      }
    }).start();
    // Create necessary directories
    await fs.ensureDir(path.join(this.targetDir, 'data'));
    await fs.ensureDir(path.join(this.targetDir, 'uploads'));
    await fs.ensureDir(path.join(this.targetDir, 'logs'));
    // Set executable permissions ('755' is parsed as octal by fs.chmod)
    const binFiles = ['bin/create-autollama.js'];
    for (const binFile of binFiles) {
      const binPath = path.join(this.targetDir, binFile);
      if (await fs.pathExists(binPath)) {
        await fs.chmod(binPath, '755');
      }
    }
    await this.sleep(1500);
    spinner.succeed('Everything is perfect!');
  }

  /** Prints the success summary, next steps, URLs, and personality farewell. */
  celebrateSuccess() {
    const duration = Math.round((Date.now() - this.startTime) / 1000);
    console.log('\n' + chalk.green('โ'.repeat(50)));
    console.log(LLAMA_ART.happy);
    console.log(chalk.green.bold('\n ๐ Installation Complete! ๐'));
    console.log(chalk.gray(` Setup time: ${duration} seconds`));
    console.log(chalk.green('โ'.repeat(50)));
    console.log(chalk.cyan('\n๐ Next Steps:'));
    console.log(chalk.white(` cd ${chalk.bold(this.projectName)}`));
    // Use template-specific start command, falling back on deployment type
    const startCommand = this.config.template?.scripts?.dev ||
      (this.config.deploymentType === 'docker' ? 'npm run docker:up' : 'npm run dev');
    console.log(chalk.white(` ${startCommand}`));
    console.log(chalk.cyan('\n๐ Your llama will be grazing at:'));
    console.log(chalk.white(' โข Application: http://localhost:8080'));
    console.log(chalk.white(' โข API Docs: http://localhost:8080/api/docs'));
    console.log(chalk.white(' โข Health: http://localhost:8080/api/health'));
    // Template-specific information.
    // FIX: guard against templates without a `features` array — previously
    // `.slice` on undefined would throw here, after a successful install.
    const features = Array.isArray(this.config.template?.features)
      ? this.config.template.features
      : [];
    if (this.config.template && features.length > 0) {
      console.log(chalk.cyan(`\n๐ฏ ${this.config.template.name} Features:`));
      features.slice(0, 3).forEach(feature => {
        console.log(chalk.gray(` โข ${feature}`));
      });
      if (features.length > 3) {
        console.log(chalk.gray(` โข ...and ${features.length - 3} more!`));
      }
    }
    if (!this.config.apiKey && this.config.aiProvider !== 'later') {
      console.log(chalk.yellow(`\nโ ๏ธ Don't forget to add your ${this.config.aiProvider.toUpperCase()} API key in .env`));
    }
    // Personality-based farewell
    const farewells = {
      professional: '\n๐ฆ Your AutoLlama instance is ready for deployment.',
      friendly: '\n๐ฆ Happy coding! Your llama is excited to help you build amazing things!',
      party: '\n๐ฆ๐ LET\'S GOOO! Time to build the most awesome RAG app ever! ๐๐'
    };
    console.log(chalk.bold.cyan(farewells[this.config.llamaPersonality]));
    console.log(chalk.gray('\nNeed help? Visit https://github.com/autollama/autollama'));
    console.log();
  }

  /**
   * Prints a friendly error report with context-specific tips, then exits(1).
   * @param {Error|*} error - whatever was thrown during install()
   */
  handleError(error) {
    // FIX: tolerate non-Error throwables — `error.message.includes(...)`
    // previously crashed when `message` was undefined.
    const message = error?.message ?? String(error);
    console.log(chalk.red(`\n${LLAMA_ART.mini}๐ Oh no! This llama encountered a rocky path...`));
    console.log(chalk.yellow('Even the fluffiest llamas stumble sometimes!\n'));
    console.log(chalk.red('Error details:'));
    console.log(chalk.gray(` ${message}`));
    if (error?.code === 'EACCES') {
      console.log(chalk.cyan('\n๐ก Llama tip: Try running with sudo or check your permissions!'));
    } else if (message.includes('npm')) {
      console.log(chalk.cyan('\n๐ก Llama tip: Make sure npm is installed and up to date!'));
    } else if (message.includes('network')) {
      console.log(chalk.cyan('\n๐ก Llama tip: Check your internet connection!'));
    }
    console.log(chalk.gray('\n๐ For more help, visit: https://github.com/autollama/autollama/issues'));
    console.log(chalk.gray('๐ฆ Your llama believes in you! Try again!'));
    process.exit(1);
  }

  // Utility functions

  /**
   * Picks a random message for a spinner phase.
   * @param {string} category - a key of LLAMA_MESSAGES
   * @returns {string}
   */
  getRandomMessage(category) {
    const messages = LLAMA_MESSAGES[category];
    return messages[Math.floor(Math.random() * messages.length)];
  }

  /**
   * Returns the next llama fact in rotation, wrapping around the list.
   * @returns {string}
   */
  getNextLlamaFact() {
    const fact = LLAMA_FACTS[this.currentFactIndex % LLAMA_FACTS.length];
    this.currentFactIndex++;
    return fact;
  }

  /**
   * Welcome message matching the chosen personality, used in the generated
   * autollama.config.js.
   * @returns {string|undefined} undefined if personality is unset
   */
  getPersonalityWelcome() {
    const welcomes = {
      professional: 'Welcome to AutoLlama. Enterprise-grade RAG framework.',
      friendly: '๐ฆ Welcome! Your friendly RAG companion is ready to help!',
      party: '๐ฆ๐ WOOHOO! Welcome to the RAG party! Let\'s build something AMAZING! ๐'
    };
    return welcomes[this.config.llamaPersonality];
  }

  /**
   * Promise-based delay.
   * @param {number} ms
   * @returns {Promise<void>}
   */
  async sleep(ms) {
    return new Promise(resolve => setTimeout(resolve, ms));
  }

  // Environment detection helpers — each shells out and treats any failure
  // (binary missing, non-zero exit) as "not available".

  /** @returns {Promise<boolean>} true if both docker and docker compose respond */
  async checkDocker() {
    try {
      execSync('docker --version', { stdio: 'ignore' });
      execSync('docker compose version', { stdio: 'ignore' });
      return true;
    } catch {
      return false;
    }
  }

  /** @returns {Promise<boolean>} true if the psql client is on PATH */
  async checkPostgreSQL() {
    try {
      execSync('psql --version', { stdio: 'ignore' });
      return true;
    } catch {
      return false;
    }
  }

  /** @returns {Promise<boolean>} true if git is on PATH */
  async checkGit() {
    try {
      execSync('git --version', { stdio: 'ignore' });
      return true;
    } catch {
      return false;
    }
  }
}
// Exit cleanly (code 0) when the user presses Ctrl+C mid-installation,
// instead of dying with a stack trace.
process.on('SIGINT', () => {
  const cancelled = `\n\n${LLAMA_ART.mini} Installation cancelled!`;
  const farewell = 'Your llama will be here when you\'re ready to try again! ๐';
  console.log(chalk.yellow(cancelled));
  console.log(chalk.gray(farewell));
  process.exit(0);
});
// Last-resort handler for promise rejections that escaped install()'s
// try/catch: report and exit with failure.
process.on('unhandledRejection', (error) => {
  const prefix = `\n${LLAMA_ART.mini} Unexpected error:`;
  console.error(chalk.red(prefix), error.message);
  console.log(chalk.gray('Please report this at: https://github.com/autollama/autollama/issues'));
  process.exit(1);
});
// Main execution: run the installer only when this file is invoked directly
// (e.g. via npx); always export the class for programmatic use.
if (require.main === module) {
  new AutoLlamaInstaller().install();
}
module.exports = AutoLlamaInstaller;