langterm
Secure CLI tool that translates natural language to shell commands using local AI models via Ollama, with project memory system, reusable command templates (hooks), MCP (Model Context Protocol) support, and dangerous command detection
import fetch from 'node-fetch';
import chalk from 'chalk';
import os from 'os';

// Allow overriding Ollama URL via environment variable for Docker/remote setups
export const OLLAMA_URL = process.env.OLLAMA_URL || 'http://localhost:11434';

/**
 * Checks if Ollama is running and accessible
 * @returns {Promise<boolean>} Whether Ollama is accessible
 */
export async function checkOllama() {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    if (!response.ok) throw new Error('Ollama not responding');
    return true;
  } catch (error) {
    if (error.code === 'ECONNREFUSED' || error.message.includes('ECONNREFUSED')) {
      console.log(chalk.yellow('💡 Hint: Ollama not accessible at ' + OLLAMA_URL));
      console.log(chalk.gray('If Ollama is running elsewhere, set OLLAMA_URL environment variable'));
      console.log(chalk.gray('Example: export OLLAMA_URL=http://host.docker.internal:11434'));
    }
    return false;
  }
}

/**
 * Gets list of available Ollama models
 * @returns {Promise<Array>} Array of model objects
 */
export async function getModels() {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    const data = await response.json();
    return data.models || [];
  } catch (error) {
    return [];
  }
}

/**
 * Generates a shell command from natural language input
 * @param {string} userInput - Natural language command description
 * @param {string} modelName - Ollama model to use
 * @param {string} mcpContext - Optional MCP context to enhance command generation
 * @returns {Promise<string>} Generated shell command
 */
export async function generateCommand(userInput, modelName, mcpContext = '') {
  // Detect the operating system so the prompt targets the right shell
  const platform = os.platform();
  let osContext = '';
  let exampleCommand = '';

  if (platform === 'win32') {
    osContext = 'Windows (Command Prompt/PowerShell)';
    exampleCommand = 'dir /a';
  } else if (platform === 'darwin') {
    osContext = 'macOS (Terminal)';
    exampleCommand = 'ls -la';
  } else {
    osContext = 'Linux (Terminal)';
    exampleCommand = 'ls -la';
  }

  const prompt = `You are an expert translator that converts English instructions into a single, executable terminal command for ${osContext}.
**RULES:**
1. ONLY return the raw command appropriate for ${osContext}.
2. Do NOT include any explanation or natural language.
3. Do NOT include markdown formatting like \`\`\` or backticks (\`).
4. Use platform-specific commands (e.g., 'dir' on Windows, 'ls' on Unix/Linux/macOS).
5. Consider the additional context provided below to make more informed decisions.
**EXAMPLE:**
Instruction: list all files
Response: ${exampleCommand}${mcpContext}
**TASK:**
Instruction: ${userInput}
Response:`;

  try {
    const response = await fetch(`${OLLAMA_URL}/api/generate`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: modelName,
        prompt: prompt,
        stream: false
      })
    });

    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.statusText}`);
    }

    const data = await response.json();
    let command = data.response || '';

    // Clean up the response: if the model wrapped the command in backticks,
    // extract the first backtick-quoted segment; otherwise strip stray code fences.
    if (command.includes('`')) {
      command = command.match(/`([^`]*)`/)?.[1] || command;
    } else {
      command = command.replace(/```/g, '');
    }

    // Trim in both cases so surrounding whitespace/newlines never leak into the command
    return command.trim();
  } catch (error) {
    throw new Error(`Failed to generate command: ${error.message}`);
  }
}
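
Below is a minimal usage sketch showing how the three exports above might be wired together in a CLI entry point. It is illustrative only: the './ollama.js' module path and the 'llama3' fallback model name are assumptions, and the real langterm CLI features (project memory, hooks, MCP context, dangerous-command detection) are not reproduced here.

// usage-sketch.js -- illustrative only, not part of the module above
import { checkOllama, getModels, generateCommand } from './ollama.js'; // assumed filename

async function main() {
  // Bail out early if the Ollama server is not reachable
  if (!(await checkOllama())) {
    process.exit(1);
  }

  // Pick the first locally installed model, falling back to an assumed default
  const models = await getModels();
  const modelName = models[0]?.name || 'llama3';

  // Translate a natural-language request into a shell command (printed, not executed)
  const command = await generateCommand('list all files', modelName);
  console.log(command);
}

main().catch((error) => {
  console.error(error.message);
  process.exit(1);
});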