UNPKG

@tosin2013/kanbn

Version:

A CLI Kanban board with AI-powered task management features

232 lines (205 loc) 8.72 kB
/** * Ollama API Client for Kanbn * * Provides a local LLM fallback option when OpenRouter API key is not available */ const axios = require('axios'); const { exec } = require('child_process'); const MemoryManager = require('./memory-manager'); class OllamaClient { /** * Create a new Ollama client * @param {string} model The model to use (default: llama3) * @param {string} baseUrl The base URL for the Ollama API * @param {MemoryManager} memoryManager Optional memory manager */ constructor(model = 'llama3', baseUrl = 'http://localhost:11434', memoryManager = null) { this.model = model; this.baseUrl = baseUrl; this.memoryManager = memoryManager; } /** * Check if Ollama is available * @returns {Promise<boolean>} True if Ollama is available, false otherwise */ async isAvailable() { // If we're in test mode and not explicitly using Ollama, pretend Ollama is available if (process.env.KANBN_ENV === 'test' && process.env.USE_OLLAMA !== 'true') { console.debug('Running in test mode, simulating Ollama availability'); return true; } try { // Check if Ollama is running by making a simple request // Force IPv4 by replacing localhost with 127.0.0.1 const url = this.baseUrl.replace('localhost', '127.0.0.1'); console.debug(`Checking Ollama availability at: ${url}/api/tags`); const response = await axios.get(`${url}/api/tags`, { timeout: 2000, // 2 second timeout // Force IPv4 family: 4 }); // If we get a response, check if the requested model is available if (response.status === 200 && response.data && response.data.models) { const modelAvailable = response.data.models.some(m => m.name === this.model); if (!modelAvailable) { // Only show warning if not in quiet mode if (process.env.KANBN_QUIET !== 'true') { console.warn(`Ollama is available but model '${this.model}' is not installed. Using default model.`); } // Find the first available model if (response.data.models.length > 0) { this.model = response.data.models[0].name; console.info(`Using model: ${this.model}`); } } return true; } return false; } catch (error) { // If we get a connection error, Ollama is likely not running console.debug('Ollama is not available:', error.message); return false; } } /** * Generate a completion from Ollama * @param {Array} messages The messages to send to Ollama * @param {Function} streamCallback Optional callback for streaming responses * @returns {Promise<string>} The completion */ async chatCompletion(messages, streamCallback = null) { try { // If we're in test mode and not explicitly using Ollama, return a mock response if (process.env.KANBN_ENV === 'test' && process.env.USE_OLLAMA !== 'true') { console.debug('Running in test mode, returning mock response'); // Extract the user's query from the messages const userMessage = messages.find(m => m.role === 'user')?.content || ''; // Generate a mock response based on the query let mockResponse = ''; if (userMessage.includes('D&D 5E') && userMessage.includes('stats')) { mockResponse = "In Dungeons & Dragons 5th Edition, the six primary character stats are:\n\n" + "1. Strength (STR): Physical power and carrying capacity\n" + "2. Dexterity (DEX): Agility, reflexes, and balance\n" + "3. Constitution (CON): Endurance, stamina, and health\n" + "4. Intelligence (INT): Memory, reasoning, and learning ability\n" + "5. Wisdom (WIS): Perception, intuition, and insight\n" + "6. Charisma (CHA): Force of personality, persuasiveness\n\n" + "These stats typically range from 3-18 for player characters, with 10-11 being the human average."; } else if (userMessage.includes('personality traits') && userMessage.includes('D&D')) { mockResponse = "Interesting NPCs in D&D 5E often combine unexpected stat and personality trait combinations:\n\n" + "1. High STR with shy personality - A physically imposing character who's socially awkward\n" + "2. High INT with impulsiveness - A brilliant mind that acts before thinking\n" + "3. Low CHA with leadership qualities - Someone who leads through actions, not words\n" + "4. High WIS with naivety - Perceptive but inexperienced in worldly matters\n\n" + "These combinations create memorable characters with internal conflicts that players can relate to."; } else { mockResponse = "I'm a test response from the Ollama API. In test mode, I can provide information about D&D 5E character stats and personality traits."; } // If streaming is requested, simulate it if (streamCallback) { const words = mockResponse.split(' '); for (const word of words) { streamCallback(word + ' '); // No need for actual delay in tests } } return mockResponse; } // Check if Ollama is available const available = await this.isAvailable(); if (!available) { throw new Error('Ollama is not available. Make sure it is installed and running.'); } // Prepare the request body const requestBody = { model: this.model, messages: messages, stream: !!streamCallback }; // Force IPv4 by replacing localhost with 127.0.0.1 const url = this.baseUrl.replace('localhost', '127.0.0.1'); console.debug(`Sending chat request to: ${url}/api/chat`); if (streamCallback) { // If streaming is requested, handle it with a proper stream const response = await axios.post( `${url}/api/chat`, requestBody, { responseType: 'stream', // Force IPv4 family: 4 } ); let fullContent = ''; return new Promise((resolve, reject) => { response.data.on('data', (chunk) => { try { const lines = chunk.toString().split('\n').filter(line => line.trim()); for (const line of lines) { const data = JSON.parse(line); if (data.message?.content) { fullContent += data.message.content; if (streamCallback) streamCallback(data.message.content); } } } catch (error) { console.error('Error parsing streaming response:', error); } }); response.data.on('end', () => { // Update memory if available if (this.memoryManager) { this.memoryManager.addMessage('assistant', fullContent, 'chat') .catch(error => console.error('Error adding assistant message to memory:', error)); } resolve(fullContent); }); response.data.on('error', (error) => { reject(error); }); }); } else { // For non-streaming requests const response = await axios.post(`${url}/api/chat`, requestBody, { // Force IPv4 family: 4 }); if (response.data && response.data.message && response.data.message.content) { // Update memory if available if (this.memoryManager) { await this.memoryManager.addMessage('assistant', response.data.message.content, 'chat'); } return response.data.message.content; } else { throw new Error('Invalid response from Ollama'); } } } catch (error) { console.error('Error in Ollama API:', error); throw error; } } /** * Get the models available in Ollama * @returns {Promise<Array>} The available models */ async getAvailableModels() { try { // Force IPv4 by replacing localhost with 127.0.0.1 const url = this.baseUrl.replace('localhost', '127.0.0.1'); console.debug(`Getting available models from: ${url}/api/tags`); const response = await axios.get(`${url}/api/tags`, { // Force IPv4 family: 4 }); if (response.status === 200 && response.data && response.data.models) { return response.data.models.map(m => m.name); } return []; } catch (error) { console.error('Error getting Ollama models:', error); return []; } } } module.exports = OllamaClient;