// giga-code — a personal AI CLI assistant powered by Grok for local development.
// Compiled JavaScript output (originally listed as 106 lines • 3.98 kB).
;
// TypeScript-emitted interop helper: normalizes a required module so that a
// compiled `import x from '...'` always reads `x` off the `default` property.
// Reuses an existing helper on `this` (legacy multi-file emit) when present.
var __importDefault = (this && this.__importDefault) || function (mod) {
    if (mod && mod.__esModule) {
        // Already an ES module namespace — use it as-is.
        return mod;
    }
    // Plain CommonJS export: wrap it so `.default` resolves to the module.
    return { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.getRecommendedOllamaModels = exports.testOllamaConnection = exports.searchOllamaModels = exports.fetchOllamaModels = void 0;
const axios_1 = __importDefault(require("axios"));
/**
 * Query a local Ollama server for its installed models.
 *
 * @param {string} [baseUrl='http://localhost:11434'] - Server address; trailing
 *   slashes are ignored and a missing scheme defaults to http://.
 * @returns {Promise<string[]>} Sorted model names, or an empty array when the
 *   request fails or the response is malformed (callers fall back to a static list).
 */
async function fetchOllamaModels(baseUrl = 'http://localhost:11434') {
    try {
        // Normalize the address: drop trailing slashes, then prepend http://
        // when no scheme was supplied.
        const trimmed = baseUrl.replace(/\/+$/, '');
        const root = (trimmed.startsWith('http://') || trimmed.startsWith('https://'))
            ? trimmed
            : `http://${trimmed}`;
        const response = await axios_1.default.get(`${root}/api/tags`, {
            headers: {
                'Content-Type': 'application/json',
            },
            timeout: 10000, // fail fast when the server is unreachable (10 s)
        });
        const payload = response.data;
        if (!payload.models || !Array.isArray(payload.models)) {
            throw new Error('Invalid response format from Ollama API');
        }
        // Collect non-blank model names, sorted for stable display.
        const names = [];
        for (const entry of payload.models) {
            if (entry.name && entry.name.trim()) {
                names.push(entry.name);
            }
        }
        names.sort();
        return names;
    }
    catch (error) {
        console.error('Error fetching Ollama models:', error.message);
        // Deliberate swallow: an empty list makes callers fall back to a static list.
        return [];
    }
}
exports.fetchOllamaModels = fetchOllamaModels;
/**
 * Fetch the installed Ollama models and filter them by a search string.
 *
 * Results are ranked: exact matches first, then prefix matches, then other
 * substring matches; ties are broken alphabetically.
 *
 * @param {string} query - Case-insensitive search term; blank returns all models.
 * @param {string} [baseUrl='http://localhost:11434'] - Ollama server address.
 * @returns {Promise<string[]>} Matching model names, or [] on unexpected error.
 */
async function searchOllamaModels(query, baseUrl = 'http://localhost:11434') {
    try {
        const available = await fetchOllamaModels(baseUrl);
        // No usable query — hand back the full list untouched.
        if (!query || !query.trim()) {
            return available;
        }
        const needle = query.toLowerCase().trim();
        const matches = available.filter((name) => name.toLowerCase().includes(needle));
        // Relevance score: 0 = exact, 1 = starts-with, 2 = contains.
        const rank = (name) => {
            const lower = name.toLowerCase();
            if (lower === needle) {
                return 0;
            }
            if (lower.startsWith(needle)) {
                return 1;
            }
            return 2;
        };
        // Primary sort by relevance, alphabetical within the same rank.
        matches.sort((a, b) => (rank(a) - rank(b)) || a.localeCompare(b));
        return matches;
    }
    catch (error) {
        console.error('Error searching Ollama models:', error);
        return [];
    }
}
exports.searchOllamaModels = searchOllamaModels;
/**
 * Probe an Ollama server to verify it is reachable.
 *
 * Fix: the previous implementation delegated to fetchOllamaModels(), which
 * catches every error and returns [] — so the catch branch here was
 * unreachable and the probe reported { success: true, modelCount: 0 } even
 * when the server was down. Querying the API directly lets connection
 * failures surface as { success: false }.
 *
 * @param {string} [baseUrl='http://localhost:11434'] - Server address; trailing
 *   slashes are ignored and a missing scheme defaults to http://.
 * @returns {Promise<{success: boolean, modelCount?: number, error?: string}>}
 */
async function testOllamaConnection(baseUrl = 'http://localhost:11434') {
    try {
        // Same URL normalization as fetchOllamaModels.
        let cleanBaseUrl = baseUrl.replace(/\/+$/, '');
        if (!cleanBaseUrl.startsWith('http://') && !cleanBaseUrl.startsWith('https://')) {
            cleanBaseUrl = `http://${cleanBaseUrl}`;
        }
        const response = await axios_1.default.get(`${cleanBaseUrl}/api/tags`, {
            headers: {
                'Content-Type': 'application/json',
            },
            timeout: 10000, // fail fast on unreachable hosts (10 s)
        });
        // Tolerate a malformed body: a reachable server with an unexpected
        // payload still counts as connected, with zero models.
        const models = (response.data && Array.isArray(response.data.models))
            ? response.data.models
            : [];
        return {
            success: true,
            modelCount: models.length
        };
    }
    catch (error) {
        return {
            success: false,
            error: error.message || 'Unknown error'
        };
    }
}
exports.testOllamaConnection = testOllamaConnection;
/**
 * Curated starter models suggested to new users before they have pulled
 * anything locally.
 *
 * @returns {string[]} Ollama model tags in `name:size` form.
 */
function getRecommendedOllamaModels() {
    const recommended = [
        'llama3.2:3b',
        'llama3.2:1b',
        'qwen2.5:7b',
        'phi3:3.8b',
        'gemma2:9b',
        'mistral:7b',
        'codellama:7b',
        'neural-chat:7b',
        'tinyllama:1.1b', // Ultra-fast tiny model
    ];
    return recommended;
}
exports.getRecommendedOllamaModels = getRecommendedOllamaModels;
//# sourceMappingURL=ollama-models.js.map