@chainlink/mcp-server
Prototype MCP Server for CLL
"use strict";
/**
* @fileoverview Ollama service implementation for local AI model inference
*
* Provides integration with locally-hosted Ollama models for AI completions.
* Handles connection management, model availability checking, and automatic
* model pulling when models are not available locally.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.OllamaService = void 0;
const ollama_1 = require("ollama");
const ai_service_1 = require("./ai-service");
const logger_1 = require("../utils/logger");
/**
* Ollama service implementation for local AI model inference
*
* Provides access to locally-hosted Ollama models including Llama, Mistral,
* and other open-source models. Handles connection management, model availability
* checking, and automatic model downloading when needed.
*
* @class OllamaService
* @extends AIService
*/
class OllamaService extends ai_service_1.AIService {
client;
baseURL;
/**
* Initialize the Ollama service with configuration
*
* @param config - Service configuration including optional base URL
*/
constructor(config) {
super(config);
this.baseURL = config.baseURL || "http://localhost:11434";
this.client = new ollama_1.Ollama({
host: this.baseURL,
});
}
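/*
 * Illustrative usage sketch (not part of the compiled output). The config
 * fields shown are the ones read here and in generateResponse(); any fields
 * required by the AIService base class are not shown, and "llama3" is a
 * placeholder model name.
 *
 *   const service = new OllamaService({
 *     model: "llama3",
 *     maxTokens: 2000,
 *     baseURL: "http://localhost:11434",
 *   });
 */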
/**
* Generate a chat completion response using a local Ollama model
*
* Tests connection to the Ollama server, converts message format,
* and sends the request. Provides detailed error messages for common
* connection issues and model availability problems.
*
* @param messages - Conversation history to send to the local model
* @returns Promise resolving to formatted AI response with usage statistics
* @throws {Error} When Ollama server is not running or model is unavailable
*/
async generateResponse(messages) {
try {
// Test connection before making the request
await this.testConnection();
// Convert our message format to Ollama format
const ollamaMessages = messages.map((msg) => ({
role: msg.role,
content: msg.content,
}));
logger_1.Logger.log("info", `Generating response with Ollama model: ${this.config.model} at ${this.baseURL}`);
const response = await this.client.chat({
model: this.config.model,
messages: ollamaMessages,
stream: false,
options: {
num_predict: this.config.maxTokens || 2000,
},
});
return {
content: response.message.content,
usage: {
input_tokens: response.prompt_eval_count || 0,
output_tokens: response.eval_count || 0,
total_tokens: (response.prompt_eval_count || 0) + (response.eval_count || 0),
},
id: undefined,
requestId: undefined,
};
}
catch (error) {
if (error.code === "ECONNREFUSED" || error.message?.includes("connect")) {
throw new Error(`Failed to connect to Ollama at ${this.baseURL}. Please ensure Ollama is running and accessible.`);
}
throw new Error(`Ollama request failed: ${error.message}`);
}
}
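/*
 * Illustrative call sketch, assuming `service` is an OllamaService instance.
 * Messages use the { role, content } shape converted above, and the resolved
 * value exposes the reply text plus token usage.
 *
 *   const reply = await service.generateResponse([
 *     { role: "system", content: "You are a concise assistant." },
 *     { role: "user", content: "Summarize what this MCP server does." },
 *   ]);
 *   console.log(reply.content);
 *   console.log(`tokens used: ${reply.usage.total_tokens}`);
 */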
/**
* Test connection to the Ollama server
*
* @throws {Error} When Ollama server is not accessible
*/
async testConnection() {
try {
// Test if Ollama is running by listing models
await this.client.list();
}
catch (error) {
if (error.code === "ECONNREFUSED") {
throw new Error(`Ollama server is not running at ${this.baseURL}. Please start Ollama first.`);
}
throw error;
}
}
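/*
 * Illustrative sketch: surface a clear startup error before serving requests,
 * assuming `service` is an OllamaService instance. testConnection() rejects
 * with the message built above when the server is unreachable.
 *
 *   try {
 *     await service.testConnection();
 *   } catch (err) {
 *     console.error(err.message);
 *   }
 */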
/**
* Check if a specific model is available locally
*
* @param modelName - Name of the model to check
* @returns Promise resolving to true if model is available, false otherwise
*/
async isModelAvailable(modelName) {
try {
const models = await this.client.list();
return models.models.some((model) => model.name === modelName);
}
catch {
return false;
}
}
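/*
 * Illustrative sketch ("mistral:latest" is a placeholder tag). The comparison
 * above is an exact string match, so pass the full "name:tag" form.
 *
 *   const available = await service.isModelAvailable("mistral:latest");
 *   console.log(available ? "model is local" : "model must be pulled");
 */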
/**
* Ensure a model exists locally, downloading it if necessary
*
* Checks if the specified model is available locally and attempts to
* download it from the Ollama registry if not found.
*
* @param modelName - Name of the model to ensure is available
* @throws {Error} When model download fails or model name is invalid
*/
async ensureModelExists(modelName) {
const isAvailable = await this.isModelAvailable(modelName);
if (!isAvailable) {
logger_1.Logger.log("info", `Model ${modelName} not found locally. Attempting to pull...`);
try {
await this.client.pull({ model: modelName });
logger_1.Logger.log("info", `Successfully pulled model: ${modelName}`);
}
catch (error) {
throw new Error(`Failed to pull model ${modelName}: ${error.message}. Please ensure the model name is correct and you have internet connectivity.`);
}
}
}
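/*
 * Illustrative sketch: pull a model once before first use and then generate.
 * Both methods are defined in this class; the model name is a placeholder and
 * error handling is left to the caller.
 *
 *   // The name should normally match the model the service was configured with.
 *   await service.ensureModelExists("llama3:latest");
 *   const reply = await service.generateResponse([
 *     { role: "user", content: "ping" },
 *   ]);
 */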
/**
* Check if the service is properly configured
*
* For Ollama, we don't require an API key, only a valid model name.
*
* @returns True if model is specified, false otherwise
*/
isConfigured() {
// For Ollama, we don't need an API key, just check if we have a model
return !!this.config.model;
}
/**
* Get the service name for identification
*
* @returns "Ollama" as the service identifier
*/
getServiceName() {
return "Ollama";
}
/**
* Get the base URL of the Ollama server
*
* @returns The configured Ollama server URL
*/
getBaseURL() {
return this.baseURL;
}
/**
* List all available models on the Ollama server
*
* @returns Promise resolving to the list of available models
* @throws {Error} When unable to connect to Ollama server
*/
listModels() {
return this.client.list();
}
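/*
 * Illustrative sketch: log the names of locally installed models. The response
 * shape ({ models: [{ name, ... }] }) is the same one isModelAvailable() reads
 * above.
 *
 *   const { models } = await service.listModels();
 *   for (const m of models) {
 *     console.log(m.name);
 *   }
 */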
}
exports.OllamaService = OllamaService;
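/*
 * Illustrative end-to-end sketch (not part of the compiled output; the require
 * path and model name are placeholders):
 *
 *   const { OllamaService } = require("./ollama-service");
 *
 *   async function main() {
 *     const service = new OllamaService({ model: "llama3", maxTokens: 1024 });
 *     if (!service.isConfigured()) {
 *       throw new Error(`${service.getServiceName()} needs a model name`);
 *     }
 *     await service.ensureModelExists("llama3");
 *     const reply = await service.generateResponse([
 *       { role: "user", content: "Hello from the MCP server" },
 *     ]);
 *     console.log(`[${service.getBaseURL()}] ${reply.content}`);
 *   }
 *
 *   main().catch((err) => console.error(err.message));
 */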
//# sourceMappingURL=ollama-service.js.map