@chainlink/mcp-server
Prototype MCP Server for CLL
"use strict";
/**
* @fileoverview Embedding provider abstraction for vector generation
*
* Provides a unified interface for different embedding providers (OpenAI, Ollama)
* with automatic provider detection from centralized configuration. Handles connection
* testing, model initialization, and text-to-vector conversion operations.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.EmbeddingProviderService = void 0;
exports.createEmbeddingManager = createEmbeddingManager;
const ollama_1 = require("@llamaindex/ollama");
const openai_1 = require("@llamaindex/openai");
const logger_1 = require("../utils/logger");
const config_1 = require("../config");
/**
* Embedding provider service for text-to-vector conversion
*
* Provides a unified interface for different embedding providers with automatic
* provider detection from centralized configuration. Supports both cloud-based
* (OpenAI) and local (Ollama) embedding generation with connection testing
* and error handling.
*
* Supported providers:
* - OpenAI: text-embedding-ada-002, text-embedding-3-small, text-embedding-3-large
* - Ollama: nomic-embed-text, other compatible local models
*
* @class EmbeddingProviderService
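*
* A minimal usage sketch, assuming a valid provider configuration (e.g.
* OPENAI_API_KEY) is already present in the environment; the input string
* is illustrative:
* @example
* const service = new EmbeddingProviderService();
* await service.initialize(); // auto-detects provider from centralized config
* const vector = await service.getEmbedding("hello world");
* console.log(service.getProvider(), vector.length);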
*/
class EmbeddingProviderService {
embedModel = null;
config = null;
/**
* Initialize the embedding provider with the specified or auto-detected configuration
*
* If no config is provided, automatically detects the provider from centralized
* configuration (explicit EMBEDDINGS_PROVIDER or inferred from MCP_LLM_SERVICE). Tests the connection
* and validates the configuration.
*
* @param config - Optional embedding configuration, otherwise auto-detected
* @throws {Error} When provider is unsupported or initialization fails
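*
* A sketch of initialization with an explicit config, bypassing auto-detection
* (field names mirror the objects built by getConfigFromEnvironment; the model
* and URL values are illustrative, and service is an EmbeddingProviderService
* instance):
* @example
* await service.initialize({
*   provider: "ollama",
*   model: "nomic-embed-text",
*   baseURL: "http://localhost:11434",
* });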
*/
async initialize(config) {
// Use provided config or detect from environment
this.config = config || this.detectConfigFromEnvironment();
switch (this.config.provider) {
case "ollama":
await this.initializeOllama();
break;
case "openai":
await this.initializeOpenAI();
break;
default:
throw new Error(`Unsupported embedding provider: ${this.config.provider}`);
}
}
/**
* Initialize OpenAI embedding provider
*
* @private
* @throws {Error} When API key is missing or connection test fails
*/
async initializeOpenAI() {
if (!this.config)
throw new Error("Config not initialized");
const apiKey = config_1.config.apiKeys.openai;
if (!apiKey) {
throw new Error("🚫 OpenAI API key not found. Set OPENAI_API_KEY environment variable.");
}
this.embedModel = new openai_1.OpenAIEmbedding({
model: this.config.model,
apiKey: apiKey,
});
// Test the connection
await this.testOpenAIConnection();
}
/**
* Test connection to OpenAI embedding API
*
* @private
* @throws {Error} When connection test fails
*/
async testOpenAIConnection() {
if (!this.embedModel || !(this.embedModel instanceof openai_1.OpenAIEmbedding)) {
throw new Error("OpenAI embedding model not initialized");
}
try {
// Test with a simple embedding
await this.embedModel.getTextEmbedding("test");
logger_1.Logger.debug("OpenAI embedding connection successful");
}
catch (error) {
throw new Error(`Failed to connect to OpenAI embeddings. Check your API key. ${error}`);
}
}
/**
* Initialize Ollama embedding provider
*
* @private
* @throws {Error} When Ollama server is not running or connection fails
*/
async initializeOllama() {
if (!this.config)
throw new Error("Config not initialized");
// Check that the Ollama server is running before constructing the client
const ollamaUrl = this.config.baseURL || "http://localhost:11434";
if (!(await this.isOllamaRunning(ollamaUrl))) {
throw new Error(`🚫 Ollama server is not running at ${ollamaUrl} 🔌`);
}
this.embedModel = new ollama_1.OllamaEmbedding({
model: this.config.model,
// Pass the resolved base URL through so a custom OLLAMA_URL is honored;
// previously ollamaUrl was checked for liveness but never given to the
// client, which always fell back to its default host.
config: { host: ollamaUrl },
});
// Test connection to Ollama
await this.testOllamaConnection();
}
/**
* Check if Ollama server is running at the specified URL
*
* @private
* @param baseURL - The Ollama server URL to test
* @returns Promise resolving to true if server is accessible
*/
async isOllamaRunning(baseURL) {
try {
const response = await fetch(`${baseURL}/api/tags`);
return response.ok;
}
catch {
return false;
}
}
/**
* Test connection to Ollama embedding service
*
* @private
* @throws {Error} When connection test fails
*/
async testOllamaConnection() {
if (!this.embedModel || !(this.embedModel instanceof ollama_1.OllamaEmbedding)) {
throw new Error("Ollama embedding model not initialized");
}
try {
// Test with a simple embedding
await this.embedModel.getTextEmbedding("test");
logger_1.Logger.debug("Ollama embedding connection successful");
}
catch (error) {
throw new Error(`Failed to connect to Ollama. Is Ollama running? ${error}`);
}
}
/**
* Generate embedding vector for a single text input
*
* @param text - Text to convert to embedding vector
* @returns Promise resolving to numerical embedding vector
* @throws {Error} When embedding model is not initialized or API call fails
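*
* A short sketch, assuming initialize() has already completed:
* @example
* const vector = await service.getEmbedding("What is a price feed?");
* // vector is a number[] (e.g. 1536 values for text-embedding-ada-002)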
*/
async getEmbedding(text) {
if (!this.embedModel) {
throw new Error("Embedding model not initialized. Call initialize() first.");
}
return await this.embedModel.getTextEmbedding(text);
}
/**
* Generate embedding vectors for multiple text inputs
*
* @param texts - Array of texts to convert to embeddings
* @returns Promise resolving to array of embedding vectors
* @throws {Error} When embedding model is not initialized
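*
* A sketch of batch conversion; inputs are embedded sequentially, one request
* per text (the strings are illustrative):
* @example
* const vectors = await service.getEmbeddings(["price feeds", "VRF", "CCIP"]);
* console.log(vectors.length); // 3, one vector per input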
*/
async getEmbeddings(texts) {
if (!this.embedModel) {
throw new Error("Embedding model not initialized. Call initialize() first.");
}
const embeddings = [];
for (const text of texts) {
const embedding = await this.embedModel.getTextEmbedding(text);
embeddings.push(embedding);
}
return embeddings;
}
/**
* Get the currently configured embedding provider
*
* @returns The active provider name or null if not initialized
*/
getProvider() {
return this.config?.provider || null;
}
/**
* Get the currently configured model name
*
* @returns The active model name or null if not initialized
*/
getModel() {
return this.config?.model || null;
}
/**
* Get the vector dimensions for the current provider/model
*
* Note: dimensions are hardcoded per provider rather than queried from the
* model (768 for nomic-embed-text, 1536 for OpenAI ada-002 and 3-small);
* models with other sizes, e.g. text-embedding-3-large at 3072, are not
* reflected here.
*
* @returns Number of dimensions in the embedding vectors, or 0 if not initialized
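*
* A sketch of using the dimension count when sizing a vector store (the
* helper createVectorTable is hypothetical):
* @example
* const dims = service.getDimensions(); // 768 (Ollama) or 1536 (OpenAI)
* await createVectorTable("embeddings", dims); // hypothetical helper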
*/
getDimensions() {
if (!this.config)
return 0;
// Return dimensions based on provider and model
if (this.config.provider === "ollama") {
return 768; // nomic-embed-text dimensions
}
else {
return 1536; // text-embedding-ada-002 / text-embedding-3-small dimensions
}
}
/**
* Create embedding configuration from centralized configuration
*
* Automatically selects appropriate models and configuration based on the
* chosen provider (explicit EMBEDDINGS_PROVIDER or inferred from MCP_LLM_SERVICE).
*
* Optional environment variables used by providers:
* - OLLAMA_URL: Base URL for Ollama server
* - OLLAMA_EMBEDDING_MODEL: Model name for Ollama
* - OPENAI_EMBEDDING_MODEL: Model name for OpenAI
*
* @static
* @returns Configuration object based on environment variables
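*
* A sketch of the returned shape (model names shown are illustrative; actual
* values come from the centralized config):
* @example
* const cfg = EmbeddingProviderService.getConfigFromEnvironment();
* // => { provider: "ollama", model: "nomic-embed-text", baseURL: "http://localhost:11434" }
* // or { provider: "openai", model: "text-embedding-ada-002" }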
*/
static getConfigFromEnvironment() {
// Use centralized config for embedding provider
const embeddingsProvider = config_1.config.embeddings.provider;
const ollamaUrl = config_1.config.embeddings.ollama.url;
if (embeddingsProvider === "ollama") {
return {
provider: "ollama",
model: config_1.config.embeddings.models.ollama,
baseURL: ollamaUrl,
};
}
else {
return {
provider: "openai",
model: config_1.config.embeddings.models.openai,
};
}
}
/**
* Detect embedding configuration from environment variables
*
* @private
* @returns Configuration detected from environment
*/
detectConfigFromEnvironment() {
return EmbeddingProviderService.getConfigFromEnvironment();
}
}
exports.EmbeddingProviderService = EmbeddingProviderService;
/**
* Factory function for creating embedding manager instances
*
* Provides backward compatibility with existing code that expects
* a factory function rather than direct class instantiation.
*
* @returns New EmbeddingProviderService instance
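*
* A sketch of the factory in use (equivalent to new EmbeddingProviderService()):
* @example
* const manager = createEmbeddingManager();
* await manager.initialize();
* const [vec] = await manager.getEmbeddings(["hello"]);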
*/
function createEmbeddingManager() {
return new EmbeddingProviderService();
}
//# sourceMappingURL=embedding-provider.js.map