@chainlink/mcp-server
Version:
Prototype MCP Server for CLL
136 lines • 4.7 kB
JavaScript
;
/**
* @fileoverview Centralized Configuration Management
*
* Single source of truth for all configuration values and environment variables.
* This module handles all environment variable reading and provides defaults.
*/
// CommonJS/ESM interop marker plus pre-declaration of all named exports
// (standard TypeScript compiler output; the real values are assigned below).
Object.defineProperty(exports, "__esModule", { value: true });
exports.config = exports.VECTOR_DB_ALL_FILENAMES = exports.VECTOR_DB_FILENAMES = exports.ALLOWED_EMBEDDING_PROVIDERS = exports.LlmService = void 0;
const fs_1 = require("fs");
const path_1 = require("path");
const immutability_1 = require("../utils/immutability");
/**
 * Supported LLM service types.
 *
 * A TypeScript string enum compiled to a plain string-keyed object
 * (string enums produce no reverse mapping, so an object literal is
 * behaviorally equivalent to the IIFE form).
 */
var LlmService = {
    Anthropic: "anthropic",
    OpenAI: "openai",
    Gemini: "gemini",
    Ollama: "ollama",
};
exports.LlmService = LlmService;
/**
* Allowed embedding providers (single source of truth for validation and selection)
*/
exports.ALLOWED_EMBEDDING_PROVIDERS = [
"openai",
"ollama",
];
/**
* Provider-specific vector database filenames
*/
exports.VECTOR_DB_FILENAMES = {
openai: "chainlink_docs_openai.db",
ollama: "chainlink_docs_ollama.db",
};
/**
* List of all vector database filenames (useful for existence checks)
*/
exports.VECTOR_DB_ALL_FILENAMES = Object.values(exports.VECTOR_DB_FILENAMES);
/**
 * Read the package version from package.json (two directories above this file).
 *
 * @returns {string} The version string, or "unknown" when package.json is
 *   missing, unreadable, unparsable, or has no "version" field.
 */
function getVersion() {
    try {
        const packageJsonPath = (0, path_1.join)(__dirname, "..", "..", "package.json");
        const packageJson = JSON.parse((0, fs_1.readFileSync)(packageJsonPath, "utf8"));
        // Guard against a package.json that parses fine but lacks a "version"
        // field — previously this returned undefined instead of the fallback.
        return packageJson.version ?? "unknown";
    }
    catch (error) {
        // Deliberate best-effort: use the fallback without noisy console
        // warnings in tests/CI.
        return "unknown";
    }
}
/**
 * Resolve the embeddings provider from the pre-read environment values.
 *
 * Priority:
 *   1) Explicit EMBEDDINGS_PROVIDER (when it is an allowed value)
 *   2) Inferred from MCP_LLM_SERVICE (ollama → ollama, openai → openai)
 *   3) Default to "openai"
 *
 * @param {object} env - The lower-cased, pre-read environment values.
 * @returns {string} One of ALLOWED_EMBEDDING_PROVIDERS.
 */
function resolveEmbeddingsProvider(env) {
    if (env.embeddingsProvider &&
        exports.ALLOWED_EMBEDDING_PROVIDERS.includes(env.embeddingsProvider)) {
        return env.embeddingsProvider;
    }
    if (env.llmService === LlmService.Ollama)
        return "ollama";
    if (env.llmService === LlmService.OpenAI)
        return "openai";
    return "openai";
}
/**
 * Load configuration from environment variables with defaults.
 *
 * @returns {object} The full configuration: version, llm (default/supported
 *   services, maxTokens, per-service default models), embeddings (provider,
 *   models, ollama URL), apiKeys, and the raw env values that were read.
 */
function loadConfig() {
    // Read all environment variables once so the rest of the function is a
    // pure transformation of this snapshot.
    const env = {
        llmService: process.env.MCP_LLM_SERVICE?.toLowerCase(),
        llmModel: process.env.MCP_LLM_MODEL,
        embeddingsProvider: process.env.EMBEDDINGS_PROVIDER?.toLowerCase(),
        ollamaUrl: process.env.OLLAMA_URL,
        ollamaModel: process.env.OLLAMA_MODEL,
        ollamaEmbeddingModel: process.env.OLLAMA_EMBEDDING_MODEL,
        openaiEmbeddingModel: process.env.OPENAI_EMBEDDING_MODEL,
        maxTokens: process.env.MCP_MAX_TOKENS,
        debug: process.env.DEBUG,
    };
    // Explicit radix, and fall back to the default on non-numeric input —
    // previously a garbage MCP_MAX_TOKENS produced maxTokens: NaN.
    const parsedMaxTokens = Number.parseInt(env.maxTokens || "2000", 10);
    const maxTokens = Number.isNaN(parsedMaxTokens) ? 2000 : parsedMaxTokens;
    return {
        version: getVersion(),
        llm: {
            defaultService: LlmService.OpenAI,
            supportedServices: [
                LlmService.OpenAI,
                LlmService.Anthropic,
                LlmService.Gemini,
                LlmService.Ollama,
            ],
            maxTokens,
            models: {
                [LlmService.OpenAI]: "gpt-4o",
                [LlmService.Anthropic]: "claude-3-7-sonnet-latest",
                [LlmService.Gemini]: "gemini-2.5-flash-lite",
                [LlmService.Ollama]: env.ollamaModel || "llama3.2:3b",
            },
        },
        embeddings: {
            provider: resolveEmbeddingsProvider(env),
            models: {
                openai: env.openaiEmbeddingModel || "text-embedding-ada-002",
                ollama: env.ollamaEmbeddingModel || "nomic-embed-text",
            },
            ollama: {
                url: env.ollamaUrl || "http://localhost:11434",
            },
        },
        apiKeys: {
            openai: process.env.OPENAI_API_KEY,
            anthropic: process.env.ANTHROPIC_API_KEY,
            gemini: process.env.GEMINI_API_KEY,
        },
        // Expose the raw snapshot; the previous code re-specified llmService
        // and llmModel after the spread, which was redundant.
        env: { ...env },
    };
}
/**
 * Global configuration instance.
 *
 * Built once at module load and passed through deepFreeze (project utility —
 * presumably freezes the object recursively; verify in ../utils/immutability)
 * so consumers cannot mutate shared configuration at runtime.
 */
exports.config = (0, immutability_1.deepFreeze)(loadConfig());
//# sourceMappingURL=index.js.map