// mcp-openai-complete — MCP server for OpenAI text completion
// (compiled JavaScript; original listing: 298 lines, 12.7 kB)
// --- TypeScript-to-CommonJS compilation prelude (generated by tsc) ---
;
// Interop helper: lets `import x from "mod"` work for plain CommonJS
// modules by wrapping them as `{ default: mod }` when needed.
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
// Public entry point (function declaration below is hoisted).
exports.startServer = startServer;
// External dependencies: OpenAI SDK, MCP server SDK (stdio transport), uuid, dotenv.
const openai_1 = __importDefault(require("openai"));
const index_js_1 = require("@modelcontextprotocol/sdk/server/index.js");
const stdio_js_1 = require("@modelcontextprotocol/sdk/server/stdio.js");
const types_js_1 = require("@modelcontextprotocol/sdk/types.js");
const uuid_1 = require("uuid");
const dotenv_1 = __importDefault(require("dotenv"));
// Project-local modules: defaults/constants, completion status + error types, logger/validators.
const constants_js_1 = require("./constants.js");
const types_js_2 = require("./types.js");
const utils_js_1 = require("./utils.js");
// Load environment variables
dotenv_1.default.config();
/**
 * OpenAI Completion MCP Server
 *
 * Bridges an MCP client and OpenAI's (legacy) text-completions API: exposes a
 * single "complete" tool over stdio, tracks each request as a cancellable,
 * timeout-bounded task in `activeTasks`.
 */
class OpenAICompleteMcpServer {
    server;
    openai;
    model;
    // taskId -> task record. Entries persist briefly after a task settles
    // (for status checks via cancelTask/inspection) and are pruned in
    // complete()'s finally block.
    activeTasks = new Map();
    /**
     * @param {string} apiKey - OpenAI API key.
     * @param {string | undefined} apiBaseUrl - API base URL (undefined = SDK default).
     * @param {string} [model] - Model name; defaults to DEFAULT_MODEL.
     */
    constructor(apiKey, apiBaseUrl, model = constants_js_1.DEFAULT_MODEL) {
        utils_js_1.logger.debug("Server", "Initializing OpenAI Complete MCP Server");
        // Initialize OpenAI client
        this.openai = new openai_1.default({
            apiKey: apiKey,
            baseURL: apiBaseUrl,
        });
        this.model = model;
        // Initialize MCP server
        this.server = new index_js_1.Server({
            name: constants_js_1.SERVER_NAME,
            version: constants_js_1.SERVER_VERSION,
        }, {
            capabilities: {
                tools: {},
            },
        });
        this.setupToolHandlers();
        this.setupErrorHandling();
    }
    /** Install MCP error logging and a SIGINT handler for graceful shutdown. */
    setupErrorHandling() {
        this.server.onerror = (error) => {
            utils_js_1.logger.error("Server", `MCP Error: ${error instanceof Error ? error.stack : String(error)}`);
            // NOTE(review): rethrowing from onerror surfaces as an unhandled
            // exception and can crash the process — confirm this crash-on-error
            // policy is intended.
            throw error;
        };
        // Graceful shutdown
        process.on("SIGINT", () => {
            utils_js_1.logger.info("Shutdown", "Starting server shutdown");
            process.exit(0);
        });
    }
    /** Register the tool listing and the "complete" tool-call handler. */
    setupToolHandlers() {
        this.server.setRequestHandler(types_js_1.ListToolsRequestSchema, () => Promise.resolve({
            tools: [
                {
                    name: "complete",
                    description: "Generate text completions using OpenAI models",
                    inputSchema: {
                        type: "object",
                        properties: {
                            prompt: {
                                type: "string",
                                description: "The text prompt to complete",
                            },
                            max_tokens: {
                                type: "integer",
                                description: "Maximum tokens to generate",
                                default: constants_js_1.DEFAULT_MAX_TOKENS,
                            },
                            temperature: {
                                type: "number",
                                description: "Controls randomness (0-1)",
                                default: constants_js_1.DEFAULT_TEMPERATURE,
                            },
                            top_p: {
                                type: "number",
                                description: "Controls diversity via nucleus sampling",
                                default: constants_js_1.DEFAULT_TOP_P,
                            },
                            frequency_penalty: {
                                type: "number",
                                description: "Decreases repetition of token sequences",
                                default: constants_js_1.DEFAULT_FREQUENCY_PENALTY,
                            },
                            presence_penalty: {
                                type: "number",
                                description: "Increases likelihood of talking about new topics",
                                default: constants_js_1.DEFAULT_PRESENCE_PENALTY,
                            },
                        },
                        required: ["prompt"],
                    },
                },
            ],
        }));
        this.server.setRequestHandler(types_js_1.CallToolRequestSchema, async (request) => {
            if (request.params.name !== "complete") {
                throw new types_js_1.McpError(types_js_1.ErrorCode.MethodNotFound, `Unknown tool: ${request.params.name}`);
            }
            return await this.handleCompleteTool(request.params.arguments || {});
        });
    }
    /**
     * Validate tool arguments, run the completion, and map the outcome
     * (success, timeout, cancellation) to MCP text content.
     * @param {object} args - Raw tool arguments from the MCP request.
     * @returns {Promise<object>} MCP tool result with a single text item.
     * @throws {McpError} InvalidParams on bad arguments; re-throws API errors.
     */
    async handleCompleteTool(args) {
        if (!(0, utils_js_1.isValidCompletionArgs)(args)) {
            throw new types_js_1.McpError(types_js_1.ErrorCode.InvalidParams, "Invalid completion arguments");
        }
        try {
            // Each invocation is tracked under a fresh task ID.
            const taskId = (0, uuid_1.v4)();
            const result = await this.complete(taskId, args);
            return {
                content: [
                    {
                        type: "text",
                        text: result.text,
                    },
                ],
            };
        }
        catch (error) {
            // Timeouts and cancellations are reported as friendly text, not errors.
            if (error instanceof types_js_2.CompletionTimeoutError) {
                return {
                    content: [
                        {
                            type: "text",
                            text: "The completion request timed out. Please try again with a shorter prompt or fewer tokens.",
                        },
                    ],
                };
            }
            else if (error instanceof types_js_2.CompletionCancelledError) {
                return {
                    content: [
                        {
                            type: "text",
                            text: "The request was cancelled.",
                        },
                    ],
                };
            }
            // Re-throw other errors
            throw error;
        }
    }
    /**
     * Execute one completion as a tracked, cancellable, timeout-bounded task.
     * @param {string} taskId - Unique ID under which the task is tracked.
     * @param {object} args - Validated completion arguments.
     * @returns {Promise<object>} Completion result (text, model, finish_reason, usage?).
     * @throws {CompletionTimeoutError|CompletionCancelledError|McpError}
     */
    async complete(taskId, args) {
        const { prompt, max_tokens = constants_js_1.DEFAULT_MAX_TOKENS, temperature = constants_js_1.DEFAULT_TEMPERATURE, top_p = constants_js_1.DEFAULT_TOP_P, frequency_penalty = constants_js_1.DEFAULT_FREQUENCY_PENALTY, presence_penalty = constants_js_1.DEFAULT_PRESENCE_PENALTY, } = args;
        // Lets cancelTask() and the timeout abort the in-flight HTTP request.
        const abortController = new AbortController();
        const task = {
            status: types_js_2.CompletionStatusEnum.Pending,
            created_at: Date.now(),
            timeout_at: Date.now() + constants_js_1.COMPLETION_TIMEOUT_MS,
            progress: 0,
            abortController,
        };
        this.activeTasks.set(taskId, task);
        try {
            // The map holds this same object, so mutating it is sufficient;
            // no repeated set() calls are needed.
            task.status = types_js_2.CompletionStatusEnum.Processing;
            utils_js_1.logger.debug("Completion", `Starting completion request for task ${taskId}`);
            const response = await Promise.race([
                this.makeApiCall(prompt, max_tokens, temperature, top_p, frequency_penalty, presence_penalty, abortController.signal),
                this.createTimeoutPromise(taskId),
            ]);
            // Guard against an empty choices array from the API.
            const choice = response.choices[0];
            const completionResult = {
                text: choice?.text || "",
                model: this.model,
                finish_reason: choice?.finish_reason || undefined,
            };
            // Add usage info if available
            if (response.usage) {
                completionResult.usage = {
                    prompt_tokens: response.usage.prompt_tokens,
                    completion_tokens: response.usage.completion_tokens,
                    total_tokens: response.usage.total_tokens,
                };
            }
            task.status = types_js_2.CompletionStatusEnum.Complete;
            task.result = completionResult;
            task.progress = 100;
            utils_js_1.logger.debug("Completion", `Task ${taskId} completed successfully`);
            return completionResult;
        }
        catch (error) {
            // Don't clobber a more specific error message (e.g. the one set by
            // cancelTask or the timeout handler) if one is already recorded.
            if (task.status !== types_js_2.CompletionStatusEnum.Error) {
                task.status = types_js_2.CompletionStatusEnum.Error;
                task.error = error instanceof Error ? error.message : "Unknown error";
            }
            if (error instanceof types_js_2.CompletionTimeoutError ||
                error instanceof types_js_2.CompletionCancelledError) {
                throw error;
            }
            if (abortController.signal.aborted) {
                throw new types_js_2.CompletionCancelledError();
            }
            throw new types_js_1.McpError(types_js_1.ErrorCode.InvalidRequest, `OpenAI API error: ${error instanceof Error ? error.message : String(error)}`);
        }
        finally {
            // BUG FIX: clear the pending timeout so it cannot fire after the
            // task settles and overwrite a Complete status with a timeout error.
            if (task.timeoutHandle !== undefined) {
                clearTimeout(task.timeoutHandle);
            }
            // Keep task in map for a while for status checks, but eventually clean up
            setTimeout(() => {
                this.activeTasks.delete(taskId);
            }, constants_js_1.COMPLETION_TIMEOUT_MS);
        }
    }
    /**
     * Low-level call to OpenAI's legacy completions endpoint.
     * @param {AbortSignal} signal - Propagated so the request can be aborted.
     */
    async makeApiCall(prompt, max_tokens, temperature, top_p, frequency_penalty, presence_penalty, signal) {
        return this.openai.completions.create({
            model: this.model,
            prompt,
            max_tokens,
            temperature,
            top_p,
            frequency_penalty,
            presence_penalty,
        }, { signal });
    }
    /**
     * Promise that rejects with CompletionTimeoutError after
     * COMPLETION_TIMEOUT_MS. Records its timer handle on the task so
     * complete() can cancel it once the race settles.
     */
    createTimeoutPromise(taskId) {
        return new Promise((_, reject) => {
            const handle = setTimeout(() => {
                const task = this.activeTasks.get(taskId);
                // BUG FIX: only mark the task failed if it has not already
                // settled; previously a successfully completed task was flipped
                // back to Error when this timer fired.
                if (task &&
                    task.status !== types_js_2.CompletionStatusEnum.Complete &&
                    task.status !== types_js_2.CompletionStatusEnum.Error) {
                    task.status = types_js_2.CompletionStatusEnum.Error;
                    task.error = "Completion timed out";
                    // BUG FIX: abort the in-flight API call so it does not keep
                    // running after the timeout loses the race.
                    task.abortController.abort();
                }
                reject(new types_js_2.CompletionTimeoutError());
            }, constants_js_1.COMPLETION_TIMEOUT_MS);
            // Expose the timer so complete()'s finally block can clear it.
            const task = this.activeTasks.get(taskId);
            if (task) {
                task.timeoutHandle = handle;
            }
        });
    }
    /**
     * Abort an in-flight task and mark it cancelled.
     * @param {string} taskId
     * @returns {boolean} true if a pending/processing task was cancelled.
     */
    cancelTask(taskId) {
        const task = this.activeTasks.get(taskId);
        if (task &&
            task.status !== types_js_2.CompletionStatusEnum.Complete &&
            task.status !== types_js_2.CompletionStatusEnum.Error) {
            // Abort the request
            task.abortController.abort();
            // Update task status
            task.status = types_js_2.CompletionStatusEnum.Error;
            task.error = "Task cancelled by user";
            utils_js_1.logger.info("Completion", `Task ${taskId} cancelled`);
            return true;
        }
        return false;
    }
    /** Connect the MCP server to the stdio transport and start serving. */
    async run() {
        const transport = new stdio_js_1.StdioServerTransport();
        await this.server.connect(transport);
        utils_js_1.logger.info("Server", "OpenAI Complete MCP server running on stdio");
    }
}
/**
 * Read configuration from the environment, construct the server, and start it
 * on stdio. Exits the process with code 1 on missing configuration or startup
 * failure.
 *
 * Required env vars: OPENAI_API_KEY, OPENAI_API_BASE.
 * Optional: OPENAI_MODEL (falls back to DEFAULT_MODEL).
 */
function startServer() {
    const apiKey = process.env.OPENAI_API_KEY;
    const apiBaseUrl = process.env.OPENAI_API_BASE;
    // Falls back to DEFAULT_MODEL, so `model` is always truthy here; the
    // former `if (!model)` guard was unreachable and has been removed.
    const model = process.env.OPENAI_MODEL || constants_js_1.DEFAULT_MODEL;
    if (!apiKey) {
        utils_js_1.logger.error("Config", "OPENAI_API_KEY environment variable is required");
        process.exit(1);
    }
    if (!apiBaseUrl) {
        utils_js_1.logger.error("Config", "OPENAI_API_BASE environment variable is required");
        process.exit(1);
    }
    const server = new OpenAICompleteMcpServer(apiKey, apiBaseUrl, model);
    // Fire-and-forget: run() only resolves once connected; failures are fatal.
    void server.run().catch((error) => {
        utils_js_1.logger.error("Startup", `Failed to start server: ${error instanceof Error ? error.message : String(error)}`);
        process.exit(1);
    });
}
//# sourceMappingURL=index.js.map