openai-mock-api
A mock OpenAI API server for testing LLM applications
187 lines • 7.23 kB
JavaScript
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.MockServer = void 0;
const express_1 = __importDefault(require("express"));
const auth_middleware_1 = require("./middleware/auth.middleware");
const error_middleware_1 = require("./middleware/error.middleware");
const logging_middleware_1 = require("./middleware/logging.middleware");
const chat_completion_service_1 = require("./services/chat-completion.service");
const stream_service_1 = require("./services/stream.service");
const token_counter_1 = require("./services/token-counter");
const request_validator_1 = require("./services/request-validator");
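/**
 * Express-based mock of the OpenAI REST API for testing LLM applications.
 * Exposes /health, /v1/models, and /v1/chat/completions (streaming and
 * non-streaming) behind API-key authentication.
 */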
class MockServer {
constructor(config, logger) {
this.config = config;
this.logger = logger;
this.app = (0, express_1.default)();
// Initialize services
this.tokenCounter = new token_counter_1.TokenCounter(logger);
this.validator = new request_validator_1.RequestValidator(logger);
this.authService = new auth_middleware_1.AuthenticationService(config.apiKey);
this.errorHandler = new error_middleware_1.ErrorHandler(logger);
this.chatCompletionService = new chat_completion_service_1.ChatCompletionService(logger, this.validator, this.tokenCounter, config.responses);
this.streamService = new stream_service_1.StreamService(logger);
this.setupMiddleware();
this.setupRoutes();
this.setupErrorHandling();
}
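// Middleware is registered in order: body parsing, CORS, request logging,
// then authentication, so unauthenticated requests are still logged.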
setupMiddleware() {
// Body parsing
this.app.use(express_1.default.json());
// CORS
this.app.use(this.createCorsMiddleware());
// Logging
const loggingMiddleware = new logging_middleware_1.LoggingMiddleware(this.logger);
this.app.use(loggingMiddleware.middleware());
// Authentication
const authMiddleware = new auth_middleware_1.AuthenticationMiddleware(this.authService, this.logger);
this.app.use(authMiddleware.middleware());
}
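/**
 * Permissive CORS middleware: allows any origin and answers OPTIONS
 * preflight requests directly with a 200 instead of passing them on.
 */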
createCorsMiddleware() {
return (req, res, next) => {
res.header('Access-Control-Allow-Origin', '*');
res.header('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS');
res.header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept, Authorization');
if (req.method === 'OPTIONS') {
res.sendStatus(200);
}
else {
next();
}
};
}
setupRoutes() {
// Health check
this.app.get('/health', (req, res) => {
res.json({ status: 'ok', timestamp: new Date().toISOString() });
});
// OpenAI Models endpoint
this.app.get('/v1/models', (req, res) => {
res.json({
object: 'list',
data: [
{
id: 'gpt-3.5-turbo',
object: 'model',
created: Math.floor(Date.now() / 1000),
owned_by: 'openai-mock',
},
{
id: 'gpt-4',
object: 'model',
created: Math.floor(Date.now() / 1000),
owned_by: 'openai-mock',
},
],
});
});
// Chat completions endpoint
this.app.post('/v1/chat/completions', this.asyncHandler(this.handleChatCompletion.bind(this)));
// 404 handler
this.app.use((req, res) => {
if (req.path.startsWith('/v1')) {
throw new error_middleware_1.ValidationError(`Endpoint ${req.path} is not supported`);
}
else {
res.status(404).json({ error: 'Not found' });
}
});
}
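// Error-handling middleware is registered last so it catches errors thrown by
// routes, the 404 handler, and rejected async handlers forwarded via next().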
setupErrorHandling() {
const errorMiddleware = new error_middleware_1.ErrorHandlingMiddleware(this.errorHandler, this.logger);
this.app.use(errorMiddleware.middleware());
}
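/**
 * Wraps an async route handler so a rejected promise is forwarded to next();
 * Express 4 does not route async errors to error middleware on its own.
 */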
asyncHandler(fn) {
return (req, res, next) => {
Promise.resolve(fn(req, res, next)).catch(next);
};
}
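/**
 * Builds the chat completion via ChatCompletionService, then either streams
 * it back in chunks or returns a single OpenAI-compatible JSON body.
 */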
async handleChatCompletion(req, res) {
const request = req.body;
// Handle chat completion through service
const response = await this.chatCompletionService.handleChatCompletion(request);
// Handle streaming vs non-streaming
if (request.stream) {
await this.handleStreamingResponse(res, response, request);
}
else {
// Create OpenAI-compatible response
const openAIResponse = {
id: response.id,
object: 'chat.completion',
created: Math.floor(Date.now() / 1000),
model: response.model,
choices: [
{
index: 0,
message: {
role: 'assistant',
content: response.assistantMessage.content,
tool_calls: response.assistantMessage.tool_calls,
},
// Mirror the OpenAI API: responses carrying tool calls finish with 'tool_calls'
finish_reason: response.assistantMessage.tool_calls ? 'tool_calls' : 'stop',
},
],
usage: response.usage,
};
res.json(openAIResponse);
}
}
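/**
 * Streams the completion chunk by chunk with caching and proxy buffering
 * disabled (Cache-Control: no-cache, X-Accel-Buffering: no) until
 * StreamService exhausts the response, then closes the connection.
 */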
async handleStreamingResponse(res, response, request) {
// Set up SSE headers
res.writeHead(200, {
'Content-Type': 'text/plain; charset=utf-8',
'Cache-Control': 'no-cache',
Connection: 'keep-alive',
'X-Accel-Buffering': 'no',
});
const streamOptions = {
model: request.model,
created: Math.floor(Date.now() / 1000),
delayMs: 50,
};
// Stream the response
for await (const chunk of this.streamService.streamResponse(response, streamOptions)) {
res.write(chunk);
}
res.end();
}
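/**
 * Starts the HTTP listener; resolves once the port is bound and rejects
 * if the underlying server emits an error (e.g. the port is already in use).
 */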
async start(port) {
return new Promise((resolve, reject) => {
try {
this.server = this.app.listen(port, () => {
this.logger.info(`Server started on port ${port}`);
resolve();
});
this.server.on('error', (error) => {
this.logger.error('Server error', error);
reject(error);
});
}
catch (error) {
reject(error);
}
});
}
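/**
 * Releases resources: disposes the token counter if it supports dispose()
 * and closes the HTTP server, resolving once it has fully shut down.
 */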
async stop() {
return new Promise((resolve) => {
if (this.tokenCounter && 'dispose' in this.tokenCounter) {
this.tokenCounter.dispose();
}
if (this.server) {
this.server.close(() => {
this.logger.info('Server stopped');
resolve();
});
}
else {
resolve();
}
});
}
}
exports.MockServer = MockServer;
//# sourceMappingURL=server.js.map
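A minimal usage sketch, assuming the package entry point re-exports MockServer and that the config carries the apiKey and responses fields read by the constructor above; the exact config and response-matching shapes are defined elsewhere in the package.

const { MockServer } = require('openai-mock-api');

// Assumed config shape: apiKey for authentication, responses for the
// canned completions consumed by ChatCompletionService (shape hypothetical).
const config = { apiKey: 'test-key', responses: [] };

// Any object with info/error methods satisfies the logger used above.
const logger = {
  info: (...args) => console.log('[info]', ...args),
  error: (...args) => console.error('[error]', ...args),
};

async function main() {
  const server = new MockServer(config, logger);
  await server.start(3000);
  // Configure an OpenAI SDK client with baseURL 'http://localhost:3000/v1'
  // and apiKey 'test-key', run tests, then shut down.
  await server.stop();
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});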