/**
 * openai-mock-api — A mock OpenAI API server for testing LLM applications.
 * Version: (unspecified)
 * JavaScript • 50 lines • 2.29 kB
 */
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.ChatCompletionService = void 0;
const matcher_1 = require("../matcher");
const error_middleware_1 = require("../middleware/error.middleware");
/**
 * Serves mock chat-completion responses by matching incoming requests
 * against a configured set of canned conversations.
 */
class ChatCompletionService {
    /**
     * @param {object} logger - Logger with at least an `info(msg)` method.
     * @param {object} validator - Validates requests and tool calls.
     * @param {object} tokenCounter - Computes usage stats for non-streaming replies.
     * @param {Array<object>} responses - Configured mock responses to match against.
     */
    constructor(logger, validator, tokenCounter, responses) {
        this.logger = logger;
        this.validator = validator;
        this.tokenCounter = tokenCounter;
        this.responses = responses;
        this.matcher = new matcher_1.MessageMatcherService(logger);
    }
    /**
     * Validates a chat-completion request, finds the matching mock response,
     * and assembles the reply payload.
     *
     * @param {object} request - Raw chat-completion request body.
     * @returns {Promise<object>} `{ id, assistantMessage, matchedResponseId, model, usage }`;
     *   `usage` is `undefined` for streaming requests.
     * @throws {ValidationError} When no configured response matches the messages.
     * @throws {Error} When a match exists but its conversation flow contains no
     *   assistant turn at the matched position (a configuration invariant violation).
     */
    async handleChatCompletion(request) {
        // Validate request
        const validatedRequest = this.validator.validateChatCompletionRequest(request);
        // Find matching response
        const matchResult = this.matcher.findMatch(validatedRequest, this.responses);
        if (!matchResult) {
            throw new error_middleware_1.ValidationError('No matching response found for the provided messages');
        }
        const { response: mockResponse, matchedLength } = matchResult;
        // Find the appropriate assistant response for this match
        const assistantResponse = this.matcher.findResponseForMatch(mockResponse.messages, matchedLength);
        if (!assistantResponse) {
            throw new Error('No assistant response found in conversation flow');
        }
        // Validate tool calls if present
        if (assistantResponse.tool_calls) {
            this.validator.validateToolCalls(assistantResponse.tool_calls);
        }
        // Calculate tokens if not streaming (streamed replies report no usage)
        const usage = !validatedRequest.stream
            ? this.tokenCounter.calculateTokens(validatedRequest, assistantResponse.content || '')
            : undefined;
        this.logger.info(`Matched request to response: ${mockResponse.id}`);
        return {
            id: `chatcmpl-${this.generateId()}`,
            assistantMessage: assistantResponse,
            matchedResponseId: mockResponse.id,
            model: validatedRequest.model,
            usage,
        };
    }
    /**
     * Generates a random completion-ID suffix.
     *
     * Prefers the cryptographically random, collision-resistant
     * `crypto.randomUUID()` (global since Node 19); falls back to the
     * legacy Math.random scheme on older runtimes.
     *
     * @returns {string} A non-empty random identifier.
     */
    generateId() {
        if (typeof crypto !== 'undefined' && typeof crypto.randomUUID === 'function') {
            return crypto.randomUUID();
        }
        // Legacy fallback: two base-36 fragments (not cryptographically secure).
        return (Math.random().toString(36).substring(2, 15) +
            Math.random().toString(36).substring(2, 15));
    }
}
exports.ChatCompletionService = ChatCompletionService;
//# sourceMappingURL=chat-completion.service.js.map