openai-mock-api

A mock OpenAI API server for testing LLM applications

stream.service.js

"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.StreamService = void 0; class StreamService { constructor(logger) { this.logger = logger; } async *streamResponse(response, options) { const streamId = response.id; const { model, created, delayMs = 50 } = options; this.logger.info(`Starting streaming response for: ${response.matchedResponseId}`); // Send initial chunk with role const initialChunk = { id: streamId, object: 'chat.completion.chunk', created, model, choices: [ { index: 0, delta: { role: 'assistant' }, finish_reason: null, }, ], }; yield `data: ${JSON.stringify(initialChunk)}\n\n`; // Stream tool calls if present if (response.assistantMessage.tool_calls) { for (const toolCall of response.assistantMessage.tool_calls) { const toolChunk = { id: streamId, object: 'chat.completion.chunk', created, model, choices: [ { index: 0, delta: { tool_calls: [toolCall] }, finish_reason: null, }, ], }; yield `data: ${JSON.stringify(toolChunk)}\n\n`; await this.delay(delayMs); } } // Stream the content word by word if (response.assistantMessage.content) { const words = response.assistantMessage.content.split(' '); for (let i = 0; i < words.length; i++) { const word = words[i] + (i < words.length - 1 ? ' ' : ''); const chunk = { id: streamId, object: 'chat.completion.chunk', created, model, choices: [ { index: 0, delta: { content: word }, finish_reason: null, }, ], }; yield `data: ${JSON.stringify(chunk)}\n\n`; await this.delay(delayMs); } } // Send final chunk with finish_reason const finalChunk = { id: streamId, object: 'chat.completion.chunk', created, model, choices: [ { index: 0, delta: {}, finish_reason: 'stop', }, ], }; yield `data: ${JSON.stringify(finalChunk)}\n\n`; // Send done signal yield 'data: [DONE]\n\n'; } delay(ms) { return new Promise((resolve) => setTimeout(resolve, ms)); } } exports.StreamService = StreamService; //# sourceMappingURL=stream.service.js.map