// @agentica/core: Agentic AI Library specialized in LLM Function Calling
// ChatGptCompletionStreamingUtil.spec.js (compiled JavaScript test suite)
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __asyncValues = (this && this.__asyncValues) || function (o) {
if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
var m = o[Symbol.asyncIterator], i;
return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
};
Object.defineProperty(exports, "__esModule", { value: true });
const vitest_1 = require("vitest");
const ChatGptCompletionStreamingUtil_1 = require("./ChatGptCompletionStreamingUtil");
const StreamUtil_1 = require("./StreamUtil");
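// -----------------------------------------------------------------------------
// Suite: reduceStreamingWithDispatch()
//
// Reduces a ReadableStream of `chat.completion.chunk` objects into a single
// `chat.completion` result, dispatching one event per choice index. As
// exercised by the assertions below, each dispatched event exposes:
//   - get():  the text accumulated so far for that choice
//   - done(): whether the choice has received a finish_reason
//   - stream: an async iterable of the incoming content deltas
//   - join(): a promise resolving to the choice's complete text
//
// Minimal usage sketch (a non-authoritative illustration, assuming only the
// API surface demonstrated by these tests):
//
//   const completion = await reduceStreamingWithDispatch(stream, (event) => {
//     void event.join().then((text) => console.log(text));
//   });
// -----------------------------------------------------------------------------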
(0, vitest_1.describe)("reduceStreamingWithDispatch", () => {
(0, vitest_1.beforeEach)(() => {
vitest_1.vi.clearAllMocks();
});
(0, vitest_1.describe)("basic functionality", () => {
(0, vitest_1.it)("should process single chunk successfully", () => __awaiter(void 0, void 0, void 0, function* () {
const mockChunk = {
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: null,
},
],
};
const stream = new ReadableStream({
start(controller) {
controller.enqueue(mockChunk);
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(result.object).toBe("chat.completion");
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
}));
(0, vitest_1.it)("should handle multiple chunks with content accumulation", () => __awaiter(void 0, void 0, void 0, function* () {
var _a;
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: " World" },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "!" },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(result.object).toBe("chat.completion");
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
(0, vitest_1.expect)(eventCall.get()).toBe("Hello World!");
}));
(0, vitest_1.it)("should handle empty content chunks", () => __awaiter(void 0, void 0, void 0, function* () {
var _a;
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "" },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: null,
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
(0, vitest_1.expect)(eventCall.get()).toBe("Hello");
}));
});
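// Each choice index receives its own dispatched event with an independently
// accumulated text buffer.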
(0, vitest_1.describe)("multiple choices handling", () => {
(0, vitest_1.it)("should handle multiple choices with different indices", () => __awaiter(void 0, void 0, void 0, function* () {
var _a, _b;
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Choice 1" },
finish_reason: null,
},
{
index: 1,
delta: { content: "Choice 2" },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: " continued" },
finish_reason: "stop",
},
{
index: 1,
delta: { content: " continued" },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(2);
const firstCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
const secondCall = (_b = eventProcessor.mock.calls[1]) === null || _b === void 0 ? void 0 : _b[0];
(0, vitest_1.expect)(firstCall.get()).toBe("Choice 1 continued");
(0, vitest_1.expect)(secondCall.get()).toBe("Choice 2 continued");
}));
});
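// A finish_reason on a choice closes its context: done() reports true for
// that choice's event.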
(0, vitest_1.describe)("finish reason handling", () => {
(0, vitest_1.it)("should close context when finish_reason is provided", () => __awaiter(void 0, void 0, void 0, function* () {
var _a;
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: " World" },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
(0, vitest_1.expect)(eventCall.get()).toBe("Hello World");
(0, vitest_1.expect)(eventCall.done()).toBe(true);
}));
});
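// The dispatched event also exposes the raw delta flow: `stream` yields each
// content fragment as it arrives, and `join()` awaits the concatenated whole.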
(0, vitest_1.describe)("stream processing", () => {
(0, vitest_1.it)("should provide working stream in event processor", () => __awaiter(void 0, void 0, void 0, function* () {
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: " World" },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const streamedContent = [];
yield new Promise((resolve) => __awaiter(void 0, void 0, void 0, function* () {
const eventProcessor = vitest_1.vi.fn(({ stream: contentStream }) => {
void (() => __awaiter(void 0, void 0, void 0, function* () {
var _a, e_1, _b, _c;
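// Compiled form of: for await (const content of contentStream) { streamedContent.push(content); }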
try {
for (var _d = true, contentStream_1 = __asyncValues(contentStream), contentStream_1_1; contentStream_1_1 = yield contentStream_1.next(), _a = contentStream_1_1.done, !_a; _d = true) {
_c = contentStream_1_1.value;
_d = false;
const content = _c;
streamedContent.push(content);
}
}
catch (e_1_1) { e_1 = { error: e_1_1 }; }
finally {
try {
if (!_d && !_a && (_b = contentStream_1.return)) yield _b.call(contentStream_1);
}
finally { if (e_1) throw e_1.error; }
}
resolve(true);
}))().catch(() => { });
});
yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
}));
(0, vitest_1.expect)(streamedContent).toEqual(["Hello", " World"]);
}));
(0, vitest_1.it)("should provide working join function", () => __awaiter(void 0, void 0, void 0, function* () {
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: " World" },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
let joinedContent = "";
const eventProcessor = vitest_1.vi.fn((_a) => __awaiter(void 0, [_a], void 0, function* ({ join }) {
joinedContent = yield join();
}));
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(joinedContent).toBe("Hello World");
}));
});
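// Failure modes: an empty stream rejects (no ChatCompletion can be reduced),
// while streams carrying only finish_reason chunks resolve without ever
// dispatching an event.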
(0, vitest_1.describe)("error handling", () => {
(0, vitest_1.it)("should throw error for empty stream", () => __awaiter(void 0, void 0, void 0, function* () {
const stream = new ReadableStream({
start(controller) {
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
yield (0, vitest_1.expect)((0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor)).rejects.toThrow("StreamUtil.reduce did not produce a ChatCompletion");
}));
(0, vitest_1.it)("should handle stream with only finish_reason chunks", () => __awaiter(void 0, void 0, void 0, function* () {
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: null },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(eventProcessor).not.toHaveBeenCalled();
}));
});
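// Null-content chunks interleaved with text are skipped during accumulation
// without breaking the assembled result.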
(0, vitest_1.describe)("complex scenarios", () => {
(0, vitest_1.it)("should handle mixed content and finish_reason chunks", () => __awaiter(void 0, void 0, void 0, function* () {
var _a;
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: null },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: " World" },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: null },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
(0, vitest_1.expect)(eventCall.get()).toBe("Hello World");
}));
});
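// Boundary conditions: null or missing delta content, empty choice arrays,
// very large payloads, out-of-order choice indices, mixed finish reasons,
// Unicode content, stream reader errors, and malformed chunks.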
(0, vitest_1.describe)("edge cases and exceptions", () => {
(0, vitest_1.it)("should handle null delta content", () => __awaiter(void 0, void 0, void 0, function* () {
var _a;
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: null },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
(0, vitest_1.expect)(eventCall.get()).toBe("Hello");
}));
(0, vitest_1.it)("should handle missing delta object", () => __awaiter(void 0, void 0, void 0, function* () {
var _a;
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: {},
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
(0, vitest_1.expect)(eventCall.get()).toBe("Hello");
}));
(0, vitest_1.it)("should handle chunks with no choices", () => __awaiter(void 0, void 0, void 0, function* () {
var _a;
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
(0, vitest_1.expect)(eventCall.get()).toBe("Hello");
}));
(0, vitest_1.it)("should handle very large content chunks", () => __awaiter(void 0, void 0, void 0, function* () {
var _a;
const largeContent = "x".repeat(10000);
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: largeContent },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
// A single chunk that carries content should dispatch the event processor exactly once
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledOnce();
const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
(0, vitest_1.expect)(eventCall.get()).toBe(largeContent);
}));
(0, vitest_1.it)("should handle rapid consecutive chunks", () => __awaiter(void 0, void 0, void 0, function* () {
var _a;
const chunks = Array.from({ length: 100 }, (_, i) => ({
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: i.toString() },
finish_reason: i === 99 ? "stop" : null,
},
],
}));
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(1);
const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
const expectedContent = Array.from({ length: 100 }, (_, i) => i.toString()).join("");
(0, vitest_1.expect)(eventCall.get()).toBe(expectedContent);
}));
(0, vitest_1.it)("should handle out-of-order choice indices", () => __awaiter(void 0, void 0, void 0, function* () {
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 2,
delta: { content: "Third" },
finish_reason: null,
},
{
index: 0,
delta: { content: "First" },
finish_reason: null,
},
{
index: 1,
delta: { content: "Second" },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: " content" },
finish_reason: "stop",
},
{
index: 1,
delta: { content: " content" },
finish_reason: "stop",
},
{
index: 2,
delta: { content: " content" },
finish_reason: "stop",
},
],
},
];
const stream = new ReadableStream({
start(controller) {
chunks.forEach(chunk => controller.enqueue(chunk));
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(3);
const calls = eventProcessor.mock.calls.map(call => call[0]);
(0, vitest_1.expect)(calls[0].get()).toBe("Third content");
(0, vitest_1.expect)(calls[1].get()).toBe("First content");
(0, vitest_1.expect)(calls[2].get()).toBe("Second content");
}));
(0, vitest_1.it)("should handle mixed finish reasons", () => __awaiter(void 0, void 0, void 0, function* () {
var _a, _b;
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: null,
},
{
index: 1,
delta: { content: "World" },
finish_reason: null,
},
],
},
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: " there" },
finish_reason: "stop",
},
{
index: 1,
delta: { content: "!" },
finish_reason: "length",
},
],
},
];
const stream = StreamUtil_1.StreamUtil.from(...chunks);
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledTimes(2);
const firstCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
const secondCall = (_b = eventProcessor.mock.calls[1]) === null || _b === void 0 ? void 0 : _b[0];
(0, vitest_1.expect)(firstCall.get()).toBe("Hello there");
(0, vitest_1.expect)(secondCall.get()).toBe("World!");
yield firstCall.join();
yield secondCall.join();
(0, vitest_1.expect)(firstCall.done()).toBe(true);
(0, vitest_1.expect)(secondCall.done()).toBe(true);
}));
(0, vitest_1.it)("should handle Unicode and special characters", () => __awaiter(void 0, void 0, void 0, function* () {
var _a;
const specialContent = "Hello 🌍! 안녕하세요 مرحبا 🚀 ñáéíóú";
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: specialContent },
finish_reason: "stop",
},
],
},
];
const stream = StreamUtil_1.StreamUtil.from(...chunks);
const eventProcessor = vitest_1.vi.fn();
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
// A single chunk that carries content should dispatch the event processor exactly once
(0, vitest_1.expect)(eventProcessor).toHaveBeenCalledOnce();
const eventCall = (_a = eventProcessor.mock.calls[0]) === null || _a === void 0 ? void 0 : _a[0];
(0, vitest_1.expect)(eventCall.get()).toBe(specialContent);
}));
(0, vitest_1.it)("should handle stream reader errors gracefully", () => __awaiter(void 0, void 0, void 0, function* () {
const chunks = [
{
id: "test-id",
object: "chat.completion.chunk",
created: 1234567890,
model: "gpt-3.5-turbo",
choices: [
{
index: 0,
delta: { content: "Hello" },
finish_reason: null,
},
],
},
];
const stream = new ReadableStream({
start(controller) {
controller.enqueue(chunks[0]);
// Simulate an error in the stream
controller.error(new Error("Stream error"));
},
});
const eventProcessor = vitest_1.vi.fn();
yield (0, vitest_1.expect)((0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor))
.rejects
.toThrow("Stream error");
}));
(0, vitest_1.it)("should handle completely malformed chunks gracefully", () => __awaiter(void 0, void 0, void 0, function* () {
const malformedChunk = {
// Missing required fields
object: "chat.completion.chunk",
choices: [
{
// Missing index
delta: { content: "Hello" },
finish_reason: null,
},
],
};
const stream = new ReadableStream({
start(controller) {
controller.enqueue(malformedChunk);
controller.close();
},
});
const eventProcessor = vitest_1.vi.fn();
// Should not throw; malformed chunks are tolerated and still reduced
const result = yield (0, ChatGptCompletionStreamingUtil_1.reduceStreamingWithDispatch)(stream, eventProcessor);
(0, vitest_1.expect)(result).toBeDefined();
}));
});
});
//# sourceMappingURL=ChatGptCompletionStreamingUtil.spec.js.map