@agentica/core
Agentic AI Library specialized in LLM Function Calling
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.reduceStreamingWithDispatch = reduceStreamingWithDispatch;
const _1 = require(".");
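/**
 * Reduce a stream of ChatCompletionChunk objects into a single ChatCompletion,
 * dispatching per-choice text deltas to `eventProcessor` as they arrive.
 *
 * Each choice index gets its own MPSC channel: delta contents are pushed into
 * it as they stream in, and the channel is closed when that choice reports a
 * finish_reason. The event processor receives the live `stream` of text pieces
 * along with `done`, `get`, and `join` accessors for the accumulated and final text.
 */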
function reduceStreamingWithDispatch(stream, eventProcessor, abortSignal) {
return __awaiter(this, void 0, void 0, function* () {
const streamContext = new Map();
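// Per-choice streaming state keyed by `choice.index`: the text accumulated
// so far and the MPSC channel feeding that choice's event-processor stream.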
const nullableCompletion = yield _1.StreamUtil.reduce(stream, (accPromise, chunk) => __awaiter(this, void 0, void 0, function* () {
const acc = yield accPromise;
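// Pushes each choice's delta content into its channel, creating the channel
// (and invoking the event processor) on the first delta, and closes the
// channel once the choice reports a finish_reason.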
const registerContext = (choices) => {
for (const choice of choices) {
// Handle content first, even if finish_reason is present
if (choice.delta.content != null && choice.delta.content !== "") {
// Accumulate this choice's delta content
if (streamContext.has(choice.index)) {
const context = streamContext.get(choice.index);
context.content += choice.delta.content;
context.mpsc.produce(choice.delta.content);
}
else {
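// First delta for this choice index: open a new channel, emit the
// initial content, and hand the accessors to the event processor.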
const mpsc = new _1.MPSC();
streamContext.set(choice.index, {
content: choice.delta.content,
mpsc,
});
mpsc.produce(choice.delta.content);
eventProcessor({
stream: (0, _1.streamDefaultReaderToAsyncGenerator)(mpsc.consumer.getReader()),
done: () => mpsc.done(),
get: () => { var _a, _b; return (_b = (_a = streamContext.get(choice.index)) === null || _a === void 0 ? void 0 : _a.content) !== null && _b !== void 0 ? _b : ""; },
join: () => __awaiter(this, void 0, void 0, function* () {
yield mpsc.waitClosed();
return streamContext.get(choice.index).content;
}),
});
}
}
// Handle finish_reason after content processing
if (choice.finish_reason != null) {
const context = streamContext.get(choice.index);
if (context != null) {
context.mpsc.close();
}
}
}
};
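// On the first iteration the accumulator is still a raw chunk, so both it and
// the incoming chunk are registered and merged; afterwards each chunk is
// accumulated into the already-merged completion.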
if (acc.object === "chat.completion.chunk") {
registerContext([acc, chunk].flatMap(v => v.choices));
return _1.ChatGptCompletionMessageUtil.merge([acc, chunk]);
}
registerContext(chunk.choices);
return _1.ChatGptCompletionMessageUtil.accumulate(acc, chunk);
}), { abortSignal });
if (nullableCompletion == null) {
throw new Error("StreamUtil.reduce did not produce a ChatCompletion. Possible causes: the input stream was empty, invalid, or closed prematurely. "
+ "To debug: check that the stream is properly initialized and contains valid ChatCompletionChunk data. "
+ "You may also enable verbose logging upstream to inspect the stream contents. "
+ `Stream locked: ${stream.locked}.`);
}
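// If the reduced value is still a chunk (typically a stream that held a single
// chunk), merge it into a completion and dispatch each choice's full content
// to the event processor in one shot.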
if (nullableCompletion.object === "chat.completion.chunk") {
const completion = _1.ChatGptCompletionMessageUtil.merge([nullableCompletion]);
completion.choices.forEach((choice) => {
if (choice.message.content != null && choice.message.content !== "") {
eventProcessor({
stream: (0, _1.toAsyncGenerator)(choice.message.content),
done: () => true,
get: () => choice.message.content,
join: () => __awaiter(this, void 0, void 0, function* () { return choice.message.content; }),
});
}
});
return completion;
}
return nullableCompletion;
});
}
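// A minimal usage sketch (assumption: `chunkStream` is a web ReadableStream of
// OpenAI ChatCompletionChunk objects, e.g. adapted from an SDK streaming
// response; producing that stream is outside this module):
//
//   const controller = new AbortController();
//   const completion = await reduceStreamingWithDispatch(
//       chunkStream,
//       async (event) => {
//           for await (const piece of event.stream) process.stdout.write(piece);
//           console.log("\nfinal text:", await event.join());
//       },
//       controller.signal,
//   );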
//# sourceMappingURL=ChatGptCompletionStreamingUtil.js.map