@agentica/core
Version:
Agentic AI Library specialized in LLM Function Calling
83 lines • 4.42 kB
JavaScript
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.getChatCompletionWithStreamingFunction = getChatCompletionWithStreamingFunction;
const uuid_1 = require("uuid");
const AgenticaTokenUsageAggregator_1 = require("../context/internal/AgenticaTokenUsageAggregator");
const factory_1 = require("../factory");
const ChatGptCompletionMessageUtil_1 = require("./ChatGptCompletionMessageUtil");
const StreamUtil_1 = require("./StreamUtil");
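/**
 * Creates a requester function that calls the LLM vendor's chat-completion
 * endpoint in streaming mode, dispatches request/response events, and
 * aggregates token usage from the streamed chunks.
 */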
function getChatCompletionWithStreamingFunction(props) {
return (source, body) => __awaiter(this, void 0, void 0, function* () {
var _a, _b;
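        // Build and dispatch the request event, forcing streaming with usage reporting enabled.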
const event = (0, factory_1.createRequestEvent)({
source,
            body: Object.assign(Object.assign({}, body), {
                model: props.vendor.model,
                stream: true,
                stream_options: { include_usage: true },
            }),
options: Object.assign(Object.assign({}, props.vendor.options), { signal: props.abortSignal }),
});
yield props.dispatch(event);
        // Acquire the completion, retrying failed calls per the configured backoff strategy; the default strategy rethrows immediately (no retries).
const backoffStrategy = (_b = (_a = props.config) === null || _a === void 0 ? void 0 : _a.backoffStrategy) !== null && _b !== void 0 ? _b : ((props) => {
throw props.error;
});
const completion = yield (() => __awaiter(this, void 0, void 0, function* () {
let count = 0;
while (true) {
try {
return yield props.vendor.api.chat.completions.create(event.body, event.options);
}
catch (error) {
const waiting = backoffStrategy({ count, error });
yield new Promise(resolve => setTimeout(resolve, waiting));
count++;
}
}
}))();
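        // Tee the transformed chunk stream: one branch feeds the response event,
        // the other is split again for usage aggregation and the caller's return value.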
        const [streamForEvent, temporaryStream] = StreamUtil_1.StreamUtil.transform(
            completion.toReadableStream(),
            value => ChatGptCompletionMessageUtil_1.ChatGptCompletionMessageUtil.transformCompletionChunk(value),
            props.abortSignal
        ).tee();
const [streamForAggregate, streamForReturn] = temporaryStream.tee();
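        // Background task: drain the aggregation branch and fold reported token usage into props.usage.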
(() => __awaiter(this, void 0, void 0, function* () {
var _a;
const reader = streamForAggregate.getReader();
while (true) {
const chunk = yield reader.read();
if (chunk.done || ((_a = props.abortSignal) === null || _a === void 0 ? void 0 : _a.aborted) === true) {
break;
}
if (chunk.value.usage != null) {
AgenticaTokenUsageAggregator_1.AgenticaTokenUsageAggregator.aggregate({
kind: source,
completionUsage: chunk.value.usage,
usage: props.usage,
});
}
}
}))().catch(() => { });
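        // Dispatch the response event; listeners may consume the live stream or join() the buffered chunks into one completion.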
const [streamForStream, streamForJoin] = streamForEvent.tee();
void props.dispatch({
id: (0, uuid_1.v4)(),
type: "response",
request_id: event.id,
source,
stream: (0, StreamUtil_1.streamDefaultReaderToAsyncGenerator)(streamForStream.getReader(), props.abortSignal),
body: event.body,
options: event.options,
join: () => __awaiter(this, void 0, void 0, function* () {
const chunks = yield StreamUtil_1.StreamUtil.readAll(streamForJoin, props.abortSignal);
return ChatGptCompletionMessageUtil_1.ChatGptCompletionMessageUtil.merge(chunks);
}),
created_at: new Date().toISOString(),
}).catch(() => { });
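        // Hand the remaining branch back to the caller to consume the streamed completion.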
return streamForReturn;
});
}
//# sourceMappingURL=request.js.map
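A minimal usage sketch, with the props shape inferred from what the code above reads (vendor, dispatch, usage, config, abortSignal). The client object, variable names, and the "chat" source tag are illustrative assumptions, not an official @agentica/core example.

// Hypothetical wiring -- every identifier below is assumed, not part of the library.
const controller = new AbortController();
const request = getChatCompletionWithStreamingFunction({
    vendor: { api: openaiClient, model: "gpt-4o-mini", options: {} }, // assumed OpenAI-compatible SDK client
    dispatch: async (event) => { /* forward request/response events to listeners */ },
    usage: tokenUsage, // assumed usage record that the aggregator mutates
    config: {
        // Retry up to 3 times with exponential backoff, then give up and rethrow.
        backoffStrategy: ({ count, error }) => {
            if (count >= 3)
                throw error;
            return Math.pow(2, count) * 500; // wait in milliseconds
        },
    },
    abortSignal: controller.signal,
});
const stream = await request("chat", {
    messages: [{ role: "user", content: "Hello" }],
});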