node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
14 lines • 461 B
JavaScript
/**
 * Wraps an optional `AbortSignal` in a fresh `AbortController` so the caller
 * can both observe the external signal and abort independently.
 *
 * Aborting `abortSignal` aborts the returned controller with the same reason.
 * The returned `dispose` function detaches the listener to avoid leaking it
 * on long-lived signals.
 *
 * @param {AbortSignal | null | undefined} abortSignal - external signal to mirror; may be absent
 * @returns {[AbortController, () => void]} the wrapping controller and a cleanup function
 */
export function wrapAbortSignal(abortSignal) {
    const controller = new AbortController();
    function onAbort() {
        controller.abort(abortSignal.reason);
    }
    function dispose() {
        if (abortSignal != null)
            abortSignal.removeEventListener("abort", onAbort);
    }
    if (abortSignal != null) {
        // An "abort" listener added to an already-aborted signal never fires,
        // so propagate the aborted state immediately in that case.
        if (abortSignal.aborted)
            controller.abort(abortSignal.reason);
        else
            // `once: true` — the abort event fires at most once per signal
            abortSignal.addEventListener("abort", onAbort, { once: true });
    }
    return [controller, dispose];
}
//# sourceMappingURL=wrapAbortSignal.js.map
//# sourceMappingURL=wrapAbortSignal.js.map