node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level
18 lines • 656 B
JavaScript
import { contextSizePad } from "../../../config.js";
/**
 * Rounds a context size to a multiple of `padding`, matching llama.cpp's
 * context size alignment.
 *
 * @param {number} value - the requested context size
 * @param {"up" | "down"} padDirection - preferred rounding direction when
 * `value` is not already a multiple of `padding`
 * @param {number} [padding] - alignment unit; defaults to `contextSizePad`
 * from the project config
 * @returns {number} a multiple of `padding`. If `padDirection` is `"down"`
 * but rounding down would fall below one `padding` unit, or if
 * `padDirection` is not recognized, the value is rounded up instead.
 */
export function padSafeContextSize(value, padDirection, padding = contextSizePad) {
    const paddedSize = ggmlPad(value, padding);

    // Already aligned — nothing to do.
    if (paddedSize === value)
        return value;
    else if (padDirection === "up")
        return paddedSize;
    else if (padDirection === "down") {
        // Rounding `value - padding` up lands on the nearest multiple below `value`.
        const smallerPaddedSize = ggmlPad(value - padding, padding);

        // Only round down if the result is still at least one padding unit.
        if (smallerPaddedSize >= padding)
            return smallerPaddedSize;
    }

    // Fallback: round up (unknown direction, or "down" would go too low).
    return paddedSize;
}

/**
 * Rounds `value` up to the nearest multiple of `padding`.
 *
 * Equivalent to ggml's `GGML_PAD` macro, but implemented arithmetically:
 * the original bitmask form `(value + padding - 1) & ~(padding - 1)` relies
 * on JS bitwise operators, which truncate operands to 32 bits and require
 * `padding` to be a power of two. This form gives identical results for
 * those inputs while also handling non-power-of-two paddings and values up
 * to `Number.MAX_SAFE_INTEGER`.
 *
 * @param {number} value
 * @param {number} padding
 * @returns {number}
 */
function ggmlPad(value, padding) {
    return Math.ceil(value / padding) * padding;
}
//# sourceMappingURL=padSafeContextSize.js.map