node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
21 lines • 757 B
JavaScript
/**
 * Converts a `--header` CLI flag value into a headers record.
 *
 * Accepts either a single `"Key: value"` string or an array of them.
 * A single optional space immediately after the colon is stripped
 * (HTTP convention); any further whitespace in the value is preserved.
 *
 * @param {string | string[] | null | undefined} header - header flag value(s)
 * @returns {Record<string, string>} parsed headers; `{}` when no headers were given
 * @throws {Error} when an item contains no `":"`, or when a key repeats
 */
export function resolveHeaderFlag(header) {
    const headerItems = typeof header === "string"
        ? [header]
        : header;

    if (headerItems == null || headerItems.length === 0)
        return {};

    const result = {};
    for (const item of headerItems) {
        const separatorIndex = item.indexOf(":");
        if (separatorIndex < 0)
            throw new Error(`Invalid header item: ${item}`);

        const name = item.slice(0, separatorIndex).trim();
        if (Object.hasOwn(result, name))
            throw new Error(`Duplicate header key: ${name}`);

        // Drop at most one leading space after the colon ("Key: value" → "value").
        const rawValue = item.slice(separatorIndex + 1);
        result[name] = rawValue.startsWith(" ")
            ? rawValue.slice(1)
            : rawValue;
    }

    return result;
}
//# sourceMappingURL=resolveHeaderFlag.js.map