node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
27 lines • 967 B
JavaScript
/**
 * Look up a value in a GGUF metadata tree by its dotted key path.
 *
 * @param {object} metadata - Parsed GGUF metadata object (keys may themselves contain dots).
 * @param {string} key - Dotted key path, e.g. `"general.architecture"`.
 * @returns {*} The value at that path, or `undefined` if no key path matches.
 */
export function getGgufMetadataKeyValue(metadata, key) {
    const keyParts = key.split(".");
    return readMedataKey(metadata, keyParts);
}
/**
 * Recursively resolve `keyParts` against a metadata tree whose own keys may
 * contain dots (so a single metadata key can consume several path parts).
 *
 * @param {object} metadata - Current metadata subtree being searched.
 * @param {string[]} keyParts - Remaining path segments to match.
 * @returns {*} The matched value, or `undefined` when no entry matches.
 */
function readMedataKey(metadata, keyParts) {
    for (const [metadataKey, value] of Object.entries(metadata)) {
        const matchLength = checkMatchLength(metadataKey, keyParts);
        if (matchLength === 0)
            continue;
        if (matchLength === keyParts.length)
            return value;
        // Fix: only recurse into object values. The original called
        // `Object.entries(value)` unconditionally, which throws a TypeError
        // when a partially-matched value is `null`; primitives simply cannot
        // contain the remaining path segments, so treat them as no match.
        if (typeof value === "object" && value !== null) {
            const res = readMedataKey(value, keyParts.slice(matchLength));
            if (res !== undefined)
                return res;
        }
    }
    return undefined;
}
/**
 * Determine how many leading segments of `keyParts` a single (possibly
 * dotted) metadata key consumes.
 *
 * @param {string} metadataKey - One key from a metadata object, may contain dots.
 * @param {string[]} keyParts - Remaining requested path segments.
 * @returns {number} The number of segments consumed, or `0` when the key is
 *   not a prefix of `keyParts` (including when it is longer than `keyParts`).
 */
function checkMatchLength(metadataKey, keyParts) {
    const ownParts = metadataKey.split(".");
    if (ownParts.length > keyParts.length)
        return 0;
    const isPrefix = ownParts.every((part, index) => part === keyParts[index]);
    return isPrefix ? ownParts.length : 0;
}
//# sourceMappingURL=getGgufMetadataKeyValue.js.map