
node-llama-cpp


Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
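As a quick illustration of the JSON-schema feature mentioned in the description: the schema is compiled into a grammar that constrains token sampling, so the model can only produce output matching the schema. The sketch below follows the node-llama-cpp v3 API as documented; the model path and the schema itself are placeholders for illustration and are not part of the file shown on this page.

import {getLlama, LlamaChatSession} from "node-llama-cpp";

// Load a local GGUF model (the path is a placeholder).
const llama = await getLlama();
const model = await llama.loadModel({modelPath: "path/to/model.gguf"});
const context = await model.createContext();
const session = new LlamaChatSession({contextSequence: context.getSequence()});

// Compile a JSON schema into a grammar that constrains generation.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: {type: "string"},
        confidence: {type: "number"}
    }
});

// The grammar is applied while generating, so the response already conforms to the schema.
const response = await session.prompt("Summarize llama.cpp in one sentence.", {grammar});
const parsed = grammar.parse(response);
console.log(parsed.answer, parsed.confidence);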

getModelLinageNames.js (18 lines, 708 B)
export function getModelLinageNames(ggufMetadata) {
    const res = [];
    if (ggufMetadata == null)
        return res;

    // The current model's own name and base file name, when present in the GGUF metadata
    const currentModelInfo = [ggufMetadata?.general?.name, ggufMetadata?.general?.basename]
        .filter((v) => v != null);
    if (currentModelInfo.length > 0)
        res.push(currentModelInfo);

    // Names of the base models this model was derived from, indexed "0".."count - 1"
    if (typeof ggufMetadata?.general?.base_model?.count === "number") {
        for (let i = 0; i < ggufMetadata.general.base_model.count; i++) {
            const baseModel = ggufMetadata.general.base_model[String(i)];
            if (baseModel?.name != null)
                res.push([baseModel.name]);
        }
    }

    return res;
}
//# sourceMappingURL=getModelLinageNames.js.map
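A hypothetical call site, showing the shape of the input this function reads and the lineage list it returns. The metadata object below is made up for illustration; real values come from the parsed header of a GGUF file.

import {getModelLinageNames} from "./getModelLinageNames.js";

// Minimal GGUF metadata stub mirroring the fields the function reads.
const ggufMetadata = {
    general: {
        name: "My Fine-Tune",
        basename: "my-fine-tune",
        base_model: {
            count: 1,
            "0": {name: "Llama 3.1 8B Instruct"}
        }
    }
};

// => [["My Fine-Tune", "my-fine-tune"], ["Llama 3.1 8B Instruct"]]
console.log(getModelLinageNames(ggufMetadata));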