
node-llama-cpp

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
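As an illustration of that JSON-schema capability, here is a minimal sketch based on the library's documented v3 API as I understand it; the names used (getLlama, LlamaChatSession, createGrammarForJsonSchema) and the model path are assumptions for this example, not taken from the file below.

import {getLlama, LlamaChatSession} from "node-llama-cpp";

// Load a local GGUF model (the path is a placeholder).
const llama = await getLlama();
const model = await llama.loadModel({modelPath: "path/to/model.gguf"});
const context = await model.createContext();
const session = new LlamaChatSession({contextSequence: context.getSequence()});

// Build a grammar from a JSON schema so generation is constrained to match it.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: {type: "string"}
    }
});

const response = await session.prompt("Give a one-word answer.", {grammar});
console.log(grammar.parse(response)); // object expected to match the schema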

// source: `enum gguf_type` in `ggml.h` in the `llama.cpp` source code
export var GgufValueType;
(function (GgufValueType) {
    GgufValueType[GgufValueType["Uint8"] = 0] = "Uint8";
    GgufValueType[GgufValueType["Int8"] = 1] = "Int8";
    GgufValueType[GgufValueType["Uint16"] = 2] = "Uint16";
    GgufValueType[GgufValueType["Int16"] = 3] = "Int16";
    GgufValueType[GgufValueType["Uint32"] = 4] = "Uint32";
    GgufValueType[GgufValueType["Int32"] = 5] = "Int32";
    GgufValueType[GgufValueType["Float32"] = 6] = "Float32";
    GgufValueType[GgufValueType["Bool"] = 7] = "Bool";
    GgufValueType[GgufValueType["String"] = 8] = "String";
    GgufValueType[GgufValueType["Array"] = 9] = "Array";
    GgufValueType[GgufValueType["Uint64"] = 10] = "Uint64";
    GgufValueType[GgufValueType["Int64"] = 11] = "Int64";
    GgufValueType[GgufValueType["Float64"] = 12] = "Float64";
})(GgufValueType || (GgufValueType = {}));
//# sourceMappingURL=GgufFileInfoTypes.js.map
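The file above (GgufFileInfoTypes.js, per its source map) is the compiled output of a TypeScript numeric enum, so the resulting object maps in both directions: name to id (GgufValueType.Uint32 === 4) and id to name (GgufValueType[4] === "Uint32"). The helper below is a hypothetical sketch, not part of node-llama-cpp's public API, showing how a value-type id read from a GGUF metadata header could be resolved to its name.

import {GgufValueType} from "./GgufFileInfoTypes.js";

// Hypothetical helper: map a numeric GGUF value-type id to its enum name
// using the reverse mapping that the compiled enum above provides.
function describeGgufValueType(typeId) {
    const name = GgufValueType[typeId];
    if (name === undefined)
        throw new Error(`Unknown GGUF value type id: ${typeId}`);
    return name;
}

console.log(describeGgufValueType(8));                     // "String"
console.log(describeGgufValueType(GgufValueType.Float32)); // "Float32"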