UNPKG

node-llama-cpp

Version:

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.

40 lines 1.66 kB
import { GgufFileType } from "../types/GgufMetadataTypes.js";

/**
 * Lookup table from human-readable GGUF quantization names (e.g. "Q4_K_M")
 * to their corresponding `GgufFileType` values.
 *
 * Note the alias entries: the short names "Q3_K", "Q4_K" and "Q5_K" resolve
 * to their medium ("_M") variants, and "COPY" maps to `ALL_F32` like "F32".
 */
export const ggufQuantNames = new Map(Object.entries({
    Q4_0: GgufFileType.MOSTLY_Q4_0,
    Q4_1: GgufFileType.MOSTLY_Q4_1,
    Q5_0: GgufFileType.MOSTLY_Q5_0,
    Q5_1: GgufFileType.MOSTLY_Q5_1,
    IQ2_XXS: GgufFileType.MOSTLY_IQ2_XXS,
    IQ2_XS: GgufFileType.MOSTLY_IQ2_XS,
    IQ2_S: GgufFileType.MOSTLY_IQ2_S,
    IQ2_M: GgufFileType.MOSTLY_IQ2_M,
    IQ1_S: GgufFileType.MOSTLY_IQ1_S,
    IQ1_M: GgufFileType.MOSTLY_IQ1_M,
    TQ1_0: GgufFileType.MOSTLY_TQ1_0,
    TQ2_0: GgufFileType.MOSTLY_TQ2_0,
    Q2_K: GgufFileType.MOSTLY_Q2_K,
    Q2_K_S: GgufFileType.MOSTLY_Q2_K_S,
    IQ3_XXS: GgufFileType.MOSTLY_IQ3_XXS,
    IQ3_S: GgufFileType.MOSTLY_IQ3_S,
    IQ3_M: GgufFileType.MOSTLY_IQ3_M,
    Q3_K: GgufFileType.MOSTLY_Q3_K_M, // alias for the medium variant
    IQ3_XS: GgufFileType.MOSTLY_IQ3_XS,
    Q3_K_S: GgufFileType.MOSTLY_Q3_K_S,
    Q3_K_M: GgufFileType.MOSTLY_Q3_K_M,
    Q3_K_L: GgufFileType.MOSTLY_Q3_K_L,
    IQ4_NL: GgufFileType.MOSTLY_IQ4_NL,
    IQ4_XS: GgufFileType.MOSTLY_IQ4_XS,
    Q4_K: GgufFileType.MOSTLY_Q4_K_M, // alias for the medium variant
    Q4_K_S: GgufFileType.MOSTLY_Q4_K_S,
    Q4_K_M: GgufFileType.MOSTLY_Q4_K_M,
    Q5_K: GgufFileType.MOSTLY_Q5_K_M, // alias for the medium variant
    Q5_K_S: GgufFileType.MOSTLY_Q5_K_S,
    Q5_K_M: GgufFileType.MOSTLY_Q5_K_M,
    Q6_K: GgufFileType.MOSTLY_Q6_K,
    Q8_0: GgufFileType.MOSTLY_Q8_0,
    F16: GgufFileType.MOSTLY_F16,
    BF16: GgufFileType.MOSTLY_BF16,
    F32: GgufFileType.ALL_F32,
    COPY: GgufFileType.ALL_F32, // treated the same as F32
}));
//# sourceMappingURL=ggufQuantNames.js.map