node-llama-cpp

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
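A minimal sketch of what generation-level JSON schema enforcement looks like with this package, assuming the getLlama / LlamaChatSession API and a hypothetical local model path:

import { getLlama, LlamaChatSession } from "node-llama-cpp";

const llama = await getLlama();
// Hypothetical path; substitute any local GGUF model file.
const model = await llama.loadModel({ modelPath: "./models/model.gguf" });
const context = await model.createContext();
const session = new LlamaChatSession({ contextSequence: context.getSequence() });

// Build a grammar from a JSON schema; token generation is constrained to match it.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: { type: "string" }
    }
});

const response = await session.prompt("Reply with a short answer.", { grammar });
console.log(grammar.parse(response));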

import process from "process";
import { BuildGpu } from "../types.js";
import { LlamaOptions } from "../getLlama.js";
import { BinaryPlatform } from "./getPlatform.js";
export declare function getGpuTypesToUseForOption(gpu: Required<LlamaOptions>["gpu"], { platform, arch }?: {
    platform?: BinaryPlatform;
    arch?: typeof process.arch;
}): Promise<BuildGpu[]>;
export declare function resolveValidGpuOptionForPlatform(gpu: BuildGpu | "auto", { platform, arch }: {
    platform: BinaryPlatform;
    arch: typeof process.arch;
}): false | "cuda" | "vulkan" | "metal" | "auto";
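These helpers resolve which GPU backends to try for a given "gpu" option on a given platform and architecture. For context, that option corresponds to the gpu field of LlamaOptions; a minimal sketch of passing it through getLlama, assuming the documented llama.gpu getter:

import { getLlama } from "node-llama-cpp";

// "auto" lets the library probe the platform (e.g. Metal on macOS, CUDA or Vulkan elsewhere)
// and fall back to CPU-only inference when no supported GPU is found.
const llama = await getLlama({ gpu: "auto" });

console.log("Active GPU backend:", llama.gpu); // e.g. "metal", "cuda", "vulkan", or false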