node-llama-cpp

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.

import process from "process";
import { BuildGpu } from "../types.js";
import { BinaryPlatform } from "./getPlatform.js";

export declare function getBestComputeLayersAvailable(): Promise<(false | "cuda" | "vulkan" | "metal")[]>;
export declare function detectBestComputeLayersAvailable({ platform, arch, hasCudaWithStaticBinaryBuild }?: {
    platform?: BinaryPlatform;
    arch?: typeof process.arch;
    hasCudaWithStaticBinaryBuild?: boolean;
}): Promise<BuildGpu[]>;
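
A minimal usage sketch of the two declared functions. The import path below assumes a sibling module within the package's source tree, mirroring the relative imports in the declaration file itself; this utility may not be part of the package's public API, so the exact path is an assumption.

// Sketch only: the import path is hypothetical, based on this file's own
// relative-import convention ("./getPlatform.js").
import {
    getBestComputeLayersAvailable,
    detectBestComputeLayersAvailable
} from "./getBestComputeLayersAvailable.js";

// Probe the current machine. Per the declared return type, each entry is
// "cuda", "vulkan", "metal", or `false` (no GPU compute layer, CPU only).
const layers = await getBestComputeLayersAvailable();
console.log(
    "Preferred compute layer:",
    layers[0] === false ? "CPU only" : layers[0]
);

// The lower-level detector takes an optional options object
// ({ platform, arch, hasCudaWithStaticBinaryBuild }); called with no
// arguments it detects for the current platform and architecture.
const detected = await detectBestComputeLayersAvailable();
console.log("Detected compute layers:", detected);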