
node-llama-cpp

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.

import { BinaryPlatform } from "./getPlatform.js";

export declare function detectAvailableComputeLayers({ platform }?: {
    platform?: BinaryPlatform;
}): Promise<{
    cuda: {
        hasNvidiaDriver: boolean;
        hasCudaRuntime: boolean;
    };
    vulkan: boolean;
    metal: boolean;
}>;
export declare function getCudaNvccPaths({ platform }?: {
    platform?: BinaryPlatform;
}): Promise<string[]>;
export declare function getWindowsProgramFilesPaths(): Promise<string[]>;
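
A minimal usage sketch of these declarations. Note that these helpers are part of the package's internal build utilities rather than its documented public API, so the import path below is an assumption for illustration only:

// Sketch only: the relative import path is an assumption about the package's
// internal layout, not a documented public entry point of node-llama-cpp.
import { detectAvailableComputeLayers, getCudaNvccPaths } from "./detectAvailableComputeLayers.js";

async function pickComputeLayer(): Promise<"cuda" | "vulkan" | "metal" | "cpu"> {
    // Probe the current platform for available GPU compute back-ends.
    const layers = await detectAvailableComputeLayers();

    if (layers.cuda.hasNvidiaDriver && layers.cuda.hasCudaRuntime) {
        // Both the NVIDIA driver and the CUDA runtime were detected;
        // list candidate nvcc locations (useful when building a CUDA binary).
        const nvccPaths = await getCudaNvccPaths();
        console.log("CUDA available, nvcc candidates:", nvccPaths);
        return "cuda";
    }
    if (layers.vulkan) return "vulkan";
    if (layers.metal) return "metal";
    return "cpu";
}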