node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
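For context, here is a minimal sketch of schema-constrained generation. It assumes the package's documented v3 public API (getLlama, LlamaChatSession, createGrammarForJsonSchema, grammar.parse) in an ES module with top-level await, and uses a placeholder model path; check the documentation of your installed version for the exact API surface.

import {getLlama, LlamaChatSession} from "node-llama-cpp";

const llama = await getLlama();
const model = await llama.loadModel({
    modelPath: "path/to/model.gguf" // placeholder: any local GGUF model file
});
const context = await model.createContext();
const session = new LlamaChatSession({
    contextSequence: context.getSequence()
});

// The grammar constrains token sampling, so the output always matches the schema
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        summary: {type: "string"},
        isPositive: {type: "boolean"}
    }
});

const response = await session.prompt("Summarize: the build finished without errors.", {grammar});
console.log(grammar.parse(response)); // parsed object conforming to the schema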
TypeScript
import process from "process";
import { BuildMetadataFile, BuildOptions } from "../types.js";
export declare function compileLlamaCpp(buildOptions: BuildOptions, compileOptions: {
    nodeTarget?: string;
    updateLastBuildInfo?: boolean;
    includeBuildOptionsInBinaryFolderName?: boolean;
    ensureLlamaCppRepoIsCloned?: boolean;
    downloadCmakeIfNeeded?: boolean;
    ignoreWorkarounds?: ("cudaArchitecture" | "reduceParallelBuildThreads" | "singleBuildThread" | "avoidWindowsLlvm")[];
    envVars?: typeof process.env;
    ciMode?: boolean;
}): Promise<void>;
export declare function getLocalBuildBinaryPath(folderName: string): Promise<string | null>;
export declare function getLocalBuildBinaryBuildMetadata(folderName: string): Promise<BuildMetadataFile>;
export declare function getPrebuiltBinaryPath(buildOptions: BuildOptions, folderName: string): Promise<{
    binaryPath: string;
    folderName: string;
    folderPath: string;
} | null>;
export declare function getPrebuiltBinaryBuildMetadata(folderPath: string, folderName: string): Promise<BuildMetadataFile>;
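The declared functions compose naturally into a binary-resolution flow: check for an existing local build, fall back to a prebuilt binary for the current build options, and compile llama.cpp from source only when neither exists. A minimal sketch under assumed usage; the resolveBinaryPath helper, the import path, and the fallback order are illustrative, not the package's actual loading logic.

import type {BuildOptions} from "../types.js";
import {
    compileLlamaCpp,
    getLocalBuildBinaryPath,
    getPrebuiltBinaryPath
} from "./compileLlamaCpp.js"; // hypothetical path to the module declared above

export async function resolveBinaryPath(buildOptions: BuildOptions, folderName: string): Promise<string> {
    // Prefer a binary that was already built locally for this configuration
    const localBinaryPath = await getLocalBuildBinaryPath(folderName);
    if (localBinaryPath != null)
        return localBinaryPath;

    // Otherwise, fall back to a prebuilt binary matching the build options
    const prebuiltBinary = await getPrebuiltBinaryPath(buildOptions, folderName);
    if (prebuiltBinary != null)
        return prebuiltBinary.binaryPath;

    // As a last resort, compile llama.cpp from source, then look up the result
    await compileLlamaCpp(buildOptions, {
        ensureLlamaCppRepoIsCloned: true,
        downloadCmakeIfNeeded: true,
        updateLastBuildInfo: true
    });
    const compiledBinaryPath = await getLocalBuildBinaryPath(folderName);
    if (compiledBinaryPath == null)
        throw new Error("llama.cpp compilation finished, but no local binary was found");

    return compiledBinaryPath;
}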