node-llama-cpp

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
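To illustrate the schema-enforcement feature the description refers to, here is a minimal sketch of generation-level JSON-schema enforcement using the library's high-level API. The model path, prompt, and schema are placeholders, and exact option names may differ between versions of node-llama-cpp:

import {getLlama, LlamaChatSession} from "node-llama-cpp";

// Load the library and a local GGUF model (the path is a placeholder).
const llama = await getLlama();
const model = await llama.loadModel({modelPath: "path/to/model.gguf"});
const context = await model.createContext();
const session = new LlamaChatSession({contextSequence: context.getSequence()});

// Build a grammar from a JSON schema so generated tokens can only form conforming output.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: {type: "string"}
    }
});

const response = await session.prompt("Answer in JSON: what is llama.cpp?", {grammar});
console.log(grammar.parse(response)); // object matching the schema above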

import { LlamaLogLevel } from "./bindings/types.js";
export declare const llamaDirectory: string;
export declare const llamaToolchainsDirectory: string;
export declare const llamaPrebuiltBinsDirectory: string;
export declare const llamaLocalBuildBinsDirectory: string;
export declare const llamaBinsGrammarsDirectory: string;
export declare const projectTemplatesDirectory: string;
export declare const packedProjectTemplatesDirectory: string;
export declare const llamaCppDirectory: string;
export declare const llamaCppGrammarsDirectory: string;
export declare const tempDownloadDirectory: string;
export declare const cliHomedirDirectory: string;
export declare const chatCommandHistoryFilePath: string;
export declare const cliModelsDirectory: string;
export declare const lastBuildInfoJsonPath: string;
export declare const binariesGithubReleasePath: string;
export declare const llamaCppDirectoryInfoFilePath: string;
export declare const currentReleaseGitBundlePath: string;
export declare const xpackDirectory: string;
export declare const localXpacksStoreDirectory: string;
export declare const localXpacksCacheDirectory: string;
export declare const buildMetadataFileName = "_nlcBuildMetadata.json";
export declare const xpmVersion = "^0.16.3";
export declare const builtinLlamaCppGitHubRepo = "ggml-org/llama.cpp";
export declare const builtinLlamaCppRelease: string;
export declare const isCI: boolean;
export declare const isRunningInsideGoogleColab: boolean;
export declare const useCiLogs: boolean;
export declare const defaultLlamaCppGitHubRepo: string;
export declare const defaultLlamaCppRelease: string;
export declare const defaultLlamaCppGpuSupport: false | "metal" | "cuda" | "vulkan" | "auto";
export declare const defaultLlamaCppLogLevel: LlamaLogLevel;
export declare const defaultLlamaCppDebugMode: boolean;
export declare const defaultSkipDownload: boolean;
export declare const defaultXpacksStoreDirectory: string;
export declare const defaultXpacksCacheDirectory: string;
export declare const customCmakeOptionsEnvVarPrefix = "NODE_LLAMA_CPP_CMAKE_OPTION_";
export declare const defaultChatSystemPrompt: string;
export declare const cliBinName = "node-llama-cpp";
export declare const npxRunPrefix = "npx --no ";
export declare const enableRecursiveClone = false;
export declare const documentationPageUrls: {
    readonly CUDA: string;
    readonly Vulkan: string;
    readonly CLI: {
        readonly index: string;
        readonly Pull: string;
        readonly Chat: string;
        readonly Init: string;
        readonly Complete: string;
        readonly Infill: string;
        readonly Inspect: {
            readonly index: string;
            readonly GPU: string;
            readonly GGUF: string;
            readonly Measure: string;
            readonly Estimate: string;
        };
        readonly Source: {
            readonly index: string;
            readonly Download: string;
            readonly Build: string;
            readonly Clear: string;
        };
    };
    readonly troubleshooting: {
        readonly RosettaIllegalHardwareInstruction: string;
    };
};
export declare const newGithubIssueUrl = "https://github.com/withcatai/node-llama-cpp/issues";
export declare const recommendedBaseDockerImage = "node:20";
export declare const minAllowedContextSizeInCalculations = 24;
export declare const contextSizePad = 256;
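As a rough illustration of how the customCmakeOptionsEnvVarPrefix constant declared above could be consumed, the sketch below collects environment variables carrying that prefix into a map of CMake options. This is an assumption about usage, not the package's actual implementation; the helper name is hypothetical:

// Hypothetical helper (not part of node-llama-cpp): gather custom CMake options
// from environment variables such as NODE_LLAMA_CPP_CMAKE_OPTION_GGML_CUDA=1.
const customCmakeOptionsEnvVarPrefix = "NODE_LLAMA_CPP_CMAKE_OPTION_";

function collectCustomCmakeOptions(env: NodeJS.ProcessEnv = process.env): Map<string, string> {
    const options = new Map<string, string>();

    for (const [key, value] of Object.entries(env)) {
        // Keep only variables that start with the prefix; strip the prefix to get the option name.
        if (key.startsWith(customCmakeOptionsEnvVarPrefix) && value != null)
            options.set(key.slice(customCmakeOptionsEnvVarPrefix.length), value);
    }

    return options;
}

// Example: NODE_LLAMA_CPP_CMAKE_OPTION_GGML_METAL=OFF -> Map { "GGML_METAL" => "OFF" }
console.log(collectCustomCmakeOptions());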