UNPKG

node-llama-cpp

Version:

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.

64 lines 7.07 kB
// Public entry point (barrel module) for node-llama-cpp: imports every public
// symbol from its implementation module and re-exports it under one flat API.
// No runtime logic lives here — importing this module only loads the listed files.

// Third-party utilities
import { DisposedError } from "lifecycle-utils";

// Bindings: Llama runtime, binary resolution, GPU detection, log/vocabulary types
import { Llama } from "./bindings/Llama.js";
import { getLlama } from "./bindings/getLlama.js";
import { getLlamaGpuTypes } from "./bindings/utils/getLlamaGpuTypes.js";
import { NoBinaryFoundError } from "./bindings/utils/NoBinaryFoundError.js";
import { LlamaLogLevel, LlamaLogLevelGreaterThan, LlamaLogLevelGreaterThanOrEqual, LlamaVocabularyType } from "./bindings/types.js";

// Model loading and tokenization
import { resolveModelFile } from "./utils/resolveModelFile.js";
import { LlamaModel, LlamaModelInfillTokens, LlamaModelTokens } from "./evaluator/LlamaModel/LlamaModel.js";
import { TokenAttributes } from "./evaluator/LlamaModel/utils/TokenAttributes.js";

// Grammars and JSON-schema-constrained generation
import { LlamaGrammar } from "./evaluator/LlamaGrammar.js";
import { LlamaJsonSchemaGrammar } from "./evaluator/LlamaJsonSchemaGrammar.js";
import { LlamaJsonSchemaValidationError } from "./utils/gbnfJson/utils/validateObjectAgainstGbnfSchema.js";
import { LlamaGrammarEvaluationState } from "./evaluator/LlamaGrammarEvaluationState.js";

// Contexts, embeddings, ranking, and token biasing
import { LlamaContext, LlamaContextSequence } from "./evaluator/LlamaContext/LlamaContext.js";
import { LlamaEmbeddingContext } from "./evaluator/LlamaEmbeddingContext.js";
import { LlamaEmbedding } from "./evaluator/LlamaEmbedding.js";
import { LlamaRankingContext } from "./evaluator/LlamaRankingContext.js";
import { TokenBias } from "./evaluator/TokenBias.js";

// Chat sessions, completion, and usage metering
import { LlamaChatSession } from "./evaluator/LlamaChatSession/LlamaChatSession.js";
import { defineChatSessionFunction } from "./evaluator/LlamaChatSession/utils/defineChatSessionFunction.js";
import { LlamaChat } from "./evaluator/LlamaChat/LlamaChat.js";
import { LlamaChatSessionPromptCompletionEngine } from "./evaluator/LlamaChatSession/utils/LlamaChatSessionPromptCompletionEngine.js";
import { LlamaCompletion } from "./evaluator/LlamaCompletion.js";
import { TokenMeter } from "./evaluator/TokenMeter.js";

// Error types
import { UnsupportedError } from "./utils/UnsupportedError.js";
import { InsufficientMemoryError } from "./utils/InsufficientMemoryError.js";

// Chat wrappers (prompt-format adapters), one per supported model family
import { ChatWrapper } from "./ChatWrapper.js";
import { EmptyChatWrapper } from "./chatWrappers/EmptyChatWrapper.js";
import { DeepSeekChatWrapper } from "./chatWrappers/DeepSeekChatWrapper.js";
import { QwenChatWrapper } from "./chatWrappers/QwenChatWrapper.js";
import { Llama3_2LightweightChatWrapper } from "./chatWrappers/Llama3_2LightweightChatWrapper.js";
import { Llama3_1ChatWrapper } from "./chatWrappers/Llama3_1ChatWrapper.js";
import { Llama3ChatWrapper } from "./chatWrappers/Llama3ChatWrapper.js";
import { Llama2ChatWrapper } from "./chatWrappers/Llama2ChatWrapper.js";
import { MistralChatWrapper } from "./chatWrappers/MistralChatWrapper.js";
import { GeneralChatWrapper } from "./chatWrappers/GeneralChatWrapper.js";
import { ChatMLChatWrapper } from "./chatWrappers/ChatMLChatWrapper.js";
import { FalconChatWrapper } from "./chatWrappers/FalconChatWrapper.js";
import { AlpacaChatWrapper } from "./chatWrappers/AlpacaChatWrapper.js";
import { FunctionaryChatWrapper } from "./chatWrappers/FunctionaryChatWrapper.js";
import { GemmaChatWrapper } from "./chatWrappers/GemmaChatWrapper.js";
import { HarmonyChatWrapper } from "./chatWrappers/HarmonyChatWrapper.js";
import { TemplateChatWrapper } from "./chatWrappers/generic/TemplateChatWrapper.js";
import { JinjaTemplateChatWrapper } from "./chatWrappers/generic/JinjaTemplateChatWrapper.js";
import { resolvableChatWrapperTypeNames, specializedChatWrapperTypeNames, templateChatWrapperTypeNames, resolveChatWrapper, chatWrappers } from "./chatWrappers/utils/resolveChatWrapper.js";
import { ChatModelFunctionsDocumentationGenerator } from "./chatWrappers/utils/ChatModelFunctionsDocumentationGenerator.js";

// LlamaText helpers and chat-history utilities
import { LlamaText, SpecialTokensText, SpecialToken, isLlamaText, tokenizeText } from "./utils/LlamaText.js";
import { appendUserMessageToChatHistory } from "./utils/appendUserMessageToChatHistory.js";

// Token predictors (speculative decoding helpers)
import { TokenPredictor } from "./evaluator/LlamaContext/TokenPredictor.js";
import { DraftSequenceTokenPredictor } from "./evaluator/LlamaContext/tokenPredictors/DraftSequenceTokenPredictor.js";
import { InputLookupTokenPredictor } from "./evaluator/LlamaContext/tokenPredictors/InputLookupTokenPredictor.js";

// Module metadata, GGUF file inspection, and model downloading
import { getModuleVersion } from "./utils/getModuleVersion.js";
import { readGgufFileInfo } from "./gguf/readGgufFileInfo.js";
import { GgufInsights } from "./gguf/insights/GgufInsights.js";
import { GgufInsightsConfigurationResolver } from "./gguf/insights/GgufInsightsConfigurationResolver.js";
import { GgufInsightsTokens } from "./gguf/insights/GgufInsightsTokens.js";
import { createModelDownloader, ModelDownloader, combineModelDownloaders, CombinedModelDownloader } from "./utils/createModelDownloader.js";
import { jsonDumps } from "./chatWrappers/utils/jsonDumps.js";
import { experimentalChunkDocument } from "./evaluator/utils/chunkDocument.js";
import { isChatModelResponseFunctionCall, isChatModelResponseSegment } from "./types.js";
import { GgufArchitectureType, GgufFileType, GgufMetadataTokenizerTokenType, GgufMetadataArchitecturePoolingType, isGgufMetadataOfArchitectureType } from "./gguf/types/GgufMetadataTypes.js";
import { GgmlType } from "./gguf/types/GgufTensorInfoTypes.js";

// Re-export the full public API surface in the original declaration order.
export {
    Llama,
    getLlama,
    getLlamaGpuTypes,
    LlamaLogLevel,
    NoBinaryFoundError,
    resolveModelFile,
    LlamaModel,
    LlamaModelTokens,
    LlamaModelInfillTokens,
    TokenAttributes,
    LlamaGrammar,
    LlamaJsonSchemaGrammar,
    LlamaJsonSchemaValidationError,
    LlamaGrammarEvaluationState,
    LlamaContext,
    LlamaContextSequence,
    TokenBias,
    LlamaEmbeddingContext,
    LlamaEmbedding,
    LlamaRankingContext,
    LlamaChatSession,
    defineChatSessionFunction,
    LlamaChat,
    LlamaChatSessionPromptCompletionEngine,
    LlamaCompletion,
    TokenMeter,
    UnsupportedError,
    InsufficientMemoryError,
    DisposedError,
    ChatWrapper,
    EmptyChatWrapper,
    DeepSeekChatWrapper,
    QwenChatWrapper,
    Llama3_2LightweightChatWrapper,
    Llama3_1ChatWrapper,
    Llama3ChatWrapper,
    Llama2ChatWrapper,
    MistralChatWrapper,
    GeneralChatWrapper,
    ChatMLChatWrapper,
    FalconChatWrapper,
    AlpacaChatWrapper,
    FunctionaryChatWrapper,
    GemmaChatWrapper,
    HarmonyChatWrapper,
    TemplateChatWrapper,
    JinjaTemplateChatWrapper,
    resolveChatWrapper,
    resolvableChatWrapperTypeNames,
    specializedChatWrapperTypeNames,
    templateChatWrapperTypeNames,
    chatWrappers,
    ChatModelFunctionsDocumentationGenerator,
    LlamaText,
    SpecialTokensText,
    SpecialToken,
    isLlamaText,
    tokenizeText,
    TokenPredictor,
    DraftSequenceTokenPredictor,
    InputLookupTokenPredictor,
    appendUserMessageToChatHistory,
    getModuleVersion,
    isChatModelResponseFunctionCall,
    isChatModelResponseSegment,
    LlamaVocabularyType,
    LlamaLogLevelGreaterThan,
    LlamaLogLevelGreaterThanOrEqual,
    readGgufFileInfo,
    GgufArchitectureType,
    GgufFileType,
    GgufMetadataTokenizerTokenType,
    GgufMetadataArchitecturePoolingType,
    GgmlType,
    isGgufMetadataOfArchitectureType,
    GgufInsights,
    GgufInsightsTokens,
    GgufInsightsConfigurationResolver,
    createModelDownloader,
    ModelDownloader,
    combineModelDownloaders,
    CombinedModelDownloader,
    jsonDumps,
    experimentalChunkDocument
};
//# sourceMappingURL=index.js.map