node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
28 lines • 1.17 kB
JavaScript
import { LlamaVocabularyType } from "../bindings/types.js";
/**
 * Determine which token, if any, must be prepended to the input before tokenization.
 *
 * Mirrors the behavior of `llama_tokenize_internal` in `llama.cpp`'s `llama-vocab.cpp`:
 * RWKV vocabularies never get a beginning token, WPM vocabularies always get the BOS token,
 * and all other vocabularies defer to the model's `shouldPrependBosToken` flag.
 * Returns the token to prepend, or `null` when nothing should be prepended.
 */
export function resolveBeginningTokenToPrepend(vocabularyType, tokens) {
    switch (vocabularyType) {
        case LlamaVocabularyType.rwkv:
            return null;
        case LlamaVocabularyType.wpm:
            return tokens.bos;
        default:
            return tokens.shouldPrependBosToken
                ? tokens.bos
                : null;
    }
}
/**
 * Determine which token, if any, must be appended to the input after tokenization.
 *
 * Mirrors the behavior of `llama_tokenize_internal` in `llama.cpp`'s `llama-vocab.cpp`:
 * RWKV vocabularies never get an end token, WPM vocabularies always get the SEP token,
 * and all other vocabularies defer to the model's `shouldAppendEosToken` flag.
 * Returns the token to append, or `null` when nothing should be appended.
 */
export function resolveEndTokenToAppend(vocabularyType, tokens) {
    if (vocabularyType === LlamaVocabularyType.rwkv)
        return null;
    return vocabularyType === LlamaVocabularyType.wpm
        ? tokens.sep
        : (tokens.shouldAppendEosToken ? tokens.eos : null);
}
//# sourceMappingURL=tokenizerUtils.js.map