node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
TypeScript
import { Token, Tokenizer } from "../types.js";
import { LlamaText } from "../utils/LlamaText.js";
import type { LlamaModel } from "./LlamaModel/LlamaModel.js";
/**
* @see [Using Token Bias](https://node-llama-cpp.withcat.ai/guide/token-bias) tutorial
*/
export declare class TokenBias {
constructor(tokenizer: Tokenizer);
/**
* Adjust the bias of the given token(s).
*
* If a text is provided, the bias will be applied to each individual token in the text.
*
* Setting a bias to `"never"` will prevent the token from being generated, unless it is required to comply with a grammar.
*
* Setting the bias of the EOS or EOT tokens to `"never"` has no effect and will be ignored.
* @param input - The token(s) to apply the bias to.
* @param bias - The probability bias to apply to the token(s).
*
* Setting to a positive number increases the probability of the token(s) being generated.
*
* Setting to a negative number decreases the probability of the token(s) being generated.
*
* Setting to `0` has no effect.
*
* For example, setting to `0.5` will increase the probability of the token(s) being generated by 50%.
* Setting to `-0.5` will decrease the probability of the token(s) being generated by 50%.
*
* Setting to `"never"` will prevent the token from being generated, unless it is required to comply with a grammar.
*
* Experiment with values between `-0.9` and `0.9` to see what works for your use case.
*/
set(input: Token | Token[] | string | LlamaText, bias: "never" | number | {
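/** A bias applied at the logit level, rather than as a probability bias */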
logit: number;
}): this;
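/**
 * Create a new `TokenBias` instance for the given model or tokenizer.
 */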
static for(modelOrTokenizer: LlamaModel | Tokenizer): TokenBias;
}
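A minimal usage sketch, assuming the library's documented chat-session API (`getLlama`, `LlamaChatSession`, and the `tokenBias` prompt option); the model path and prompt text are placeholders. See the Using Token Bias tutorial linked above for the full flow.

import {getLlama, LlamaChatSession, TokenBias} from "node-llama-cpp";

const llama = await getLlama();
const model = await llama.loadModel({
    modelPath: "path/to/model.gguf" // placeholder: point this at a local GGUF model file
});
const context = await model.createContext();
const session = new LlamaChatSession({
    contextSequence: context.getSequence()
});

// `set` returns `this`, so bias adjustments can be chained
const bias = TokenBias.for(model)
    .set("Hello", "never") // never generate these tokens (unless a grammar requires them)
    .set("hi", -0.9) // make these tokens much less likely
    .set("Howdy", {logit: 4}); // apply a raw logit-level bias to these tokens

const answer = await session.prompt("Greet me!", {
    tokenBias: bias
});
console.log(answer);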