node-llama-cpp

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.

/**
 * Tracks the usage of tokens.
 */
export declare class TokenMeter {
    private _inputTokens;
    private _outputTokens;

    /**
     * The number of input tokens used
     */
    get usedInputTokens(): number;

    /**
     * The number of tokens generated by a model
     */
    get usedOutputTokens(): number;

    /**
     * Get the current state of the token meter
     */
    getState(): TokenMeterState;

    /**
     * Log the usage of tokens
     */
    useTokens(tokens: number, type: "input" | "output"): void;

    /**
     * Get the difference between the current meter and another meter
     */
    diff(meter: TokenMeter | TokenMeterState): {
        usedInputTokens: number;
        usedOutputTokens: number;
    };

    /**
     * Log the usage of tokens on multiple meters
     */
    static useTokens(meters: null | undefined | TokenMeter | readonly TokenMeter[] | ReadonlySet<TokenMeter>, tokens: number, type: "input" | "output"): void;

    /**
     * Get the difference between two meters
     */
    static diff(meter1: TokenMeter | TokenMeterState, meter2: TokenMeter | TokenMeterState): {
        usedInputTokens: number;
        usedOutputTokens: number;
    };
}

export type TokenMeterState = {
    usedInputTokens: number;
    usedOutputTokens: number;
};
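
Below is a minimal usage sketch based only on the declaration above. Constructing a TokenMeter directly is an assumption here (in practice a meter is typically obtained from an existing context sequence rather than created by hand), as is the direction of subtraction in diff(), taken to be the current counts minus the given snapshot.

import {TokenMeter} from "node-llama-cpp";

// Assumption: constructing a meter directly for illustration
const meter = new TokenMeter();

// Record 120 prompt (input) tokens and 45 generated (output) tokens
meter.useTokens(120, "input");
meter.useTokens(45, "output");

console.log(meter.usedInputTokens);  // 120
console.log(meter.usedOutputTokens); // 45

// Snapshot the current state, record more usage, then measure only
// what happened after the snapshot.
// Assumption: diff() returns current counts minus the given state.
const snapshot = meter.getState();
meter.useTokens(30, "output");
console.log(meter.diff(snapshot)); // {usedInputTokens: 0, usedOutputTokens: 30}

The static variants accept a single meter, an array, or a ReadonlySet, so one call can log the same usage against several meters at once (for example, a per-request meter and a global one).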