import z from "zod";
import { BasicTokenizer } from "../../model-function/tokenize-text/Tokenizer.js";
import { Run } from "../../run/Run.js";
import { RetryFunction } from "../../util/api/RetryFunction.js";
import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
export interface LlamaCppTokenizerSettings {
baseUrl?: string;
retry?: RetryFunction;
throttle?: ThrottleFunction;
}
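/*
 * Settings sketch: every field is optional. The base URL below is an
 * assumption (a common default address for a local llama.cpp server),
 * not something this declaration specifies.
 *
 * const settings: LlamaCppTokenizerSettings = {
 *   baseUrl: "http://127.0.0.1:8080", // assumed local llama.cpp server
 * };
 * const tokenizer = new LlamaCppTokenizer(settings);
 */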
/**
 * Tokenizer for the llama.cpp server.
 *
 * Note: this class implements only the BasicTokenizer interface, so it
 * supports `tokenize` (and `countTokens`), but not `tokenizeWithTexts`
 * or `detokenize`.
 *
 * @example
 * const tokenizer = new LlamaCppTokenizer();
 *
 * const text = "At first, Nox didn't know what to do with the pup.";
 *
 * const tokenCount = await countTokens(tokenizer, text);
 * const tokens = await tokenizer.tokenize(text);
 */
export declare class LlamaCppTokenizer implements BasicTokenizer {
readonly settings: LlamaCppTokenizerSettings;
constructor(settings?: LlamaCppTokenizerSettings);
callTokenizeAPI(text: string, context?: Run): Promise<LlamaCppTokenizationResponse>;
tokenize(text: string): Promise<number[]>;
}
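/*
 * Implementation sketch (not part of this declaration): callTokenizeAPI
 * plausibly POSTs the text to the llama.cpp server's /tokenize endpoint and
 * validates the JSON response against llamaCppTokenizationResponseSchema.
 * The endpoint path and request body shape are assumptions based on the
 * llama.cpp server API, not taken from this file.
 *
 * async function callTokenizeAPISketch(
 *   text: string,
 *   baseUrl = "http://127.0.0.1:8080" // assumed default server address
 * ): Promise<LlamaCppTokenizationResponse> {
 *   const response = await fetch(`${baseUrl}/tokenize`, {
 *     method: "POST",
 *     headers: { "Content-Type": "application/json" },
 *     body: JSON.stringify({ content: text }), // assumed request body shape
 *   });
 *   return llamaCppTokenizationResponseSchema.parse(await response.json());
 * }
 */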
declare const llamaCppTokenizationResponseSchema: z.ZodObject<{
tokens: z.ZodArray<z.ZodNumber, "many">;
}, "strip", z.ZodTypeAny, {
tokens: number[];
}, {
tokens: number[];
}>;
export type LlamaCppTokenizationResponse = z.infer<typeof llamaCppTokenizationResponseSchema>;
export {};
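/*
 * Usage sketch, assuming a llama.cpp server is running locally. The token
 * ids shown are illustrative only; actual values depend on the loaded model.
 *
 * const tokenizer = new LlamaCppTokenizer();
 * const response: LlamaCppTokenizationResponse =
 *   await tokenizer.callTokenizeAPI("At first, Nox didn't know what to do.");
 * console.log(response.tokens); // e.g. [1, 2180, 937, ...] (model-specific)
 * const tokens = await tokenizer.tokenize("Hello world"); // number[] of ids
 */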