// ai-utils.js — Build AI applications, chatbots, and agents with JavaScript and TypeScript.
// Version: (not captured) · 41 lines (40 loc) · 1.97 kB · TypeScript declaration file
import z from "zod";
import { AbstractModel } from "../../model-function/AbstractModel.js";
import { FunctionOptions } from "../../model-function/FunctionOptions.js";
import { TextEmbeddingModel, TextEmbeddingModelSettings } from "../../model-function/embed-text/TextEmbeddingModel.js";
import { RetryFunction } from "../../util/api/RetryFunction.js";
import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
/**
 * Settings for the llama.cpp text embedding model.
 *
 * Extends the library-wide {@link TextEmbeddingModelSettings} with options
 * for reaching a llama.cpp server and controlling request pacing.
 */
export interface LlamaCppEmbeddingModelSettings extends TextEmbeddingModelSettings {
    /** Base URL of the llama.cpp server (presumably defaults to a localhost endpoint when omitted — confirm in the implementation). */
    baseUrl?: string;
    /** Retry strategy applied to embedding API calls. */
    retry?: RetryFunction;
    /** Throttling strategy applied to embedding API calls. */
    throttle?: ThrottleFunction;
    /** Separate retry/throttle configuration for the tokenizer endpoint. */
    tokenizerSettings?: {
        retry?: RetryFunction;
        throttle?: ThrottleFunction;
    };
}
/**
 * Text embedding model backed by a llama.cpp server.
 *
 * Implements {@link TextEmbeddingModel} with
 * {@link LlamaCppTextEmbeddingResponse} as the raw API response type.
 */
export declare class LlamaCppTextEmbeddingModel extends AbstractModel<LlamaCppEmbeddingModelSettings> implements TextEmbeddingModel<LlamaCppTextEmbeddingResponse, LlamaCppEmbeddingModelSettings> {
    constructor(settings?: LlamaCppEmbeddingModelSettings);
    /** Provider discriminator; always the literal "llamacpp". */
    readonly provider: "llamacpp";
    /** Always `null` — llama.cpp serves whatever model the server was started with, so no model name is known here. */
    get modelName(): null;
    /** The API accepts only one text per call (see {@link callAPI}). */
    readonly maxTextsPerCall = 1;
    /** Not known at the type level for llama.cpp (depends on the loaded model). */
    readonly contextWindowSize: undefined;
    /** Not known at the type level for llama.cpp (depends on the loaded model). */
    readonly embeddingDimensions: undefined;
    private readonly tokenizer;
    /** Tokenizes `text` via the llama.cpp tokenizer endpoint, resolving to token ids. */
    tokenize(text: string): Promise<number[]>;
    /**
     * Low-level call to the embedding API.
     * NOTE(review): takes an array, but `maxTextsPerCall` is 1 — presumably
     * the implementation rejects longer arrays; confirm in the source.
     */
    callAPI(texts: Array<string>, options?: FunctionOptions<LlamaCppEmbeddingModelSettings>): Promise<LlamaCppTextEmbeddingResponse>;
    /** Generates the raw embedding response (`{ embedding: number[] }`) for the given texts. */
    generateEmbeddingResponse(texts: string[], options?: FunctionOptions<LlamaCppEmbeddingModelSettings>): Promise<{
        embedding: number[];
    }>;
    /** Extracts the embedding vectors from a raw API response. */
    extractEmbeddings(response: LlamaCppTextEmbeddingResponse): number[][];
    /** Returns a copy of this model with `additionalSettings` merged over the current settings. */
    withSettings(additionalSettings: Partial<LlamaCppEmbeddingModelSettings>): this;
}
/**
 * Zod schema for the llama.cpp embedding endpoint response: an object with a
 * single `embedding` array of numbers. The "strip" mode means unrecognized
 * keys are dropped during parsing rather than causing a validation error.
 */
declare const llamaCppTextEmbeddingResponseSchema: z.ZodObject<{
    embedding: z.ZodArray<z.ZodNumber, "many">;
}, "strip", z.ZodTypeAny, {
    embedding: number[];
}, {
    embedding: number[];
}>;
/** Parsed embedding response, inferred from the schema: `{ embedding: number[] }`. */
export type LlamaCppTextEmbeddingResponse = z.infer<typeof llamaCppTextEmbeddingResponseSchema>;
export {};