node-llama-cpp
Version: (not captured in this extract)
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforces a JSON schema on the model output at the generation level.
16 lines (15 loc) • 898 B
TypeScript
import { GbnfJsonDefList, GbnfJsonSchema, GbnfJsonSchemaToType } from "../../../utils/gbnfJson/types.js";
import { ChatSessionModelFunction } from "../../../types.js";
/**
 * Define a function that can be used by the model in a chat session, and return it.
 *
 * This is a helper function to facilitate defining functions with full TypeScript type information:
 * the `params` GBNF JSON schema is captured as a `const` type parameter, so the `handler` receives
 * a precisely-typed `params` argument derived from the schema via `GbnfJsonSchemaToType`.
 *
 * The handler function can return a Promise, and the return value will be awaited before being returned to the model.
 * @param functionDefinition - The function definition object:
 * `description` — optional human-readable description exposed to the model;
 * `params` — optional GBNF JSON schema (with definitions `Defs`) describing the parameters the model must provide;
 * `handler` — called with the schema-typed parameters; may be sync or async.
 * @returns A `ChatSessionModelFunction` wrapping the definition, typed by the same `Params` schema.
 */
export declare function defineChatSessionFunction<const Params extends GbnfJsonSchema<Defs>, const Defs extends GbnfJsonDefList<Defs>>({ description, params, handler }: {
    description?: string;
    params?: Readonly<Params> & GbnfJsonSchema<Defs>;
    handler: (params: GbnfJsonSchemaToType<NoInfer<Params>>) => Promise<any> | any;
}): ChatSessionModelFunction<NoInfer<Params>>;