llama-flow

The TypeScript-first prompt engineering toolkit for working with chat-based LLMs.

chat.d.ts
import { Model } from './models/interface';
import type { ChatConfig, ChatRequestOptions, ChatResponse, Message, Persona, RawPrompt } from './types';
export type SplitRequestFn<T> = (text: string, chunkSize: number) => RawPrompt<T>;
export declare class Chat {
    persona: Persona;
    config: ChatConfig;
    model: Model;
    messages: Message[];
    constructor(persona: Persona, config: ChatConfig, model: Model);
    request<T>(prompt: RawPrompt<T>, opt?: ChatRequestOptions): Promise<ChatResponse<T>>;
    requestWithSplit<T>(originalText: string, requestFn: SplitRequestFn<T>, opt?: ChatRequestOptions, chunkSize?: number, minumChunkSize?: number): Promise<ChatResponse<T>>;
    reset(): void;
}
//# sourceMappingURL=chat.d.ts.map
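The declaration alone does not reveal the shapes of Persona, ChatConfig, Model, RawPrompt, or ChatResponse, so the following is only a minimal sketch of how the Chat class might be driven. The makePrompt helper, the stubbed placeholder values, the chunk size of 2000, and the assumption that requestWithSplit chunks long input and calls the SplitRequestFn per chunk are all inferred from the signatures above, not confirmed llama-flow behavior.

import { Chat, SplitRequestFn } from './chat';
import type { Model } from './models/interface';
import type { ChatConfig, ChatResponse, Persona, RawPrompt } from './types';

// Placeholder stubs: the real shapes of Persona, ChatConfig, and Model are
// defined elsewhere in the package and are not visible in this .d.ts, so
// they are faked here with casts purely for illustration.
const persona = {} as Persona;
const config = {} as ChatConfig;
const model = {} as Model;

// Hypothetical helper: builds a RawPrompt<string> from plain text. The
// actual RawPrompt<T> shape is opaque in this declaration file.
const makePrompt = (text: string) =>
    ({ message: text } as unknown as RawPrompt<string>);

async function summarize(longText: string): Promise<ChatResponse<string>> {
    const chat = new Chat(persona, config, model);

    // Single-shot request: one prompt in, one typed response out.
    const direct = await chat.request(makePrompt(`Summarize:\n${longText}`));

    // requestWithSplit presumably splits long input into chunks and invokes
    // the SplitRequestFn once per chunk to build that chunk's prompt; the
    // chunk size of 2000 here is an arbitrary illustrative value.
    const splitFn: SplitRequestFn<string> = (text, chunkSize) =>
        makePrompt(text.slice(0, chunkSize));
    const chunked = await chat.requestWithSplit(longText, splitFn, undefined, 2000);

    chat.reset(); // clears the accumulated message history for reuse
    return longText.length > 2000 ? chunked : direct;
}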