/**
 * llama-flow — the TypeScript-first prompt engineering toolkit for working
 * with chat-based LLMs.
 * (Version: unspecified; 16 lines • 834 B; TypeScript)
 */
import { OpenAIApi } from 'openai-edge';
import { Chat } from '../chat';
import type { ChatConfig, ChatRequestOptions, ChatResponse, Message, ModelConfig, OpenAIConfigurationParameters, Persona } from '../types';
import type { Model } from './interface';
/**
 * OpenAI-backed implementation of the {@link Model} interface.
 *
 * Wraps an `openai-edge` {@link OpenAIApi} client and exposes chat-session
 * creation, token counting, and raw request methods. The `_isAzure` flag and
 * optional header map suggest Azure OpenAI endpoints are also supported —
 * NOTE(review): Azure behavior is inferred from field names; confirm in the
 * implementation.
 */
export declare class OpenAI implements Model {
/** Underlying `openai-edge` API client used to issue requests. */
_model: OpenAIApi;
/** Presumably true when the configured endpoint is an Azure OpenAI deployment — verify against the constructor. */
_isAzure: boolean;
/** Optional extra HTTP headers attached to outgoing requests, if any. */
_headers?: Record<string, string>;
/** Model-level defaults; presumably merged into each request's config — confirm merge semantics in the implementation. */
defaults: ModelConfig;
/** Chat-level configuration, presumably used as the default for {@link chat} — confirm in the implementation. */
config: ChatConfig;
/**
 * @param config - OpenAI (or Azure) API configuration (credentials, endpoint, etc.).
 * @param defaults - Optional model-level default settings.
 * @param chatConfig - Optional default configuration for chats created via {@link chat}.
 */
constructor(config: OpenAIConfigurationParameters, defaults?: ModelConfig, chatConfig?: ChatConfig);
/** Creates a {@link Chat} session bound to this model for the given persona; `config` overrides assumed — confirm. */
chat(persona: Persona, config?: ChatConfig): Chat;
/** Returns a token count for the given messages (counting method not visible here — see implementation). */
getTokensFromMessages(messages: Message[]): number;
/**
 * Sends the messages to the model and resolves with a string-typed response.
 * Per-call `config` / `requestOptions` presumably override instance defaults —
 * NOTE(review): override semantics not visible in this declaration; confirm.
 */
request(messages: Message[], config?: Partial<ModelConfig>, requestOptions?: Partial<ChatRequestOptions>): Promise<ChatResponse<string>>;
}
//# sourceMappingURL=openai.d.ts.map