llama-flow
The TypeScript-first prompt engineering toolkit for working with chat-based LLMs.
TypeScript
/// <reference types="node" />
import EventEmitter from 'events';
import { ConfigurationParameters } from 'openai-edge';
import { z } from 'zod';
import { MaybePromise } from './utils';
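/**
 * OpenAI client configuration, extended with optional Azure OpenAI
 * fields. `azureEndpoint` and `azureDeployment` appear to target
 * Azure-hosted deployments; every other field comes from `openai-edge`'s
 * `ConfigurationParameters`.
 */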
export type OpenAIConfigurationParameters = ConfigurationParameters & {
    azureEndpoint?: string;
    azureDeployment?: string;
};
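/**
 * Per-request model settings, mirroring the OpenAI chat completion
 * parameters in camelCase (e.g. `maxTokens` for `max_tokens`).
 *
 * Illustrative value (the model name and numbers are assumptions, not
 * defaults of this library):
 * @example
 * const config: ModelConfig = {
 *     model: 'gpt-3.5-turbo',
 *     temperature: 0.7,
 *     maxTokens: 256,
 *     stream: false,
 * };
 */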
export interface ModelConfig {
    model?: string;
    maxTokens?: number;
    temperature?: number;
    topP?: number;
    stop?: string | string[];
    presencePenalty?: number;
    frequencyPenalty?: number;
    logitBias?: Record<string, number>;
    user?: string;
    stream?: boolean;
}
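/**
 * Chat-level configuration. `retainMemory` presumably controls whether
 * prior messages are kept between calls; `options` supplies default
 * `ChatRequestOptions` for the chat.
 */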
export interface ChatConfig {
    retainMemory?: boolean;
    options?: ChatRequestOptions;
}
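/**
 * A system-prompt persona. `prompt` may be a static string or a factory
 * (useful when the prompt should embed runtime values such as the current
 * date), and `qualifiers` look like extra instructions layered onto it.
 *
 * Illustrative value (the wording is an assumption):
 * @example
 * const pirate: Persona = {
 *     prompt: () => `Today is ${new Date().toDateString()}. Talk like a pirate.`,
 *     qualifiers: ['Keep answers under two sentences.'],
 *     config: { temperature: 1 },
 * };
 */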
export interface Persona {
    prompt: string | (() => string);
    qualifiers?: string[];
    config?: Partial<ModelConfig>;
}
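/**
 * Options for a single chat request: retry behavior (`retries` and
 * `retryInterval`, the latter presumably in milliseconds), a request
 * `timeout`, a floor on response size (`minimumResponseTokens`), an
 * override for the message history, and an `EventEmitter` that appears
 * to surface streaming/progress events.
 */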
export type ChatRequestOptions = {
    retries?: number;
    retryInterval?: number;
    timeout?: number;
    minimumResponseTokens?: number;
    messages?: Message[];
    events?: EventEmitter;
};
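/**
 * A model response. `content` is the parsed payload (a string by default,
 * or the schema's type for JSON prompts), `isStream` flags streamed
 * responses, and `usage` carries token counts when the API reports them
 * (usage is typically absent for streamed responses).
 */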
export interface ChatResponse<T = string> {
    content: T;
    isStream: boolean;
    usage?: {
        promptTokens: number;
        completionTokens: number;
        totalTokens: number;
    };
}
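/** A single chat message in the standard OpenAI role/content shape. */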
export interface Message {
    role: 'user' | 'assistant' | 'system';
    content: string;
}
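/**
 * A prompt whose response is validated against a zod schema.
 * `parseResponse` can override the default parsing, `retryMessage` is
 * presumably sent back to the model when validation fails, and
 * `promptRetries` caps those retries.
 *
 * Illustrative value (the schema and wording are assumptions):
 * @example
 * import { z } from 'zod';
 * const citySchema = z.object({ city: z.string() });
 * const prompt: JSONPrompt<typeof citySchema> = {
 *     message: 'Name one city in France. Respond as JSON: {"city": string}',
 *     schema: citySchema,
 *     promptRetries: 2,
 * };
 */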
export interface JSONPrompt<T extends z.ZodType> {
    message: string;
    schema: T;
    parseResponse?: (res: string) => MaybePromise<z.infer<T>>;
    retryMessage?: string;
    promptRetries?: number;
}
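/**
 * A prompt that asks for a bullet-point list. `amount` appears to be the
 * number of bullets and `length` a per-bullet length limit (the unit is
 * not stated in the types).
 */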
export interface BulletPointsPrompt {
    message: string;
    amount?: number;
    length?: number;
    promptRetries?: number;
}
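/** A prompt whose response is presumably coerced to a yes/no boolean. */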
export interface BooleanPrompt {
    message: string;
    promptRetries?: number;
}
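/**
 * The lowest-level prompt: you supply a custom `parse` that either accepts
 * the raw response (`success: true` with `data`) or rejects it, optionally
 * providing a `retryPrompt` to send back to the model.
 *
 * Illustrative value (the parsing rule is an assumption):
 * @example
 * const numberPrompt: RawPrompt<number> = {
 *     message: 'Pick a number between 1 and 10. Reply with digits only.',
 *     parse: async (res) => {
 *         const n = Number(res.content.trim());
 *         return Number.isInteger(n) && n >= 1 && n <= 10
 *             ? { success: true, data: n }
 *             : { success: false, retryPrompt: 'Reply with only an integer from 1 to 10.' };
 *     },
 *     promptRetries: 3,
 * };
 */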
export interface RawPrompt<T = string> {
    message: string;
    parse?: (response: ChatResponse<string>) => MaybePromise<{
        success: false;
        retryPrompt?: string;
    } | {
        success: true;
        data: T;
    }>;
    promptRetries?: number;
}
//# sourceMappingURL=types.d.ts.map