@llumiverse/drivers
Version:
LLM driver implementations. Currently supported drivers: openai, huggingface, bedrock, replicate.
20 lines • 969 B
TypeScript
import { AIModel, Completion, CompletionChunkObject, ExecutionOptions, PromptOptions, PromptSegment } from "@llumiverse/core";
import { VertexAIDriver } from "../index.js";
import { ModelDefinition } from "../models.js";
/**
 * A single chat turn in a Llama-style prompt.
 * NOTE(review): `role` is a plain string here (not a literal union) — presumably
 * values like "user"/"assistant"; confirm against the implementation in llama.ts.
 */
interface LLamaMessage {
    /** Speaker role for this message (e.g. "user" or "assistant" — TODO confirm). */
    role: string;
    /** Text content of the message. */
    content: string;
}
/**
 * Prompt payload shape passed to the Vertex AI Llama endpoints:
 * an ordered list of chat messages.
 */
interface LLamaPrompt {
    /** Conversation messages, in order. */
    messages: LLamaMessage[];
}
/**
 * Model definition for running Llama models through the Vertex AI driver.
 * Implements the generic {@link ModelDefinition} contract specialized to the
 * {@link LLamaPrompt} payload shape.
 *
 * This is an ambient declaration (.d.ts); behavior notes below are inferred
 * from the signatures and should be confirmed against the implementation.
 */
export declare class LLamaModelDefinition implements ModelDefinition<LLamaPrompt> {
    /** Metadata describing the model this definition targets. */
    model: AIModel;
    /** @param modelId Identifier of the Llama model to bind this definition to. */
    constructor(modelId: string);
    /**
     * Resolves the GCP region to use for the given Llama model.
     * NOTE(review): mapping logic not visible here — presumably model-name based;
     * verify in llama.ts.
     * @param modelName Name of the Llama model.
     * @returns The region string (e.g. a GCP location id — TODO confirm).
     */
    getLlamaModelRegion(modelName: string): string;
    /**
     * Builds a {@link LLamaPrompt} from generic prompt segments.
     * @param _driver The Vertex AI driver instance (unused per the `_` prefix).
     * @param segments Ordered prompt segments to convert into chat messages.
     * @param options Prompt construction options.
     */
    createPrompt(_driver: VertexAIDriver, segments: PromptSegment[], options: PromptOptions): Promise<LLamaPrompt>;
    /**
     * Executes a non-streaming text completion request against the model.
     * @param driver The Vertex AI driver used to perform the call.
     * @param prompt The prepared Llama prompt payload.
     * @param options Execution options (model parameters, etc.).
     * @returns The full completion result.
     */
    requestTextCompletion(driver: VertexAIDriver, prompt: LLamaPrompt, options: ExecutionOptions): Promise<Completion>;
    /**
     * Executes a streaming text completion request against the model.
     * @param driver The Vertex AI driver used to perform the call.
     * @param prompt The prepared Llama prompt payload.
     * @param options Execution options (model parameters, etc.).
     * @returns An async iterable of completion chunks as they arrive.
     */
    requestTextCompletionStream(driver: VertexAIDriver, prompt: LLamaPrompt, options: ExecutionOptions): Promise<AsyncIterable<CompletionChunkObject>>;
}
export {};
//# sourceMappingURL=llama.d.ts.map