@llumiverse/drivers
Version:
LLM driver implementations. Currently supported drivers include: OpenAI, Hugging Face, Bedrock, Replicate, and Vertex AI (this file covers the Vertex AI Gemini model definition).
18 lines • 1.29 kB
TypeScript
import { Content, GenerateContentResponseUsageMetadata } from "@google/genai";
import { AIModel, Completion, CompletionChunkObject, ExecutionOptions, ExecutionTokenUsage, PromptSegment } from "@llumiverse/core";
import { VertexAIDriver, GenerateContentPrompt } from "../index.js";
import { ModelDefinition } from "../models.js";
export declare function mergeConsecutiveRole(contents: Content[] | undefined): Content[];
/**
 * Model definition for Google Gemini models executed through the Vertex AI
 * driver. Implements the `ModelDefinition` contract with
 * `GenerateContentPrompt` as the prompt type.
 */
export declare class GeminiModelDefinition implements ModelDefinition<GenerateContentPrompt> {
    /** Metadata describing the underlying Gemini model. */
    model: AIModel;
    /**
     * @param modelId - Identifier of the Gemini model to target.
     *   NOTE(review): accepted format (e.g. `gemini-1.5-pro` vs a full
     *   resource path) is not visible in this declaration — confirm in the
     *   implementation.
     */
    constructor(modelId: string);
    /**
     * Adjusts a completion result and its execution options before result
     * validation runs. Returns the (possibly modified) pair.
     */
    preValidationProcessing(result: Completion, options: ExecutionOptions): {
        result: Completion;
        options: ExecutionOptions;
    };
    /**
     * Builds a Gemini `GenerateContentPrompt` from generic prompt segments.
     *
     * @param _driver - Driver instance (unused by this implementation, per
     *   the underscore convention).
     * @param segments - Provider-agnostic prompt segments to convert.
     * @param options - Execution options influencing prompt construction.
     */
    createPrompt(_driver: VertexAIDriver, segments: PromptSegment[], options: ExecutionOptions): Promise<GenerateContentPrompt>;
    /**
     * Converts the SDK's usage metadata into llumiverse's normalized
     * `ExecutionTokenUsage`. Accepts `undefined` since the SDK may omit it.
     */
    usageMetadataToTokenUsage(usageMetadata: GenerateContentResponseUsageMetadata | undefined): ExecutionTokenUsage;
    /**
     * Executes a non-streaming text completion request against Vertex AI and
     * resolves with the full completion.
     */
    requestTextCompletion(driver: VertexAIDriver, prompt: GenerateContentPrompt, options: ExecutionOptions): Promise<Completion>;
    /**
     * Executes a streaming text completion request; resolves with an async
     * iterable of completion chunks as they arrive.
     */
    requestTextCompletionStream(driver: VertexAIDriver, prompt: GenerateContentPrompt, options: ExecutionOptions): Promise<AsyncIterable<CompletionChunkObject>>;
}
//# sourceMappingURL=gemini.d.ts.map