@llumiverse/drivers
LLM driver implementations. Currently supported providers: openai, huggingface, bedrock, replicate.
twelvelabs.d.ts
TypeScript
import { ExecutionOptions, PromptSegment } from "@llumiverse/core";
/** Request payload for the TwelveLabs Pegasus video understanding model. */
export interface TwelvelabsPegasusRequest {
    inputPrompt: string;
    temperature?: number;
    /** Optional structured-output constraint expressed as a JSON schema. */
    responseFormat?: {
        type: "json_schema";
        json_schema: {
            name: string;
            schema: any;
        };
    };
    /** Video input, either inlined as base64 or referenced in S3. */
    mediaSource: {
        base64String?: string;
        s3Location?: {
            uri: string;
            bucketOwner?: string;
        };
    };
    maxOutputTokens?: number;
}
/** Text completion returned by Pegasus. */
export interface TwelvelabsPegasusResponse {
    message: string;
    finishReason: "stop" | "length";
}
/** Request payload for the TwelveLabs Marengo embedding model. */
export interface TwelvelabsMarengoRequest {
    inputType: "text" | "image" | "video" | "audio";
    inputText?: string;
    textTruncate?: "start" | "end";
    /** Media input, either inlined as base64 or referenced in S3. */
    mediaSource?: {
        base64String?: string;
        s3Location?: {
            uri: string;
            bucketOwner?: string;
        };
    };
    embeddingOption?: "visual-text" | "visual-image" | "audio";
    /** Clip windowing parameters for video/audio inputs. */
    startSec?: number;
    lengthSec?: number;
    useFixedLengthSec?: boolean;
    minClipSec?: number;
}
/** A single embedding vector together with the clip range it covers. */
export interface TwelvelabsMarengoResponse {
    embedding: number[];
    embeddingOption: "visual-text" | "visual-image" | "audio";
    startSec: number;
    endSec: number;
}
/** Builds a TwelvelabsPegasusRequest from llumiverse prompt segments and execution options. */
export declare function formatTwelvelabsPegasusPrompt(segments: PromptSegment[], options: ExecutionOptions): Promise<TwelvelabsPegasusRequest>;
//# sourceMappingURL=twelvelabs.d.ts.map
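For orientation, the sketch below shows what a Pegasus request and response can look like when built from these interfaces. The import path, S3 URI, account id, and JSON schema are illustrative assumptions, not values defined by the package.

TypeScript
// A minimal sketch of a Pegasus request/response pair using the interfaces above.
// The import path, bucket URI, and schema are illustrative assumptions.
import type { TwelvelabsPegasusRequest, TwelvelabsPegasusResponse } from "@llumiverse/drivers";

const pegasusRequest: TwelvelabsPegasusRequest = {
    inputPrompt: "Describe the main events in this clip.",
    temperature: 0.2,
    maxOutputTokens: 512,
    // Constrain the answer to a JSON object (hypothetical schema).
    responseFormat: {
        type: "json_schema",
        json_schema: {
            name: "clip_summary",
            schema: {
                type: "object",
                properties: { summary: { type: "string" } },
                required: ["summary"],
            },
        },
    },
    // Reference the video in S3 instead of inlining it as base64.
    mediaSource: {
        s3Location: {
            uri: "s3://my-bucket/videos/demo.mp4", // placeholder
            bucketOwner: "123456789012",           // placeholder account id
        },
    },
};

function readPegasusResponse(response: TwelvelabsPegasusResponse): string {
    if (response.finishReason === "length") {
        // The completion was truncated by maxOutputTokens.
    }
    return response.message;
}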
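The Marengo interfaces compose the same way. Below is a hedged sketch of an embedding request for a video referenced in S3 and a helper that reads the response; the import path and S3 URI are again placeholders.

TypeScript
// A minimal sketch of a Marengo embedding request and response handling.
import type { TwelvelabsMarengoRequest, TwelvelabsMarengoResponse } from "@llumiverse/drivers";

const marengoRequest: TwelvelabsMarengoRequest = {
    inputType: "video",
    embeddingOption: "visual-text",
    mediaSource: {
        s3Location: { uri: "s3://my-bucket/videos/demo.mp4" }, // placeholder
    },
    // Clip windowing parameters; their exact semantics are defined by the Marengo API.
    startSec: 0,
    lengthSec: 30,
    useFixedLengthSec: true,
    minClipSec: 5,
};

function describeEmbedding(response: TwelvelabsMarengoResponse): string {
    return `${response.embeddingOption} embedding of length ${response.embedding.length} ` +
        `covering ${response.startSec}s-${response.endSec}s`;
}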
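Finally, a hedged sketch of calling formatTwelvelabsPegasusPrompt. The exact shapes of PromptSegment and ExecutionOptions are defined in @llumiverse/core, and the fields, export path, and Bedrock model id shown below are assumptions made for illustration only.

TypeScript
// Hypothetical usage of formatTwelvelabsPegasusPrompt; field names and the
// model id are assumptions, not values taken from this declaration file.
import { ExecutionOptions, PromptSegment } from "@llumiverse/core";
import { formatTwelvelabsPegasusPrompt } from "@llumiverse/drivers"; // assumed export path

async function buildRequest(): Promise<void> {
    const segments = [
        { role: "user", content: "Summarize the key scenes in this video." },
    ] as unknown as PromptSegment[];

    const options = {
        model: "twelvelabs.pegasus-1-2-v1:0", // placeholder model id
    } as unknown as ExecutionOptions;

    // Returns a TwelvelabsPegasusRequest assembled from the segments and options.
    const request = await formatTwelvelabsPegasusPrompt(segments, options);
    console.log(request.inputPrompt);
}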