@llumiverse/drivers

LLM driver implementations. The currently supported drivers are openai, huggingface, bedrock, and replicate.

// Currently we only support sending either text or an image to the multimodal model.
export async function getEmbeddingsForImages(driver, options) {
    // The API returns a 400 Error if a property is empty, so unused properties are set
    // to undefined so that they are removed from the payload entirely.
    const prompt = {
        instances: [{
            text: options.image ? undefined : options.text,
            image: options.image ? { bytesBase64Encoded: options.image } : undefined,
        }]
    };
    // Default to Google's multimodal embedding model.
    const model = options.model || "multimodalembedding@001";
    const client = driver.getFetchClient();
    const result = await client.post(`/publishers/google/models/${model}:predict`, {
        payload: prompt
    });
    return {
        // Image requests yield imageEmbedding; text-only requests yield textEmbedding.
        values: result.predictions[0].imageEmbedding ?? result.predictions[0].textEmbedding,
        model: model,
    };
}
//# sourceMappingURL=embeddings-image.js.map
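
For illustration, here is a minimal usage sketch of the function above. Constructing a real, authenticated Vertex AI driver is outside the scope of this file, so the sketch substitutes a stub driver whose getFetchClient() returns an object with the post(path, { payload }) shape the function expects. The stub, its fake response, the cat.png path, and the ./embeddings-image.js import path are assumptions for this example and are not part of the package.

// Usage sketch (assumptions marked): exercises getEmbeddingsForImages with a stub driver.
// The real driver supplies an authenticated Vertex AI fetch client; this stub only
// mimics the { post(path, { payload }) } shape the function relies on.
import { readFile } from "node:fs/promises";
import { getEmbeddingsForImages } from "./embeddings-image.js";

const stubDriver = {
    getFetchClient() {
        return {
            async post(path, { payload }) {
                console.log("POST", path, JSON.stringify(payload).slice(0, 80));
                // Fake response shaped like the Vertex AI predict result.
                return { predictions: [{ imageEmbedding: [0.1, 0.2, 0.3] }] };
            },
        };
    },
};

// Base64-encode the image, since the API expects bytesBase64Encoded.
const image = (await readFile("cat.png")).toString("base64");
const { values, model } = await getEmbeddingsForImages(stubDriver, { image });
console.log(model, values.length);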