@elizaos/plugin-bedrock
Version:
Amazon Bedrock plugin for ElizaOS - integrates Claude, Titan, and other foundation models
761 lines (760 loc) • 27 kB
JavaScript
// src/index.ts
import {
BedrockRuntimeClient,
InvokeModelCommand
} from "@aws-sdk/client-bedrock-runtime";
import { fromEnv, fromIni } from "@aws-sdk/credential-providers";
import { EventType, logger, ModelType } from "@elizaos/core";
/**
 * Resolve a configuration value with the precedence:
 * runtime setting > process environment variable > supplied fallback.
 * Empty strings are treated as present (only null/undefined fall through).
 */
function getSetting(runtime, key, defaultValue) {
  const fromRuntime = runtime.getSetting(key);
  if (fromRuntime !== null && fromRuntime !== undefined) {
    return fromRuntime;
  }
  return process.env[key] ?? defaultValue;
}
/**
 * Construct a BedrockRuntimeClient for the configured region.
 * Uses a named AWS credentials profile when AWS_PROFILE is set,
 * otherwise falls back to environment-variable credentials.
 */
function createBedrockClient(runtime) {
  const profile = getSetting(runtime, "AWS_PROFILE");
  const credentials = profile ? fromIni({ profile }) : fromEnv();
  return new BedrockRuntimeClient({
    region: getSetting(runtime, "AWS_REGION", "us-east-1"),
    credentials
  });
}
/**
 * Pick the small (fast/cheap) text model id.
 * An explicit BEDROCK_SMALL_MODEL setting always wins; otherwise a
 * region-scoped Nova Micro cross-region inference profile is chosen
 * for US/EU regions, with the bare model id as the final fallback.
 */
function getSmallModel(runtime) {
  const override = getSetting(runtime, "BEDROCK_SMALL_MODEL");
  if (override) {
    return override;
  }
  const region = getSetting(runtime, "AWS_REGION", "us-east-1");
  if (region.startsWith("eu-")) {
    return "eu.amazon.nova-micro-v1:0";
  }
  if (region.startsWith("us-")) {
    return "us.amazon.nova-micro-v1:0";
  }
  return "amazon.nova-micro-v1:0";
}
/**
 * Pick the large (high-quality) text model id.
 * An explicit BEDROCK_LARGE_MODEL setting always wins; otherwise a
 * region-scoped Nova Pro cross-region inference profile is chosen
 * for US/EU regions, with the bare model id as the final fallback.
 */
function getLargeModel(runtime) {
  const override = getSetting(runtime, "BEDROCK_LARGE_MODEL");
  if (override) {
    return override;
  }
  const region = getSetting(runtime, "AWS_REGION", "us-east-1");
  if (region.startsWith("eu-")) {
    return "eu.amazon.nova-pro-v1:0";
  }
  if (region.startsWith("us-")) {
    return "us.amazon.nova-pro-v1:0";
  }
  return "amazon.nova-pro-v1:0";
}
/** Embedding model id; defaults to Titan Text Embeddings v2. */
function getEmbeddingModel(runtime) {
  const fallback = "amazon.titan-embed-text-v2:0";
  return getSetting(runtime, "BEDROCK_EMBEDDING_MODEL", fallback);
}
/** Image-generation model id; defaults to Titan Image Generator v2. */
function getImageModel(runtime) {
  const fallback = "amazon.titan-image-generator-v2:0";
  return getSetting(runtime, "BEDROCK_IMAGE_MODEL", fallback);
}
/**
 * Vision model used for image description. Defaults to Pixtral Large,
 * using the EU cross-region inference profile for European regions.
 * NOTE(review): the non-EU fallback id "mistral.pixtral-large-2411:0"
 * lacks the "-v1" suffix every other Bedrock id here carries — confirm
 * it against the Bedrock model catalog.
 */
function getImageDescriptionModel(runtime) {
  const override = getSetting(runtime, "BEDROCK_IMAGE_DESCRIPTION_MODEL");
  if (override) {
    return override;
  }
  const region = getSetting(runtime, "AWS_REGION", "us-east-1");
  return region.startsWith("eu-")
    ? "eu.mistral.pixtral-large-2502-v1:0"
    : "mistral.pixtral-large-2411:0";
}
/**
 * Publish a MODEL_USED event so the runtime can account for token usage.
 * @param type   ModelType value describing which capability was used
 * @param usage  object with promptTokens / completionTokens / totalTokens
 */
function emitModelUsageEvent(runtime, type, prompt, usage) {
  const tokens = {
    prompt: usage.promptTokens,
    completion: usage.completionTokens,
    total: usage.totalTokens
  };
  runtime.emitEvent(EventType.MODEL_USED, {
    provider: "bedrock",
    type,
    prompt,
    tokens
  });
}
/**
 * Run a single-turn text generation against an Anthropic Claude model on
 * Bedrock. Emits a usage event when the response reports token counts.
 * NOTE(review): presencePenalty is mapped onto top_p and frequencyPenalty
 * onto top_k — these are not equivalent sampling concepts; confirm the
 * mapping is intentional before relying on it.
 * @returns {Promise<string>} the generated text
 */
async function generateTextWithClaude(runtime, modelId, params) {
  const client = createBedrockClient(runtime);
  const payload = {
    anthropic_version: "bedrock-2023-05-31",
    max_tokens: params.maxTokens || 8192,
    temperature: params.temperature ?? 0.7,
    top_p: params.presencePenalty ?? 0.9,
    top_k: params.frequencyPenalty ? Math.floor(params.frequencyPenalty * 100) : 250,
    messages: [{ role: "user", content: params.prompt }]
  };
  if (runtime.character.system) {
    payload.system = runtime.character.system;
  }
  if (params.stopSequences) {
    payload.stop_sequences = params.stopSequences;
  }
  try {
    const response = await client.send(new InvokeModelCommand({
      modelId,
      contentType: "application/json",
      accept: "application/json",
      body: JSON.stringify(payload)
    }));
    const parsed = JSON.parse(new TextDecoder().decode(response.body));
    if (parsed.usage) {
      const inputTokens = parsed.usage.input_tokens || 0;
      const outputTokens = parsed.usage.output_tokens || 0;
      // Haiku is the "small" Claude tier; everything else is reported large.
      emitModelUsageEvent(
        runtime,
        modelId.includes("haiku") ? ModelType.TEXT_SMALL : ModelType.TEXT_LARGE,
        params.prompt,
        {
          promptTokens: inputTokens,
          completionTokens: outputTokens,
          totalTokens: inputTokens + outputTokens
        }
      );
    }
    return parsed.content[0].text;
  } catch (error) {
    logger.error(`Error generating text with Claude: ${error}`);
    throw error;
  }
}
/**
 * Embed text with the configured Titan embedding model.
 * Requests 1024 normalized dimensions (Titan v2 supports 256/512/1024)
 * and emits a usage event when the response reports a token count.
 * @returns {Promise<number[]>} the embedding vector
 */
async function generateEmbedding(runtime, text) {
  const client = createBedrockClient(runtime);
  const modelId = getEmbeddingModel(runtime);
  const region = getSetting(runtime, "AWS_REGION", "us-east-1");
  const body = JSON.stringify({
    inputText: text,
    dimensions: 1024,
    normalize: true
  });
  try {
    const response = await client.send(new InvokeModelCommand({
      modelId,
      contentType: "application/json",
      accept: "application/json",
      body
    }));
    const parsed = JSON.parse(new TextDecoder().decode(response.body));
    const tokenCount = parsed.inputTextTokenCount;
    if (tokenCount) {
      emitModelUsageEvent(runtime, ModelType.TEXT_EMBEDDING, text, {
        promptTokens: tokenCount || 0,
        completionTokens: 0,
        totalTokens: tokenCount || 0
      });
    }
    return parsed.embedding;
  } catch (error) {
    // Model access is granted per-account, per-region, so access failures
    // get extra remediation guidance.
    if (error.name === "AccessDeniedException") {
      logger.error(`Access denied to embedding model ${modelId} in region ${region}`);
      logger.error(`Please ensure you have requested and been granted access to this model.`);
      logger.error(
        `Visit: https://console.aws.amazon.com/bedrock/home?region=${region}#/modelaccess`
      );
      logger.error(
        `Current region: ${region}. If you have access in a different region, set AWS_REGION environment variable.`
      );
    }
    logger.error(`Error generating embedding: ${error}`);
    throw error;
  }
}
/**
 * Generate image(s) with the Amazon Titan Image Generator.
 *
 * Fix: the Titan image request schema requires a top-level `taskType`
 * field; the previous payload omitted it, so Bedrock rejected the
 * invocation with a ValidationException. "TEXT_IMAGE" is the plain
 * text-to-image task.
 * Also validates the "WIDTHxHEIGHT" size string up front instead of
 * silently sending NaN dimensions to the API.
 *
 * @param params - { prompt, n?, size? } where size is e.g. "1024x1024"
 * @returns {Promise<Array<{url: string}>>} images as data: URLs
 * @throws on a malformed size string or any Bedrock invocation error
 */
async function generateImage(runtime, params) {
  const client = createBedrockClient(runtime);
  const modelId = getImageModel(runtime);
  const [width, height] = (params.size || "1024x1024").split("x").map(Number);
  if (!Number.isFinite(width) || !Number.isFinite(height)) {
    throw new Error(`Invalid image size: ${params.size}`);
  }
  const payload = {
    taskType: "TEXT_IMAGE",
    textToImageParams: {
      text: params.prompt
    },
    imageGenerationConfig: {
      numberOfImages: params.n || 1,
      height,
      width,
      cfgScale: 8,
      // Random seed so repeated identical prompts yield different images.
      seed: Math.floor(Math.random() * 2147483647)
    }
  };
  const input = {
    modelId,
    contentType: "application/json",
    accept: "application/json",
    body: JSON.stringify(payload)
  };
  try {
    const command = new InvokeModelCommand(input);
    const response = await client.send(command);
    const responseBody = JSON.parse(new TextDecoder().decode(response.body));
    // Titan returns raw base64 PNG payloads; wrap each as a data URL.
    return responseBody.images.map((base64Image) => ({
      url: `data:image/png;base64,${base64Image}`
    }));
  } catch (error) {
    logger.error(`Error generating image: ${error}`);
    throw error;
  }
}
// Default instruction used when the caller does not supply a prompt.
const DEFAULT_IMAGE_PROMPT = "Please analyze this image and provide a title and detailed description.";

/**
 * Load image bytes as base64. Accepts a data: URL (payload taken verbatim)
 * or a remote URL (fetched over HTTP).
 * @throws if a remote fetch does not return a 2xx status
 */
async function loadImageBase64(imageUrl) {
  if (imageUrl.startsWith("data:")) {
    return imageUrl.split(",")[1];
  }
  const response = await fetch(imageUrl);
  // Fix: previously a failed fetch (404/403/etc.) went undetected, so the
  // error page body was base64-encoded and sent to the model as image data.
  if (!response.ok) {
    throw new Error(`Failed to fetch image (HTTP ${response.status}): ${imageUrl}`);
  }
  const buffer = await response.arrayBuffer();
  return Buffer.from(buffer).toString("base64");
}

/** Split free-form model output into a { title, description } pair. */
function parseTitleAndDescription(content) {
  const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
  const title = titleMatch?.[1]?.trim() || "Image Analysis";
  const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
  return { title, description };
}

/**
 * Describe an image with the configured vision model (Mistral/Pixtral or
 * Claude). The two model families previously had fully duplicated
 * post-processing; that is now shared.
 *
 * @param params - either an image URL string, or { imageUrl, prompt? }
 * @returns the raw model text when a custom prompt was supplied, otherwise
 *          a { title, description } object parsed from the reply
 */
async function describeImage(runtime, params) {
  const client = createBedrockClient(runtime);
  const modelId = getImageDescriptionModel(runtime);
  const imageUrl = typeof params === "string" ? params : params.imageUrl;
  const promptText = typeof params === "string"
    ? DEFAULT_IMAGE_PROMPT
    : params.prompt || DEFAULT_IMAGE_PROMPT;
  const imageData = await loadImageBase64(imageUrl);
  const isMistral = modelId.includes("mistral") || modelId.includes("pixtral");
  let payload;
  if (isMistral) {
    // Pixtral expects the raw [INST]...[IMG][/INST] prompt format with the
    // image supplied separately as base64.
    payload = {
      prompt: `<s>[INST]${promptText}
[IMG][/INST]`,
      images: [imageData],
      max_tokens: 8192,
      temperature: 0.7
    };
  } else {
    // NOTE(review): media_type is hardcoded to image/jpeg even when the
    // source is a PNG data URL — confirm Claude tolerates the mismatch.
    payload = {
      anthropic_version: "bedrock-2023-05-31",
      max_tokens: 8192,
      messages: [
        {
          role: "user",
          content: [
            {
              type: "image",
              source: {
                type: "base64",
                media_type: "image/jpeg",
                data: imageData
              }
            },
            {
              type: "text",
              text: promptText
            }
          ]
        }
      ]
    };
  }
  try {
    const response = await client.send(new InvokeModelCommand({
      modelId,
      contentType: "application/json",
      accept: "application/json",
      body: JSON.stringify(payload)
    }));
    const responseBody = JSON.parse(new TextDecoder().decode(response.body));
    const content = isMistral ? responseBody.outputs[0].text : responseBody.content[0].text;
    if (responseBody.usage) {
      // The two families report token counts under different key names.
      const usage = isMistral ? {
        promptTokens: responseBody.usage.prompt_tokens || 0,
        completionTokens: responseBody.usage.completion_tokens || 0,
        totalTokens: responseBody.usage.total_tokens || 0
      } : {
        promptTokens: responseBody.usage.input_tokens || 0,
        completionTokens: responseBody.usage.output_tokens || 0,
        totalTokens: (responseBody.usage.input_tokens || 0) + (responseBody.usage.output_tokens || 0)
      };
      emitModelUsageEvent(
        runtime,
        ModelType.IMAGE_DESCRIPTION,
        typeof params === "string" ? params : params.prompt || "",
        usage
      );
    }
    // A caller-supplied custom prompt gets the raw reply; the default prompt
    // is post-processed into a structured { title, description } result.
    const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== DEFAULT_IMAGE_PROMPT;
    if (isCustomPrompt) {
      return content;
    }
    return parseTitleAndDescription(content);
  } catch (error) {
    logger.error(`Error describing image with ${isMistral ? "Mistral" : "Claude"}: ${error}`);
    throw error;
  }
}
/**
 * Ask a model for a JSON object and parse the reply.
 *
 * Supports the Claude, Nova, and Mistral model families, each with its own
 * request payload and response shape. The prompt is suffixed with an
 * instruction to reply with bare JSON; markdown code fences are stripped
 * before parsing, with a brace-matching fallback if parsing still fails.
 *
 * @throws if the model family is unsupported or no valid JSON can be
 *         extracted from the reply
 */
async function generateObject(runtime, params, modelId) {
  const client = createBedrockClient(runtime);
  const enhancedPrompt = `${params.prompt}
Please respond with valid JSON only, no additional text or markdown formatting.`;
  const temperature = params.temperature ?? 0;
  const systemPrompt = runtime.character.system;
  let payload;
  let extractText;
  if (modelId.includes("anthropic") || modelId.includes("claude")) {
    payload = {
      anthropic_version: "bedrock-2023-05-31",
      max_tokens: 8192,
      temperature,
      messages: [{ role: "user", content: enhancedPrompt }]
    };
    if (systemPrompt) {
      payload.system = systemPrompt;
    }
    extractText = (body) => body.content[0].text;
  } else if (modelId.includes("nova")) {
    payload = {
      messages: [{ role: "user", content: [{ text: enhancedPrompt }] }],
      inferenceConfig: { maxTokens: 8192, temperature }
    };
    if (systemPrompt) {
      payload.system = [{ text: systemPrompt }];
    }
    extractText = (body) => body.output.message.content[0].text;
  } else if (modelId.includes("mistral")) {
    payload = {
      prompt: `<s>[INST] ${enhancedPrompt} [/INST]`,
      max_tokens: 8192,
      temperature
    };
    extractText = (body) => body.outputs[0].text;
  } else {
    throw new Error(`Unsupported model for object generation: ${modelId}`);
  }
  try {
    const response = await client.send(new InvokeModelCommand({
      modelId,
      contentType: "application/json",
      accept: "application/json",
      body: JSON.stringify(payload)
    }));
    const responseBody = JSON.parse(new TextDecoder().decode(response.body));
    const content = extractText(responseBody);
    if (responseBody.usage) {
      // Each model family reports token counts under different key names.
      let usage;
      if (modelId.includes("nova")) {
        const inTok = responseBody.usage.inputTokens || 0;
        const outTok = responseBody.usage.outputTokens || 0;
        usage = { promptTokens: inTok, completionTokens: outTok, totalTokens: inTok + outTok };
      } else if (modelId.includes("mistral")) {
        usage = {
          promptTokens: responseBody.usage.prompt_tokens || 0,
          completionTokens: responseBody.usage.completion_tokens || 0,
          totalTokens: responseBody.usage.total_tokens || 0
        };
      } else {
        const inTok = responseBody.usage.input_tokens || 0;
        const outTok = responseBody.usage.output_tokens || 0;
        usage = { promptTokens: inTok, completionTokens: outTok, totalTokens: inTok + outTok };
      }
      const isSmall = modelId.includes("haiku") || modelId.includes("micro") || modelId.includes("lite");
      emitModelUsageEvent(
        runtime,
        isSmall ? ModelType.OBJECT_SMALL : ModelType.OBJECT_LARGE,
        params.prompt,
        usage
      );
    }
    try {
      // Strip markdown fences the model may add despite the instructions.
      const cleanedContent = content.replace(/```json\n|\n```|```/g, "").trim();
      return JSON.parse(cleanedContent);
    } catch (parseError) {
      logger.error(`Failed to parse JSON from response: ${parseError}`);
      // Last resort: grab the outermost brace-delimited span.
      const jsonMatch = content.match(/\{[\s\S]*\}/);
      if (jsonMatch) {
        return JSON.parse(jsonMatch[0]);
      }
      throw new Error("Failed to extract valid JSON from model response");
    }
  } catch (error) {
    logger.error(`Error generating object: ${error}`);
    throw error;
  }
}
/**
 * Run a single-turn text generation against an Amazon Nova model.
 * Emits a usage event when the response reports token counts.
 * NOTE(review): presencePenalty is mapped onto topP — confirm that
 * mapping is intentional; they are different sampling concepts.
 * @returns {Promise<string>} the generated text
 */
async function generateTextWithNova(runtime, modelId, params) {
  const client = createBedrockClient(runtime);
  const inferenceConfig = {
    maxTokens: params.maxTokens || 8192,
    temperature: params.temperature ?? 0.7,
    topP: params.presencePenalty ?? 0.9
  };
  if (params.stopSequences) {
    inferenceConfig.stopSequences = params.stopSequences;
  }
  const payload = {
    messages: [{ role: "user", content: [{ text: params.prompt }] }],
    inferenceConfig
  };
  if (runtime.character.system) {
    payload.system = [{ text: runtime.character.system }];
  }
  try {
    const response = await client.send(new InvokeModelCommand({
      modelId,
      contentType: "application/json",
      accept: "application/json",
      body: JSON.stringify(payload)
    }));
    const parsed = JSON.parse(new TextDecoder().decode(response.body));
    if (parsed.usage) {
      const inTok = parsed.usage.inputTokens || 0;
      const outTok = parsed.usage.outputTokens || 0;
      // Micro/Lite are the "small" Nova tiers; everything else is large.
      const isSmall = modelId.includes("micro") || modelId.includes("lite");
      emitModelUsageEvent(
        runtime,
        isSmall ? ModelType.TEXT_SMALL : ModelType.TEXT_LARGE,
        params.prompt,
        { promptTokens: inTok, completionTokens: outTok, totalTokens: inTok + outTok }
      );
    }
    return parsed.output.message.content[0].text;
  } catch (error) {
    logger.error(`Error generating text with Nova: ${error}`);
    throw error;
  }
}
/**
 * Run a single-turn text generation against a Mistral model using its
 * [INST] prompt format. Emits a usage event when token counts are present.
 * NOTE(review): usage is always tagged TEXT_LARGE here, even if a Mistral
 * model is configured as BEDROCK_SMALL_MODEL — confirm intended.
 * @returns {Promise<string>} the generated text
 */
async function generateTextWithMistral(runtime, modelId, params) {
  const client = createBedrockClient(runtime);
  const payload = {
    prompt: `<s>[INST] ${params.prompt} [/INST]`,
    max_tokens: params.maxTokens || 8192,
    temperature: params.temperature ?? 0.7,
    top_p: params.presencePenalty ?? 0.9,
    top_k: params.frequencyPenalty ? Math.floor(params.frequencyPenalty * 100) : 50
  };
  if (params.stopSequences) {
    payload.stop = params.stopSequences;
  }
  try {
    const response = await client.send(new InvokeModelCommand({
      modelId,
      contentType: "application/json",
      accept: "application/json",
      body: JSON.stringify(payload)
    }));
    const parsed = JSON.parse(new TextDecoder().decode(response.body));
    if (parsed.usage) {
      emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, params.prompt, {
        promptTokens: parsed.usage.prompt_tokens || 0,
        completionTokens: parsed.usage.completion_tokens || 0,
        totalTokens: parsed.usage.total_tokens || 0
      });
    }
    return parsed.outputs[0].text;
  } catch (error) {
    logger.error(`Error generating text with Mistral: ${error}`);
    throw error;
  }
}
/**
 * Route a text-generation request to the handler for the model's family,
 * inferred from substrings of the model id.
 * @throws for model ids outside the Claude/Nova/Mistral families
 */
async function generateText(runtime, modelId, params) {
  if (modelId.includes("anthropic") || modelId.includes("claude")) {
    return generateTextWithClaude(runtime, modelId, params);
  }
  if (modelId.includes("nova")) {
    return generateTextWithNova(runtime, modelId, params);
  }
  if (modelId.includes("mistral")) {
    return generateTextWithMistral(runtime, modelId, params);
  }
  throw new Error(`Unsupported model: ${modelId}`);
}
// Plugin descriptor that wires the Bedrock-backed handlers above into the
// ElizaOS runtime: config snapshot, startup probe, model handlers, and a
// built-in test suite.
var bedrockPlugin = {
name: "bedrock",
description: "Amazon Bedrock plugin for AI model integration",
// Snapshot of the environment variables this plugin reads. Runtime settings
// still take precedence at call time (see getSetting).
config: {
AWS_REGION: process.env.AWS_REGION,
AWS_PROFILE: process.env.AWS_PROFILE,
AWS_ACCESS_KEY_ID: process.env.AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY: process.env.AWS_SECRET_ACCESS_KEY,
BEDROCK_SMALL_MODEL: process.env.BEDROCK_SMALL_MODEL,
BEDROCK_LARGE_MODEL: process.env.BEDROCK_LARGE_MODEL,
BEDROCK_EMBEDDING_MODEL: process.env.BEDROCK_EMBEDDING_MODEL,
BEDROCK_IMAGE_MODEL: process.env.BEDROCK_IMAGE_MODEL,
BEDROCK_IMAGE_DESCRIPTION_MODEL: process.env.BEDROCK_IMAGE_DESCRIPTION_MODEL
},
// Best-effort startup: log resolved model ids and probe the embedding model
// once. All failures are downgraded to warnings; init never throws.
async init(_config, runtime) {
try {
const region = getSetting(runtime, "AWS_REGION", "us-east-1");
logger.log(`Initializing Amazon Bedrock plugin in region: ${region}`);
try {
// Constructing the client only verifies that credentials/config can be
// assembled without throwing; the instance itself is not retained.
const client = createBedrockClient(runtime);
logger.log("Amazon Bedrock client initialized successfully");
logger.log(`Configured models:`);
logger.log(` TEXT_SMALL: ${getSmallModel(runtime)}`);
logger.log(` TEXT_LARGE: ${getLargeModel(runtime)}`);
logger.log(` EMBEDDING: ${getEmbeddingModel(runtime)}`);
logger.log(` IMAGE: ${getImageModel(runtime)}`);
logger.log(` IMAGE_DESCRIPTION: ${getImageDescriptionModel(runtime)}`);
// Cheap live probe: one tiny embedding call surfaces access problems
// at startup rather than on the first real request.
try {
const testEmbedding = await generateEmbedding(runtime, "test");
logger.log(`Embedding model test successful, dimensions: ${testEmbedding.length}`);
} catch (embeddingError) {
if (embeddingError.message?.includes("AccessDeniedException")) {
logger.warn(`No access to embedding model ${getEmbeddingModel(runtime)}`);
logger.warn(
`Please ensure you have requested and been granted access to this model in the ${region} region`
);
} else {
logger.warn(`Embedding model test failed: ${embeddingError.message}`);
}
}
} catch (error) {
logger.warn(`Failed to initialize Bedrock client: ${error}`);
logger.warn(
"Bedrock functionality will be limited until proper AWS credentials are configured"
);
}
} catch (error) {
logger.error(`Bedrock plugin initialization error: ${error}`);
}
},
// Handlers registered per ModelType; the runtime dispatches useModel()
// calls here.
models: {
// Returns a 1024-dim vector. null or invalid/empty input yields a
// deterministic stub vector instead of a Bedrock call; the first element
// (0.1 / 0.2 / 0.3) marks which fallback path produced it.
[ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
if (params === null) {
const testVector = Array(1024).fill(0);
testVector[0] = 0.1;
return testVector;
}
let text;
if (typeof params === "string") {
text = params;
} else if (typeof params === "object" && params.text) {
text = params.text;
} else {
logger.warn("Invalid input format for embedding");
const fallbackVector = Array(1024).fill(0);
fallbackVector[0] = 0.2;
return fallbackVector;
}
if (!text.trim()) {
logger.warn("Empty text for embedding");
const emptyVector = Array(1024).fill(0);
emptyVector[0] = 0.3;
return emptyVector;
}
return await generateEmbedding(runtime, text);
},
// Text generation: model id resolved per tier, then dispatched by family.
[ModelType.TEXT_SMALL]: async (runtime, params) => {
const modelId = getSmallModel(runtime);
logger.log(`[Bedrock] Using TEXT_SMALL model: ${modelId}`);
return await generateText(runtime, modelId, params);
},
[ModelType.TEXT_LARGE]: async (runtime, params) => {
const modelId = getLargeModel(runtime);
logger.log(`[Bedrock] Using TEXT_LARGE model: ${modelId}`);
return await generateText(runtime, modelId, params);
},
[ModelType.IMAGE]: async (runtime, params) => {
logger.log(`[Bedrock] Generating image with prompt: ${params.prompt}`);
return await generateImage(runtime, params);
},
[ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
logger.log("[Bedrock] Analyzing image");
return await describeImage(runtime, params);
},
// Structured (JSON) output: same tiering as text, different parser.
[ModelType.OBJECT_SMALL]: async (runtime, params) => {
const modelId = getSmallModel(runtime);
logger.log(`[Bedrock] Using OBJECT_SMALL model: ${modelId}`);
return await generateObject(runtime, params, modelId);
},
[ModelType.OBJECT_LARGE]: async (runtime, params) => {
const modelId = getLargeModel(runtime);
logger.log(`[Bedrock] Using OBJECT_LARGE model: ${modelId}`);
return await generateObject(runtime, params, modelId);
}
// Note: Bedrock doesn't have native tokenization, transcription, or TTS support
// These would need to be implemented using other AWS services or third-party solutions
},
// Self-tests runnable via the ElizaOS test harness. Each exercises one
// capability end-to-end against live Bedrock (requires credentials and
// model access), throwing on failure.
tests: [
{
name: "bedrock_plugin_tests",
tests: [
// Sanity check that a client can be constructed from current config.
{
name: "bedrock_test_connection",
fn: async (runtime) => {
try {
const client = createBedrockClient(runtime);
logger.log("Bedrock client created successfully");
} catch (error) {
throw new Error(`Failed to create Bedrock client: ${error}`);
}
}
},
// Embedding round-trip; asserts the expected 1024 dimensions.
{
name: "bedrock_test_text_embedding",
fn: async (runtime) => {
try {
const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {
text: "Hello, world!"
});
logger.log("Embedding generated:", embedding.length, "dimensions");
if (embedding.length !== 1024) {
throw new Error(`Expected 1024 dimensions, got ${embedding.length}`);
}
} catch (error) {
logger.error(`Error in test_text_embedding: ${error}`);
throw error;
}
}
},
// Text generation with the small-tier model; asserts non-empty output.
{
name: "bedrock_test_text_small",
fn: async (runtime) => {
try {
const text = await runtime.useModel(ModelType.TEXT_SMALL, {
prompt: "What is the nature of reality in 10 words?"
});
if (text.length === 0) {
throw new Error("Failed to generate text");
}
logger.log("Generated with TEXT_SMALL:", text);
} catch (error) {
logger.error(`Error in test_text_small: ${error}`);
throw error;
}
}
},
// Same as above for the large-tier model.
{
name: "bedrock_test_text_large",
fn: async (runtime) => {
try {
const text = await runtime.useModel(ModelType.TEXT_LARGE, {
prompt: "What is the nature of reality in 10 words?"
});
if (text.length === 0) {
throw new Error("Failed to generate text");
}
logger.log("Generated with TEXT_LARGE:", text);
} catch (error) {
logger.error(`Error in test_text_large: ${error}`);
throw error;
}
}
},
// Image generation; asserts at least one image was returned.
{
name: "bedrock_test_image_generation",
fn: async (runtime) => {
try {
const images = await runtime.useModel(ModelType.IMAGE, {
prompt: "A beautiful sunset over a calm ocean",
n: 1,
size: "1024x1024"
});
logger.log("Generated images:", images.length);
if (images.length === 0) {
throw new Error("No images generated");
}
} catch (error) {
logger.error(`Error in test_image_generation: ${error}`);
throw error;
}
}
},
// Image description against a public test image; checks the structured
// { title, description } shape but only logs (does not throw) on a
// format mismatch.
{
name: "bedrock_test_image_description",
fn: async (runtime) => {
try {
const result = await runtime.useModel(
ModelType.IMAGE_DESCRIPTION,
"https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg"
);
if (result && typeof result === "object" && "title" in result && "description" in result) {
logger.log("Image description:", result);
} else {
logger.error("Invalid image description result format:", result);
}
} catch (error) {
logger.error(`Error in test_image_description: ${error}`);
throw error;
}
}
},
// Structured output; asserts the parsed result is an object.
{
name: "bedrock_test_object_generation",
fn: async (runtime) => {
try {
const result = await runtime.useModel(ModelType.OBJECT_SMALL, {
prompt: "Generate a JSON object with fields: name (string), age (number), and active (boolean)"
});
logger.log("Generated object:", result);
if (typeof result !== "object") {
throw new Error("Expected object output");
}
} catch (error) {
logger.error(`Error in test_object_generation: ${error}`);
throw error;
}
}
}
]
}
]
};
// Expose the plugin as both a named export and the module's default export.
var index_default = bedrockPlugin;
export {
bedrockPlugin,
index_default as default
};
//# sourceMappingURL=index.js.map