@llumiverse/common
Public types, enums, and options used by the Llumiverse API.
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.getMaxTokensLimitBedrock = getMaxTokensLimitBedrock;
exports.getBedrockOptions = getBedrockOptions;
const types_js_1 = require("../types.js");
const fallback_js_1 = require("./fallback.js");
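// Helpers for AWS Bedrock models: getMaxTokensLimitBedrock resolves the maximum
// output-token limit for a given model id, and getBedrockOptions builds the
// model-specific option schema exposed through the Llumiverse API.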
function getMaxTokensLimitBedrock(model) {
// Claude models
if (model.includes("claude")) {
if (model.includes("-4-")) {
if (model.includes("opus-")) {
return 32768;
}
return 65536;
}
else if (model.includes("-3-7-")) {
return 131072;
}
else if (model.includes("-3-5-")) {
return 8192;
}
else {
return 4096;
}
}
// Amazon models
else if (model.includes("amazon")) {
if (model.includes("titan")) {
if (model.includes("lite")) {
return 4096;
}
else if (model.includes("express")) {
return 8192;
}
else if (model.includes("premier")) {
return 3072;
}
}
else if (model.includes("nova")) {
return 10000;
}
}
// Mistral models
else if (model.includes("mistral")) {
if (model.includes("8x7b")) {
return 4096;
}
if (model.includes("pixtral-large")) {
return 131072;
}
return 8192;
}
// AI21 models
else if (model.includes("ai21")) {
if (model.includes("j2")) {
if (model.includes("large") || model.includes("mid") || model.includes("ultra")) {
return 8191;
}
return 2048;
}
if (model.includes("jamba")) {
return 4096;
}
}
// Cohere models
else if (model.includes("cohere.command")) {
if (model.includes("command-a")) {
return 8192;
}
return 4096;
}
// Meta models
else if (model.includes("llama")) {
if (model.includes("3-70b") || model.includes("3-8b")) {
return 2048;
}
return 8192;
}
    // Writer models
    else if (model.includes("writer")) {
        if (model.includes("palmyra-x5") || model.includes("palmyra-x4")) {
            return 8192;
        }
    }
    // OpenAI gpt-oss models
    else if (model.includes("gpt-oss")) {
return 128000;
}
// TwelveLabs models
else if (model.includes("twelvelabs")) {
if (model.includes("pegasus")) {
return 4096; // Max output tokens for Pegasus
}
// Marengo is an embedding model, doesn't generate text
return undefined;
}
// Default fallback
return undefined;
}
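// Illustrative usage; the model ids below are examples, and the returned
// values follow the branches above:
//   getMaxTokensLimitBedrock("anthropic.claude-3-5-sonnet-20240620-v1:0"); // 8192
//   getMaxTokensLimitBedrock("mistral.mixtral-8x7b-instruct-v0:1");        // 4096
//   getMaxTokensLimitBedrock("cohere.embed-english-v3");                   // undefined (no known limit)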
function getBedrockOptions(model, option) {
if (model.includes("canvas")) {
const taskTypeList = {
name: "taskType",
type: types_js_1.OptionType.enum,
enum: {
"Text-To-Image": "TEXT_IMAGE",
"Text-To-Image-with-Image-Conditioning": "TEXT_IMAGE_WITH_IMAGE_CONDITIONING",
"Color-Guided-Generation": "COLOR_GUIDED_GENERATION",
"Image-Variation": "IMAGE_VARIATION",
"Inpainting": "INPAINTING",
"Outpainting": "OUTPAINTING",
"Background-Removal": "BACKGROUND_REMOVAL",
},
default: "TEXT_IMAGE",
description: "The type of task to perform",
refresh: true,
};
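        // Options shared by every image-generation task type; cleared below for BACKGROUND_REMOVAL.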
let otherOptions = [
{ name: "width", type: types_js_1.OptionType.numeric, min: 320, max: 4096, default: 512, step: 16, integer: true, description: "The width of the generated image" },
{ name: "height", type: types_js_1.OptionType.numeric, min: 320, max: 4096, default: 512, step: 16, integer: true, description: "The height of the generated image" },
{
name: "quality",
type: types_js_1.OptionType.enum,
enum: { "standard": "standard", "premium": "premium" },
default: "standard",
description: "The quality of the generated image"
},
{ name: "cfgScale", type: types_js_1.OptionType.numeric, min: 1.1, max: 10.0, default: 6.5, step: 0.1, integer: false, description: "The scale of the generated image" },
{ name: "seed", type: types_js_1.OptionType.numeric, min: 0, max: 858993459, default: 12, integer: true, description: "The seed of the generated image" },
{ name: "numberOfImages", type: types_js_1.OptionType.numeric, min: 1, max: 5, default: 1, integer: true, description: "The number of images to generate" },
];
let dependentOptions = [];
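        // Surface the extra options that only apply to the selected task type; the
        // `refresh: true` flag on taskType presumably tells the caller to rebuild
        // this option set whenever the task type changes.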
switch (option?.taskType ?? "TEXT_IMAGE") {
case "TEXT_IMAGE_WITH_IMAGE_CONDITIONING":
dependentOptions.push({
name: "controlMode", type: types_js_1.OptionType.enum, enum: { "CANNY_EDGE": "CANNY_EDGE", "SEGMENTATION": "SEGMENTATION" },
default: "CANNY_EDGE", description: "The control mode of the generated image"
}, { name: "controlStrength", type: types_js_1.OptionType.numeric, min: 0, max: 1, default: 0.7, description: "The control strength of the generated image" });
break;
case "COLOR_GUIDED_GENERATION":
dependentOptions.push({ name: "colors", type: types_js_1.OptionType.string_list, value: [], description: "Hexadecimal color values to guide generation" });
break;
case "IMAGE_VARIATION":
dependentOptions.push({ name: "similarityStrength", type: types_js_1.OptionType.numeric, min: 0.2, max: 1, default: 0.7, description: "The similarity strength of the generated image" });
break;
case "INPAINTING":
                // No additional options
break;
case "OUTPAINTING":
dependentOptions.push({
name: "outPaintingMode", type: types_js_1.OptionType.enum, enum: { "DEFAULT": "DEFAULT", "PRECISE": "PRECISE" },
default: "default", description: "The outpainting mode of the generated image"
});
break;
case "BACKGROUND_REMOVAL":
dependentOptions = [];
otherOptions = [];
break;
}
return {
_option_id: "bedrock-nova-canvas",
options: [
taskTypeList,
...otherOptions,
...dependentOptions,
]
};
}
else {
const max_tokens_limit = getMaxTokensLimitBedrock(model);
        // Not canvas, i.e. the standard AWS Bedrock Converse options
const baseConverseOptions = [
{
name: "max_tokens",
type: types_js_1.OptionType.numeric,
min: 1,
max: max_tokens_limit,
integer: true,
step: 200,
description: "The maximum number of tokens to generate",
},
{
name: "temperature",
type: types_js_1.OptionType.numeric,
min: 0.0,
default: 0.7,
step: 0.1,
description: "A higher temperature biases toward less likely tokens, making the model more creative"
},
{
name: "top_p",
type: types_js_1.OptionType.numeric,
min: 0,
max: 1,
step: 0.1,
description: "Limits token sampling to the cumulative probability of the top p tokens"
},
{
name: "stop_sequence",
type: types_js_1.OptionType.string_list,
value: [],
description: "The generation will halt if one of the stop sequences is output"
}
];
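        // Vendor-specific branches below extend the base Converse options with extra sampling controls.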
if (model.includes("claude")) {
const claudeConverseOptions = [
{
name: "top_k",
type: types_js_1.OptionType.numeric,
min: 1,
integer: true,
step: 1,
description: "Limits token sampling to the top k tokens"
},
];
if (model.includes("-3-7-") || model.includes("-4-")) {
const claudeModeOptions = [
{
name: "thinking_mode",
type: types_js_1.OptionType.boolean,
default: false,
description: "If true, use the extended reasoning mode"
},
];
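                // The thinking-budget options below are only surfaced once thinking_mode is enabled.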
const claudeThinkingOptions = option?.thinking_mode ? [
{
name: "thinking_budget_tokens",
type: types_js_1.OptionType.numeric,
min: 1024,
default: 1024,
integer: true,
step: 100,
description: "The target number of tokens to use for reasoning, not a hard limit."
},
{
name: "include_thoughts",
type: types_js_1.OptionType.boolean,
default: false,
description: "If true, include the reasoning in the response"
},
] : [];
return {
_option_id: "bedrock-claude",
options: [
...baseConverseOptions,
...claudeConverseOptions,
...claudeModeOptions,
...claudeThinkingOptions
]
};
}
return {
_option_id: "bedrock-claude",
options: [...baseConverseOptions, ...claudeConverseOptions]
};
}
else if (model.includes("amazon")) {
            // Titan models also exist but do not support any additional options
if (model.includes("nova")) {
const novaConverseOptions = [
{
name: "top_k",
type: types_js_1.OptionType.numeric,
min: 1,
integer: true,
step: 1,
description: "Limits token sampling to the top k tokens"
},
];
return {
_option_id: "bedrock-nova",
options: [...baseConverseOptions, ...novaConverseOptions]
};
}
}
else if (model.includes("mistral")) {
            // 7B and 8x7B Instruct models
if (model.includes("7b")) {
const mistralConverseOptions = [
{
name: "top_k",
type: types_js_1.OptionType.numeric,
min: 1,
integer: true,
step: 1,
description: "Limits token sampling to the top k tokens"
},
];
return {
_option_id: "bedrock-mistral",
options: [...baseConverseOptions, ...mistralConverseOptions]
};
}
            // Other models, such as Mistral Small, Large, and Large 2, support no additional options.
}
else if (model.includes("ai21")) {
const ai21ConverseOptions = [
{
name: "presence_penalty",
type: types_js_1.OptionType.numeric,
min: -2,
max: 2,
default: 0,
step: 0.1,
description: "A higher presence penalty encourages the model to talk about new topics"
},
{
name: "frequency_penalty",
type: types_js_1.OptionType.numeric,
min: -2,
max: 2,
default: 0,
step: 0.1,
description: "A higher frequency penalty encourages the model to use less common words"
},
];
return {
_option_id: "bedrock-ai21",
options: [...baseConverseOptions, ...ai21ConverseOptions]
};
}
else if (model.includes("cohere.command")) {
const cohereCommandOptions = [
{
name: "top_k",
type: types_js_1.OptionType.numeric,
min: 1,
integer: true,
step: 1,
description: "Limits token sampling to the top k tokens"
},
];
if (model.includes("command-r")) {
const cohereCommandROptions = [
{
name: "frequency_penalty",
type: types_js_1.OptionType.numeric,
min: -2,
max: 2,
default: 0,
step: 0.1,
description: "A higher frequency penalty encourages the model to use less common words"
},
{
name: "presence_penalty",
type: types_js_1.OptionType.numeric,
min: -2,
max: 2,
default: 0,
step: 0.1,
description: "A higher presence penalty encourages the model to talk about new topics"
},
];
return {
_option_id: "bedrock-cohere-command",
options: [...baseConverseOptions, ...cohereCommandOptions, ...cohereCommandROptions]
};
}
}
else if (model.includes("writer")) {
const palmyraConverseOptions = [
{
name: "min_tokens",
type: types_js_1.OptionType.numeric,
min: 1,
max: max_tokens_limit,
                    integer: true,
                    step: 100,
                    description: "The minimum number of tokens to generate"
},
{
name: "seed",
type: types_js_1.OptionType.numeric,
integer: true,
description: "Random seed for generation"
},
{
name: "frequency_penalty",
type: types_js_1.OptionType.numeric,
min: -2,
max: 2,
default: 0,
step: 0.1,
description: "A higher frequency penalty encourages the model to use less common words"
},
{
name: "presence_penalty",
type: types_js_1.OptionType.numeric,
min: -2,
max: 2,
default: 0,
step: 0.1,
description: "A higher presence penalty encourages the model to talk about new topics"
},
];
return {
_option_id: "bedrock-palmyra",
options: [...baseConverseOptions, ...palmyraConverseOptions]
};
}
else if (model.includes("gpt-oss")) {
const gptOssOptions = [
{
name: "reasoning_effort",
type: types_js_1.OptionType.enum,
enum: {
"low": "low",
"medium": "medium",
"high": "high"
},
default: "medium",
description: "The reasoning effort of the model, which affects the quality and speed of the response"
},
];
            // gpt-oss does not support stop sequences, so drop stop_sequence from the base options.
            const baseConverseOptionsNoStop = baseConverseOptions.filter(o => o.name !== "stop_sequence");
return {
_option_id: "bedrock-gpt-oss",
options: [...baseConverseOptionsNoStop, ...gptOssOptions]
};
}
else if (model.includes("twelvelabs")) {
if (model.includes("pegasus")) {
const pegasusOptions = [
{
name: "temperature",
type: types_js_1.OptionType.numeric,
min: 0.0,
max: 1.0,
default: 0.2,
step: 0.1,
description: "Controls randomness in the output"
},
{
name: "max_tokens",
type: types_js_1.OptionType.numeric,
min: 1,
max: 4096,
integer: true,
step: 100,
description: "The maximum number of tokens to generate"
}
];
return {
_option_id: "bedrock-twelvelabs-pegasus",
options: pegasusOptions
};
}
}
        // Fall back to the standard Converse options.
return {
_option_id: "bedrock-converse",
options: baseConverseOptions
};
}
return fallback_js_1.textOptionsFallback;
}
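// Illustrative usage with a hypothetical option state; the shape follows the
// returns above:
//   const schema = getBedrockOptions("anthropic.claude-sonnet-4-20250514-v1:0", { thinking_mode: true });
//   // schema._option_id === "bedrock-claude"; schema.options includes top_k,
//   // thinking_mode, thinking_budget_tokens, and include_thoughts.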
//# sourceMappingURL=bedrock.js.map