iobroker.ai-toolbox
The ioBroker AI Toolbox Adapter is a powerful integration that enables users to create and manage custom AI tools within their ioBroker smart home environment. This versatile adapter supports multiple Large Language Models (LLMs) and provides a flexible framework for integrating them into smart home automations.
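A minimal usage sketch, assuming an instance id of ai-toolbox.0 and a configured tool named my_tool (both names are assumptions), showing how a request could be triggered from an ioBroker JavaScript script via the states this adapter creates:

// Hypothetical example for the ioBroker javascript adapter; instance id and tool name are assumptions.
// Writing with ack=false marks the value as a command, which this adapter's onStateChange handler processes.
setState("ai-toolbox.0.Tools.my_tool.text_request", "Summarize the weather for today", false);

// React to the acknowledged answer written back by the adapter.
on({ id: "ai-toolbox.0.Tools.my_tool.text_response", change: "any" }, obj => {
    console.log(`Tool response: ${obj.state.val}`);
});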
"use strict";
/*
* Created with @iobroker/create-adapter v2.6.5
*/
const utils = require("@iobroker/adapter-core");
const axios = require("axios");
const fs = require("fs");
const mime = require("mime-types");
const AnthropicAiProvider = require("./lib/anthropic-ai-provider");
const OpenAiProvider = require("./lib/openai-ai-provider");
const PerplexityAiProvider = require("./lib/perplexity-ai-provider");
const OpenRouterAiProvider = require("./lib/openrouter-ai-provider");
const DeepseekAiProvider = require("./lib/deepseek-ai-provider");
const CustomAiProvider = require("./lib/custom-ai-provider");
class AiToolbox extends utils.Adapter {
/**
* @param [options] - The options object.
*/
constructor(options) {
super({
...options,
name: "ai-toolbox",
});
this.on("ready", this.onReady.bind(this));
this.on("stateChange", this.onStateChange.bind(this));
this.on("message", this.onMessage.bind(this));
this.on("unload", this.onUnload.bind(this));
this.timeouts = [];
}
/**
* Is called when databases are connected and adapter received configuration.
*/
async onReady() {
if (this.config.bots.length == 0) {
this.log.warn("No tools set");
} else {
this.log.debug(`Found ${this.config.bots.length} tools`);
}
// Create Models and Tools objects
await this.setObjectAsync("Models", {
type: "folder",
common: {
name: "AI Models",
},
native: {},
});
await this.setObjectAsync("Tools", {
type: "folder",
common: {
name: "Created AI Tools",
},
native: {},
});
// Create objects for each model
const models = this.getAvailableModels();
for (let model of models) {
const modelName = model.value;
model = this.stringToAlphaNumeric(model.value);
this.log.debug(`Initializing objects for model: ${model}`);
await this.setObjectAsync(`Models.${model}`, {
type: "folder",
common: {
name: model,
},
native: {},
});
await this.setObjectAsync(`Models.${model}.text_request`, {
type: "state",
common: {
name: "Request",
desc: "Start a direct request to the model with the entered text",
type: "string",
role: "text",
read: true,
write: true,
def: "",
},
native: {
model: modelName,
},
});
await this.setObjectNotExistsAsync(`Models.${model}.text_response`, {
type: "state",
common: {
name: "Response",
desc: "The response received from the model",
type: "string",
role: "text",
read: true,
write: false,
def: "",
},
native: {
model: modelName,
},
});
await this.setObjectAsync(`Models.${model}.statistics`, {
type: "folder",
common: {
name: `Statistics for ${modelName}`,
},
native: {},
});
await this.setObjectAsync(`Models.${model}.response`, {
type: "folder",
common: {
name: `Response data for ${modelName}`,
},
native: {},
});
await this.setObjectAsync(`Models.${model}.request`, {
type: "folder",
common: {
name: `Request data for ${modelName}`,
},
native: {},
});
await this.setObjectNotExistsAsync(`Models.${model}.request.state`, {
type: "state",
common: {
name: "State for the running inference request",
type: "string",
role: "text",
read: true,
write: false,
def: "",
},
native: {},
});
await this.setObjectNotExistsAsync(`Models.${model}.request.body`, {
type: "state",
common: {
name: "Sent body for the running inference request",
type: "string",
role: "json",
read: true,
write: false,
def: "",
},
native: {},
});
await this.setObjectNotExistsAsync(`Models.${model}.response.raw`, {
type: "state",
common: {
name: "Raw response from model",
type: "string",
role: "json",
read: true,
write: false,
def: "",
},
native: {},
});
await this.setObjectNotExistsAsync(`Models.${model}.response.error`, {
type: "state",
common: {
name: "Error response from model",
type: "string",
role: "text",
read: true,
write: false,
def: "",
},
native: {},
});
await this.setObjectNotExistsAsync(`Models.${model}.statistics.tokens_input`, {
type: "state",
common: {
name: "Used input tokens for model",
type: "number",
role: "state",
read: true,
write: false,
def: 0,
},
native: {},
});
await this.setObjectNotExistsAsync(`Models.${model}.statistics.tokens_output`, {
type: "state",
common: {
name: "Used output tokens for model",
type: "number",
role: "state",
read: true,
write: false,
def: 0,
},
native: {},
});
await this.setObjectNotExistsAsync(`Models.${model}.statistics.requests_count`, {
type: "state",
common: {
name: "Count of requests for model",
type: "number",
role: "state",
read: true,
write: false,
def: 0,
},
native: {},
});
await this.setObjectNotExistsAsync(`Models.${model}.statistics.last_request`, {
type: "state",
common: {
name: "Last request for model",
type: "string",
role: "date",
read: true,
write: false,
def: "",
},
native: {},
});
}
// Create objects for each tool
for (const bot of this.config.bots) {
bot.bot_name = this.stringToAlphaNumeric(bot.bot_name);
this.log.debug(`Initializing objects for tool: ${bot.bot_name}`);
await this.setObjectAsync(`Tools.${bot.bot_name}`, {
type: "folder",
common: {
name: bot.bot_name,
},
native: bot,
});
if (typeof bot.use_vision !== "undefined" && bot.use_vision) {
await this.setObjectAsync(`Tools.${bot.bot_name}.image_url`, {
type: "state",
common: {
name: "Image URL",
desc: "URL or local filepath to an image to send with the next text request",
type: "string",
role: "text",
read: true,
write: true,
def: "",
},
native: {},
});
}
await this.setObjectAsync(`Tools.${bot.bot_name}.text_request`, {
type: "state",
common: {
name: "Request",
desc: "Start a request to the tool with the entered text",
type: "string",
role: "text",
read: true,
write: true,
def: "",
},
native: bot,
});
await this.setObjectNotExistsAsync(`Tools.${bot.bot_name}.text_response`, {
type: "state",
common: {
name: "Response",
desc: "The response received from the tool",
type: "string",
role: "text",
read: true,
write: false,
def: "",
},
native: bot,
});
await this.setObjectAsync(`Tools.${bot.bot_name}.statistics`, {
type: "folder",
common: {
name: `Statistics for ${bot.bot_name}`,
},
native: {},
});
await this.setObjectAsync(`Tools.${bot.bot_name}.response`, {
type: "folder",
common: {
name: `Response data for ${bot.bot_name}`,
},
native: {},
});
await this.setObjectAsync(`Tools.${bot.bot_name}.request`, {
type: "folder",
common: {
name: `Request data for ${bot.bot_name}`,
},
native: {},
});
await this.setObjectNotExistsAsync(`Tools.${bot.bot_name}.statistics.messages`, {
type: "state",
common: {
name: "Message history",
desc: `Previous messages for tool ${bot.bot_name}`,
type: "string",
role: "json",
read: true,
write: false,
def: '{"messages": []}',
},
native: {},
});
await this.setObjectAsync(`Tools.${bot.bot_name}.statistics.clear_messages`, {
type: "state",
common: {
name: "Clear previous message history",
type: "boolean",
role: "button",
read: false,
write: true,
def: true,
},
native: bot,
});
await this.setObjectNotExistsAsync(`Tools.${bot.bot_name}.request.state`, {
type: "state",
common: {
name: "State for the running inference request",
type: "string",
role: "text",
read: true,
write: false,
def: "",
},
native: {},
});
await this.setObjectNotExistsAsync(`Tools.${bot.bot_name}.request.body`, {
type: "state",
common: {
name: "Sent body for the running inference request",
type: "string",
role: "json",
read: true,
write: false,
def: "",
},
native: {},
});
await this.setObjectNotExistsAsync(`Tools.${bot.bot_name}.response.raw`, {
type: "state",
common: {
name: "Raw response from tool",
type: "string",
role: "json",
read: true,
write: false,
def: "",
},
native: {},
});
await this.setObjectNotExistsAsync(`Tools.${bot.bot_name}.response.error`, {
type: "state",
common: {
name: "Error response from tool",
type: "string",
role: "text",
read: true,
write: false,
def: "",
},
native: {},
});
await this.setObjectNotExistsAsync(`Tools.${bot.bot_name}.statistics.tokens_input`, {
type: "state",
common: {
name: "Used input tokens for tool",
type: "number",
role: "state",
read: true,
write: false,
def: 0,
},
native: {},
});
await this.setObjectNotExistsAsync(`Tools.${bot.bot_name}.statistics.tokens_output`, {
type: "state",
common: {
name: "Used output tokens for tool",
type: "number",
role: "state",
read: true,
write: false,
def: 0,
},
native: {},
});
await this.setObjectNotExistsAsync(`Tools.${bot.bot_name}.statistics.requests_count`, {
type: "state",
common: {
name: "Count of requests for tool",
type: "number",
role: "state",
read: true,
write: false,
def: 0,
},
native: {},
});
await this.setObjectNotExistsAsync(`Tools.${bot.bot_name}.statistics.last_request`, {
type: "state",
common: {
name: "Last request for tool",
type: "string",
role: "date",
read: true,
write: false,
def: "",
},
native: {},
});
}
this.log.debug(`Available models: ${JSON.stringify(this.getAvailableModels())}`);
this.subscribeStates("*");
this.log.info("Adapter ready");
}
/**
* Is called when adapter shuts down - callback has to be called under any circumstances!
*
* @param callback - The callback function.
*/
onUnload(callback) {
try {
for (const timeout of this.timeouts) {
this.clearTimeout(timeout);
}
callback();
} catch (e) {
this.log.error(`Error on unload: ${e}`);
callback();
}
}
/**
* Is called if a subscribed state changes
*
* @param id - The state ID that changed.
* @param state - The state object.
*/
async onStateChange(id, state) {
// Only handle state changes if they are not acknowledged
if (state && state.ack !== false) {
return;
}
if (state) {
// The state was changed
this.log.debug(`state ${id} changed: ${state.val} (ack = ${state.ack})`);
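// clear_messages button: wipe the stored message history and reset the tool's response and request states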
if (id.includes(".clear_messages") && state.val) {
const bot = await this.getObjectAsync(id);
if (bot) {
bot.native.bot_name = this.stringToAlphaNumeric(bot.native.bot_name);
this.log.debug(`Clearing message history for tool ${bot.native.bot_name}`);
await this.setStateAsync(`Tools.${bot.native.bot_name}.statistics.messages`, {
val: '{"messages": []}',
ack: true,
});
await this.setStateAsync(`Tools.${bot.native.bot_name}.response.raw`, {
val: null,
ack: true,
});
await this.setStateAsync(`Tools.${bot.native.bot_name}.text_response`, {
val: null,
ack: true,
});
await this.setStateAsync(`Tools.${bot.native.bot_name}.response.error`, {
val: null,
ack: true,
});
await this.setStateAsync(`Tools.${bot.native.bot_name}.request.body`, {
val: null,
ack: true,
});
await this.setStateAsync(`Tools.${bot.native.bot_name}.request.state`, {
val: null,
ack: true,
});
}
}
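// Tool request: if vision is enabled and an image URL is set, fetch the image first, then start the tool request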
if (id.includes("Tools.") && id.includes(".text_request") && state.val) {
const bot = await this.getObjectAsync(id);
if (bot) {
if (bot.native.use_vision) {
const imageUrl = await this.getStateAsync(`Tools.${bot.native.bot_name}.image_url`);
if (imageUrl && imageUrl.val && imageUrl.val != "") {
const imageData = await this.fetchImageAsBase64(imageUrl.val);
if (imageData.success) {
await this.setStateAsync(`Tools.${bot.native.bot_name}.image_url`, {
val: "",
ack: true,
});
this.startBotRequest(bot.native, state.val, imageData);
} else {
this.log.warn(
`Request stopped, image fetch failed for tool ${bot.native.bot_name} URL: ${
imageUrl.val
}`,
);
await this.setStateAsync(`Tools.${bot.native.bot_name}.request.state`, {
val: "error",
ack: true,
});
await this.setStateAsync(`Tools.${bot.native.bot_name}.response.error`, {
val: "fetching image for request failed",
ack: true,
});
await this.setStateAsync(`Tools.${bot.native.bot_name}.image_url`, {
val: "",
ack: true,
});
}
} else {
this.startBotRequest(bot.native, state.val, null);
}
} else {
this.startBotRequest(bot.native, state.val, null);
}
}
}
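// Direct model request: forward the entered text as a single user message to the selected model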
if (id.includes("Models.") && id.includes(".text_request") && state.val) {
const obj = await this.getObjectAsync(id);
if (obj) {
this.startModelRequest(obj.native.model, [{ role: "user", content: state.val }]);
}
}
}
}
/**
* Starts a request for the selected tool with the specified text.
* Validates the message history and adds the message pair to the history.
* Updates the statistics for the tool with the response data.
* Starts a new request if the previous request failed.
* Logs the request and response data.
* Returns the response data if the request was successful, otherwise false.
*
* @param bot - The bot configuration object.
* @param text - The text to send to the bot.
* @param image - The image to send to the bot.
* @param tries - The number of tries for the request.
* @param try_only_once - If true, the request will only be tried once.
*/
async startBotRequest(bot, text, image = null, tries = 0, try_only_once = false) {
bot.bot_name = this.stringToAlphaNumeric(bot.bot_name);
this.log.info(`Starting request for tool: ${bot.bot_name} Text: ${text}`);
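// Set the request state to "start" on the first attempt only; retries update it further below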
if (tries == 0) {
await this.setStateAsync(`Tools.${bot.bot_name}.request.state`, { val: "start", ack: true });
}
await this.setStateAsync(`Tools.${bot.bot_name}.response.error`, { val: "", ack: true });
const provider = this.getModelProvider(bot.bot_model);
if (provider) {
if (!provider.apiTokenCheck()) {
this.log.warn(`No API token set for provider ${provider.constructor.name}, can't start request!`);
return false;
}
const messages = [];
let messagePairs = { messages: [] };
if (bot.chat_history > 0) {
this.log.debug(`Chat history is enabled for tool ${bot.bot_name}`);
messagePairs = await this.getValidatedMessageHistory(bot);
this.log.debug(`Adding previous message pairs for request: ${JSON.stringify(messagePairs)}`);
}
if (
bot.bot_example_request &&
bot.bot_example_request != "" &&
bot.bot_example_response &&
bot.bot_example_response != ""
) {
messagePairs.messages.unshift({ user: bot.bot_example_request, assistant: bot.bot_example_response });
this.log.debug(`Adding tool example message pair for request: ${JSON.stringify(messagePairs)}`);
}
this.log.debug("Converting message pairs to chat format for request to model");
for (const message of messagePairs.messages) {
if (typeof message.image !== "undefined" && message.image != null) {
this.log.debug(
`Tool ${bot.bot_name} image message detected in chat history message, adding image data`,
);
messages.push({ role: "user", content: message.user, image: message.image });
} else {
messages.push({ role: "user", content: message.user });
}
messages.push({ role: "assistant", content: message.assistant });
}
this.log.debug(`Adding user message to request array: ${text}`);
if (image) {
this.log.debug(`Tool ${bot.bot_name} image request detected, adding image data`);
messages.push({ role: "user", content: text, image: image });
} else {
messages.push({ role: "user", content: text });
}
let modelResponse = {};
modelResponse = await this.startModelRequest(
bot.bot_model,
messages,
bot.bot_system_prompt,
bot.max_tokens,
bot.temperature,
);
let requestCompleted = true;
if (modelResponse.error) {
await this.setStateAsync(`Tools.${bot.bot_name}.request.state`, { val: "error", ack: true });
await this.setStateAsync(`Tools.${bot.bot_name}.request.body`, {
val: JSON.stringify(modelResponse.requestData),
ack: true,
});
await this.setStateAsync(`Tools.${bot.bot_name}.response.error`, {
val: modelResponse.error,
ack: true,
});
await this.setStateAsync(`Tools.${bot.bot_name}.response.raw`, {
val: JSON.stringify(modelResponse.responseData),
ack: true,
});
requestCompleted = false;
} else {
await this.setStateAsync(`Tools.${bot.bot_name}.request.state`, { val: "success", ack: true });
await this.setStateAsync(`Tools.${bot.bot_name}.request.body`, {
val: JSON.stringify(modelResponse.requestData),
ack: true,
});
await this.setStateAsync(`Tools.${bot.bot_name}.response.error`, { val: "", ack: true });
await this.setStateAsync(`Tools.${bot.bot_name}.response.raw`, {
val: JSON.stringify(modelResponse.responseData),
ack: true,
});
await this.setStateAsync(`Tools.${bot.bot_name}.text_response`, {
val: modelResponse.text,
ack: true,
});
this.updateBotStatistics(bot, modelResponse);
}
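// If the model request failed, retry after the configured delay (defaults: 15 seconds, 3 retries) unless try_only_once is set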
if (!requestCompleted) {
if (typeof bot.retry_delay == "undefined") {
bot.retry_delay = 15;
}
if (typeof bot.max_retries == "undefined") {
bot.max_retries = 3;
}
await this.setStateAsync(`Tools.${bot.bot_name}.request.state`, { val: "retry", ack: true });
if (tries < bot.max_retries && !try_only_once) {
let retry_delay = bot.retry_delay * 1000;
if (tries == bot.max_retries) {
retry_delay = 0;
}
this.log.debug(
`Attempt ${tries + 1}/${bot.max_retries} of request for tool ${bot.bot_name} failed. Text: ${text}`,
);
tries = tries + 1;
this.log.debug(
`Retry request for tool ${bot.bot_name} in ${bot.retry_delay} seconds Text: ${text}`,
);
this.timeouts.push(
this.setTimeout(
(bot, tries) => {
this.startBotRequest(bot, text, image, tries);
},
retry_delay,
bot,
tries,
),
);
} else {
this.log.error(
`Request for tool ${bot.bot_name} failed after ${bot.max_retries} tries Text: ${text}`,
);
await this.setStateAsync(`Tools.${bot.bot_name}.request.state`, { val: "failed", ack: true });
return false;
}
} else {
this.log.info(
`Request for tool ${bot.bot_name} successful Text: ${text} Response: ${modelResponse.text}`,
);
await this.addMessagePairToHistory(
bot,
text,
image,
modelResponse.text,
modelResponse.tokens_input,
modelResponse.tokens_output,
modelResponse.model,
);
return modelResponse;
}
}
}
/**
* Starts a request for the specified model with the specified messages.
* Validates the request and returns the response data if the request was successful.
* Updates the statistics for the model with the response data.
* Logs the request and response data.
*
* @param model - The model name.
* @param messages - The messages to send to the model.
* @param system_prompt - The system prompt for the model.
* @param max_tokens - The maximum number of tokens to generate.
* @param temperature - The temperature for the model.
* @returns - Returns the response data if the request was successful, otherwise false.
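* @example
* // Hypothetical direct call from within the adapter; the model name is an assumption:
* // const response = await this.startModelRequest("gpt-4o-mini", [{ role: "user", content: "Hello" }]);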
*/
async startModelRequest(model, messages, system_prompt = null, max_tokens = 2000, temperature = 0.6) {
const modelDatapointName = this.stringToAlphaNumeric(model);
this.log.info(`Starting request for model: ${model} Messages: ${JSON.stringify(messages)}`);
const provider = this.getModelProvider(model);
if (provider) {
if (!provider.apiTokenCheck()) {
this.log.warn(`No API token set for provider ${provider.constructor.name}, can't start request!`);
return false;
}
await this.setStateAsync(`Models.${modelDatapointName}.request.state`, { val: "start", ack: true });
await this.setStateAsync(`Models.${modelDatapointName}.response.error`, { val: "", ack: true });
const request = {
model: model,
messages: messages,
max_tokens: max_tokens,
temperature: temperature,
system_prompt: system_prompt,
feedback_device: `Models.${modelDatapointName}`,
};
if (!this.validateRequest(request)) {
await this.setStateAsync(`Models.${modelDatapointName}.request.state`, {
val: "error",
ack: true,
});
await this.setStateAsync(`Models.${modelDatapointName}.response.error`, {
val: "Request Validation failed",
ack: true,
});
this.log.warn(`Request for Model ${model} failed validation, stopping request`);
return false;
}
const modelResponse = await provider.request(request);
modelResponse.requestData = provider.requestData;
modelResponse.responseData = provider.responseData;
if (modelResponse.error) {
await this.setStateAsync(`Models.${modelDatapointName}.request.state`, {
val: "error",
ack: true,
});
await this.setStateAsync(`Models.${modelDatapointName}.response.error`, {
val: modelResponse.error,
ack: true,
});
await this.setStateAsync(`Models.${modelDatapointName}.request.body`, {
val: JSON.stringify(modelResponse.requestData),
ack: true,
});
await this.setStateAsync(`Models.${modelDatapointName}.response.raw`, {
val: JSON.stringify(modelResponse.responseData),
ack: true,
});
} else {
await this.setStateAsync(`Models.${modelDatapointName}.request.state`, {
val: "success",
ack: true,
});
await this.setStateAsync(`Models.${modelDatapointName}.response.error`, { val: "", ack: true });
await this.setStateAsync(`Models.${modelDatapointName}.request.body`, {
val: JSON.stringify(modelResponse.requestData),
ack: true,
});
await this.setStateAsync(`Models.${modelDatapointName}.response.raw`, {
val: JSON.stringify(modelResponse.responseData),
ack: true,
});
await this.setStateAsync(`Models.${modelDatapointName}.text_response`, {
val: modelResponse.text,
ack: true,
});
this.updateModelStatistics(model, modelResponse);
}
return modelResponse;
}
}
/**
* Validates the request object and sets default values if necessary.
* Logs a warning if the request is invalid.
* Returns the validated request object or false if the request is invalid.
*
* @param requestObj - The request object.
* @param requestObj.model - The model name.
* @param requestObj.messages - The messages to send to the model.
* @param requestObj.feedback_device - The feedback device for the model.
* @param requestObj.max_tokens - The maximum number of tokens to generate.
* @param requestObj.temperature - The temperature for the model.
* @param requestObj.system_prompt - The system prompt for the model.
* @returns - The validated request object or false if the request is invalid.
*/
validateRequest(requestObj) {
if (!requestObj.model || requestObj.model == "") {
this.log.warn(`No model provided in request, validation failed`);
return false;
}
if (!requestObj.messages || requestObj.messages.length == 0) {
this.log.warn(`No messages provided in request, validation failed`);
return false;
}
if (!requestObj.feedback_device || requestObj.feedback_device == "") {
this.log.debug(`No path for feedback objects provided in request, using Model default`);
requestObj.feedback_device = `Models.${this.stringToAlphaNumeric(requestObj.model)}`;
}
if (!requestObj.max_tokens || requestObj.max_tokens == "") {
this.log.debug(`No max_tokens provided in request, using default value: 2000`);
requestObj.max_tokens = 2000;
}
if (!requestObj.temperature || requestObj.temperature == "") {
this.log.debug(`No temperature provided in request, using default value: 0.6`);
requestObj.temperature = 0.6;
}
if (!requestObj.system_prompt || requestObj.system_prompt.trim() == "") {
this.log.debug(`No system prompt provided in request`);
requestObj.system_prompt = null;
}
return requestObj;
}
/**
* Retrieves the message history for the specified bot.
* Validates the message history and returns an array of messages.
*
* @param bot - The bot configuration object.
* @returns Object - The validated message history object.
*/
async getValidatedMessageHistory(bot) {
bot.bot_name = this.stringToAlphaNumeric(bot.bot_name);
this.log.debug("Getting previous message pairs for request");
const validatedObject = { messages: [] };
const messageObject = await this.getStateAsync(`Tools.${bot.bot_name}.statistics.messages`);
if (messageObject && messageObject.val != null && messageObject.val != "") {
this.log.debug(`Message history object for ${bot.bot_name} found data: ${messageObject.val}`);
this.log.debug(`Trying to decode history json data: ${messageObject.val}`);
const messagesData = JSON.parse(messageObject.val);
if (messagesData && messagesData.messages && messagesData.messages.length > 0) {
for (const message of messagesData.messages) {
if (typeof bot.include_vision_in_history !== "undefined" && bot.include_vision_in_history) {
validatedObject.messages.push(message);
} else {
if (message.image) {
delete message.image;
}
validatedObject.messages.push(message);
}
}
}
this.log.debug(`Validated object: ${JSON.stringify(validatedObject)}`);
return validatedObject;
}
this.log.warn(`Message history object for ${bot.bot_name} not found`);
return validatedObject;
}
/**
* Adds a message pair to the message history for the specified bot.
*
* @param bot - The bot configuration object.
* @param user - The user message.
* @param image - The image data.
* @param assistant - The assistant response.
* @param tokens_input - The number of input tokens used in the request.
* @param tokens_output - The number of output tokens used in the response.
* @param model - The model name.
* @returns - Returns true if the message pair was added successfully, otherwise false.
*/
async addMessagePairToHistory(bot, user, image, assistant, tokens_input, tokens_output, model) {
if (bot.chat_history > 0) {
bot.bot_name = this.stringToAlphaNumeric(bot.bot_name);
const messagesData = await this.getValidatedMessageHistory(bot);
this.log.debug(`Adding to message object with data: ${JSON.stringify(messagesData)}`);
messagesData.messages.push({
user: user,
assistant: assistant,
image: image,
timestamp: Date.now(),
model: model,
tokens_input: tokens_input,
tokens_output: tokens_output,
});
this.log.debug(`New message object: ${JSON.stringify(messagesData)}`);
while (messagesData.messages.length > bot.chat_history) {
this.log.debug("Removing message entry because chat history too big");
messagesData.messages.shift();
}
this.log.debug(`Final message object: ${JSON.stringify(messagesData)}`);
await this.setStateAsync(`Tools.${bot.bot_name}.statistics.messages`, {
val: JSON.stringify(messagesData),
ack: true,
});
return true;
}
this.log.debug(`Chat history disabled for tool ${bot.bot_name}`);
return false;
}
/**
* Updates the statistics for the specified bot with the response data.
*
* @param bot - The bot configuration object.
* @param response - The response from the assistant.
* @param response.tokens_input - The number of input tokens used in the request.
* @param response.tokens_output - The number of output tokens used in the response.
*/
async updateBotStatistics(bot, response) {
bot.bot_name = this.stringToAlphaNumeric(bot.bot_name);
this.log.debug(`Updating statistics for tool ${bot.bot_name} with response: ${JSON.stringify(response)}`);
let input_tokens = await this.getStateAsync(`Tools.${bot.bot_name}.statistics.tokens_input`);
let output_tokens = await this.getStateAsync(`Tools.${bot.bot_name}.statistics.tokens_output`);
let requests_count = await this.getStateAsync(`Tools.${bot.bot_name}.statistics.requests_count`);
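// Accumulate token and request counters; missing or empty previous values are treated as zero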
if (!input_tokens || input_tokens.val == null || input_tokens.val == "") {
input_tokens = 0 + response.tokens_input;
} else {
input_tokens = input_tokens.val + response.tokens_input;
}
if (!output_tokens || output_tokens.val == null || output_tokens.val == "") {
output_tokens = 0 + response.tokens_output;
} else {
output_tokens = output_tokens.val + response.tokens_output;
}
if (!requests_count || requests_count.val == null || requests_count.val == "") {
requests_count = 0 + 1;
} else {
requests_count = parseInt(requests_count.val) + 1;
}
this.setStateAsync(`Tools.${bot.bot_name}.statistics.tokens_input`, { val: input_tokens, ack: true });
this.setStateAsync(`Tools.${bot.bot_name}.statistics.tokens_output`, { val: output_tokens, ack: true });
this.setStateAsync(`Tools.${bot.bot_name}.statistics.requests_count`, { val: requests_count, ack: true });
this.setStateAsync(`Tools.${bot.bot_name}.statistics.last_request`, {
val: new Date().toISOString(),
ack: true,
});
}
/**
* Converts a string to alphanumeric characters only.
*
* @param str - The string to convert.
* @returns - The converted string.
*/
stringToAlphaNumeric(str) {
return str.replace(/[^a-zA-Z0-9-_]/g, "");
}
/**
* Updates the statistics for the specified model with the response data.
*
* @param model - The model name.
* @param response - The response from the model.
* @param response.tokens_input - The number of input tokens used in the request.
* @param response.tokens_output - The number of output tokens used in the response.
*/
async updateModelStatistics(model, response) {
this.log.debug(`Updating model statistics for model ${model} with response: ${JSON.stringify(response)}`);
model = this.stringToAlphaNumeric(model);
let input_tokens = await this.getStateAsync(`Models.${model}.statistics.tokens_input`);
let output_tokens = await this.getStateAsync(`Models.${model}.statistics.tokens_output`);
let requests_count = await this.getStateAsync(`Models.${model}.statistics.requests_count`);
if (!input_tokens || input_tokens.val == null || input_tokens.val == "") {
input_tokens = 0 + response.tokens_input;
} else {
input_tokens = input_tokens.val + response.tokens_input;
}
if (!output_tokens || output_tokens.val == null || output_tokens.val == "") {
output_tokens = 0 + response.tokens_output;
} else {
output_tokens = output_tokens.val + response.tokens_output;
}
if (!requests_count || requests_count.val == null || requests_count.val == "") {
requests_count = 0 + 1;
} else {
requests_count = parseInt(requests_count.val) + 1;
}
this.setStateAsync(`Models.${model}.statistics.tokens_input`, { val: input_tokens, ack: true });
this.setStateAsync(`Models.${model}.statistics.tokens_output`, { val: output_tokens, ack: true });
this.setStateAsync(`Models.${model}.statistics.requests_count`, { val: requests_count, ack: true });
this.setStateAsync(`Models.${model}.statistics.last_request`, {
val: new Date().toISOString(),
ack: true,
});
}
/**
* Retrieves the available models from the configuration.
* Combines models from both Anthropic and OpenAI configurations.
*
* @returns An array of available models with their labels and values.
*/
getAvailableModels() {
const models = [];
if (this.config.anth_models) {
for (const model of this.config.anth_models) {
models.push({ label: `(Anthropic) ${model.model_name}`, value: model.model_name });
}
}
if (this.config.opai_models) {
for (const model of this.config.opai_models) {
models.push({ label: `(OpenAI) ${model.model_name}`, value: model.model_name });
}
}
if (this.config.custom_models) {
for (const model of this.config.custom_models) {
models.push({ label: `(Custom) ${model.model_name}`, value: model.model_name });
}
}
if (this.config.pplx_models) {
for (const model of this.config.pplx_models) {
models.push({ label: `(Perplexity) ${model.model_name}`, value: model.model_name });
}
}
if (this.config.oprt_models) {
for (const model of this.config.oprt_models) {
models.push({ label: `(OpenRouter) ${model.model_name}`, value: model.model_name });
}
}
if (this.config.deep_models) {
for (const model of this.config.deep_models) {
models.push({ label: `(Deepseek) ${model.model_name}`, value: model.model_name });
}
}
return models;
}
/**
* Retrieves the provider for the specified model.
* Checks the configuration for the model and returns the provider.
* If no provider is found, a warning is logged and null is returned.
* If a provider is found, the provider is returned.
* Providers are checked in the following order: Anthropic, OpenAI, Custom, Perplexity, OpenRouter, Deepseek.
*
* @param requestedModel - The model name to get the provider for.
* @returns - The provider for the specified model.
*/
getModelProvider(requestedModel) {
this.log.debug(`Getting provider for Model ${requestedModel}`);
const anth_models = this.config.anth_models;
const opai_models = this.config.opai_models;
const pplx_models = this.config.pplx_models;
const oprt_models = this.config.oprt_models;
const deep_models = this.config.deep_models;
const custom_models = this.config.custom_models;
if (anth_models) {
for (const model of anth_models) {
if (model.model_name == requestedModel && model.model_active) {
this.log.debug(`Provider for Model ${model.model_name} is Anthropic`);
return new AnthropicAiProvider(this);
}
}
}
if (opai_models) {
for (const model of opai_models) {
if (model.model_name == requestedModel && model.model_active) {
this.log.debug(`Provider for Model ${model.model_name} is OpenAI`);
return new OpenAiProvider(this);
}
}
}
if (custom_models) {
for (const model of custom_models) {
if (model.model_name == requestedModel && model.model_active) {
this.log.debug(`Provider for Model ${model.model_name} is Custom/Selfhosted`);
return new CustomAiProvider(this);
}
}
}
if (pplx_models) {
for (const model of pplx_models) {
if (model.model_name == requestedModel && model.model_active) {
this.log.debug(`Provider for Model ${model.model_name} is Perplexity`);
return new PerplexityAiProvider(this);
}
}
}
if (oprt_models) {
for (const model of oprt_models) {
if (model.model_name == requestedModel && model.model_active) {
this.log.debug(`Provider for Model ${model.model_name} is OpenRouter`);
return new OpenRouterAiProvider(this);
}
}
}
if (deep_models) {
for (const model of deep_models) {
if (model.model_name == requestedModel && model.model_active) {
this.log.debug(`Provider for Model ${model.model_name} is Deepseek`);
return new DeepseekAiProvider(this);
}
}
}
this.log.warn(`No provider found for model ${requestedModel}`);
return null;
}
/**
* Fetches an image from the specified URL and returns it as a base64 string.
*
* @param url - The URL of the image to fetch.
* @returns - The fetched image as a base64 string
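* @example
* // Hypothetical call; the local file path is an assumption:
* // const image = await this.fetchImageAsBase64("/opt/iobroker/snapshot.jpg");
* // if (image.success) { console.log(image.base64withMime.substring(0, 50)); }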
*/
async fetchImageAsBase64(url) {
const responseObject = { mimeType: "", base64: "", base64withMime: "", url: url, success: false };
if (!url || url.trim() == "") {
this.log.warn("Empty or invalid URL for image fetch");
return responseObject;
}
// Check if url or local file
if (url.startsWith("https://") || url.startsWith("http://")) {
this.log.debug(`Fetching image from URL: ${url}`);
try {
const response = await axios.get(url, { responseType: "arraybuffer", timeout: 5000 });
if (response.status !== 200) {
this.log.warn(`Failed to fetch image from ${url} with status: ${response.status}`);
return responseObject;
}
const mimeType = response.headers["content-type"];
if (!mimeType || !mimeType.includes("image")) {
this.log.warn(`Response from ${url} is not an image, mimeType: ${mimeType}`);
return responseObject;
}
const buffer = response.data;
if (!buffer) {
this.log.warn(`Failed to fetch image from ${url} as array buffer`);
return responseObject;
}
const base64 = Buffer.from(buffer, "binary").toString("base64");
if (!base64) {
this.log.warn(`Failed to fetch image from ${url} as base64`);
return responseObject;
}
if (mimeType && base64) {
this.log.info(`Fetched image from ${url} as base64, mimeType: ${mimeType}`);
responseObject.mimeType = mimeType;
responseObject.base64 = base64;
responseObject.base64withMime = `data:${mimeType};base64,${base64}`;
responseObject.success = true;
}
} catch (e) {
this.log.error(`Failed to fetch image from ${url} with error: ${e}`);
}
} else {
// Read file from filesystem
try {
this.log.debug(`Reading image file: ${url}`);
const file = fs.readFileSync(url);
if (!file) {
this.log.warn(`Failed to read image file from ${url}`);
return responseObject;
}
const mimeType = mime.lookup(url);
if (!mimeType || !mimeType.includes("image")) {
this.log.warn(`File ${url} is not an image, mimeType: ${mimeType}`);
return responseObject;
}
const base64 = Buffer.from(file).toString("base64");
this.log.info(`Read image file ${url} as base64, mimeType: ${mimeType}`);
responseObject.mimeType = mimeType;
responseObject.base64 = base64;
responseObject.base64withMime = `data:${mimeType};base64,${base64}`;
responseObject.success = true;
} catch (e) {
this.log.error(`Failed to read image file ${url} with error: ${e}`);
}
}
return responseObject;
}