/**
 * express-ai-error-handler
 * AI-powered error handler for Express with AI failover support.
 */
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const generative_ai_1 = require("@google/generative-ai");
/**
 * AI Error Handler Class
 * Handles API errors by generating intelligent suggestions using GoogleGenerativeAI.
 * Designed for Express applications; rotates through multiple API keys so a
 * failing key automatically fails over to the next one.
 */
class AIErrorHandler {
    apiKeys; // pool of GoogleGenerativeAI keys used for failover
    currentKeyIndex; // index of the key to try on the next request
    options; // merged configuration (defaults + user overrides)
    /**
     * Initializes AIErrorHandler with API keys and configuration options.
     * @param {string[]} apiKeys - Array of API keys for GoogleGenerativeAI.
     * @param {Partial<{ defaultMessage: string; model: string; maxTokens: number; disableLogging: boolean }>} [options] - Configuration options.
     * @throws {Error} If no API keys are provided.
     */
    constructor(apiKeys, options = {}) {
        if (!Array.isArray(apiKeys) || apiKeys.length === 0) {
            throw new Error("At least one API key is required.");
        }
        this.apiKeys = apiKeys;
        this.currentKeyIndex = 0;
        this.options = {
            defaultMessage: "Check API documentation.",
            model: "gemini-1.5-flash",
            maxTokens: 50,
            disableLogging: false, // documented option; previously never defaulted or honored
            ...options,
        };
    }
    // Logging helpers that honor the `disableLogging` option.
    #log(...args) {
        if (!this.options.disableLogging) {
            console.log(...args);
        }
    }
    #logError(...args) {
        if (!this.options.disableLogging) {
            console.error(...args);
        }
    }
    /**
     * Generates a smart AI-powered response to an API error message.
     * Tries each API key at most once, rotating to the next key on failure.
     * @param {string} errorMessage - The error message encountered by the API user.
     * @returns {Promise<string>} AI-generated suggestion, "Service unavailable!" when the
     *   model returns no response, or `options.defaultMessage` once every key has failed.
     */
    async generateSmartResponse(errorMessage) {
        const maxRetries = this.apiKeys.length;
        for (let attempt = 0; attempt < maxRetries; attempt++) {
            const apiKey = this.apiKeys[this.currentKeyIndex];
            try {
                const prompt = `A user encountered this API error: "${errorMessage}". suggest the correct solution to the error.`;
                // Bug fix: `maxTokens` was accepted but never forwarded to the model.
                // Also dropped `responseMimeType: "application/json"` + JSON.parse —
                // the prompt never asked for JSON, so parsing routinely threw and
                // burned a failover attempt, and a successful parse returned an
                // object, breaking the documented Promise<string> contract.
                const generationConfig = { maxOutputTokens: this.options.maxTokens };
                const googleAI = new generative_ai_1.GoogleGenerativeAI(apiKey);
                const model = googleAI.getGenerativeModel({ model: this.options.model, generationConfig });
                const result = await model.generateContent(prompt);
                if (!result?.response) {
                    this.#log("No response received from the AI model. || Service Unavailable");
                    return "Service unavailable!";
                }
                return result.response.text();
            }
            catch (error) {
                const detail = error instanceof Error ? error.message : error;
                this.#logError(`API error with key ${this.currentKeyIndex}:`, detail);
                // Failover: rotate to the next key before the next attempt.
                this.currentKeyIndex = (this.currentKeyIndex + 1) % this.apiKeys.length;
            }
        }
        return this.options.defaultMessage;
    }
    /**
     * Express middleware for handling errors and providing AI-generated suggestions.
     * @returns {Function} Express error-handling middleware.
     * @example
     * import express from 'express';
     * import AIErrorHandler from 'express-ai-error-handler';
     *
     * const app = express();
     * const errorHandler = new AIErrorHandler(["your-api-key"], { model: "gemini-1.5-flash", maxTokens: 100 });
     *
     * app.use(errorHandler.middleware());
     */
    middleware() {
        // Bug fix: Express only treats a handler as ERROR middleware when its
        // arity is exactly 4 — the original (err, _req, res) signature was
        // registered as a regular request handler and never ran on errors.
        return async (err, _req, res, next) => {
            try {
                const suggestion = await this.generateSmartResponse(err.message);
                this.#log("Ai said this ", suggestion);
                // If a response has already started, delegate to Express'
                // default error handler instead of writing headers twice.
                if (res.headersSent) {
                    return next(err);
                }
                res.status(500).json({ error: err.message, suggestion });
            }
            catch (handlerError) {
                // Never leave the request hanging if suggestion generation blows up.
                next(handlerError);
            }
        };
    }
}
// CommonJS default export: consumers get the class via `require(...).default`,
// or via ESM default-import interop (the `__esModule` flag set at the top of the file).
exports.default = AIErrorHandler;
/**
* Usage:
* Install the package via npm:
* ```sh
* npm install express-ai-error-handler
* ```
*
* Import and use it in an Express app:
* ```ts
* import express from 'express';
* import AIErrorHandler from 'express-ai-error-handler';
*
* const app = express();
* const errorHandler = new AIErrorHandler(["your-api-key"], {
* defaultMessage: "Something went wrong. Please try again.",
* model: "gemini-1.5-flash",
* maxTokens: 100,
* disableLogging: false,
* });
*
* app.use(errorHandler.middleware());
*
* app.listen(3000, () => console.log("Server running on port 3000"));
* ```
*
* Configuration options:
* - `defaultMessage`: Custom default error message if AI response fails.
* - `model`: The AI model to use (default: "gemini-1.5-flash").
* - `maxTokens`: Maximum number of tokens in AI response (default: 50).
* - `disableLogging`: Set to `true` to disable logging (default: `false`).
*/