// intellinode
// Create AI agents using the latest models, including ChatGPT, Llama, Diffusion, Cohere, Gemini, and Hugging Face.
(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.IntelliNode = f()}})(function(){var define,module,exports;return (function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i<t.length;i++)o(t[i]);return o}return r})()({1:[function(require,module,exports){
module.exports={
"url": {
"intellicloud": {
"base": "https://ue8sdr9bij.execute-api.us-east-2.amazonaws.com/v1",
"semantic_search": "/semantic_search/"
},
"openai": {
"base": "https://api.openai.com",
"completions": "/v1/completions",
"chatgpt": "/v1/chat/completions",
"responses": "/v1/responses",
"imagegenerate": "/v1/images/generations",
"embeddings": "/v1/embeddings",
"audiotranscriptions": "/v1/audio/transcriptions",
"audiospeech": "/v1/audio/speech",
"files": "/v1/files",
"finetuning": "/v1/fine_tuning/jobs",
"organization": null
},
"azure_openai": {
"base": "https://{resource-name}.openai.azure.com/openai",
"completions": "/deployments/{deployment-id}/completions?api-version={api-version}",
"chatgpt": "/deployments/{deployment-id}/chat/completions?api-version={api-version}",
"responses": "/deployments/{deployment-id}/responses?api-version={api-version}",
"imagegenerate": "/images/generations:submit?api-version={api-version}",
"embeddings": "/deployments/{deployment-id}/embeddings?api-version={api-version}",
"audiotranscriptions": "/deployments/{deployment-id}/audio/transcriptions?api-version={api-version}",
"audiospeech": "/deployments/{deployment-id}/audio/speech?api-version={api-version}",
"files": "/files?api-version={api-version}",
"finetuning": "/fine_tuning/jobs?api-version={api-version}"
},
"cohere": {
"base": "https://api.cohere.ai",
"completions": "/generate",
"embed": "/v1/embed",
"version": "2022-12-06"
},
"google": {
"base": "https://{1}.googleapis.com/v1/",
"speech": {
"prefix": "texttospeech",
"synthesize": {
"postfix": "text:synthesize"
}
}
},
"stability": {
"base": "https://api.stability.ai",
"text_to_image": "/v1/generation/{1}/text-to-image",
"upscale": "/v1/generation/{1}/image-to-image/upscale",
"image_to_image": "/v1/generation/{1}/image-to-image",
"inpaint": "/v2beta/stable-image/edit/inpaint",
"outpaint": "/v2beta/stable-image/edit/outpaint",
"image_to_video": "/v2beta/image-to-video",
"fetch_video": "/v2beta/image-to-video/result/",
"control_sketch": "/v2beta/stable-image/control/sketch",
"control_structure": "/v2beta/stable-image/control/structure",
"control_style": "/v2beta/stable-image/control/style"
},
"huggingface": {
"base": "https://api-inference.huggingface.co/models"
},
"replicate": {
"base": "https://api.replicate.com",
"predictions": "/v1/predictions"
},
"mistral": {
"base": "https://api.mistral.ai",
"completions": "/v1/chat/completions",
"embed": "/v1/embeddings"
},
"gemini": {
"base": "https://generativelanguage.googleapis.com/v1beta/models/",
"contentEndpoint": "gemini-pro:generateContent",
"visionEndpoint": "gemini-pro-vision:generateContent",
"embeddingEndpoint": "embedding-001:embedContent",
"batchEmbeddingEndpoint": "embedding-001:batchEmbedContents"
},
"anthropic": {
"base": "https://api.anthropic.com",
"messages": "/v1/messages",
"version": "2023-06-01"
},
"nvidia": {
"base": "https://integrate.api.nvidia.com",
"chat": "/v1/chat/completions",
"retrieval": "/v1/retrieval",
"version": "v1"
}
},
"models": {
"replicate": {
"llama": {
"70b": "70b-chat",
"13b": "13b-chat",
"70b-chat": "70b-chat",
"13b-chat": "13b-chat",
"34b-code": "34b-code",
"34b-python": "34b-python",
"13b-code-instruct": "13b-code-instruct",
"llama-2-13b-embeddings": "llama-2-13b-embeddings",
"70b-chat-version": "02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
"13b-chat-version": "f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d",
"34b-code-version": "efbd2ef6feefb242f359030fa6fe08ce32bfced18f3868b2915db41d41251b46",
"34b-python-version": "482ba325daab209d121f45a0030f2f3ed942df98b185d41635ab3f19165a3547",
"13b-code-instruct-version": "ca8c51bf3c1aaf181f9df6f10f31768f065c9dddce4407438adc5975a59ce530",
"llama-2-13b-embeddings-version": "7115a4c65b86815e31412e53de1211c520164c190945a84c425b59dccbc47148"
}
}
}
}
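/*
Usage sketch (assumed behavior): the wrapper classes later in this bundle
resolve the templated endpoints above by substituting the {placeholders}.
With hypothetical Azure values:

  const base = config.url.azure_openai.base.replace('{resource-name}', 'my-resource');
  const path = config.url.azure_openai.chatgpt
    .replace('{deployment-id}', 'my-deployment')
    .replace('{api-version}', '2024-02-01');
  // => https://my-resource.openai.azure.com/openai/deployments/my-deployment/chat/completions?api-version=2024-02-01
*/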
},{}],2:[function(require,module,exports){
const OpenAIWrapper = require('../wrappers/OpenAIWrapper');
const CohereAIWrapper = require('../wrappers/CohereAIWrapper');
const ReplicateWrapper = require('../wrappers/ReplicateWrapper');
const GeminiAIWrapper = require('../wrappers/GeminiAIWrapper');
const EmbedInput = require('../model/input/EmbedInput');
const VLLMWrapper = require('../wrappers/VLLMWrapper');
const NvidiaWrapper = require('../wrappers/NvidiaWrapper');
const SupportedEmbedModels = {
OPENAI: 'openai',
COHERE: 'cohere',
REPLICATE: 'replicate',
GEMINI: 'gemini',
NVIDIA: 'nvidia',
VLLM: "vllm"
};
class RemoteEmbedModel {
constructor(keyValue, provider, customProxyHelper = null) {
if (!provider) {
provider = SupportedEmbedModels.OPENAI;
}
const supportedModels = this.getSupportedModels();
if (supportedModels.includes(provider)) {
this.initiate(keyValue, provider, customProxyHelper);
} else {
const models = supportedModels.join(' - ');
throw new Error(`The received provider is not supported. Send any provider from: ${models}`);
}
}
initiate(keyValue, keyType, customProxyHelper = null) {
this.keyType = keyType;
if (keyType === SupportedEmbedModels.OPENAI) {
this.openaiWrapper = new OpenAIWrapper(keyValue, customProxyHelper);
} else if (keyType === SupportedEmbedModels.COHERE) {
this.cohereWrapper = new CohereAIWrapper(keyValue);
} else if (keyType === SupportedEmbedModels.REPLICATE) {
this.replicateWrapper = new ReplicateWrapper(keyValue);
} else if (keyType === SupportedEmbedModels.GEMINI) {
this.geminiWrapper = new GeminiAIWrapper(keyValue);
} else if (keyType === SupportedEmbedModels.NVIDIA) {
this.nvidiaWrapper = new NvidiaWrapper(keyValue, customProxyHelper);
} else if (keyType === SupportedEmbedModels.VLLM) {
if (!customProxyHelper || !customProxyHelper.baseUrl) {
throw new Error("VLLM requires 'baseUrl' in the customProxyHelper parameter.");
}
this.vllmWrapper = new VLLMWrapper(customProxyHelper.baseUrl);
} else {
throw new Error('Invalid provider name');
}
}
getSupportedModels() {
return Object.values(SupportedEmbedModels);
}
async getEmbeddings(embedInput) {
let inputs;
if (embedInput instanceof EmbedInput) {
if (this.keyType === SupportedEmbedModels.OPENAI) {
inputs = embedInput.getOpenAIInputs();
} else if (this.keyType === SupportedEmbedModels.COHERE) {
inputs = embedInput.getCohereInputs();
} else if (this.keyType === SupportedEmbedModels.REPLICATE) {
inputs = embedInput.getLlamaReplicateInput();
} else if (this.keyType === SupportedEmbedModels.GEMINI) {
inputs = embedInput.getGeminiInputs();
} else if (this.keyType === SupportedEmbedModels.NVIDIA) {
inputs = embedInput.getNvidiaInputs();
} else if (this.keyType === SupportedEmbedModels.VLLM) {
inputs = embedInput.getVLLMInputs();
} else {
throw new Error('The keyType is not supported');
}
} else if (typeof embedInput === 'object') {
inputs = embedInput;
} else {
throw new Error('Invalid input: Must be an instance of EmbedInput or a dictionary');
}
if (this.keyType === SupportedEmbedModels.OPENAI) {
const results = await this.openaiWrapper.getEmbeddings(inputs);
return results.data;
} else if (this.keyType === SupportedEmbedModels.COHERE) {
const results = await this.cohereWrapper.getEmbeddings(inputs);
let embeddings = results.embeddings;
embeddings = embeddings.map((embedding, index) => ({
object: "embedding",
index: index,
embedding: embedding
}));
return embeddings;
} else if (this.keyType === SupportedEmbedModels.REPLICATE) {
const prediction = await this.replicateWrapper.predict('replicate', inputs);
// Return a Promise that resolves with unified embedding result
return new Promise((resolve, reject) => {
const poll = setInterval(async () => {
try {
const status = await this.replicateWrapper.getPredictionStatus(prediction.id);
if (status.status === 'succeeded' || status.status === 'failed') {
clearInterval(poll); // Stop polling
if (status.status === 'succeeded') {
let embeddings = status.output;
embeddings = embeddings.map((embedding, index) => ({
object: "embedding",
index: index,
embedding: embedding
}));
resolve(embeddings);
} else {
reject(new Error('Replicate prediction failed: ' + status.error));
}
}
} catch (error) {
clearInterval(poll);
reject(new Error('Error while polling for Replicate prediction status: ' + error.message));
}
}, 1000);
});
} else if (this.keyType === SupportedEmbedModels.GEMINI) {
return await this.geminiWrapper.getEmbeddings(inputs);
} else if (this.keyType === SupportedEmbedModels.NVIDIA) {
const result = await this.nvidiaWrapper.generateRetrieval(inputs);
return Array.isArray(result) ? result : [];
} else if (this.keyType === SupportedEmbedModels.VLLM) {
const results = await this.vllmWrapper.getEmbeddings(inputs.texts);
return results.embeddings.map((embedding, index) => ({
object: "embedding",
index: index,
embedding: embedding
}));
} else {
throw new Error('The keyType is not supported');
}
}
}
module.exports = {
RemoteEmbedModel,
SupportedEmbedModels,
};
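/*
Usage sketch (illustrative only, not executed by the bundle). The API key and
model name are placeholders; this assumes the package root re-exports these
classes, as the npm build does:

  const { RemoteEmbedModel, SupportedEmbedModels, EmbedInput } = require('intellinode');

  const embedModel = new RemoteEmbedModel('YOUR_OPENAI_KEY', SupportedEmbedModels.OPENAI);
  const input = new EmbedInput({ texts: ['hello world'], model: 'text-embedding-3-small' });
  embedModel.getEmbeddings(input).then((results) => {
    // each item has the unified shape { object: 'embedding', index, embedding: [...] }
    console.log(results[0].embedding.length);
  });
*/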
},{"../model/input/EmbedInput":14,"../wrappers/CohereAIWrapper":43,"../wrappers/GeminiAIWrapper":44,"../wrappers/OpenAIWrapper":50,"../wrappers/ReplicateWrapper":51,"../wrappers/VLLMWrapper":53}],3:[function(require,module,exports){
/*
Apache License
Copyright 2023 Github.com/Barqawiz/IntelliNode
Licensed under the Apache License, Version 2.0 (the "License");
*/
const OpenAIWrapper = require('../wrappers/OpenAIWrapper');
const FineTuneInput = require('../model/input/FineTuneInput');
const SupportedFineTuneModels = {
OPENAI: 'openAi',
};
class RemoteFineTuneModel {
constructor(keyValue, provider) {
if (!provider) {
provider = SupportedFineTuneModels.OPENAI;
}
const supportedModels = this.getSupportedModels();
if (supportedModels.includes(provider)) {
this.initiate(keyValue, provider);
} else {
const models = supportedModels.join(' - ');
throw new Error(`The received provider is not supported. Send any provider from: ${models}`);
}
}
initiate(keyValue, keyType) {
this.keyType = keyType;
if (keyType === SupportedFineTuneModels.OPENAI) {
this.openAIWrapper = new OpenAIWrapper(keyValue);
} else {
throw new Error('Invalid provider name');
}
}
getSupportedModels() {
return Object.values(SupportedFineTuneModels);
}
async generateFineTune(input) {
if (this.keyType === SupportedFineTuneModels.OPENAI) {
let params;
if (input instanceof FineTuneInput) {
params = input.getOpenAIInput();
} else if (typeof input === 'object') {
params = input;
} else {
throw new Error('Invalid input: Must be an instance of FineTuneInput or a dictionary');
}
const response = await this.openAIWrapper.storeFineTuningData(params);
return response;
} else {
throw new Error('The keyType is not supported');
}
}
async listFineTune(input) {
if (this.keyType === SupportedFineTuneModels.OPENAI) {
const response = await this.openAIWrapper.listFineTuningData(input);
return response;
} else {
throw new Error('The keyType is not supported');
}
}
async uploadFile(filePayload) {
if (this.keyType === SupportedFineTuneModels.OPENAI) {
return await this.openAIWrapper.uploadFile(filePayload);
} else {
throw new Error('The keyType is not supported');
}
}
}
module.exports = {
RemoteFineTuneModel,
SupportedFineTuneModels,
};
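/*
Usage sketch (illustrative only). generateFineTune also accepts a plain
dictionary, per the typeof check above; the key and file id are placeholders:

  const { RemoteFineTuneModel, SupportedFineTuneModels } = require('intellinode');

  const tuner = new RemoteFineTuneModel('YOUR_OPENAI_KEY', SupportedFineTuneModels.OPENAI);
  tuner.generateFineTune({ training_file: 'file-abc123', model: 'gpt-3.5-turbo' })
    .then((job) => console.log(job.id));
*/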
},{"../model/input/FineTuneInput":15,"../wrappers/OpenAIWrapper":50}],4:[function(require,module,exports){
/*
Apache License
Copyright 2023 Github.com/Barqawiz/IntelliNode
*/
const SupportedImageModels = {
OPENAI: "openai",
STABILITY: "stability",
};
const OpenAIWrapper = require("../wrappers/OpenAIWrapper");
const StabilityAIWrapper = require("../wrappers/StabilityAIWrapper");
const ImageModelInput = require("../model/input/ImageModelInput");
class RemoteImageModel {
constructor(keyValue, provider) {
if (!provider) {
provider = SupportedImageModels.OPENAI;
}
const supportedModels = RemoteImageModel.getSupportedModels();
if (supportedModels.includes(provider)) {
this.initiate(keyValue, provider);
} else {
const models = supportedModels.join(" - ");
throw new Error(
`The received provider is not supported. Send any provider from: ${models}`
);
}
}
initiate(keyValue, keyType) {
this.keyType = keyType;
if (keyType === SupportedImageModels.OPENAI) {
this.openaiWrapper = new OpenAIWrapper(keyValue);
} else if (keyType === SupportedImageModels.STABILITY) {
this.stabilityWrapper = new StabilityAIWrapper(keyValue);
} else {
throw new Error("Invalid provider name");
}
}
static getSupportedModels() {
return Object.values(SupportedImageModels);
}
async generateImages(imageInput) {
let inputs;
if (imageInput instanceof ImageModelInput) {
if (this.keyType === SupportedImageModels.OPENAI) {
inputs = imageInput.getOpenAIInputs();
} else if (this.keyType === SupportedImageModels.STABILITY) {
inputs = imageInput.getStabilityInputs();
} else {
throw new Error("The keyType is not supported");
}
} else if (typeof imageInput === "object") {
inputs = imageInput;
} else {
throw new Error(
"Invalid input: Must be an instance of ImageModelInput or a dictionary"
);
}
if (this.keyType === SupportedImageModels.OPENAI) {
const results = await this.openaiWrapper.generateImages(inputs);
return results.data.map((data) => {
if (data.url) {
return data.url;
} else if (data.b64_json) {
return data.b64_json;
} else {
throw new Error('Unexpected image data format');
}
});
} else if (this.keyType === SupportedImageModels.STABILITY) {
const results = await this.stabilityWrapper.generateImageDispatcher(inputs);
return results.artifacts.map((imageObj) => imageObj.base64);
} else {
throw new Error("The keyType is not supported");
}
}
}
module.exports = {
RemoteImageModel,
SupportedImageModels,
};
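/*
Usage sketch (illustrative only). generateImages accepts a plain dictionary
matching the provider's request body; the key and prompt are placeholders:

  const { RemoteImageModel, SupportedImageModels } = require('intellinode');

  const imageModel = new RemoteImageModel('YOUR_OPENAI_KEY', SupportedImageModels.OPENAI);
  imageModel.generateImages({ prompt: 'a watercolor fox', n: 1, size: '512x512' })
    .then((images) => console.log(images[0])); // a URL or base64 string, per the mapping above
*/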
},{"../model/input/ImageModelInput":17,"../wrappers/OpenAIWrapper":50,"../wrappers/StabilityAIWrapper":52}],5:[function(require,module,exports){
/*
Apache License
Copyright 2023 Github.com/Barqawiz/IntelliNode
Licensed under the Apache License, Version 2.0 (the "License");
*/
const OpenAIWrapper = require('../wrappers/OpenAIWrapper');
const CohereAIWrapper = require('../wrappers/CohereAIWrapper');
const LanguageModelInput = require('../model/input/LanguageModelInput');
const SupportedLangModels = {
OPENAI: 'openai',
COHERE: 'cohere',
};
class RemoteLanguageModel {
constructor(keyValue, provider) {
if (!provider) {
provider = SupportedLangModels.OPENAI;
}
const supportedModels = RemoteLanguageModel.getSupportedModels();
if (supportedModels.includes(provider)) {
this.initiate(keyValue, provider);
} else {
const models = supportedModels.join(' - ');
throw new Error(`The received provider is not supported. Send any provider from: ${models}`);
}
}
initiate(keyValue, keyType) {
this.keyType = keyType;
if (keyType === SupportedLangModels.OPENAI) {
this.openaiWrapper = new OpenAIWrapper(keyValue);
} else if (keyType === SupportedLangModels.COHERE) {
this.cohereWrapper = new CohereAIWrapper(keyValue);
} else {
throw new Error('Invalid provider name');
}
}
static getSupportedModels() {
return Object.values(SupportedLangModels);
}
async generateText(langInput) {
let inputs;
if (langInput instanceof LanguageModelInput) {
if (this.keyType === SupportedLangModels.OPENAI) {
inputs = langInput.getOpenAIInputs();
} else if (this.keyType === SupportedLangModels.COHERE) {
inputs = langInput.getCohereInputs();
} else {
throw new Error('The keyType is not supported');
}
} else if (typeof langInput === 'object') {
inputs = langInput;
} else {
throw new Error('Invalid input: Must be an instance of LanguageModelInput or a dictionary');
}
if (this.keyType === SupportedLangModels.OPENAI) {
const results = await this.openaiWrapper.generateText(inputs);
return results.choices.map((choice) => choice.text);
} else if (this.keyType === SupportedLangModels.COHERE) {
const results = await this.cohereWrapper.generateText(inputs);
return results.generations.map((generation) => generation.text);
} else {
throw new Error('The keyType is not supported');
}
}
}
module.exports = {
RemoteLanguageModel,
SupportedLangModels,
};
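/*
Usage sketch (illustrative only; the key is a placeholder). setDefaultValues
is used the same way by the Gen helpers later in this bundle:

  const { RemoteLanguageModel, SupportedLangModels, LanguageModelInput } = require('intellinode');

  const langModel = new RemoteLanguageModel('YOUR_COHERE_KEY', SupportedLangModels.COHERE);
  const input = new LanguageModelInput({ prompt: 'Write a haiku about rivers' });
  input.setDefaultValues(SupportedLangModels.COHERE, 120);
  langModel.generateText(input).then((texts) => console.log(texts[0]));
*/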
},{"../model/input/LanguageModelInput":18,"../wrappers/CohereAIWrapper":43,"../wrappers/OpenAIWrapper":50}],6:[function(require,module,exports){
/*
Apache License
Copyright 2023 Github.com/Barqawiz/IntelliNode
Licensed under the Apache License, Version 2.0 (the "License");
*/
const GoogleAIWrapper = require('../wrappers/GoogleAIWrapper');
const OpenAIWrapper = require('../wrappers/OpenAIWrapper');
const Text2SpeechInput = require('../model/input/Text2SpeechInput');
const SupportedSpeechModels = {
GOOGLE: 'google',
OPENAI: 'openAi',
};
class RemoteSpeechModel {
constructor(keyValue, provider) {
if (!provider) {
provider = SupportedSpeechModels.GOOGLE;
}
const supportedModels = this.getSupportedModels();
if (supportedModels.includes(provider)) {
this.initiate(keyValue, provider);
} else {
const models = supportedModels.join(' - ');
throw new Error(`The received provider is not supported. Send any provider from: ${models}`);
}
}
initiate(keyValue, keyType) {
this.keyType = keyType;
if (keyType === SupportedSpeechModels.GOOGLE) {
this.googleWrapper = new GoogleAIWrapper(keyValue);
} else if (keyType === SupportedSpeechModels.OPENAI) {
this.openAIWrapper = new OpenAIWrapper(keyValue);
} else {
throw new Error('Invalid provider name');
}
}
getSupportedModels() {
return Object.values(SupportedSpeechModels);
}
async generateSpeech(input) {
if (this.keyType === SupportedSpeechModels.GOOGLE) {
let params;
if (input instanceof Text2SpeechInput) {
params = input.getGoogleInput();
} else if (typeof input === 'object') {
params = input;
} else {
throw new Error('Invalid input: Must be an instance of Text2SpeechInput or a dictionary');
}
const response = await this.googleWrapper.generateSpeech(params);
return response.audioContent;
} else if (this.keyType === SupportedSpeechModels.OPENAI) {
let params;
if (input instanceof Text2SpeechInput) {
params = input.getOpenAIInput();
} else if (typeof input === 'object') {
params = input;
} else {
throw new Error('Invalid input: Must be an instance of Text2SpeechInput or a dictionary');
}
const response = await this.openAIWrapper.textToSpeech(params);
return response;
} else {
throw new Error('The keyType is not supported');
}
}
}
module.exports = {
RemoteSpeechModel,
SupportedSpeechModels,
};
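/*
Usage sketch (illustrative only; the key is a placeholder and the input
fields are assumed):

  const { RemoteSpeechModel, SupportedSpeechModels, Text2SpeechInput } = require('intellinode');

  const speechModel = new RemoteSpeechModel('YOUR_GOOGLE_KEY', SupportedSpeechModels.GOOGLE);
  const input = new Text2SpeechInput({ text: 'Welcome to IntelliNode', language: 'en-US' });
  speechModel.generateSpeech(input).then((audioContent) => {
    // audioContent is the base64 audio payload returned by Google (response.audioContent)
  });
*/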
},{"../model/input/Text2SpeechInput":19,"../wrappers/GoogleAIWrapper":45,"../wrappers/OpenAIWrapper":50}],7:[function(require,module,exports){
/*
Apache License
Copyright 2023 Github.com/Barqawiz/IntelliNode
Licensed under the Apache License, Version 2.0 (the "License");
*/
const OpenAIWrapper = require("../wrappers/OpenAIWrapper");
const ReplicateWrapper = require('../wrappers/ReplicateWrapper');
const AWSEndpointWrapper = require('../wrappers/AWSEndpointWrapper');
const { GPTStreamParser, CohereStreamParser, VLLMStreamParser } = require('../utils/StreamParser');
const CohereAIWrapper = require('../wrappers/CohereAIWrapper');
const IntellicloudWrapper = require("../wrappers/IntellicloudWrapper");
const MistralAIWrapper = require('../wrappers/MistralAIWrapper');
const GeminiAIWrapper = require('../wrappers/GeminiAIWrapper');
const AnthropicWrapper = require('../wrappers/AnthropicWrapper');
const SystemHelper = require("../utils/SystemHelper");
const NvidiaWrapper = require("../wrappers/NvidiaWrapper");
const VLLMWrapper = require('../wrappers/VLLMWrapper');
const {
ChatGPTInput,
ChatModelInput,
ChatGPTMessage,
ChatLLamaInput,
LLamaReplicateInput,
CohereInput,
LLamaSageInput,
MistralInput,
GeminiInput,
AnthropicInput,
NvidiaInput,
VLLMInput
} = require("../model/input/ChatModelInput");
const SupportedChatModels = {
OPENAI: "openai",
REPLICATE: "replicate",
SAGEMAKER: "sagemaker",
COHERE: "cohere",
MISTRAL: "mistral",
GEMINI: "gemini",
ANTHROPIC: "anthropic",
NVIDIA: "nvidia",
VLLM: "vllm"
};
class Chatbot {
constructor(keyValue, provider = SupportedChatModels.OPENAI, customProxyHelper = null, options = {}) {
const supportedModels = this.getSupportedModels();
if (supportedModels.includes(provider)) {
this.initiate(keyValue, provider, customProxyHelper, options);
} else {
const models = supportedModels.join(" - ");
throw new Error(
`The received provider is not supported. Send any provider from: ${models}`
);
}
}
initiate(keyValue, provider, customProxyHelper = null, options = {}) {
this.provider = provider;
if (provider === SupportedChatModels.OPENAI) {
this.openaiWrapper = new OpenAIWrapper(keyValue, customProxyHelper);
} else if (provider === SupportedChatModels.REPLICATE) {
this.replicateWrapper = new ReplicateWrapper(keyValue);
} else if (provider === SupportedChatModels.SAGEMAKER) {
this.sagemakerWrapper = new AWSEndpointWrapper(customProxyHelper.url, keyValue);
} else if (provider === SupportedChatModels.COHERE) {
this.cohereWrapper = new CohereAIWrapper(keyValue);
} else if (provider === SupportedChatModels.MISTRAL) {
this.mistralWrapper = new MistralAIWrapper(keyValue);
} else if (provider === SupportedChatModels.GEMINI) {
this.geminiWrapper = new GeminiAIWrapper(keyValue);
} else if (provider === SupportedChatModels.ANTHROPIC) {
this.anthropicWrapper = new AnthropicWrapper(keyValue);
} else if (provider === SupportedChatModels.NVIDIA) {
const my_options = options || {};
const baseUrl = (my_options.nvidiaOptions && my_options.nvidiaOptions.baseUrl) || my_options.baseUrl;
if (baseUrl) {
this.nvidiaWrapper = new NvidiaWrapper(keyValue, { baseUrl: baseUrl });
} else {
this.nvidiaWrapper = new NvidiaWrapper(keyValue);
}
} else if (provider === SupportedChatModels.VLLM) {
const baseUrl = options.baseUrl;
if (!baseUrl) throw new Error("VLLM requires 'baseUrl' in options.");
this.vllmWrapper = new VLLMWrapper(baseUrl);
} else {
throw new Error("Invalid provider name");
}
// initiate the optional search feature
if (options && options.oneKey) {
const apiBase = options.intelliBase ? options.intelliBase : null;
this.extendedController = options.oneKey.startsWith("in") ? new IntellicloudWrapper(options.oneKey, apiBase) : null;
}
}
getSupportedModels() {
return Object.values(SupportedChatModels);
}
async chat(modelInput, functions = null, function_call = null, debugMode = true) {
// call semantic search
let references = await this.getSemanticSearchContext(modelInput);
// verify the extra params
if (this.provider != SupportedChatModels.OPENAI && (functions != null || function_call != null)) {
throw new Error('The functions and function_call parameters are supported for ChatGPT models only.');
}
// call the chatbot
if (this.provider === SupportedChatModels.OPENAI) {
const result = await this._chatGPT(modelInput, functions, function_call);
return modelInput.attachReference ? { result, references } : result;
} else if (this.provider === SupportedChatModels.REPLICATE) {
const result = await this._chatReplicateLLama(modelInput, debugMode);
return modelInput.attachReference ? { result, references } : result;
} else if (this.provider === SupportedChatModels.SAGEMAKER) {
const result = await this._chatSageMaker(modelInput);
return modelInput.attachReference ? { result, references } : result;
} else if (this.provider === SupportedChatModels.COHERE) {
const result = await this._chatCohere(modelInput);
return modelInput.attachReference ? { result, references } : result;
} else if (this.provider === SupportedChatModels.MISTRAL) {
const result = await this._chatMistral(modelInput);
return modelInput.attachReference ? { result, references } : result;
} else if (this.provider === SupportedChatModels.GEMINI) {
const result = await this._chatGemini(modelInput);
return modelInput.attachReference ? { result, references } : result;
} else if (this.provider === SupportedChatModels.ANTHROPIC) {
const result = await this._chatAnthropic(modelInput);
return modelInput.attachReference ? { result, references } : result;
} else if (this.provider === SupportedChatModels.NVIDIA) {
let result = await this._chatNvidia(modelInput);
return modelInput.attachReference ? { result: result, references } : result;
} else if (this.provider === SupportedChatModels.VLLM) {
let result = await this._chatVLLM(modelInput);
return modelInput.attachReference ? { result: result, references } : result;
} else {
throw new Error("The provider is not supported");
}
}
async *stream(modelInput) {
await this.getSemanticSearchContext(modelInput);
if (this.provider === SupportedChatModels.OPENAI) {
yield* this._chatGPTStream(modelInput);
} else if (this.provider === SupportedChatModels.COHERE) {
yield* this._streamCohere(modelInput)
} else if (this.provider === SupportedChatModels.NVIDIA) {
yield* this._streamNvidia(modelInput);
} else if (this.provider === SupportedChatModels.VLLM) {
yield* this._streamVLLM(modelInput);
} else {
throw new Error("The stream function support only chatGPT, for other providers use chat function.");
}
}
async *_streamVLLM(modelInput) {
let params = modelInput instanceof VLLMInput ? modelInput.getChatInput() : modelInput;
params.stream = true;
// Check for completion-only models
const completionOnlyModels = ["google/gemma-2-2b-it"];
const isCompletionOnly = completionOnlyModels.includes(params.model);
let stream;
if (isCompletionOnly) {
// Convert messages to prompt string
const promptMessages = params.messages
.map(msg => `${msg.role.charAt(0).toUpperCase() + msg.role.slice(1)}: ${msg.content}`)
.join("\n") + "\nAssistant:";
const completionParams = {
model: params.model,
prompt: promptMessages,
max_tokens: params.max_tokens || 100,
temperature: params.temperature || 0.7,
stream: true
};
stream = await this.vllmWrapper.generateText(completionParams);
} else {
stream = await this.vllmWrapper.generateChatText(params);
}
const streamParser = new VLLMStreamParser();
// Process the streaming response
for await (const chunk of stream) {
const chunkText = chunk.toString('utf8');
yield* streamParser.feed(chunkText);
}
}
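/*
Example of the completion-only conversion above (assumed message shapes):
  [{ role: 'user', content: 'Hi' }, { role: 'assistant', content: 'Hello' }]
  becomes the prompt string:
  "User: Hi\nAssistant: Hello\nAssistant:"
*/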
async getSemanticSearchContext(modelInput) {
let references = {};
if (!this.extendedController) {
return references;
}
// Initialize variables for messages or prompt
let messages, lastMessage;
if (modelInput instanceof ChatLLamaInput && typeof modelInput.prompt === "string") {
messages = modelInput.prompt.split('\n').map(line => {
const role = line.startsWith('User:') ? 'user' : 'assistant';
const content = line.replace(/^(User|Assistant): /, '');
return { role, content };
});
} else if (modelInput instanceof GeminiInput) {
messages = modelInput.messages.map(message => {
const role = message.role;
const content = message.parts.map(part => part.text).join(" ");
return { role, content };
});
} else if (Array.isArray(modelInput.messages)) {
messages = modelInput.messages;
} else {
console.log('The input format does not support augmented search.');
return references;
}
lastMessage = messages[messages.length - 1];
if (lastMessage && lastMessage.role === "user") {
const semanticResult = await this.extendedController.semanticSearch(lastMessage.content, modelInput.searchK);
if (semanticResult && semanticResult.length > 0) {
references = semanticResult.reduce((acc, doc) => {
// check if the document_name exists in the accumulator
if (!acc[doc.document_name]) {
acc[doc.document_name] = { pages: [] };
}
return acc;
}, {});
let contextData = semanticResult.map(doc => doc.data.map(dataItem => dataItem.text).join('\n')).join('\n').trim();
const templateWrapper = new SystemHelper().loadStaticPrompt("augmented_chatbot");
const augmentedMessage = templateWrapper.replace('${semantic_search}', contextData).replace('${user_query}', lastMessage.content);
if (modelInput instanceof ChatLLamaInput && modelInput.prompt) {
const promptLines = modelInput.prompt.trim().split('\n');
promptLines.pop();
promptLines.push(`User: ${augmentedMessage}`);
modelInput.prompt = promptLines.join('\n');
} else if (modelInput instanceof ChatModelInput) {
modelInput.deleteLastMessage(lastMessage);
modelInput.addUserMessage(augmentedMessage);
} else if (typeof modelInput === "object" && Array.isArray(modelInput.messages) && messages.length > 0) {
// replace the user message directly in the array
if (lastMessage.content) {
lastMessage.content = augmentedMessage;
}
}
}
}
return references;
}
async _chatVLLM(modelInput) {
let params = modelInput instanceof ChatModelInput ? modelInput.getChatInput() : modelInput;
// Explicit for Gemma (completion-only model)
const completionOnlyModels = ["google/gemma-2-2b-it",];
const isCompletionOnly = completionOnlyModels.includes(params.model);
if (isCompletionOnly) {
// Convert messages to prompt string
const promptMessages = params.messages
.map(msg => `${msg.role.charAt(0).toUpperCase() + msg.role.slice(1)}: ${msg.content}`)
.join("\n") + "\nAssistant:";
const completionParams = {
model: params.model,
prompt: promptMessages,
max_tokens: params.max_tokens || 100,
temperature: params.temperature || 0.7,
};
const result = await this.vllmWrapper.generateText(completionParams);
return result.choices.map(c => c.text.trim());
} else {
const result = await this.vllmWrapper.generateChatText(params);
return result.choices.map(c => c.message.content);
}
}
async *_chatGPTStream(modelInput) {
let params;
if (modelInput instanceof ChatModelInput) {
params = modelInput.getChatInput();
params.stream = true;
} else if (typeof modelInput === "object") {
params = modelInput;
params.stream = true;
} else {
throw new Error("Invalid input: Must be an instance of ChatGPTInput or a dictionary");
}
// Check if this is GPT-5
const isGPT5 = params.model && params.model.toLowerCase().includes('gpt-5');
if (isGPT5) {
// GPT-5 doesn't support streaming in the same way
// For now, throw an error or handle as non-streaming
throw new Error("GPT-5 streaming is not yet supported. Please use the chat() method instead.");
}
const streamParser = new GPTStreamParser();
const stream = await this.openaiWrapper.generateChatText(params);
// Collect data from the stream
for await (const chunk of stream) {
const chunkText = chunk.toString('utf8');
yield* streamParser.feed(chunkText);
}
}
async _chatGPT(modelInput, functions = null, function_call = null) {
let params;
if (modelInput instanceof ChatModelInput) {
params = modelInput.getChatInput();
} else if (typeof modelInput === "object") {
params = modelInput;
} else {
throw new Error("Invalid input: Must be an instance of ChatGPTInput or a dictionary");
}
// Check if this is GPT-5
const isGPT5 = params.model && params.model.toLowerCase().includes('gpt-5');
if (isGPT5) {
// GPT-5 uses different endpoint and response format
const results = await this.openaiWrapper.generateGPT5Response(params);
// GPT-5 response format: { output: [ {type: 'reasoning'}, {type: 'message', content: [...]} ] }
if (results.output && Array.isArray(results.output)) {
// Extract text from the message content
const messageObjects = results.output.filter(item => item.type === 'message');
const responses = messageObjects.map(msg => {
if (msg.content && Array.isArray(msg.content)) {
return msg.content.map(c => c.text || c).join('');
}
return msg.content || '';
});
return responses.length > 0 ? responses : [''];
} else if (results.choices && results.choices.length > 0) {
// Fallback to choices format if available
return results.choices.map(choice => choice.output || choice.text || choice.message?.content);
}
return [''];
} else {
// Standard chat completion for GPT-4 and others
const results = await this.openaiWrapper.generateChatText(params, functions, function_call);
return results.choices.map((choice) => {
if (choice.finish_reason === 'function_call' && choice.message.function_call) {
return {
content: choice.message.content,
function_call: choice.message.function_call
};
} else {
return choice.message.content;
}
});
}
}
async _chatReplicateLLama(modelInput, debugMode) {
let params;
const waitTime = 2500,
maxIterate = 200;
let iteration = 0;
if (modelInput instanceof ChatModelInput) {
params = modelInput.getChatInput();
} else if (typeof modelInput === "object") {
params = modelInput;
} else {
throw new Error("Invalid input: Must be an instance of ChatLLamaInput or a dictionary");
}
try {
const modelName = params.model;
const inputData = params.inputData;
const prediction = await this.replicateWrapper.predict(modelName, inputData);
return new Promise((resolve, reject) => {
const poll = setInterval(async () => {
const status = await this.replicateWrapper.getPredictionStatus(prediction.id);
if (debugMode) {
console.log('The current status:', status.status);
}
if (status.status === 'succeeded' || status.status === 'failed') {
// stop the loop if prediction has completed or failed
clearInterval(poll);
if (status.status === 'succeeded') {
resolve([status.output.join('')]);
} else {
console.error('LLama prediction failed:', status.error);
reject(new Error('LLama prediction failed.'));
}
}
if (iteration > maxIterate) {
// stop polling before rejecting, otherwise the interval keeps running
clearInterval(poll);
reject(new Error('Replicate is taking too long to process the input, try again later!'));
}
iteration += 1;
}, waitTime);
});
} catch (error) {
console.error('LLama Error:', error);
throw error;
}
}
async _chatSageMaker(modelInput) {
let params;
if (modelInput instanceof LLamaSageInput) {
params = modelInput.getChatInput();
} else if (typeof modelInput === "object") {
params = modelInput;
} else {
throw new Error("Invalid input: Must be an instance of LLamaSageInput or a dictionary");
}
const results = await this.sagemakerWrapper.predict(params);
return results.map(result => result.generation ? result.generation.content : result);
}
async _chatCohere(modelInput) {
let params;
if (modelInput instanceof CohereInput) {
params = modelInput.getChatInput();
} else if (typeof modelInput === "object") {
params = modelInput;
} else {
throw new Error("Invalid input: Must be an instance of ChatGPTInput or an object");
}
const results = await this.cohereWrapper.generateChatText(params);
const responseText = results.text;
return [responseText];
}
async *_streamCohere(modelInput) {
let params;
if (modelInput instanceof CohereInput) {
params = modelInput.getChatInput();
params.stream = true;
} else if (typeof modelInput === "object") {
params = modelInput;
params.stream = true;
} else {
throw new Error("Invalid input: Must be an instance of ChatGPTInput or a dictionary");
}
const streamParser = new CohereStreamParser();
const stream = await this.cohereWrapper.generateChatText(params);
// Collect data from the stream
for await (const chunk of stream) {
const chunkText = chunk.toString('utf8');
yield* streamParser.feed(chunkText);
}
}
async _chatMistral(modelInput) {
let params;
if (modelInput instanceof MistralInput || modelInput instanceof ChatGPTInput) {
params = modelInput.getChatInput();
} else if (typeof modelInput === "object") {
params = modelInput;
} else {
throw new Error("Invalid input: Must be an instance of MistralInput or an object");
}
const results = await this.mistralWrapper.generateText(params);
return results.choices.map(choice => choice.message.content);
}
async _chatGemini(modelInput) {
let params;
if (modelInput instanceof GeminiInput) {
params = modelInput.getChatInput();
} else if (typeof modelInput === "object") {
params = modelInput;
} else {
throw new Error("Invalid input: Must be an instance of GeminiInput");
}
// call Gemini
const result = await this.geminiWrapper.generateContent(params);
if (!Array.isArray(result.candidates) || result.candidates.length === 0) {
throw new Error("Invalid response from Gemini API: Expected 'candidates' array with content");
}
// iterate over all the candidates
const responses = result.candidates.map(candidate => {
// combine text from all parts
return candidate.content.parts
.map(part => part.text)
.join(' ');
});
return responses;
}
async _chatAnthropic(modelInput) {
let params;
if (modelInput instanceof AnthropicInput) {
params = modelInput.getChatInput();
} else {
throw new Error("Invalid input: Must be an instance of AnthropicInput");
}
const results = await this.anthropicWrapper.generateText(params);
return results.content.map(choice => choice.text);
}
async _chatNvidia(modelInput) {
let params = modelInput instanceof NvidiaInput ? modelInput.getChatInput() : modelInput;
if (params.stream) throw new Error("Use stream() for NVIDIA streaming.");
let resp = await this.nvidiaWrapper.generateText(params);
return resp.choices.map(c => c.message.content);
}
async *_streamNvidia(modelInput) {
let params = modelInput instanceof NvidiaInput ? modelInput.getChatInput() : modelInput;
params.stream = true;
const stream = await this.nvidiaWrapper.generateTextStream(params);
let buffer = '';
for await (const chunk of stream) {
const lines = chunk.toString('utf8').split('\n');
for (let line of lines) {
line = line.trim();
if (!line) continue;
if (line.startsWith('data: [DONE]')) {
yield buffer;
return;
}
if (line.startsWith('data: ')) {
try {
let parsed = JSON.parse(line.replace('data: ', ''));
let content = parsed.choices?.[0]?.delta?.content || '';
buffer += content;
yield content;
} catch (e) { /* skip malformed or partial SSE lines */ }
}
}
}
}
} /*chatbot class*/
module.exports = {
Chatbot,
SupportedChatModels,
};
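/*
Usage sketch for chat and streaming (illustrative only; keys are placeholders):

  const { Chatbot, SupportedChatModels, ChatGPTInput } = require('intellinode');

  const bot = new Chatbot('YOUR_OPENAI_KEY', SupportedChatModels.OPENAI);
  const input = new ChatGPTInput('You are a helpful assistant.');
  input.addUserMessage('Summarize IntelliNode in one sentence.');

  // one-shot response
  bot.chat(input).then((responses) => console.log(responses[0]));

  // token streaming (OpenAI, Cohere, NVIDIA, and vLLM providers)
  (async () => {
    for await (const token of bot.stream(input)) process.stdout.write(token);
  })();
*/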
},{"../model/input/ChatModelInput":13,"../utils/StreamParser":39,"../utils/SystemHelper":40,"../wrappers/AWSEndpointWrapper":41,"../wrappers/AnthropicWrapper":42,"../wrappers/CohereAIWrapper":43,"../wrappers/GeminiAIWrapper":44,"../wrappers/IntellicloudWrapper":47,"../wrappers/MistralAIWrapper":48,"../wrappers/NvidiaWrapper":49,"../wrappers/OpenAIWrapper":50,"../wrappers/ReplicateWrapper":51,"../wrappers/VLLMWrapper":53}],8:[function(require,module,exports){
(function (Buffer){(function (){
// Gen.js
const { RemoteLanguageModel } = require("../controller/RemoteLanguageModel");
const { RemoteImageModel, SupportedImageModels } = require("../controller/RemoteImageModel");
const { RemoteSpeechModel } = require("../controller/RemoteSpeechModel");
const LanguageModelInput = require("../model/input/LanguageModelInput");
const ImageModelInput = require("../model/input/ImageModelInput");
const Text2SpeechInput = require("../model/input/Text2SpeechInput");
const { Chatbot, SupportedChatModels } = require("../function/Chatbot");
const { ChatGPTInput, ChatGPTMessage, NvidiaInput } = require("../model/input/ChatModelInput");
const { SupportedLangModels } = require('../controller/RemoteLanguageModel');
const SystemHelper = require("../utils/SystemHelper");
const Prompt = require("../utils/Prompt");
const FileHelper = require("../utils/FileHelper");
const path = require('path');
function stripThinking(text) {
/** Remove any <think>...</think> blocks from NVIDIA reasoning-model responses. */
return text.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
}
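/* e.g. stripThinking('<think>chain of thought</think>Final answer') returns 'Final answer' */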
class Gen {
// Marketing description generation
static async get_marketing_desc(promptString, apiKey, provider = SupportedLangModels.OPENAI, customProxyHelper = null) {
if (provider === SupportedLangModels.OPENAI) {
const chatbot = new Chatbot(apiKey, SupportedChatModels.OPENAI, customProxyHelper);
const input = new ChatGPTInput("generate marketing description", { maxTokens: 800 });
input.addUserMessage(`Create a marketing description for the following: ${promptString}`);
const responses = await chatbot.chat(input);
return responses[0].trim();
} else if (provider === SupportedLangModels.COHERE) {
const langInput = new LanguageModelInput({ prompt: `Create a marketing description for the following: ${promptString}` });
langInput.setDefaultValues(SupportedLangModels.COHERE, 400);
const cohereLanguageModel = new RemoteLanguageModel(apiKey, provider);
const responses = await cohereLanguageModel.generateText(langInput);
return responses[0].trim();
} else if (provider === SupportedChatModels.NVIDIA) {
const chatbot = new Chatbot(apiKey, SupportedChatModels.NVIDIA, customProxyHelper);
const input = new NvidiaInput("generate marketing description", { maxTokens: 800, model: 'deepseek-ai/deepseek-r1', temperature: 0.6 });
input.addUserMessage(`Create a marketing description for the following: ${promptString}`);
const responses = await chatbot.chat(input);
let text = responses[0].trim();
return stripThinking(text);
} else {
const supported = RemoteLanguageModel.getSupportedModels().join(' - ');
throw new Error(`Unsupported provider. Use one of: ${supported}, ${SupportedChatModels.NVIDIA}`);
}
}
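/*
Usage sketch (illustrative only; the key is a placeholder):

  const { Gen } = require('intellinode');
  Gen.get_marketing_desc('a smart home camera', 'YOUR_OPENAI_KEY')
    .then((desc) => console.log(desc));
*/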
// Blog post generation
static async get_blog_post(promptString, apiKey, provider = SupportedLangModels.OPENAI, customProxyHelper = null) {
if (provider === SupportedLangModels.OPENAI) {
const chatbot = new Chatbot(apiKey, SupportedChatModels.OPENAI, customProxyHelper);
const input = new ChatGPTInput("generate blog post", { maxTokens: 1200 });
input.addUserMessage(`Write a blog post about ${promptString}`);
const responses = await chatbot.chat(input);
return responses[0].trim();
} else if (provider === SupportedLangModels.COHERE) {
const langInput = new LanguageModelInput({ prompt: `Write a blog post with section titles about ${promptString}` });
langInput.setDefaultValues(SupportedLangModels.COHERE, 1200);
const cohereLanguageModel = new RemoteLanguageModel(apiKey, provider);
const responses = await cohereLanguageModel.generateText(langInput);
return responses[0].trim();
} else if (provider === SupportedChatModels.NVIDIA) {
const chatbot = new Chatbot(apiKey, SupportedChatModels.NVIDIA, customProxyHelper);
const input = new NvidiaInput("generate blog post", { maxTokens: 1200, model: 'deepseek-ai/deepseek-r1', temperature: 0.6 });
input.addUserMessage(`Write a blog post about ${promptString}`);
const responses = await chatbot.chat(input);
let tex