@lobehub/chat
Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal interaction, and an extensible Function Call plugin system. It supports one-click, free deployment of your private ChatGPT/LLM web application.
import { GoogleGenAIOptions } from '@google/genai';
import { ModelRuntime } from '@lobechat/model-runtime';
import { LobeVertexAI } from '@lobechat/model-runtime/vertexai';
import { ClientSecretPayload } from '@lobechat/types';
import { safeParseJSON } from '@lobechat/utils';
import { ModelProvider } from 'model-bank';
import { getLLMConfig } from '@/envs/llm';
import apiKeyManager from './apiKeyManager';
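// apiKeyManager.pick is expected to select a single key when the configured value holds
// several comma-separated keys, spreading requests across them (assumption; see ./apiKeyManager).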
export * from './trace';
/**
* Retrieves provider-specific options from environment variables and the API key manager,
* based on the provider and the client payload.
*
* @param provider - The model provider.
* @param payload - The JWT payload carrying user-supplied credentials.
* @returns The options object used to initialize the provider runtime.
*/
const getParamsFromPayload = (provider: string, payload: ClientSecretPayload) => {
const llmConfig = getLLMConfig() as Record<string, any>;
switch (provider) {
case ModelProvider.VertexAI: {
return {};
}
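// Note: the default branch appears before the explicit cases below; switch semantics in JS/TS
// still try every labelled case first and only fall back to default when none match.
// The fallback derives env var names from the provider id, e.g. a provider id like "groq"
// would map to GROQ_API_KEY / GROQ_PROXY_URL (illustrative mapping).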
default: {
let upperProvider = provider.toUpperCase();
if (!(`${upperProvider}_API_KEY` in llmConfig)) {
upperProvider = ModelProvider.OpenAI.toUpperCase(); // Use OpenAI options as default
}
const apiKey = apiKeyManager.pick(payload?.apiKey || llmConfig[`${upperProvider}_API_KEY`]);
const baseURL = payload?.baseURL || process.env[`${upperProvider}_PROXY_URL`];
return baseURL ? { apiKey, baseURL } : { apiKey };
}
case ModelProvider.Ollama: {
const baseURL = payload?.baseURL || process.env.OLLAMA_PROXY_URL;
return { baseURL };
}
case ModelProvider.Azure: {
const { AZURE_API_KEY, AZURE_API_VERSION, AZURE_ENDPOINT } = llmConfig;
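// AZURE_ENDPOINT typically looks like https://<resource-name>.openai.azure.com and
// AZURE_API_VERSION like '2024-02-01' (illustrative values, not enforced here).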
const apiKey = apiKeyManager.pick(payload?.apiKey || AZURE_API_KEY);
const baseURL = payload?.baseURL || AZURE_ENDPOINT;
const apiVersion = payload?.azureApiVersion || AZURE_API_VERSION;
return { apiKey, apiVersion, baseURL };
}
case ModelProvider.AzureAI: {
const { AZUREAI_ENDPOINT, AZUREAI_ENDPOINT_KEY } = llmConfig;
const apiKey = payload?.apiKey || AZUREAI_ENDPOINT_KEY;
const baseURL = payload?.baseURL || AZUREAI_ENDPOINT;
return { apiKey, baseURL };
}
case ModelProvider.Bedrock: {
const { AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID, AWS_REGION, AWS_SESSION_TOKEN } = llmConfig;
let accessKeyId: string | undefined = AWS_ACCESS_KEY_ID;
let accessKeySecret: string | undefined = AWS_SECRET_ACCESS_KEY;
let region = AWS_REGION;
let sessionToken: string | undefined = AWS_SESSION_TOKEN;
// if the payload carries an apiKey marker, prefer the user's AWS credentials over the environment's
if (payload.apiKey) {
accessKeyId = payload?.awsAccessKeyId;
accessKeySecret = payload?.awsSecretAccessKey;
sessionToken = payload?.awsSessionToken;
region = payload?.awsRegion;
}
return { accessKeyId, accessKeySecret, region, sessionToken };
}
case ModelProvider.Cloudflare: {
const { CLOUDFLARE_API_KEY, CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID } = llmConfig;
const apiKey = apiKeyManager.pick(payload?.apiKey || CLOUDFLARE_API_KEY);
const baseURLOrAccountID =
payload.apiKey && payload.cloudflareBaseURLOrAccountID
? payload.cloudflareBaseURLOrAccountID
: CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID;
return { apiKey, baseURLOrAccountID };
}
case ModelProvider.ComfyUI: {
const {
COMFYUI_BASE_URL,
COMFYUI_AUTH_TYPE,
COMFYUI_API_KEY,
COMFYUI_USERNAME,
COMFYUI_PASSWORD,
COMFYUI_CUSTOM_HEADERS,
} = llmConfig;
// ComfyUI specific handling with environment variables fallback
const baseURL = payload?.baseURL || COMFYUI_BASE_URL || 'http://127.0.0.1:8000';
// ComfyUI supports multiple auth types: none, basic, bearer, custom
// Extract all relevant auth fields from the payload or environment
const authType = payload?.authType || COMFYUI_AUTH_TYPE || 'none';
const apiKey = payload?.apiKey || COMFYUI_API_KEY;
const username = payload?.username || COMFYUI_USERNAME;
const password = payload?.password || COMFYUI_PASSWORD;
// Parse customHeaders from JSON string (similar to Vertex AI credentials handling)
// Support both payload object and environment variable JSON string
const customHeaders = payload?.customHeaders || safeParseJSON(COMFYUI_CUSTOM_HEADERS);
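// Illustrative env value (hypothetical header): COMFYUI_CUSTOM_HEADERS='{"Authorization":"Bearer <token>"}'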
// Return all authentication parameters
return {
apiKey,
authType,
baseURL,
customHeaders,
password,
username,
};
}
case ModelProvider.GiteeAI: {
const { GITEE_AI_API_KEY } = llmConfig;
const apiKey = apiKeyManager.pick(payload?.apiKey || GITEE_AI_API_KEY);
return { apiKey };
}
case ModelProvider.Github: {
const { GITHUB_TOKEN } = llmConfig;
const apiKey = apiKeyManager.pick(payload?.apiKey || GITHUB_TOKEN);
return { apiKey };
}
case ModelProvider.OllamaCloud: {
const { OLLAMA_CLOUD_API_KEY } = llmConfig;
const apiKey = apiKeyManager.pick(payload?.apiKey || OLLAMA_CLOUD_API_KEY);
return { apiKey };
}
case ModelProvider.TencentCloud: {
const { TENCENT_CLOUD_API_KEY } = llmConfig;
const apiKey = apiKeyManager.pick(payload?.apiKey || TENCENT_CLOUD_API_KEY);
return { apiKey };
}
}
};
const buildVertexOptions = (
payload: ClientSecretPayload,
params: Partial<GoogleGenAIOptions> = {},
): GoogleGenAIOptions => {
const rawCredentials = payload.apiKey ?? process.env.VERTEXAI_CREDENTIALS ?? '';
const credentials = safeParseJSON<Record<string, string>>(rawCredentials);
const projectFromParams = params.project as string | undefined;
const projectFromCredentials = credentials?.project_id;
const projectFromEnv = process.env.VERTEXAI_PROJECT;
const project = projectFromParams ?? projectFromCredentials ?? projectFromEnv;
const location =
(params.location as string | undefined) ??
payload.vertexAIRegion ??
process.env.VERTEXAI_LOCATION ??
undefined;
const googleAuthOptions = params.googleAuthOptions ?? (credentials ? { credentials } : undefined);
const options: GoogleGenAIOptions = {
...params,
vertexai: true,
};
if (googleAuthOptions) options.googleAuthOptions = googleAuthOptions;
if (project) options.project = project;
if (location) options.location = location as GoogleGenAIOptions['location'];
return options;
};
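// Illustrative example (placeholder values): passing a service-account JSON such as
// '{"project_id":"my-project","client_email":"sa@my-project.iam.gserviceaccount.com","private_key":"..."}'
// as payload.apiKey yields roughly
// { vertexai: true, project: 'my-project', googleAuthOptions: { credentials: { ... } } }.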
/**
* Initializes the model runtime on the backend with the user payload.
* @param provider - The provider name.
* @param payload - The JWT payload carrying user-supplied credentials.
* @param params - Extra options merged over the resolved provider options.
* @returns The initialized ModelRuntime.
*/
export const initModelRuntimeWithUserPayload = (
provider: string,
payload: ClientSecretPayload,
params: any = {},
) => {
const runtimeProvider = payload.runtimeProvider ?? provider;
if (runtimeProvider === ModelProvider.VertexAI) {
const vertexOptions = buildVertexOptions(payload, params);
const runtime = LobeVertexAI.initFromVertexAI(vertexOptions);
return new ModelRuntime(runtime);
}
return ModelRuntime.initializeWithProvider(runtimeProvider, {
...getParamsFromPayload(runtimeProvider, payload),
...params,
});
};
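// Usage sketch (values are illustrative, not part of this module):
// const runtime = initModelRuntimeWithUserPayload(ModelProvider.OpenAI, {
//   apiKey: 'sk-...',                        // falls back to OPENAI_API_KEY when omitted
//   baseURL: 'https://proxy.example.com/v1', // falls back to OPENAI_PROXY_URL
// });
// The returned ModelRuntime then serves chat/completion requests for that provider.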