pyb-ts
Version:
PYB-CLI - Minimal AI Agent with multi-model support and CLI interface
1,339 lines (1,335 loc) • 96.7 kB
JavaScript
import React, { useState, useEffect, useRef } from "react";
import { Box, Text, useInput } from "ink";
import { getTheme } from "@utils/theme";
import { Select } from "./CustomSelect/select.js";
import { Newline } from "ink";
import { getModelManager } from "@utils/model";
import { LIST_ITEM, UP_DOWN_ARROW, ENTER_ARROW, ESC_ARROW } from "@constants/figures";
function ScreenContainer({
  title,
  exitState,
  children
}) {
  // Rounded-border wrapper shared by the selector screens. The header shows
  // an exit hint while a Ctrl+C / Ctrl+D press is pending confirmation.
  const theme = getTheme();
  const exitHint = exitState.pending
    ? `(press ${exitState.keyName} again to exit)`
    : "";
  const header = /* @__PURE__ */ React.createElement(
    Text,
    { bold: true },
    title,
    " ",
    exitHint
  );
  return /* @__PURE__ */ React.createElement(
    Box,
    {
      flexDirection: "column",
      gap: 1,
      borderStyle: "round",
      borderColor: theme.secondaryBorder,
      paddingX: 2,
      paddingY: 1
    },
    header,
    children
  );
}
import { useExitOnCtrlCD } from "@hooks/useExitOnCtrlCD";
import {
getGlobalConfig,
setAllPointersToModel,
setModelPointer
} from "@utils/config";
import models, { providers } from "@constants/models";
import TextInput from "./TextInput.js";
import OpenAI from "openai";
import chalk from "chalk";
import { verifyApiKey } from "@services/claude";
import { fetchCustomModels } from "@services/openai";
import { testGPT5Connection, validateGPT5Config } from "@services/gpt5ConnectionTest";
// Preset choices for the model's context window size, in tokens.
const CONTEXT_LENGTH_OPTIONS = [
  { label: "32K tokens", value: 32000 },
  { label: "64K tokens", value: 64000 },
  { label: "128K tokens", value: 128000 },
  { label: "200K tokens", value: 200000 },
  { label: "256K tokens", value: 256000 },
  { label: "300K tokens", value: 300000 },
  { label: "512K tokens", value: 512000 },
  { label: "1000K tokens", value: 1000000 },
  { label: "2000K tokens", value: 2000000 },
  { label: "3000K tokens", value: 3000000 },
  { label: "5000K tokens", value: 5000000 },
  { label: "10000K tokens", value: 10000000 }
];
// Fallback context window used when a model does not declare one.
const DEFAULT_CONTEXT_LENGTH = 128000;
// Preset choices for the maximum number of output tokens per response.
const MAX_TOKENS_OPTIONS = [
  { label: "1K tokens", value: 1024 },
  { label: "2K tokens", value: 2048 },
  { label: "4K tokens", value: 4096 },
  { label: "8K tokens (recommended)", value: 8192 },
  { label: "16K tokens", value: 16384 },
  { label: "32K tokens", value: 32768 },
  { label: "64K tokens", value: 65536 },
  { label: "128K tokens", value: 131072 }
];
// Fallback output-token cap used when a model does not declare one.
const DEFAULT_MAX_TOKENS = 8192;
function useEscapeNavigation(onEscape, abortController) {
  // Invoke `onEscape` once per Escape keypress; a short 100ms debounce
  // window prevents a single physical press from firing the handler twice.
  // (`abortController` is accepted for interface compatibility but unused.)
  const escapeSeenRef = useRef(false);
  useInput(
    (input, key) => {
      if (!key.escape || escapeSeenRef.current) {
        return;
      }
      escapeSeenRef.current = true;
      setTimeout(() => {
        escapeSeenRef.current = false;
      }, 100);
      onEscape();
    },
    { isActive: true }
  );
}
function printModelConfig() {
  // Print a one-line gray summary of the active model profiles to stdout,
  // or a placeholder when none are configured.
  const activeProfiles = (getGlobalConfig().modelProfiles || []).filter(
    (profile) => profile.isActive
  );
  if (activeProfiles.length === 0) {
    console.log(chalk.gray(" \u23BF No active model profiles configured"));
    return;
  }
  const summary = activeProfiles
    .map((profile) => `${profile.name} (${profile.provider}: ${profile.modelName})`)
    .join(" | ");
  console.log(chalk.gray(` \u23BF ${summary}`));
}
function ModelSelector({
onDone: onDoneProp,
abortController,
targetPointer,
isOnboarding = false,
onCancel,
skipModelType = false
}) {
const config = getGlobalConfig();
const theme = getTheme();
const onDone = () => {
printModelConfig();
onDoneProp();
};
const exitState = useExitOnCtrlCD(() => process.exit(0));
const getInitialScreen = () => {
return "provider";
};
const [screenStack, setScreenStack] = useState([getInitialScreen()]);
const currentScreen = screenStack[screenStack.length - 1];
const navigateTo = (screen) => {
setScreenStack((prev) => [...prev, screen]);
};
const goBack = () => {
if (screenStack.length > 1) {
setScreenStack((prev) => prev.slice(0, -1));
} else {
onDone();
}
};
const [selectedProvider, setSelectedProvider] = useState(
config.primaryProvider ?? "anthropic"
);
const [anthropicProviderType, setAnthropicProviderType] = useState("official");
const [selectedModel, setSelectedModel] = useState("");
const [apiKey, setApiKey] = useState("");
const [maxTokens, setMaxTokens] = useState(
config.maxTokens?.toString() || DEFAULT_MAX_TOKENS.toString()
);
const [maxTokensMode, setMaxTokensMode] = useState(
"preset"
);
const [selectedMaxTokensPreset, setSelectedMaxTokensPreset] = useState(config.maxTokens || DEFAULT_MAX_TOKENS);
const [reasoningEffort, setReasoningEffort] = useState("medium");
const [supportsReasoningEffort, setSupportsReasoningEffort] = useState(false);
const [contextLength, setContextLength] = useState(
DEFAULT_CONTEXT_LENGTH
);
const [activeFieldIndex, setActiveFieldIndex] = useState(0);
const [maxTokensCursorOffset, setMaxTokensCursorOffset] = useState(0);
const [availableModels, setAvailableModels] = useState([]);
const [isLoadingModels, setIsLoadingModels] = useState(false);
const [modelLoadError, setModelLoadError] = useState(null);
const [modelSearchQuery, setModelSearchQuery] = useState("");
const [modelSearchCursorOffset, setModelSearchCursorOffset] = useState(0);
const [cursorOffset, setCursorOffset] = useState(0);
const [apiKeyEdited, setApiKeyEdited] = useState(false);
const [fetchRetryCount, setFetchRetryCount] = useState(0);
const [isRetrying, setIsRetrying] = useState(false);
const [isTestingConnection, setIsTestingConnection] = useState(false);
const [connectionTestResult, setConnectionTestResult] = useState(null);
const [validationError, setValidationError] = useState(null);
const [resourceName, setResourceName] = useState("");
const [resourceNameCursorOffset, setResourceNameCursorOffset] = useState(0);
const [customModelName, setCustomModelName] = useState("");
const [customModelNameCursorOffset, setCustomModelNameCursorOffset] = useState(0);
const [ollamaBaseUrl, setOllamaBaseUrl] = useState(
"http://localhost:11434/v1"
);
const [ollamaBaseUrlCursorOffset, setOllamaBaseUrlCursorOffset] = useState(0);
const [customBaseUrl, setCustomBaseUrl] = useState("");
const [customBaseUrlCursorOffset, setCustomBaseUrlCursorOffset] = useState(0);
const [providerBaseUrl, setProviderBaseUrl] = useState("");
const [providerBaseUrlCursorOffset, setProviderBaseUrlCursorOffset] = useState(0);
const reasoningEffortOptions = [
{ label: "Low - Faster responses, less thorough reasoning", value: "low" },
{ label: "Medium - Balanced speed and reasoning depth", value: "medium" },
{
label: "High - Slower responses, more thorough reasoning",
value: "high"
}
];
const availableProviders = Object.keys(providers).filter(
(provider) => provider !== "bigdream" && provider !== "opendev"
);
const providerOptions = availableProviders.map((provider) => {
const modelCount = models[provider]?.length || 0;
const label = getProviderLabel(provider, modelCount);
return {
label,
value: provider
};
});
useEffect(() => {
if (!apiKeyEdited && selectedProvider) {
if (process.env[selectedProvider.toUpperCase() + "_API_KEY"]) {
setApiKey(
process.env[selectedProvider.toUpperCase() + "_API_KEY"]
);
} else {
setApiKey("");
}
}
}, [selectedProvider, apiKey, apiKeyEdited]);
useEffect(() => {
if (currentScreen === "contextLength" && !CONTEXT_LENGTH_OPTIONS.find((opt) => opt.value === contextLength)) {
setContextLength(DEFAULT_CONTEXT_LENGTH);
}
}, [currentScreen, contextLength]);
const ourModelNames = new Set(
(models[selectedProvider] || []).map(
(model) => model.model
)
);
const filteredModels = modelSearchQuery ? availableModels.filter(
(model) => model.model?.toLowerCase().includes(modelSearchQuery.toLowerCase())
) : availableModels;
const sortModelsByPriority = (models2) => {
  // Order models so well-known families (claude, gpt, ...) appear first;
  // ties fall back to locale-aware name comparison.
  // NOTE: returns a sorted COPY — the input is not mutated. Callers pass
  // `filteredModels`, which aliases the `availableModels` state array when
  // no search query is active; sorting that in place would mutate React
  // state during render.
  const priorityKeywords = [
    "claude",
    "kimi",
    "deepseek",
    "minimax",
    "o3",
    "gpt",
    "qwen"
  ];
  const hasPriority = (lowerName) => priorityKeywords.some((keyword) => lowerName.includes(keyword));
  return [...models2].sort((a, b) => {
    const aModelLower = a.model?.toLowerCase() || "";
    const bModelLower = b.model?.toLowerCase() || "";
    const aHasPriority = hasPriority(aModelLower);
    const bHasPriority = hasPriority(bModelLower);
    if (aHasPriority && !bHasPriority) return -1;
    if (!aHasPriority && bHasPriority) return 1;
    // Guard against a missing `model` so the comparator cannot throw.
    return (a.model || "").localeCompare(b.model || "");
  });
};
const sortedFilteredModels = sortModelsByPriority(filteredModels);
const modelOptions = sortedFilteredModels.map((model) => {
const isInOurModels = ourModelNames.has(model.model);
return {
label: `${model.model}${getModelDetails(model)}`,
value: model.model
};
});
function getModelDetails(model) {
  // Build the parenthesised capability suffix shown next to a model name
  // in the picker, e.g. " (8K tokens, vision, tools)". Returns "" when the
  // model advertises none of the capabilities.
  const capabilities = [
    model.max_tokens ? `${formatNumber(model.max_tokens)} tokens` : null,
    model.supports_vision ? "vision" : null,
    model.supports_function_calling ? "tools" : null
  ].filter(Boolean);
  return capabilities.length > 0 ? ` (${capabilities.join(", ")})` : "";
}
function formatNumber(num) {
  // Compact human-readable token counts: 1500000 -> "1.5M", 8192 -> "8K",
  // anything below 1000 is returned unchanged.
  if (num >= 1000000) {
    return `${(num / 1000000).toFixed(1)}M`;
  }
  if (num >= 1000) {
    return `${(num / 1000).toFixed(0)}K`;
  }
  return num.toString();
}
function getProviderLabel(provider, modelCount) {
  // Picker label: "<Display Name> (WIP)? (<n> models)" for known providers,
  // or just the raw provider id when it has no registry entry.
  const info = providers[provider];
  if (!info) {
    return `${provider}`;
  }
  return `${info.name} ${info.status === "wip" ? "(WIP)" : ""} (${modelCount} models)`;
}
function handleProviderSelection(provider) {
  // Route to the next screen for the chosen provider: "custom" saves and
  // finishes immediately, "anthropic" opens its sub-menu, and every other
  // provider proceeds to base-URL entry pre-filled with its default.
  const providerType = provider;
  setSelectedProvider(providerType);
  if (provider === "custom") {
    saveConfiguration(providerType, selectedModel || "");
    onDone();
    return;
  }
  if (provider === "anthropic") {
    navigateTo("anthropicSubMenu");
    return;
  }
  setProviderBaseUrl(providers[providerType]?.baseURL || "");
  navigateTo("baseUrl");
}
// Fetch the raw model list from an Anthropic-style `/v1/models` endpoint.
// Every failure is converted into an Error whose message is already
// user-facing: status-specific guidance for 401/403/404/429/5xx, a
// connectivity hint for network errors, and a generic fallback otherwise.
async function fetchAnthropicModels2(baseURL, apiKey2) {
try {
const response = await fetch(`${baseURL}/v1/models`, {
method: "GET",
headers: {
// Both auth header styles are sent — presumably so either the official
// Anthropic API (x-api-key) or OpenAI-style gateways (Bearer) accept
// the request. NOTE(review): confirm both headers are really needed.
"x-api-key": apiKey2,
"anthropic-version": "2023-06-01",
"Content-Type": "application/json",
"Authorization": `Bearer ${apiKey2}`
}
});
if (!response.ok) {
// Map HTTP status codes onto actionable, user-facing messages.
if (response.status === 401) {
throw new Error(
"Invalid API key. Please check your API key and try again."
);
} else if (response.status === 403) {
throw new Error("API key does not have permission to access models.");
} else if (response.status === 404) {
throw new Error(
"API endpoint not found. This provider may not support model listing."
);
} else if (response.status === 429) {
throw new Error(
"Too many requests. Please wait a moment and try again."
);
} else if (response.status >= 500) {
throw new Error(
"API service is temporarily unavailable. Please try again later."
);
} else {
throw new Error(`Unable to connect to API (${response.status}).`);
}
}
const data = await response.json();
// Accept the three response shapes seen across providers:
// { data: [...] }, a bare array, or { models: [...] }.
let models2 = [];
if (data && data.data && Array.isArray(data.data)) {
models2 = data.data;
} else if (Array.isArray(data)) {
models2 = data;
} else if (data && data.models && Array.isArray(data.models)) {
models2 = data.models;
} else {
throw new Error("API returned unexpected response format.");
}
return models2;
} catch (error) {
// Errors produced above already carry user-facing text — rethrow as-is.
if (error instanceof Error && (error.message.includes("API key") || error.message.includes("API endpoint") || error.message.includes("API service") || error.message.includes("response format"))) {
throw error;
}
// Network-level fetch failures get a connectivity-oriented message.
if (error instanceof Error && error.message.includes("fetch")) {
throw new Error(
"Unable to connect to the API. Please check the base URL and your internet connection."
);
}
throw new Error(
"Failed to fetch models from API. Please check your configuration and try again."
);
}
}
/**
 * Fetch models for an Anthropic-compatible provider, preferring the native
 * Anthropic `/v1/models` format and falling back to the OpenAI-compatible
 * listing. Each raw entry is normalized to this component's model shape.
 *
 * Reads the component-level `apiKey`; `apiKeyUrl` is only used to build the
 * help text shown when both attempts fail.
 *
 * @throws Error with a user-facing message when both API formats fail.
 */
async function fetchAnthropicCompatibleModelsWithFallback(baseURL, provider, apiKeyUrl) {
  let lastError = null;
  // Normalize one raw API entry; `defaultVision` differs per API flavor
  // (Anthropic-format models default to vision-capable, OpenAI-format not).
  const toModelInfo = (model, defaultVision) => ({
    model: model.modelName || model.id || model.name || model.model || "unknown",
    provider,
    max_tokens: model.max_tokens || 8192,
    // `??` (not `||`): the previous `|| true` forced these flags on even
    // when the API explicitly reported `false`.
    supports_vision: model.supports_vision ?? defaultVision,
    supports_function_calling: model.supports_function_calling ?? true,
    supports_reasoning_effort: false
  });
  try {
    const models2 = await fetchAnthropicModels2(baseURL, apiKey);
    return models2.map((model) => toModelInfo(model, true));
  } catch (error) {
    lastError = error;
    console.log(
      `Anthropic API failed for ${provider}, trying OpenAI format:`,
      error
    );
  }
  try {
    const models2 = await fetchCustomModels(baseURL, apiKey);
    return models2.map((model) => toModelInfo(model, false));
  } catch (error) {
    lastError = error;
    console.log(
      `OpenAI API failed for ${provider}, falling back to manual input:`,
      error
    );
  }
  // Both formats failed: build a user-facing message with a targeted tip.
  let errorMessage = `Failed to fetch ${provider} models using both Anthropic and OpenAI API formats`;
  if (lastError) {
    errorMessage = lastError.message;
  }
  if (errorMessage.includes("API key")) {
    errorMessage += `\n\n\u{1F4A1} Tip: Get your API key from ${apiKeyUrl}`;
  } else if (errorMessage.includes("permission")) {
    errorMessage += `\n\n\u{1F4A1} Tip: Make sure your API key has access to the ${provider} API`;
  } else if (errorMessage.includes("connection")) {
    errorMessage += "\n\n\u{1F4A1} Tip: Check your internet connection and try again";
  }
  setModelLoadError(errorMessage);
  throw new Error(errorMessage);
}
async function fetchAnthropicCompatibleProviderModels() {
  // Resolve endpoint + help URL for the chosen Anthropic-style service,
  // then delegate to the dual-format (Anthropic/OpenAI) fetch helper.
  const PROVIDER_PRESETS = {
    official: {
      defaultBaseURL: "https://api.anthropic.com",
      apiKeyUrl: "https://console.anthropic.com/settings/keys",
      actualProvider: "anthropic"
    },
    bigdream: {
      defaultBaseURL: "https://api-key.info",
      apiKeyUrl: "https://api-key.info/register?aff=MSl4",
      actualProvider: "bigdream"
    },
    opendev: {
      defaultBaseURL: "https://api.openai-next.com",
      apiKeyUrl: "https://api.openai-next.com/register/?aff_code=4xo7",
      actualProvider: "opendev"
    },
    custom: {
      defaultBaseURL: providerBaseUrl,
      apiKeyUrl: "your custom API provider",
      actualProvider: "anthropic"
    }
  };
  const preset = PROVIDER_PRESETS[anthropicProviderType];
  if (!preset) {
    throw new Error(
      `Unsupported Anthropic provider type: ${anthropicProviderType}`
    );
  }
  // "custom" always uses the user-entered URL; known presets let a
  // user-entered URL override their default.
  const baseURL = anthropicProviderType === "custom" ? providerBaseUrl : providerBaseUrl || preset.defaultBaseURL;
  return await fetchAnthropicCompatibleModelsWithFallback(
    baseURL,
    preset.actualProvider,
    preset.apiKeyUrl
  );
}
async function fetchKimiModels() {
  // List models from the Kimi (Moonshot) OpenAI-compatible endpoint and
  // normalize them; on failure, store a user-facing error and rethrow.
  try {
    const baseURL = providerBaseUrl || "https://api.moonshot.cn/v1";
    const rawModels = await fetchCustomModels(baseURL, apiKey);
    return rawModels.map((raw) => ({
      model: raw.modelName || raw.id || raw.name || raw.model || "unknown",
      provider: "kimi",
      max_tokens: raw.max_tokens || 8192,
      supports_vision: false,
      supports_function_calling: true,
      supports_reasoning_effort: false
    }));
  } catch (error) {
    let errorMessage = error instanceof Error ? error.message : "Failed to fetch Kimi models";
    if (errorMessage.includes("API key")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Get your API key from https://platform.moonshot.cn/console/api-keys";
    } else if (errorMessage.includes("permission")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Make sure your API key has access to the Kimi API";
    } else if (errorMessage.includes("connection")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Check your internet connection and try again";
    }
    setModelLoadError(errorMessage);
    throw error;
  }
}
async function fetchDeepSeekModels() {
  // List models from the DeepSeek OpenAI-compatible endpoint and normalize
  // them; on failure, store a user-facing error and rethrow.
  try {
    const baseURL = providerBaseUrl || "https://api.deepseek.com";
    const rawModels = await fetchCustomModels(baseURL, apiKey);
    return rawModels.map((raw) => ({
      model: raw.modelName || raw.id || raw.name || raw.model || "unknown",
      provider: "deepseek",
      max_tokens: raw.max_tokens || 8192,
      supports_vision: false,
      supports_function_calling: true,
      supports_reasoning_effort: false
    }));
  } catch (error) {
    let errorMessage = error instanceof Error ? error.message : "Failed to fetch DeepSeek models";
    if (errorMessage.includes("API key")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Get your API key from https://platform.deepseek.com/api_keys";
    } else if (errorMessage.includes("permission")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Make sure your API key has access to the DeepSeek API";
    } else if (errorMessage.includes("connection")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Check your internet connection and try again";
    }
    setModelLoadError(errorMessage);
    throw error;
  }
}
async function fetchSiliconFlowModels() {
  // List models from the SiliconFlow OpenAI-compatible endpoint and
  // normalize them; on failure, store a user-facing error and rethrow.
  try {
    const baseURL = providerBaseUrl || "https://api.siliconflow.cn/v1";
    const rawModels = await fetchCustomModels(baseURL, apiKey);
    return rawModels.map((raw) => ({
      model: raw.modelName || raw.id || raw.name || raw.model || "unknown",
      provider: "siliconflow",
      max_tokens: raw.max_tokens || 8192,
      supports_vision: false,
      supports_function_calling: true,
      supports_reasoning_effort: false
    }));
  } catch (error) {
    let errorMessage = error instanceof Error ? error.message : "Failed to fetch SiliconFlow models";
    if (errorMessage.includes("API key")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Get your API key from https://cloud.siliconflow.cn/i/oJWsm6io";
    } else if (errorMessage.includes("permission")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Make sure your API key has access to the SiliconFlow API";
    } else if (errorMessage.includes("connection")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Check your internet connection and try again";
    }
    setModelLoadError(errorMessage);
    throw error;
  }
}
async function fetchQwenModels() {
  // List models from the Qwen (DashScope compatible-mode) endpoint and
  // normalize them; on failure, store a user-facing error and rethrow.
  try {
    const baseURL = providerBaseUrl || "https://dashscope.aliyuncs.com/compatible-mode/v1";
    const rawModels = await fetchCustomModels(baseURL, apiKey);
    return rawModels.map((raw) => ({
      model: raw.modelName || raw.id || raw.name || raw.model || "unknown",
      provider: "qwen",
      max_tokens: raw.max_tokens || 8192,
      supports_vision: false,
      supports_function_calling: true,
      supports_reasoning_effort: false
    }));
  } catch (error) {
    let errorMessage = error instanceof Error ? error.message : "Failed to fetch Qwen models";
    if (errorMessage.includes("API key")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Get your API key from https://bailian.console.aliyun.com/?tab=model#/api-key";
    } else if (errorMessage.includes("permission")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Make sure your API key has access to the Qwen API";
    } else if (errorMessage.includes("connection")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Check your internet connection and try again";
    }
    setModelLoadError(errorMessage);
    throw error;
  }
}
async function fetchGLMModels() {
  // List models from the GLM (Zhipu BigModel) OpenAI-compatible endpoint
  // and normalize them; on failure, store a user-facing error and rethrow.
  try {
    const baseURL = providerBaseUrl || "https://open.bigmodel.cn/api/paas/v4";
    const rawModels = await fetchCustomModels(baseURL, apiKey);
    return rawModels.map((raw) => ({
      model: raw.modelName || raw.id || raw.name || raw.model || "unknown",
      provider: "glm",
      max_tokens: raw.max_tokens || 8192,
      supports_vision: false,
      supports_function_calling: true,
      supports_reasoning_effort: false
    }));
  } catch (error) {
    let errorMessage = error instanceof Error ? error.message : "Failed to fetch GLM models";
    if (errorMessage.includes("API key")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Get your API key from https://open.bigmodel.cn (API Keys section)";
    } else if (errorMessage.includes("permission")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Make sure your API key has access to the GLM API";
    } else if (errorMessage.includes("connection")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Check your internet connection and try again";
    }
    setModelLoadError(errorMessage);
    throw error;
  }
}
async function fetchMinimaxModels() {
  // List models from the MiniMax OpenAI-compatible endpoint and normalize
  // them; on failure, store a user-facing error and rethrow.
  try {
    const baseURL = providerBaseUrl || "https://api.minimaxi.com/v1";
    const rawModels = await fetchCustomModels(baseURL, apiKey);
    return rawModels.map((raw) => ({
      model: raw.modelName || raw.id || raw.name || raw.model || "unknown",
      provider: "minimax",
      max_tokens: raw.max_tokens || 8192,
      supports_vision: false,
      supports_function_calling: true,
      supports_reasoning_effort: false
    }));
  } catch (error) {
    let errorMessage = error instanceof Error ? error.message : "Failed to fetch MiniMax models";
    if (errorMessage.includes("API key")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Get your API key from https://www.minimax.io/platform/user-center/basic-information";
    } else if (errorMessage.includes("permission")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Make sure your API key has access to the MiniMax API";
    } else if (errorMessage.includes("connection")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Check your internet connection and try again";
    }
    setModelLoadError(errorMessage);
    throw error;
  }
}
async function fetchBaiduQianfanModels() {
  // List models from the Baidu Qianfan OpenAI-compatible endpoint and
  // normalize them; on failure, store a user-facing error and rethrow.
  try {
    const baseURL = providerBaseUrl || "https://qianfan.baidubce.com/v2";
    const rawModels = await fetchCustomModels(baseURL, apiKey);
    return rawModels.map((raw) => ({
      model: raw.modelName || raw.id || raw.name || raw.model || "unknown",
      provider: "baidu-qianfan",
      max_tokens: raw.max_tokens || 8192,
      supports_vision: false,
      supports_function_calling: true,
      supports_reasoning_effort: false
    }));
  } catch (error) {
    let errorMessage = error instanceof Error ? error.message : "Failed to fetch Baidu Qianfan models";
    if (errorMessage.includes("API key")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Get your API key from https://console.bce.baidu.com/iam/#/iam/accesslist";
    } else if (errorMessage.includes("permission")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Make sure your API key has access to the Baidu Qianfan API";
    } else if (errorMessage.includes("connection")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Check your internet connection and try again";
    }
    setModelLoadError(errorMessage);
    throw error;
  }
}
async function fetchCustomOpenAIModels() {
  // List models from a user-supplied OpenAI-compatible endpoint and
  // normalize them; on failure, store a user-facing error and rethrow.
  try {
    const rawModels = await fetchCustomModels(customBaseUrl, apiKey);
    return rawModels.map((raw) => ({
      model: raw.modelName || raw.id || raw.name || raw.model || "unknown",
      provider: "custom-openai",
      max_tokens: raw.max_tokens || 4096,
      supports_vision: false,
      supports_function_calling: true,
      supports_reasoning_effort: false
    }));
  } catch (error) {
    let errorMessage = error instanceof Error ? error.message : "Failed to fetch custom API models";
    if (errorMessage.includes("API key")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Check that your API key is valid for this endpoint";
    } else if (errorMessage.includes("endpoint not found")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Make sure the base URL ends with /v1 and supports OpenAI-compatible API";
    } else if (errorMessage.includes("connect")) {
      errorMessage += "\n\n\u{1F4A1} Tip: Verify the base URL is correct and accessible";
    } else if (errorMessage.includes("response format")) {
      errorMessage += "\n\n\u{1F4A1} Tip: This API may not be fully OpenAI-compatible";
    }
    setModelLoadError(errorMessage);
    throw error;
  }
}
/**
 * Fetch available Gemini models from the Google Generative Language API.
 * Only models that support `generateContent` are returned, normalized to
 * this component's model shape. Stores a user-facing message via
 * `setModelLoadError` and rethrows on failure.
 */
async function fetchGeminiModels() {
  try {
    const response = await fetch(
      `https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`
    );
    if (!response.ok) {
      // The error body is usually JSON, but guard against a non-JSON body
      // so a parse failure cannot mask the real HTTP status.
      const errorData = await response.json().catch(() => null);
      throw new Error(
        errorData?.error?.message || `API error: ${response.status}`
      );
    }
    const { models: models2 } = await response.json();
    const geminiModels = models2.filter(
      (model) => model.supportedGenerationMethods.includes("generateContent")
    ).map((model) => ({
      model: model.name.replace("models/", ""),
      provider: "gemini",
      max_tokens: model.outputTokenLimit,
      supports_vision: model.supportedGenerationMethods.includes("generateContent"),
      supports_function_calling: model.supportedGenerationMethods.includes("generateContent"),
      // Present on every other provider's model shape; added for consistency.
      supports_reasoning_effort: false
    }));
    return geminiModels;
  } catch (error) {
    setModelLoadError(
      error instanceof Error ? error.message : "Unknown error"
    );
    throw error;
  }
}
async function fetchOllamaModels() {
  // Query the local Ollama server for installed models. Unlike the other
  // fetchers, this one also drives navigation itself: it stores the models,
  // jumps to the model screen on success, and returns [] on any failure.
  try {
    const response = await fetch(`${ollamaBaseUrl}/models`);
    if (!response.ok) {
      throw new Error(`HTTP error ${response.status}: ${response.statusText}`);
    }
    const responseData = await response.json();
    // Accept the three shapes seen in the wild:
    // { data: [...] }, { models: [...] }, or a bare array.
    let rawModels;
    if (responseData.data && Array.isArray(responseData.data)) {
      rawModels = responseData.data;
    } else if (Array.isArray(responseData.models)) {
      rawModels = responseData.models;
    } else if (Array.isArray(responseData)) {
      rawModels = responseData;
    } else {
      throw new Error(
        "Invalid response from Ollama API: missing models array"
      );
    }
    const validModels = rawModels.map((raw) => ({
      model: raw.name ?? raw.modelName ?? (typeof raw === "string" ? raw : ""),
      provider: "ollama",
      max_tokens: 4096,
      // Default value — Ollama's listing does not report token limits here.
      supports_vision: false,
      supports_function_calling: true,
      supports_reasoning_effort: false
    })).filter((entry) => entry.model);
    setAvailableModels(validModels);
    if (validModels.length > 0) {
      navigateTo("model");
    } else {
      setModelLoadError("No models found in your Ollama installation");
    }
    return validModels;
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    if (errorMessage.includes("fetch")) {
      setModelLoadError(
        `Could not connect to Ollama server at ${ollamaBaseUrl}. Make sure Ollama is running and the URL is correct.`
      );
    } else {
      setModelLoadError(`Error loading Ollama models: ${errorMessage}`);
    }
    console.error("Error fetching Ollama models:", error);
    return [];
  }
}
// Run fetchModels() with up to MAX_RETRIES attempts. On total failure,
// providers that support manual model entry are auto-redirected to the
// "modelInput" screen after a short delay; other providers just keep the
// error message on screen. Returns the fetched models, or [] on failure.
async function fetchModelsWithRetry() {
const MAX_RETRIES = 2;
let lastError = null;
for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
setFetchRetryCount(attempt);
setIsRetrying(attempt > 1);
if (attempt > 1) {
// Surface retry progress in the UI and give the backend a moment.
setModelLoadError(
`Attempt ${attempt}/${MAX_RETRIES}: Retrying model discovery...`
);
await new Promise((resolve) => setTimeout(resolve, 1e3));
}
try {
const models2 = await fetchModels();
// Success: clear all retry/error state before returning.
setFetchRetryCount(0);
setIsRetrying(false);
setModelLoadError(null);
return models2;
} catch (error) {
lastError = error instanceof Error ? error : new Error(String(error));
console.log(`Model fetch attempt ${attempt} failed:`, lastError.message);
if (attempt === MAX_RETRIES) {
break;
}
}
}
setIsRetrying(false);
const errorMessage = lastError?.message || "Unknown error";
// Providers whose model ids the user can reasonably type by hand.
const supportsManualInput = [
"anthropic",
"kimi",
"deepseek",
"siliconflow",
"qwen",
"glm",
"minimax",
"baidu-qianfan",
"custom-openai"
].includes(selectedProvider);
if (supportsManualInput) {
setModelLoadError(
`Failed to auto-discover models after ${MAX_RETRIES} attempts: ${errorMessage}
\u26A1 Automatically switching to manual model configuration...`
);
// Let the user read the message before switching screens.
setTimeout(() => {
setModelLoadError(null);
navigateTo("modelInput");
}, 2e3);
} else {
setModelLoadError(
`Failed to load models after ${MAX_RETRIES} attempts: ${errorMessage}`
);
}
return [];
}
// Provider dispatch for model discovery. Providers with dedicated fetchers
// are handled first (each stores the models and navigates to the "model"
// screen); Azure skips discovery entirely; everything else goes through a
// generic OpenAI-compatible listing. Throws on failure (callers handle it).
async function fetchModels() {
setIsLoadingModels(true);
setModelLoadError(null);
try {
if (selectedProvider === "anthropic") {
const anthropicModels = await fetchAnthropicCompatibleProviderModels();
setAvailableModels(anthropicModels);
navigateTo("model");
return anthropicModels;
}
if (selectedProvider === "custom-openai") {
const customModels = await fetchCustomOpenAIModels();
setAvailableModels(customModels);
navigateTo("model");
return customModels;
}
if (selectedProvider === "gemini") {
const geminiModels = await fetchGeminiModels();
setAvailableModels(geminiModels);
navigateTo("model");
return geminiModels;
}
if (selectedProvider === "kimi") {
const kimiModels = await fetchKimiModels();
setAvailableModels(kimiModels);
navigateTo("model");
return kimiModels;
}
if (selectedProvider === "deepseek") {
const deepseekModels = await fetchDeepSeekModels();
setAvailableModels(deepseekModels);
navigateTo("model");
return deepseekModels;
}
if (selectedProvider === "siliconflow") {
const siliconflowModels = await fetchSiliconFlowModels();
setAvailableModels(siliconflowModels);
navigateTo("model");
return siliconflowModels;
}
if (selectedProvider === "qwen") {
const qwenModels = await fetchQwenModels();
setAvailableModels(qwenModels);
navigateTo("model");
return qwenModels;
}
if (selectedProvider === "glm") {
const glmModels = await fetchGLMModels();
setAvailableModels(glmModels);
navigateTo("model");
return glmModels;
}
if (selectedProvider === "baidu-qianfan") {
const baiduModels = await fetchBaiduQianfanModels();
setAvailableModels(baiduModels);
navigateTo("model");
return baiduModels;
}
// Azure needs a deployment name, not a discovered model list.
if (selectedProvider === "azure") {
navigateTo("modelInput");
return [];
}
let baseURL = providerBaseUrl || providers[selectedProvider]?.baseURL;
// NOTE(review): this branch is unreachable — "custom-openai" already
// returned above. Candidate for removal.
if (selectedProvider === "custom-openai") {
baseURL = customBaseUrl;
}
const openai = new OpenAI({
apiKey: apiKey || "dummy-key-for-ollama",
// Ollama doesn't need a real key
baseURL,
dangerouslyAllowBrowser: true
});
const response = await openai.models.list();
const fetchedModels = [];
for (const model of response.data) {
const modelName = model.modelName || model.id || model.name || model.model || "unknown";
// Enrich from the static registry when the model is known there;
// unknown models get undefined max_tokens and all-false capability flags.
const modelInfo = models[selectedProvider]?.find(
(m) => m.model === modelName
);
fetchedModels.push({
model: modelName,
provider: selectedProvider,
max_tokens: modelInfo?.max_output_tokens,
supports_vision: modelInfo?.supports_vision || false,
supports_function_calling: modelInfo?.supports_function_calling || false,
supports_reasoning_effort: modelInfo?.supports_reasoning_effort || false
});
}
setAvailableModels(fetchedModels);
navigateTo("model");
return fetchedModels;
} catch (error) {
console.error("Error fetching models:", error);
throw error;
} finally {
setIsLoadingModels(false);
}
}
function handleApiKeySubmit(submittedKey) {
  // Store the key, then either collect the Azure resource name or kick off
  // model discovery.
  setApiKey(submittedKey);
  if (selectedProvider === "azure") {
    // Azure needs the resource name before any endpoint can be addressed.
    navigateTo("resourceName");
    return;
  }
  // Fire-and-forget: fetchModelsWithRetry surfaces errors in the UI itself.
  fetchModelsWithRetry().catch((error) => {
    console.error("Final error after retries:", error);
  });
}
function handleResourceNameSubmit(submittedName) {
  // Azure flow: remember the resource name, then ask for the deployment name.
  setResourceName(submittedName);
  navigateTo("modelInput");
}
function handleOllamaBaseUrlSubmit(submittedUrl) {
  // Point the client at the given Ollama server and refresh its model list,
  // clearing any stale error and toggling the loading indicator around it.
  setOllamaBaseUrl(submittedUrl);
  setModelLoadError(null);
  setIsLoadingModels(true);
  fetchOllamaModels().finally(() => {
    setIsLoadingModels(false);
  });
}
function handleCustomBaseUrlSubmit(submittedUrl) {
  // Strip trailing slashes so later path joins don't produce "//".
  setCustomBaseUrl(submittedUrl.replace(/\/+$/, ""));
  navigateTo("apiKey");
}
function handleProviderBaseUrlSubmit(submittedUrl) {
  // Normalize the URL, then branch: Ollama is probed immediately (no API
  // key required); every other provider continues to API-key entry.
  const normalizedUrl = submittedUrl.replace(/\/+$/, "");
  setProviderBaseUrl(normalizedUrl);
  if (selectedProvider !== "ollama") {
    navigateTo("apiKey");
    return;
  }
  setOllamaBaseUrl(normalizedUrl);
  setIsLoadingModels(true);
  setModelLoadError(null);
  fetchOllamaModels().finally(() => {
    setIsLoadingModels(false);
  });
}
function handleAnthropicProviderSelection(providerType) {
  // Record which Anthropic-compatible service was picked. Custom providers
  // must enter a base URL first; known services jump straight to API-key
  // entry with their default endpoint pre-filled.
  setAnthropicProviderType(providerType);
  if (providerType === "custom") {
    setProviderBaseUrl("");
    navigateTo("baseUrl");
    return;
  }
  const defaultUrls = {
    official: "https://api.anthropic.com",
    bigdream: "https://api-key.info",
    opendev: "https://api.openai-next.com"
  };
  setProviderBaseUrl(defaultUrls[providerType]);
  navigateTo("apiKey");
}
function handleCustomModelSubmit(model) {
  // Manually-entered models get conservative defaults: no reasoning-effort
  // support and the standard max-tokens preset.
  setCustomModelName(model);
  setSelectedModel(model);
  setSupportsReasoningEffort(false);
  setReasoningEffort(null);
  const defaultTokensText = DEFAULT_MAX_TOKENS.toString();
  setMaxTokensMode("preset");
  setSelectedMaxTokensPreset(DEFAULT_MAX_TOKENS);
  setMaxTokens(defaultTokensText);
  setMaxTokensCursorOffset(defaultTokensText.length);
  navigateTo("modelParams");
  setActiveFieldIndex(0);
}
function handleModelSelection(model) {
  // Select a discovered model and seed the parameter form from its
  // capabilities before moving on to the params screen.
  setSelectedModel(model);
  const modelInfo = availableModels.find((entry) => entry.model === model);
  const canTuneReasoning = modelInfo?.supports_reasoning_effort || false;
  setSupportsReasoningEffort(canTuneReasoning);
  if (!canTuneReasoning) {
    setReasoningEffort(null);
  }
  if (modelInfo?.max_tokens) {
    const modelMaxTokens = modelInfo.max_tokens;
    const tokensText = modelMaxTokens.toString();
    // Prefer a preset entry when the model's limit matches one exactly.
    const hasMatchingPreset = MAX_TOKENS_OPTIONS.some(
      (option) => option.value === modelMaxTokens
    );
    if (hasMatchingPreset) {
      setMaxTokensMode("preset");
      setSelectedMaxTokensPreset(modelMaxTokens);
    } else {
      setMaxTokensMode("custom");
    }
    setMaxTokens(tokensText);
    setMaxTokensCursorOffset(tokensText.length);
  } else {
    // Unknown limit: fall back to the recommended default preset.
    const defaultTokensText = DEFAULT_MAX_TOKENS.toString();
    setMaxTokensMode("preset");
    setSelectedMaxTokensPreset(DEFAULT_MAX_TOKENS);
    setMaxTokens(defaultTokensText);
    setMaxTokensCursorOffset(defaultTokensText.length);
  }
  navigateTo("modelParams");
  setActiveFieldIndex(0);
}
const handleModelParamsSubmit = () => {
  // Guard against a stale or unknown context length (e.g. carried over from
  // an old config): snap back to the default before moving on. `.some()` is
  // the idiomatic existence check (`.find()` needlessly returns the object).
  const isKnownLength = CONTEXT_LENGTH_OPTIONS.some(
    (opt) => opt.value === contextLength
  );
  if (!isKnownLength) {
    setContextLength(DEFAULT_CONTEXT_LENGTH);
  }
  navigateTo("contextLength");
};
async function testConnection() {
  /**
   * Fire a real request against the currently configured provider to verify
   * the API key / base URL / model combination.
   *
   * Resolves to a result object `{ success, message, details, endpoint? }`
   * and never throws; the UI "testing" flag is toggled around the attempt.
   */
  setIsTestingConnection(true);
  setConnectionTestResult(null);
  try {
    let testBaseURL = providerBaseUrl || providers[selectedProvider]?.baseURL || "";
    if (selectedProvider === "azure") {
      // Azure routes requests through a resource/deployment-specific host.
      testBaseURL = `https://${resourceName}.openai.azure.com/openai/deployments/${selectedModel}`;
    } else if (selectedProvider === "custom-openai") {
      testBaseURL = customBaseUrl;
    }
    // Providers that speak the OpenAI chat-completions wire protocol.
    const isOpenAICompatible = [
      "minimax",
      "kimi",
      "deepseek",
      "siliconflow",
      "qwen",
      "glm",
      "baidu-qianfan",
      "openai",
      "mistral",
      "xai",
      "groq",
      "custom-openai"
    ].includes(selectedProvider);
    if (!isOpenAICompatible) {
      // Anthropic-style and other providers get their own verification path.
      return await testProviderSpecificEndpoint(testBaseURL);
    }
    // Always parse with an explicit radix; fall back to 8192 on bad input.
    const tokenBudget = Number.parseInt(maxTokens, 10) || 8192;
    const isGPT5 = selectedModel?.toLowerCase().includes("gpt-5");
    if (isGPT5) {
      // GPT-5 uses incompatible parameters (max_completion_tokens,
      // temperature=1), so it gets a dedicated validation + test flow.
      console.log(`\u{1F680} Using specialized GPT-5 connection test for model: ${selectedModel}`);
      const configValidation = validateGPT5Config({
        model: selectedModel,
        apiKey,
        baseURL: testBaseURL,
        maxTokens: tokenBudget,
        provider: selectedProvider
      });
      if (!configValidation.valid) {
        return {
          success: false,
          message: "\u274C GPT-5 configuration validation failed",
          details: configValidation.errors.join("\n")
        };
      }
      return await testGPT5Connection({
        model: selectedModel,
        apiKey,
        baseURL: testBaseURL,
        maxTokens: tokenBudget,
        provider: selectedProvider
      });
    }
    // MiniMax prefers its own v2 endpoint but also supports the standard
    // OpenAI path, so try both in order of preference.
    const endpointsToTry = [];
    if (selectedProvider === "minimax") {
      endpointsToTry.push(
        {
          path: "/text/chatcompletion_v2",
          name: "MiniMax v2 (recommended)"
        },
        { path: "/chat/completions", name: "Standard OpenAI" }
      );
    } else {
      endpointsToTry.push({
        path: "/chat/completions",
        name: "Standard OpenAI"
      });
    }
    let lastError = null;
    for (const endpoint of endpointsToTry) {
      try {
        const testResult = await testChatEndpoint(
          testBaseURL,
          endpoint.path,
          endpoint.name
        );
        if (testResult.success) {
          return testResult;
        }
        lastError = testResult;
      } catch (error) {
        lastError = {
          success: false,
          message: `Failed to test ${endpoint.name}`,
          endpoint: endpoint.path,
          details: error instanceof Error ? error.message : String(error)
        };
      }
    }
    // Surface the most recent failure rather than a generic message.
    return lastError || {
      success: false,
      message: "All endpoints failed",
      details: "No endpoints could be reached"
    };
  } catch (error) {
    return {
      success: false,
      message: "Connection test failed",
      details: error instanceof Error ? error.message : String(error)
    };
  } finally {
    setIsTestingConnection(false);
  }
}
async function testChatEndpoint(baseURL, endpointPath, endpointName) {
  /**
   * POST a minimal chat-completion request to `baseURL + endpointPath` and
   * check that the model replies with "YES".
   *
   * @param {string} baseURL - provider base URL (trailing slashes stripped)
   * @param {string} endpointPath - e.g. "/chat/completions"
   * @param {string} endpointName - human-readable label for result messages
   * @returns {Promise<{success: boolean, message: string, endpoint: string, details: string}>}
   */
  const testURL = `${baseURL.replace(/\/+$/, "")}${endpointPath}`;
  const testPayload = {
    model: selectedModel,
    messages: [
      {
        role: "user",
        content: 'Please respond with exactly "YES" (in capital letters) to confirm this connection is working.'
      }
    ],
    // Ensure minimum 8192 tokens for connection test; parse with explicit radix.
    max_tokens: Math.max(Number.parseInt(maxTokens, 10) || 8192, 8192),
    temperature: 0,
    stream: false
  };
  if (selectedModel && selectedModel.toLowerCase().includes("gpt-5")) {
    // GPT-5 rejects `max_tokens` (wants `max_completion_tokens`) and only
    // supports temperature=1, so rewrite the payload in place.
    console.log(`Applying GPT-5 parameter fix for model: ${selectedModel}`);
    if (testPayload.max_tokens) {
      testPayload.max_completion_tokens = testPayload.max_tokens;
      delete testPayload.max_tokens;
      console.log(`Transformed max_tokens \u2192 max_completion_tokens: ${testPayload.max_completion_tokens}`);
    }
    if (testPayload.temperature !== void 0 && testPayload.temperature !== 1) {
      console.log(`Adjusting temperature from ${testPayload.temperature} to 1 for GPT-5`);
      testPayload.temperature = 1;
    }
  }
  // Azure authenticates with an `api-key` header; everyone else uses Bearer.
  const headers = {
    "Content-Type": "application/json"
  };
  if (selectedProvider === "azure") {
    headers["api-key"] = apiKey;
  } else {
    headers["Authorization"] = `Bearer ${apiKey}`;
  }
  try {
    const response = await fetch(testURL, {
      method: "POST",
      headers,
      body: JSON.stringify(testPayload)
    });
    if (!response.ok) {
      // Try to extract a structured error; body may not be JSON at all.
      const errorData = await response.json().catch(() => null);
      const errorMessage = errorData?.error?.message || errorData?.message || response.statusText;
      return {
        success: false,
        message: `\u274C ${endpointName} failed (${response.status})`,
        endpoint: endpointPath,
        details: `Error: ${errorMessage}`
      };
    }
    const data = await response.json();
    console.log(
      "[DEBUG] Connection test response:",
      JSON.stringify(data, null, 2)
    );
    // Providers disagree on response shape: OpenAI-style `choices`,
    // MiniMax-style `reply`, or a generic `output` field.
    let responseContent = "";
    if (data.choices && data.choices.length > 0) {
      responseContent = data.choices[0]?.message?.content || "";
    } else if (data.reply) {
      responseContent = data.reply;
    } else if (data.output) {
      responseContent = data.output?.text || data.output || "";
    }
    console.log("[DEBUG] Extracted response content:", responseContent);
    const containsYes = responseContent.toLowerCase().includes("yes");
    if (containsYes) {
      return {
        success: true,
        message: `[OK] Connection test passed with ${endpointName}`,
        endpoint: endpointPath,
        details: `Model responded correctly: "${responseContent.trim()}"`
      };
    }
    return {
      success: false,
      message: `\u26A0\uFE0F ${endpointName} connected but model response unexpected`,
      endpoint: endpointPath,
      details: `Expected "YES" but got: "${responseContent.trim() || "(empty response)"}"`
    };
  } catch (error) {
    return {
      success: false,
      message: `\u274C ${endpointName} connection failed`,
      endpoint: endpointPath,
      details: error instanceof Error ? error.message : String(error)
    };
  }
}
async function testResponsesEndpoint(baseURL, endpointPath, endpointName) {
  /**
   * Probe the GPT-5 Responses API: send a one-shot "YES" prompt and check
   * the reply. Mirrors `testChatEndpoint` but uses the Responses payload
   * shape (`input`, `max_completion_tokens`, `reasoning`).
   *
   * @param {string} baseURL - provider base URL (trailing slashes stripped)
   * @param {string} endpointPath - e.g. "/responses"
   * @param {string} endpointName - human-readable label for result messages
   * @returns {Promise<{success: boolean, message: string, endpoint: string, details: string}>}
   */
  const testURL = `${baseURL.replace(/\/+$/, "")}${endpointPath}`;
  const testPayload = {
    model: selectedModel,
    input: [
      {
        role: "user",
        content: 'Please respond with exactly "YES" (in capital letters) to confirm this connection is working.'
      }
    ],
    // Parse with explicit radix; enforce the 8192-token test minimum.
    max_completion_tokens: Math.max(Number.parseInt(maxTokens, 10) || 8192, 8192),
    temperature: 1,
    // GPT-5 only supports temperature=1
    // 🚀 Add reasoning configuration for better GPT-5 performance
    reasoning: {
      effort: "low"
      // Fast response for connection test
    }
  };
  console.log(`\u{1F527} Testing GPT-5 Responses API for model: ${selectedModel}`);
  console.log(`\u{1F527} Test URL: ${testURL}`);
  console.log(`\u{1F527} Test payload:`, JSON.stringify(testPayload, null, 2));
  const headers = {
    "Content-Type": "application/json",
    "Authorization": `Bearer ${apiKey}`
  };
  try {
    const response = await fetch(testURL, {
      method: "POST",
      headers,
      body: JSON.stringify(testPayload)
    });
    if (response.ok) {
      const data = await response.json();
      console.log(
        "[DEBUG] Responses API connection test response:",
        JSON.stringify(data, null, 2)
      );
      // Responses API replies with `output_text` (preferred) or `output`.
      let responseContent = "";
      if (data.output_text) {
        responseContent = data.output_text;
      } else if (data.output) {
        responseContent = typeof data.output === "string" ? data.output : data.output.text || "";
      }
      console.log("[DEBUG] Extracted response content:", responseContent);
      const containsYes = responseContent.toLowerCase().includes("yes");
      if (containsYes) {
        return {
          success: true,
          message: `[OK] Connection test passed with ${endpointName}`,
          endpoint: endpointPath,
          details: `GPT-5 responded correctly via Responses API: "${responseContent.trim()}"`
        };
      } else {
        return {
          success: false,
          message: `\u26A0\uFE0F ${endpointName} connected but model response unexpected`,
          endpoint: endpointPath,
          details: `Expected "YES" but got: "${responseContent.trim() || "(empty response)"}"`
        };
      }
    } else {
      const errorData = await response.json().catch(() => null);
      const errorMessage = errorData?.error?.message || errorData?.message || response.statusText;
      console.log(`\u{1F6A8} GPT-5 Responses API Error (${response.status}):`, errorData);
      // Append a targeted hint for the most common failure modes.
      let details = `Responses API Error: ${errorMessage}`;
      if (response.status === 400 && errorMessage.includes("max_tokens")) {
        details += "\n\u{1F527} Note: This appears to be a parameter compatibility issue. The fallback to Chat Completions should handle this.";
      } else if (response.status === 404) {
        details += "\n\u{1F527} Note: Responses API endpoint may not be available for this model or provider.";
      } else if (response.status === 401) {
        details += "\n\u{1F527} Note: API key authentication failed.";
      }
      return {
        success: false,
        message: `\u274C ${endpointName} failed (${response.status})`,
        endpoint: endpointPath,
        details
      };
    }
  } catch (error) {
    return {
      success: false,
      message: `\u274C ${endpointName} connection failed`,
      endpoint: endpointPath,
      details: error instanceof Error ? error.message : String(error)
    };
  }
}
async function testProviderSpecificEndpoint(baseURL) {
  // Only Anthropic-compatible providers have a real verification path;
  // every other provider is accepted without testing for now.
  const isAnthropicCompatible = selectedProvider === "anthropic" || selectedProvider === "bigdream";
  if (!isAnthropicCompatible) {
    return {
      success: true,
      message: `[OK] Configuration saved for ${selectedProvider}`,
      details: "Provider-specific testing not implemented yet"
    };
  }
  try {
    console.log(
      `[DEBUG] Testing ${selectedProvider} connection using official Anthropic SDK...`
    );
    // Only pass a base URL when it differs from the SDK's built-in default.
    let testBaseURL = void 0;
    if (selectedProvider === "bigdream") {
      testBaseURL = baseURL || "https://api-key.info";
    } else if (selectedProvider === "anthropic") {
      testBaseURL = baseURL && baseURL !== "https://api.anthropic.com" ? baseURL : void 0;
    }
    const isValid = await verifyApiKey(apiKey, testBaseURL, selectedProvider);
    if (isValid) {
      return {
        success: true,
        message: `[OK] ${selectedProvider} connection test passed`,
        endpoint: "/messages",
        details: "API key verified using official Anthropic SDK"
      };
    }
    return {
      success: false,
      message: `\u274C ${selectedProvider} API key verification failed`,
      endpoint: "/messages",
      details: "Invalid API key. Please check your API key and try again."
    };
  } catch (error) {
    console.log(`[DEBUG] ${selectedProvider} connection test error:`, error);
    return {
      success: false,
      message: `\u274C ${selectedProvider} connection failed`,
      endpoint: "/messages",
      details: error instanceof Error ? error.message : String(error)
    };
  }
}
async function handleConnectionTest() {
const result = await testConnection();
setConnectionTestResult(result);