UNPKG

@gguf/claw

Version:

WhatsApp gateway CLI (Baileys web) with Pi RPC agent

1,653 lines (1,642 loc) 98.6 kB
import { X as resolveStateDir, Y as resolveOAuthPath, n as isTruthyEnvValue, o as createSubsystemLogger } from "./entry.js"; import { t as formatCliCommand } from "./command-format-3xiXujG0.js"; import { D as DEFAULT_AGENT_ID, a as resolveAgentModelPrimary } from "./agent-scope-jm0ZdXwM.js"; import { m as resolveUserPath } from "./utils-PmTbZoD1.js"; import { a as saveJsonFile, i as loadJsonFile, r as resolveCopilotApiToken, t as DEFAULT_COPILOT_API_BASE_URL } from "./github-copilot-token-rP-6QdKv.js"; import { execFileSync, execSync } from "node:child_process"; import path from "node:path"; import fs from "node:fs"; import { BedrockClient, ListFoundationModelsCommand } from "@aws-sdk/client-bedrock"; import { getEnvApiKey, getOAuthApiKey, getOAuthProviders } from "@mariozechner/pi-ai"; import lockfile from "proper-lockfile"; import { createHash, randomBytes } from "node:crypto"; //#region src/agents/auth-profiles/constants.ts const AUTH_STORE_VERSION = 1; const AUTH_PROFILE_FILENAME = "auth-profiles.json"; const LEGACY_AUTH_FILENAME = "auth.json"; const CLAUDE_CLI_PROFILE_ID = "anthropic:claude-cli"; const CODEX_CLI_PROFILE_ID = "openai-codex:codex-cli"; const QWEN_CLI_PROFILE_ID = "qwen-portal:qwen-cli"; const MINIMAX_CLI_PROFILE_ID = "minimax-portal:minimax-cli"; const AUTH_STORE_LOCK_OPTIONS = { retries: { retries: 10, factor: 2, minTimeout: 100, maxTimeout: 1e4, randomize: true }, stale: 3e4 }; const EXTERNAL_CLI_SYNC_TTL_MS = 900 * 1e3; const EXTERNAL_CLI_NEAR_EXPIRY_MS = 600 * 1e3; const log$1 = createSubsystemLogger("agents/auth-profiles"); //#endregion //#region src/agents/auth-profiles/display.ts function resolveAuthProfileDisplayLabel(params) { const { cfg, store, profileId } = params; const profile = store.profiles[profileId]; const email = cfg?.auth?.profiles?.[profileId]?.email?.trim() || (profile && "email" in profile ? 
profile.email?.trim() : void 0); if (email) return `${profileId} (${email})`; return profileId; } //#endregion //#region src/agents/defaults.ts const DEFAULT_PROVIDER = "anthropic"; const DEFAULT_MODEL = "claude-opus-4-5"; const DEFAULT_CONTEXT_TOKENS = 2e5; //#endregion //#region src/agents/bedrock-discovery.ts const DEFAULT_REFRESH_INTERVAL_SECONDS = 3600; const DEFAULT_CONTEXT_WINDOW = 32e3; const DEFAULT_MAX_TOKENS = 4096; const DEFAULT_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }; const discoveryCache = /* @__PURE__ */ new Map(); let hasLoggedBedrockError = false; function normalizeProviderFilter(filter) { if (!filter || filter.length === 0) return []; const normalized = new Set(filter.map((entry) => entry.trim().toLowerCase()).filter((entry) => entry.length > 0)); return Array.from(normalized).toSorted(); } function buildCacheKey(params) { return JSON.stringify(params); } function includesTextModalities(modalities) { return (modalities ?? []).some((entry) => entry.toLowerCase() === "text"); } function isActive(summary) { const status = summary.modelLifecycle?.status; return typeof status === "string" ? status.toUpperCase() === "ACTIVE" : false; } function mapInputModalities(summary) { const inputs = summary.inputModalities ?? []; const mapped = /* @__PURE__ */ new Set(); for (const modality of inputs) { const lower = modality.toLowerCase(); if (lower === "text") mapped.add("text"); if (lower === "image") mapped.add("image"); } if (mapped.size === 0) mapped.add("text"); return Array.from(mapped); } function inferReasoningSupport(summary) { const haystack = `${summary.modelId ?? ""} ${summary.modelName ?? ""}`.toLowerCase(); return haystack.includes("reasoning") || haystack.includes("thinking"); } function resolveDefaultContextWindow(config) { const value = Math.floor(config?.defaultContextWindow ?? DEFAULT_CONTEXT_WINDOW); return value > 0 ? 
value : DEFAULT_CONTEXT_WINDOW; } function resolveDefaultMaxTokens(config) { const value = Math.floor(config?.defaultMaxTokens ?? DEFAULT_MAX_TOKENS); return value > 0 ? value : DEFAULT_MAX_TOKENS; } function matchesProviderFilter(summary, filter) { if (filter.length === 0) return true; const normalized = (summary.providerName ?? (typeof summary.modelId === "string" ? summary.modelId.split(".")[0] : void 0))?.trim().toLowerCase(); if (!normalized) return false; return filter.includes(normalized); } function shouldIncludeSummary(summary, filter) { if (!summary.modelId?.trim()) return false; if (!matchesProviderFilter(summary, filter)) return false; if (summary.responseStreamingSupported !== true) return false; if (!includesTextModalities(summary.outputModalities)) return false; if (!isActive(summary)) return false; return true; } function toModelDefinition(summary, defaults) { const id = summary.modelId?.trim() ?? ""; return { id, name: summary.modelName?.trim() || id, reasoning: inferReasoningSupport(summary), input: mapInputModalities(summary), cost: DEFAULT_COST, contextWindow: defaults.contextWindow, maxTokens: defaults.maxTokens }; } async function discoverBedrockModels(params) { const refreshIntervalSeconds = Math.max(0, Math.floor(params.config?.refreshInterval ?? DEFAULT_REFRESH_INTERVAL_SECONDS)); const providerFilter = normalizeProviderFilter(params.config?.providerFilter); const defaultContextWindow = resolveDefaultContextWindow(params.config); const defaultMaxTokens = resolveDefaultMaxTokens(params.config); const cacheKey = buildCacheKey({ region: params.region, providerFilter, refreshIntervalSeconds, defaultContextWindow, defaultMaxTokens }); const now = params.now?.() ?? Date.now(); if (refreshIntervalSeconds > 0) { const cached = discoveryCache.get(cacheKey); if (cached?.value && cached.expiresAt > now) return cached.value; if (cached?.inFlight) return cached.inFlight; } const client = (params.clientFactory ?? 
((region) => new BedrockClient({ region })))(params.region); const discoveryPromise = (async () => { const response = await client.send(new ListFoundationModelsCommand({})); const discovered = []; for (const summary of response.modelSummaries ?? []) { if (!shouldIncludeSummary(summary, providerFilter)) continue; discovered.push(toModelDefinition(summary, { contextWindow: defaultContextWindow, maxTokens: defaultMaxTokens })); } return discovered.toSorted((a, b) => a.name.localeCompare(b.name)); })(); if (refreshIntervalSeconds > 0) discoveryCache.set(cacheKey, { expiresAt: now + refreshIntervalSeconds * 1e3, inFlight: discoveryPromise }); try { const value = await discoveryPromise; if (refreshIntervalSeconds > 0) discoveryCache.set(cacheKey, { expiresAt: now + refreshIntervalSeconds * 1e3, value }); return value; } catch (error) { if (refreshIntervalSeconds > 0) discoveryCache.delete(cacheKey); if (!hasLoggedBedrockError) { hasLoggedBedrockError = true; console.warn(`[bedrock-discovery] Failed to list models: ${String(error)}`); } return []; } } //#endregion //#region src/agents/cloudflare-ai-gateway.ts const CLOUDFLARE_AI_GATEWAY_PROVIDER_ID = "cloudflare-ai-gateway"; const CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_ID = "claude-sonnet-4-5"; const CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF = `${CLOUDFLARE_AI_GATEWAY_PROVIDER_ID}/${CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_ID}`; const CLOUDFLARE_AI_GATEWAY_DEFAULT_CONTEXT_WINDOW = 2e5; const CLOUDFLARE_AI_GATEWAY_DEFAULT_MAX_TOKENS = 64e3; const CLOUDFLARE_AI_GATEWAY_DEFAULT_COST = { input: 3, output: 15, cacheRead: .3, cacheWrite: 3.75 }; function buildCloudflareAiGatewayModelDefinition(params) { return { id: params?.id?.trim() || CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_ID, name: params?.name ?? "Claude Sonnet 4.5", reasoning: params?.reasoning ?? true, input: params?.input ?? 
["text", "image"], cost: CLOUDFLARE_AI_GATEWAY_DEFAULT_COST, contextWindow: CLOUDFLARE_AI_GATEWAY_DEFAULT_CONTEXT_WINDOW, maxTokens: CLOUDFLARE_AI_GATEWAY_DEFAULT_MAX_TOKENS }; } function resolveCloudflareAiGatewayBaseUrl(params) { const accountId = params.accountId.trim(); const gatewayId = params.gatewayId.trim(); if (!accountId || !gatewayId) return ""; return `https://gateway.ai.cloudflare.com/v1/${accountId}/${gatewayId}/anthropic`; } //#endregion //#region src/infra/shell-env.ts const DEFAULT_TIMEOUT_MS = 15e3; const DEFAULT_MAX_BUFFER_BYTES = 2 * 1024 * 1024; let lastAppliedKeys = []; let cachedShellPath; function resolveShell(env) { const shell = env.SHELL?.trim(); return shell && shell.length > 0 ? shell : "/bin/sh"; } function parseShellEnv(stdout) { const shellEnv = /* @__PURE__ */ new Map(); const parts = stdout.toString("utf8").split("\0"); for (const part of parts) { if (!part) continue; const eq = part.indexOf("="); if (eq <= 0) continue; const key = part.slice(0, eq); const value = part.slice(eq + 1); if (!key) continue; shellEnv.set(key, value); } return shellEnv; } function loadShellEnvFallback(opts) { const logger = opts.logger ?? console; const exec = opts.exec ?? execFileSync; if (!opts.enabled) { lastAppliedKeys = []; return { ok: true, applied: [], skippedReason: "disabled" }; } if (opts.expectedKeys.some((key) => Boolean(opts.env[key]?.trim()))) { lastAppliedKeys = []; return { ok: true, applied: [], skippedReason: "already-has-keys" }; } const timeoutMs = typeof opts.timeoutMs === "number" && Number.isFinite(opts.timeoutMs) ? Math.max(0, opts.timeoutMs) : DEFAULT_TIMEOUT_MS; const shell = resolveShell(opts.env); let stdout; try { stdout = exec(shell, [ "-l", "-c", "env -0" ], { encoding: "buffer", timeout: timeoutMs, maxBuffer: DEFAULT_MAX_BUFFER_BYTES, env: opts.env, stdio: [ "ignore", "pipe", "pipe" ] }); } catch (err) { const msg = err instanceof Error ? 
err.message : String(err); logger.warn(`[openclaw] shell env fallback failed: ${msg}`); lastAppliedKeys = []; return { ok: false, error: msg, applied: [] }; } const shellEnv = parseShellEnv(stdout); const applied = []; for (const key of opts.expectedKeys) { if (opts.env[key]?.trim()) continue; const value = shellEnv.get(key); if (!value?.trim()) continue; opts.env[key] = value; applied.push(key); } lastAppliedKeys = applied; return { ok: true, applied }; } function shouldEnableShellEnvFallback(env) { return isTruthyEnvValue(env.OPENCLAW_LOAD_SHELL_ENV); } function shouldDeferShellEnvFallback(env) { return isTruthyEnvValue(env.OPENCLAW_DEFER_SHELL_ENV_FALLBACK); } function resolveShellEnvFallbackTimeoutMs(env) { const raw = env.OPENCLAW_SHELL_ENV_TIMEOUT_MS?.trim(); if (!raw) return DEFAULT_TIMEOUT_MS; const parsed = Number.parseInt(raw, 10); if (!Number.isFinite(parsed)) return DEFAULT_TIMEOUT_MS; return Math.max(0, parsed); } function getShellPathFromLoginShell(opts) { if (cachedShellPath !== void 0) return cachedShellPath; if (process.platform === "win32") { cachedShellPath = null; return cachedShellPath; } const exec = opts.exec ?? execFileSync; const timeoutMs = typeof opts.timeoutMs === "number" && Number.isFinite(opts.timeoutMs) ? Math.max(0, opts.timeoutMs) : DEFAULT_TIMEOUT_MS; const shell = resolveShell(opts.env); let stdout; try { stdout = exec(shell, [ "-l", "-c", "env -0" ], { encoding: "buffer", timeout: timeoutMs, maxBuffer: DEFAULT_MAX_BUFFER_BYTES, env: opts.env, stdio: [ "ignore", "pipe", "pipe" ] }); } catch { cachedShellPath = null; return cachedShellPath; } const shellPath = parseShellEnv(stdout).get("PATH")?.trim(); cachedShellPath = shellPath && shellPath.length > 0 ? 
shellPath : null; return cachedShellPath; } function getShellEnvAppliedKeys() { return [...lastAppliedKeys]; } //#endregion //#region src/agents/model-auth.ts const AWS_BEARER_ENV = "AWS_BEARER_TOKEN_BEDROCK"; const AWS_ACCESS_KEY_ENV = "AWS_ACCESS_KEY_ID"; const AWS_SECRET_KEY_ENV = "AWS_SECRET_ACCESS_KEY"; const AWS_PROFILE_ENV = "AWS_PROFILE"; function resolveProviderConfig(cfg, provider) { const providers = cfg?.models?.providers ?? {}; const direct = providers[provider]; if (direct) return direct; const normalized = normalizeProviderId(provider); if (normalized === provider) return Object.entries(providers).find(([key]) => normalizeProviderId(key) === normalized)?.[1]; return providers[normalized] ?? Object.entries(providers).find(([key]) => normalizeProviderId(key) === normalized)?.[1]; } function getCustomProviderApiKey(cfg, provider) { return resolveProviderConfig(cfg, provider)?.apiKey?.trim() || void 0; } function resolveProviderAuthOverride(cfg, provider) { const auth = resolveProviderConfig(cfg, provider)?.auth; if (auth === "api-key" || auth === "aws-sdk" || auth === "oauth" || auth === "token") return auth; } function resolveEnvSourceLabel(params) { return `${params.envVars.some((envVar) => params.applied.has(envVar)) ? 
"shell env: " : "env: "}${params.label}`; } function resolveAwsSdkEnvVarName(env = process.env) { if (env[AWS_BEARER_ENV]?.trim()) return AWS_BEARER_ENV; if (env[AWS_ACCESS_KEY_ENV]?.trim() && env[AWS_SECRET_KEY_ENV]?.trim()) return AWS_ACCESS_KEY_ENV; if (env[AWS_PROFILE_ENV]?.trim()) return AWS_PROFILE_ENV; } function resolveAwsSdkAuthInfo() { const applied = new Set(getShellEnvAppliedKeys()); if (process.env[AWS_BEARER_ENV]?.trim()) return { mode: "aws-sdk", source: resolveEnvSourceLabel({ applied, envVars: [AWS_BEARER_ENV], label: AWS_BEARER_ENV }) }; if (process.env[AWS_ACCESS_KEY_ENV]?.trim() && process.env[AWS_SECRET_KEY_ENV]?.trim()) return { mode: "aws-sdk", source: resolveEnvSourceLabel({ applied, envVars: [AWS_ACCESS_KEY_ENV, AWS_SECRET_KEY_ENV], label: `${AWS_ACCESS_KEY_ENV} + ${AWS_SECRET_KEY_ENV}` }) }; if (process.env[AWS_PROFILE_ENV]?.trim()) return { mode: "aws-sdk", source: resolveEnvSourceLabel({ applied, envVars: [AWS_PROFILE_ENV], label: AWS_PROFILE_ENV }) }; return { mode: "aws-sdk", source: "aws-sdk default chain" }; } async function resolveApiKeyForProvider(params) { const { provider, cfg, profileId, preferredProfile } = params; const store = params.store ?? ensureAuthProfileStore(params.agentDir); if (profileId) { const resolved = await resolveApiKeyForProfile({ cfg, store, profileId, agentDir: params.agentDir }); if (!resolved) throw new Error(`No credentials found for profile "${profileId}".`); const mode = store.profiles[profileId]?.type; return { apiKey: resolved.apiKey, profileId, source: `profile:${profileId}`, mode: mode === "oauth" ? "oauth" : mode === "token" ? 
"token" : "api-key" }; } const authOverride = resolveProviderAuthOverride(cfg, provider); if (authOverride === "aws-sdk") return resolveAwsSdkAuthInfo(); const order = resolveAuthProfileOrder({ cfg, store, provider, preferredProfile }); for (const candidate of order) try { const resolved = await resolveApiKeyForProfile({ cfg, store, profileId: candidate, agentDir: params.agentDir }); if (resolved) { const mode = store.profiles[candidate]?.type; return { apiKey: resolved.apiKey, profileId: candidate, source: `profile:${candidate}`, mode: mode === "oauth" ? "oauth" : mode === "token" ? "token" : "api-key" }; } } catch {} const envResolved = resolveEnvApiKey(provider); if (envResolved) return { apiKey: envResolved.apiKey, source: envResolved.source, mode: envResolved.source.includes("OAUTH_TOKEN") ? "oauth" : "api-key" }; const customKey = getCustomProviderApiKey(cfg, provider); if (customKey) return { apiKey: customKey, source: "models.json", mode: "api-key" }; const normalized = normalizeProviderId(provider); if (authOverride === void 0 && normalized === "amazon-bedrock") return resolveAwsSdkAuthInfo(); if (provider === "openai") { if (listProfilesForProvider(store, "openai-codex").length > 0) throw new Error("No API key found for provider \"openai\". You are authenticated with OpenAI Codex OAuth. 
Use openai-codex/gpt-5.2 (ChatGPT OAuth) or set OPENAI_API_KEY for openai/gpt-5.2."); } const authStorePath = resolveAuthStorePathForDisplay(params.agentDir); const resolvedAgentDir = path.dirname(authStorePath); throw new Error([ `No API key found for provider "${provider}".`, `Auth store: ${authStorePath} (agentDir: ${resolvedAgentDir}).`, `Configure auth for this agent (${formatCliCommand("openclaw agents add <id>")}) or copy auth-profiles.json from the main agentDir.` ].join(" ")); } function resolveEnvApiKey(provider) { const normalized = normalizeProviderId(provider); const applied = new Set(getShellEnvAppliedKeys()); const pick = (envVar) => { const value = process.env[envVar]?.trim(); if (!value) return null; return { apiKey: value, source: applied.has(envVar) ? `shell env: ${envVar}` : `env: ${envVar}` }; }; if (normalized === "github-copilot") return pick("COPILOT_GITHUB_TOKEN") ?? pick("GH_TOKEN") ?? pick("GITHUB_TOKEN"); if (normalized === "anthropic") return pick("ANTHROPIC_OAUTH_TOKEN") ?? pick("ANTHROPIC_API_KEY"); if (normalized === "chutes") return pick("CHUTES_OAUTH_TOKEN") ?? pick("CHUTES_API_KEY"); if (normalized === "zai") return pick("ZAI_API_KEY") ?? pick("Z_AI_API_KEY"); if (normalized === "google-vertex") { const envKey = getEnvApiKey(normalized); if (!envKey) return null; return { apiKey: envKey, source: "gcloud adc" }; } if (normalized === "opencode") return pick("OPENCODE_API_KEY") ?? pick("OPENCODE_ZEN_API_KEY"); if (normalized === "qwen-portal") return pick("QWEN_OAUTH_TOKEN") ?? pick("QWEN_PORTAL_API_KEY"); if (normalized === "minimax-portal") return pick("MINIMAX_OAUTH_TOKEN") ?? pick("MINIMAX_API_KEY"); if (normalized === "kimi-coding") return pick("KIMI_API_KEY") ?? 
pick("KIMICODE_API_KEY"); const envVar = { openai: "OPENAI_API_KEY", google: "GEMINI_API_KEY", groq: "GROQ_API_KEY", deepgram: "DEEPGRAM_API_KEY", cerebras: "CEREBRAS_API_KEY", xai: "XAI_API_KEY", openrouter: "OPENROUTER_API_KEY", "vercel-ai-gateway": "AI_GATEWAY_API_KEY", "cloudflare-ai-gateway": "CLOUDFLARE_AI_GATEWAY_API_KEY", moonshot: "MOONSHOT_API_KEY", minimax: "MINIMAX_API_KEY", xiaomi: "XIAOMI_API_KEY", synthetic: "SYNTHETIC_API_KEY", venice: "VENICE_API_KEY", mistral: "MISTRAL_API_KEY", opencode: "OPENCODE_API_KEY" }[normalized]; if (!envVar) return null; return pick(envVar); } function resolveModelAuthMode(provider, cfg, store) { const resolved = provider?.trim(); if (!resolved) return; const authOverride = resolveProviderAuthOverride(cfg, resolved); if (authOverride === "aws-sdk") return "aws-sdk"; const authStore = store ?? ensureAuthProfileStore(); const profiles = listProfilesForProvider(authStore, resolved); if (profiles.length > 0) { const modes = new Set(profiles.map((id) => authStore.profiles[id]?.type).filter((mode) => Boolean(mode))); if ([ "oauth", "token", "api_key" ].filter((k) => modes.has(k)).length >= 2) return "mixed"; if (modes.has("oauth")) return "oauth"; if (modes.has("token")) return "token"; if (modes.has("api_key")) return "api-key"; } if (authOverride === void 0 && normalizeProviderId(resolved) === "amazon-bedrock") return "aws-sdk"; const envKey = resolveEnvApiKey(resolved); if (envKey?.apiKey) return envKey.source.includes("OAUTH_TOKEN") ? 
"oauth" : "api-key"; if (getCustomProviderApiKey(cfg, resolved)) return "api-key"; return "unknown"; } async function getApiKeyForModel(params) { return resolveApiKeyForProvider({ provider: params.model.provider, cfg: params.cfg, profileId: params.profileId, preferredProfile: params.preferredProfile, store: params.store, agentDir: params.agentDir }); } function requireApiKey(auth, provider) { const key = auth.apiKey?.trim(); if (key) return key; throw new Error(`No API key resolved for provider "${provider}" (auth mode: ${auth.mode}).`); } //#endregion //#region src/agents/synthetic-models.ts const SYNTHETIC_BASE_URL = "https://api.synthetic.new/anthropic"; const SYNTHETIC_DEFAULT_MODEL_ID = "hf:MiniMaxAI/MiniMax-M2.1"; const SYNTHETIC_DEFAULT_MODEL_REF = `synthetic/${SYNTHETIC_DEFAULT_MODEL_ID}`; const SYNTHETIC_DEFAULT_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }; const SYNTHETIC_MODEL_CATALOG = [ { id: SYNTHETIC_DEFAULT_MODEL_ID, name: "MiniMax M2.1", reasoning: false, input: ["text"], contextWindow: 192e3, maxTokens: 65536 }, { id: "hf:moonshotai/Kimi-K2-Thinking", name: "Kimi K2 Thinking", reasoning: true, input: ["text"], contextWindow: 256e3, maxTokens: 8192 }, { id: "hf:zai-org/GLM-4.7", name: "GLM-4.7", reasoning: false, input: ["text"], contextWindow: 198e3, maxTokens: 128e3 }, { id: "hf:deepseek-ai/DeepSeek-R1-0528", name: "DeepSeek R1 0528", reasoning: false, input: ["text"], contextWindow: 128e3, maxTokens: 8192 }, { id: "hf:deepseek-ai/DeepSeek-V3-0324", name: "DeepSeek V3 0324", reasoning: false, input: ["text"], contextWindow: 128e3, maxTokens: 8192 }, { id: "hf:deepseek-ai/DeepSeek-V3.1", name: "DeepSeek V3.1", reasoning: false, input: ["text"], contextWindow: 128e3, maxTokens: 8192 }, { id: "hf:deepseek-ai/DeepSeek-V3.1-Terminus", name: "DeepSeek V3.1 Terminus", reasoning: false, input: ["text"], contextWindow: 128e3, maxTokens: 8192 }, { id: "hf:deepseek-ai/DeepSeek-V3.2", name: "DeepSeek V3.2", reasoning: false, input: ["text"], 
contextWindow: 159e3, maxTokens: 8192 }, { id: "hf:meta-llama/Llama-3.3-70B-Instruct", name: "Llama 3.3 70B Instruct", reasoning: false, input: ["text"], contextWindow: 128e3, maxTokens: 8192 }, { id: "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8", name: "Llama 4 Maverick 17B 128E Instruct FP8", reasoning: false, input: ["text"], contextWindow: 524e3, maxTokens: 8192 }, { id: "hf:moonshotai/Kimi-K2-Instruct-0905", name: "Kimi K2 Instruct 0905", reasoning: false, input: ["text"], contextWindow: 256e3, maxTokens: 8192 }, { id: "hf:moonshotai/Kimi-K2.5", name: "Kimi K2.5", reasoning: true, input: ["text"], contextWindow: 256e3, maxTokens: 8192 }, { id: "hf:openai/gpt-oss-120b", name: "GPT OSS 120B", reasoning: false, input: ["text"], contextWindow: 128e3, maxTokens: 8192 }, { id: "hf:Qwen/Qwen3-235B-A22B-Instruct-2507", name: "Qwen3 235B A22B Instruct 2507", reasoning: false, input: ["text"], contextWindow: 256e3, maxTokens: 8192 }, { id: "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct", name: "Qwen3 Coder 480B A35B Instruct", reasoning: false, input: ["text"], contextWindow: 256e3, maxTokens: 8192 }, { id: "hf:Qwen/Qwen3-VL-235B-A22B-Instruct", name: "Qwen3 VL 235B A22B Instruct", reasoning: false, input: ["text", "image"], contextWindow: 25e4, maxTokens: 8192 }, { id: "hf:zai-org/GLM-4.5", name: "GLM-4.5", reasoning: false, input: ["text"], contextWindow: 128e3, maxTokens: 128e3 }, { id: "hf:zai-org/GLM-4.6", name: "GLM-4.6", reasoning: false, input: ["text"], contextWindow: 198e3, maxTokens: 128e3 }, { id: "hf:deepseek-ai/DeepSeek-V3", name: "DeepSeek V3", reasoning: false, input: ["text"], contextWindow: 128e3, maxTokens: 8192 }, { id: "hf:Qwen/Qwen3-235B-A22B-Thinking-2507", name: "Qwen3 235B A22B Thinking 2507", reasoning: true, input: ["text"], contextWindow: 256e3, maxTokens: 8192 } ]; function buildSyntheticModelDefinition(entry) { return { id: entry.id, name: entry.name, reasoning: entry.reasoning, input: [...entry.input], cost: SYNTHETIC_DEFAULT_COST, 
contextWindow: entry.contextWindow, maxTokens: entry.maxTokens }; } //#endregion //#region src/agents/venice-models.ts const VENICE_BASE_URL = "https://api.venice.ai/api/v1"; const VENICE_DEFAULT_MODEL_ID = "llama-3.3-70b"; const VENICE_DEFAULT_MODEL_REF = `venice/${VENICE_DEFAULT_MODEL_ID}`; const VENICE_DEFAULT_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }; /** * Complete catalog of Venice AI models. * * Venice provides two privacy modes: * - "private": Fully private inference, no logging, ephemeral * - "anonymized": Proxied through Venice with metadata stripped (for proprietary models) * * Note: The `privacy` field is included for documentation purposes but is not * propagated to ModelDefinitionConfig as it's not part of the core model schema. * Privacy mode is determined by the model itself, not configurable at runtime. * * This catalog serves as a fallback when the Venice API is unreachable. */ const VENICE_MODEL_CATALOG = [ { id: "llama-3.3-70b", name: "Llama 3.3 70B", reasoning: false, input: ["text"], contextWindow: 131072, maxTokens: 8192, privacy: "private" }, { id: "llama-3.2-3b", name: "Llama 3.2 3B", reasoning: false, input: ["text"], contextWindow: 131072, maxTokens: 8192, privacy: "private" }, { id: "hermes-3-llama-3.1-405b", name: "Hermes 3 Llama 3.1 405B", reasoning: false, input: ["text"], contextWindow: 131072, maxTokens: 8192, privacy: "private" }, { id: "qwen3-235b-a22b-thinking-2507", name: "Qwen3 235B Thinking", reasoning: true, input: ["text"], contextWindow: 131072, maxTokens: 8192, privacy: "private" }, { id: "qwen3-235b-a22b-instruct-2507", name: "Qwen3 235B Instruct", reasoning: false, input: ["text"], contextWindow: 131072, maxTokens: 8192, privacy: "private" }, { id: "qwen3-coder-480b-a35b-instruct", name: "Qwen3 Coder 480B", reasoning: false, input: ["text"], contextWindow: 262144, maxTokens: 8192, privacy: "private" }, { id: "qwen3-next-80b", name: "Qwen3 Next 80B", reasoning: false, input: ["text"], contextWindow: 
262144, maxTokens: 8192, privacy: "private" }, { id: "qwen3-vl-235b-a22b", name: "Qwen3 VL 235B (Vision)", reasoning: false, input: ["text", "image"], contextWindow: 262144, maxTokens: 8192, privacy: "private" }, { id: "qwen3-4b", name: "Venice Small (Qwen3 4B)", reasoning: true, input: ["text"], contextWindow: 32768, maxTokens: 8192, privacy: "private" }, { id: "deepseek-v3.2", name: "DeepSeek V3.2", reasoning: true, input: ["text"], contextWindow: 163840, maxTokens: 8192, privacy: "private" }, { id: "venice-uncensored", name: "Venice Uncensored (Dolphin-Mistral)", reasoning: false, input: ["text"], contextWindow: 32768, maxTokens: 8192, privacy: "private" }, { id: "mistral-31-24b", name: "Venice Medium (Mistral)", reasoning: false, input: ["text", "image"], contextWindow: 131072, maxTokens: 8192, privacy: "private" }, { id: "google-gemma-3-27b-it", name: "Google Gemma 3 27B Instruct", reasoning: false, input: ["text", "image"], contextWindow: 202752, maxTokens: 8192, privacy: "private" }, { id: "openai-gpt-oss-120b", name: "OpenAI GPT OSS 120B", reasoning: false, input: ["text"], contextWindow: 131072, maxTokens: 8192, privacy: "private" }, { id: "zai-org-glm-4.7", name: "GLM 4.7", reasoning: true, input: ["text"], contextWindow: 202752, maxTokens: 8192, privacy: "private" }, { id: "claude-opus-45", name: "Claude Opus 4.5 (via Venice)", reasoning: true, input: ["text", "image"], contextWindow: 202752, maxTokens: 8192, privacy: "anonymized" }, { id: "claude-sonnet-45", name: "Claude Sonnet 4.5 (via Venice)", reasoning: true, input: ["text", "image"], contextWindow: 202752, maxTokens: 8192, privacy: "anonymized" }, { id: "openai-gpt-52", name: "GPT-5.2 (via Venice)", reasoning: true, input: ["text"], contextWindow: 262144, maxTokens: 8192, privacy: "anonymized" }, { id: "openai-gpt-52-codex", name: "GPT-5.2 Codex (via Venice)", reasoning: true, input: ["text", "image"], contextWindow: 262144, maxTokens: 8192, privacy: "anonymized" }, { id: "gemini-3-pro-preview", 
name: "Gemini 3 Pro (via Venice)", reasoning: true, input: ["text", "image"], contextWindow: 202752, maxTokens: 8192, privacy: "anonymized" }, { id: "gemini-3-flash-preview", name: "Gemini 3 Flash (via Venice)", reasoning: true, input: ["text", "image"], contextWindow: 262144, maxTokens: 8192, privacy: "anonymized" }, { id: "grok-41-fast", name: "Grok 4.1 Fast (via Venice)", reasoning: true, input: ["text", "image"], contextWindow: 262144, maxTokens: 8192, privacy: "anonymized" }, { id: "grok-code-fast-1", name: "Grok Code Fast 1 (via Venice)", reasoning: true, input: ["text"], contextWindow: 262144, maxTokens: 8192, privacy: "anonymized" }, { id: "kimi-k2-thinking", name: "Kimi K2 Thinking (via Venice)", reasoning: true, input: ["text"], contextWindow: 262144, maxTokens: 8192, privacy: "anonymized" }, { id: "minimax-m21", name: "MiniMax M2.1 (via Venice)", reasoning: true, input: ["text"], contextWindow: 202752, maxTokens: 8192, privacy: "anonymized" } ]; /** * Build a ModelDefinitionConfig from a Venice catalog entry. * * Note: The `privacy` field from the catalog is not included in the output * as ModelDefinitionConfig doesn't support custom metadata fields. Privacy * mode is inherent to each model and documented in the catalog/docs. */ function buildVeniceModelDefinition(entry) { return { id: entry.id, name: entry.name, reasoning: entry.reasoning, input: [...entry.input], cost: VENICE_DEFAULT_COST, contextWindow: entry.contextWindow, maxTokens: entry.maxTokens }; } /** * Discover models from Venice API with fallback to static catalog. * The /models endpoint is public and doesn't require authentication. 
*/ async function discoverVeniceModels() { if (process.env.VITEST) return VENICE_MODEL_CATALOG.map(buildVeniceModelDefinition); try { const response = await fetch(`${VENICE_BASE_URL}/models`, { signal: AbortSignal.timeout(5e3) }); if (!response.ok) { console.warn(`[venice-models] Failed to discover models: HTTP ${response.status}, using static catalog`); return VENICE_MODEL_CATALOG.map(buildVeniceModelDefinition); } const data = await response.json(); if (!Array.isArray(data.data) || data.data.length === 0) { console.warn("[venice-models] No models found from API, using static catalog"); return VENICE_MODEL_CATALOG.map(buildVeniceModelDefinition); } const catalogById = new Map(VENICE_MODEL_CATALOG.map((m) => [m.id, m])); const models = []; for (const apiModel of data.data) { const catalogEntry = catalogById.get(apiModel.id); if (catalogEntry) models.push(buildVeniceModelDefinition(catalogEntry)); else { const isReasoning = apiModel.model_spec.capabilities.supportsReasoning || apiModel.id.toLowerCase().includes("thinking") || apiModel.id.toLowerCase().includes("reason") || apiModel.id.toLowerCase().includes("r1"); const hasVision = apiModel.model_spec.capabilities.supportsVision; models.push({ id: apiModel.id, name: apiModel.model_spec.name || apiModel.id, reasoning: isReasoning, input: hasVision ? ["text", "image"] : ["text"], cost: VENICE_DEFAULT_COST, contextWindow: apiModel.model_spec.availableContextTokens || 128e3, maxTokens: 8192 }); } } return models.length > 0 ? 
models : VENICE_MODEL_CATALOG.map(buildVeniceModelDefinition); } catch (error) { console.warn(`[venice-models] Discovery failed: ${String(error)}, using static catalog`); return VENICE_MODEL_CATALOG.map(buildVeniceModelDefinition); } } //#endregion //#region src/agents/models-config.providers.ts const MINIMAX_API_BASE_URL = "https://api.minimax.chat/v1"; const MINIMAX_PORTAL_BASE_URL = "https://api.minimax.io/anthropic"; const MINIMAX_DEFAULT_MODEL_ID = "MiniMax-M2.1"; const MINIMAX_DEFAULT_VISION_MODEL_ID = "MiniMax-VL-01"; const MINIMAX_DEFAULT_CONTEXT_WINDOW = 2e5; const MINIMAX_DEFAULT_MAX_TOKENS = 8192; const MINIMAX_OAUTH_PLACEHOLDER = "minimax-oauth"; const MINIMAX_API_COST = { input: 15, output: 60, cacheRead: 2, cacheWrite: 10 }; const XIAOMI_BASE_URL = "https://api.xiaomimimo.com/anthropic"; const XIAOMI_DEFAULT_MODEL_ID = "mimo-v2-flash"; const XIAOMI_DEFAULT_CONTEXT_WINDOW = 262144; const XIAOMI_DEFAULT_MAX_TOKENS = 8192; const XIAOMI_DEFAULT_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }; const MOONSHOT_BASE_URL = "https://api.moonshot.ai/v1"; const MOONSHOT_DEFAULT_MODEL_ID = "kimi-k2.5"; const MOONSHOT_DEFAULT_CONTEXT_WINDOW = 256e3; const MOONSHOT_DEFAULT_MAX_TOKENS = 8192; const MOONSHOT_DEFAULT_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }; const QWEN_PORTAL_BASE_URL = "https://portal.qwen.ai/v1"; const QWEN_PORTAL_OAUTH_PLACEHOLDER = "qwen-oauth"; const QWEN_PORTAL_DEFAULT_CONTEXT_WINDOW = 128e3; const QWEN_PORTAL_DEFAULT_MAX_TOKENS = 8192; const QWEN_PORTAL_DEFAULT_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }; const OLLAMA_BASE_URL = "http://127.0.0.1:11434/v1"; const OLLAMA_API_BASE_URL = "http://127.0.0.1:11434"; const OLLAMA_DEFAULT_CONTEXT_WINDOW = 128e3; const OLLAMA_DEFAULT_MAX_TOKENS = 8192; const OLLAMA_DEFAULT_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }; async function discoverOllamaModels() { if (process.env.VITEST || false) return []; try { const response = await 
fetch(`${OLLAMA_API_BASE_URL}/api/tags`, { signal: AbortSignal.timeout(5e3) }); if (!response.ok) { console.warn(`Failed to discover Ollama models: ${response.status}`); return []; } const data = await response.json(); if (!data.models || data.models.length === 0) { console.warn("No Ollama models found on local instance"); return []; } return data.models.map((model) => { const modelId = model.name; return { id: modelId, name: modelId, reasoning: modelId.toLowerCase().includes("r1") || modelId.toLowerCase().includes("reasoning"), input: ["text"], cost: OLLAMA_DEFAULT_COST, contextWindow: OLLAMA_DEFAULT_CONTEXT_WINDOW, maxTokens: OLLAMA_DEFAULT_MAX_TOKENS }; }); } catch (error) { console.warn(`Failed to discover Ollama models: ${String(error)}`); return []; } } function normalizeApiKeyConfig(value) { const trimmed = value.trim(); return /^\$\{([A-Z0-9_]+)\}$/.exec(trimmed)?.[1] ?? trimmed; } function resolveEnvApiKeyVarName(provider) { const resolved = resolveEnvApiKey(provider); if (!resolved) return; const match = /^(?:env: |shell env: )([A-Z0-9_]+)$/.exec(resolved.source); return match ? match[1] : void 0; } function resolveAwsSdkApiKeyVarName() { return resolveAwsSdkEnvVarName() ?? "AWS_PROFILE"; } function resolveApiKeyFromProfiles(params) { const ids = listProfilesForProvider(params.store, params.provider); for (const id of ids) { const cred = params.store.profiles[id]; if (!cred) continue; if (cred.type === "api_key") return cred.key; if (cred.type === "token") return cred.token; } } function normalizeGoogleModelId(id) { if (id === "gemini-3-pro") return "gemini-3-pro-preview"; if (id === "gemini-3-flash") return "gemini-3-flash-preview"; return id; } function normalizeGoogleProvider(provider) { let mutated = false; const models = provider.models.map((model) => { const nextId = normalizeGoogleModelId(model.id); if (nextId === model.id) return model; mutated = true; return { ...model, id: nextId }; }); return mutated ? 
{ ...provider, models } : provider;
}
/**
 * Normalizes the configured `providers` map:
 * - unwraps "${VAR}" apiKey placeholders to bare env-var names,
 * - backfills a missing apiKey for providers that declare models (from the AWS
 *   SDK env for aws-sdk auth — implied for "amazon-bedrock" — else from env
 *   vars or stored auth profiles),
 * - normalizes google model ids.
 * Returns the original object when nothing changed.
 */
function normalizeProviders(params) {
	const { providers } = params;
	if (!providers) return providers;
	// Read-only store access; never trigger an interactive keychain prompt here.
	const authStore = ensureAuthProfileStore(params.agentDir, { allowKeychainPrompt: false });
	let mutated = false;
	const next = {};
	for (const [key, provider] of Object.entries(providers)) {
		const normalizedKey = key.trim();
		let normalizedProvider = provider;
		if (normalizedProvider.apiKey && normalizeApiKeyConfig(normalizedProvider.apiKey) !== normalizedProvider.apiKey) {
			mutated = true;
			normalizedProvider = { ...normalizedProvider, apiKey: normalizeApiKeyConfig(normalizedProvider.apiKey) };
		}
		// Only backfill credentials for providers that actually declare models and lack a key.
		if (Array.isArray(normalizedProvider.models) && normalizedProvider.models.length > 0 && !normalizedProvider.apiKey?.trim()) if ((normalizedProvider.auth ?? (normalizedKey === "amazon-bedrock" ? "aws-sdk" : void 0)) === "aws-sdk") {
			const apiKey = resolveAwsSdkApiKeyVarName();
			mutated = true;
			normalizedProvider = { ...normalizedProvider, apiKey };
		} else {
			const fromEnv = resolveEnvApiKeyVarName(normalizedKey);
			const fromProfiles = resolveApiKeyFromProfiles({ provider: normalizedKey, store: authStore });
			const apiKey = fromEnv ?? fromProfiles;
			if (apiKey?.trim()) {
				mutated = true;
				normalizedProvider = { ...normalizedProvider, apiKey };
			}
		}
		if (normalizedKey === "google") {
			const googleNormalized = normalizeGoogleProvider(normalizedProvider);
			if (googleNormalized !== normalizedProvider) mutated = true;
			normalizedProvider = googleNormalized;
		}
		next[key] = normalizedProvider;
	}
	return mutated ?
next : providers;
}
// The build*Provider helpers below each return a fresh provider definition
// (baseUrl, wire API, and default model list) for one implicit provider.
function buildMinimaxProvider() {
	return { baseUrl: MINIMAX_API_BASE_URL, api: "openai-completions", models: [{ id: MINIMAX_DEFAULT_MODEL_ID, name: "MiniMax M2.1", reasoning: false, input: ["text"], cost: MINIMAX_API_COST, contextWindow: MINIMAX_DEFAULT_CONTEXT_WINDOW, maxTokens: MINIMAX_DEFAULT_MAX_TOKENS }, { id: MINIMAX_DEFAULT_VISION_MODEL_ID, name: "MiniMax VL 01", reasoning: false, input: ["text", "image"], cost: MINIMAX_API_COST, contextWindow: MINIMAX_DEFAULT_CONTEXT_WINDOW, maxTokens: MINIMAX_DEFAULT_MAX_TOKENS }] };
}
function buildMinimaxPortalProvider() {
	return { baseUrl: MINIMAX_PORTAL_BASE_URL, api: "anthropic-messages", models: [{ id: MINIMAX_DEFAULT_MODEL_ID, name: "MiniMax M2.1", reasoning: false, input: ["text"], cost: MINIMAX_API_COST, contextWindow: MINIMAX_DEFAULT_CONTEXT_WINDOW, maxTokens: MINIMAX_DEFAULT_MAX_TOKENS }] };
}
function buildMoonshotProvider() {
	return { baseUrl: MOONSHOT_BASE_URL, api: "openai-completions", models: [{ id: MOONSHOT_DEFAULT_MODEL_ID, name: "Kimi K2.5", reasoning: false, input: ["text"], cost: MOONSHOT_DEFAULT_COST, contextWindow: MOONSHOT_DEFAULT_CONTEXT_WINDOW, maxTokens: MOONSHOT_DEFAULT_MAX_TOKENS }] };
}
function buildQwenPortalProvider() {
	return { baseUrl: QWEN_PORTAL_BASE_URL, api: "openai-completions", models: [{ id: "coder-model", name: "Qwen Coder", reasoning: false, input: ["text"], cost: QWEN_PORTAL_DEFAULT_COST, contextWindow: QWEN_PORTAL_DEFAULT_CONTEXT_WINDOW, maxTokens: QWEN_PORTAL_DEFAULT_MAX_TOKENS }, { id: "vision-model", name: "Qwen Vision", reasoning: false, input: ["text", "image"], cost: QWEN_PORTAL_DEFAULT_COST, contextWindow: QWEN_PORTAL_DEFAULT_CONTEXT_WINDOW, maxTokens: QWEN_PORTAL_DEFAULT_MAX_TOKENS }] };
}
function buildSyntheticProvider() {
	return { baseUrl: SYNTHETIC_BASE_URL, api: "anthropic-messages", models: SYNTHETIC_MODEL_CATALOG.map(buildSyntheticModelDefinition) };
}
function buildXiaomiProvider() {
	return { baseUrl: XIAOMI_BASE_URL, api:
"anthropic-messages", models: [{ id: XIAOMI_DEFAULT_MODEL_ID, name: "Xiaomi MiMo V2 Flash", reasoning: false, input: ["text"], cost: XIAOMI_DEFAULT_COST, contextWindow: XIAOMI_DEFAULT_CONTEXT_WINDOW, maxTokens: XIAOMI_DEFAULT_MAX_TOKENS }] }; } async function buildVeniceProvider() { return { baseUrl: VENICE_BASE_URL, api: "openai-completions", models: await discoverVeniceModels() }; } async function buildOllamaProvider() { return { baseUrl: OLLAMA_BASE_URL, api: "openai-completions", models: await discoverOllamaModels() }; } async function resolveImplicitProviders(params) { const providers = {}; const authStore = ensureAuthProfileStore(params.agentDir, { allowKeychainPrompt: false }); const minimaxKey = resolveEnvApiKeyVarName("minimax") ?? resolveApiKeyFromProfiles({ provider: "minimax", store: authStore }); if (minimaxKey) providers.minimax = { ...buildMinimaxProvider(), apiKey: minimaxKey }; if (listProfilesForProvider(authStore, "minimax-portal").length > 0) providers["minimax-portal"] = { ...buildMinimaxPortalProvider(), apiKey: MINIMAX_OAUTH_PLACEHOLDER }; const moonshotKey = resolveEnvApiKeyVarName("moonshot") ?? resolveApiKeyFromProfiles({ provider: "moonshot", store: authStore }); if (moonshotKey) providers.moonshot = { ...buildMoonshotProvider(), apiKey: moonshotKey }; const syntheticKey = resolveEnvApiKeyVarName("synthetic") ?? resolveApiKeyFromProfiles({ provider: "synthetic", store: authStore }); if (syntheticKey) providers.synthetic = { ...buildSyntheticProvider(), apiKey: syntheticKey }; const veniceKey = resolveEnvApiKeyVarName("venice") ?? resolveApiKeyFromProfiles({ provider: "venice", store: authStore }); if (veniceKey) providers.venice = { ...await buildVeniceProvider(), apiKey: veniceKey }; if (listProfilesForProvider(authStore, "qwen-portal").length > 0) providers["qwen-portal"] = { ...buildQwenPortalProvider(), apiKey: QWEN_PORTAL_OAUTH_PLACEHOLDER }; const xiaomiKey = resolveEnvApiKeyVarName("xiaomi") ?? 
resolveApiKeyFromProfiles({ provider: "xiaomi", store: authStore }); if (xiaomiKey) providers.xiaomi = { ...buildXiaomiProvider(), apiKey: xiaomiKey }; const cloudflareProfiles = listProfilesForProvider(authStore, "cloudflare-ai-gateway"); for (const profileId of cloudflareProfiles) { const cred = authStore.profiles[profileId]; if (cred?.type !== "api_key") continue; const accountId = cred.metadata?.accountId?.trim(); const gatewayId = cred.metadata?.gatewayId?.trim(); if (!accountId || !gatewayId) continue; const baseUrl = resolveCloudflareAiGatewayBaseUrl({ accountId, gatewayId }); if (!baseUrl) continue; const apiKey = resolveEnvApiKeyVarName("cloudflare-ai-gateway") ?? cred.key?.trim() ?? ""; if (!apiKey) continue; providers["cloudflare-ai-gateway"] = { baseUrl, api: "anthropic-messages", apiKey, models: [buildCloudflareAiGatewayModelDefinition()] }; break; } const ollamaKey = resolveEnvApiKeyVarName("ollama") ?? resolveApiKeyFromProfiles({ provider: "ollama", store: authStore }); if (ollamaKey) providers.ollama = { ...await buildOllamaProvider(), apiKey: ollamaKey }; return providers; } async function resolveImplicitCopilotProvider(params) { const env = params.env ?? process.env; const authStore = ensureAuthProfileStore(params.agentDir, { allowKeychainPrompt: false }); const hasProfile = listProfilesForProvider(authStore, "github-copilot").length > 0; const githubToken = (env.COPILOT_GITHUB_TOKEN ?? env.GH_TOKEN ?? env.GITHUB_TOKEN ?? "").trim(); if (!hasProfile && !githubToken) return null; let selectedGithubToken = githubToken; if (!selectedGithubToken && hasProfile) { const profileId = listProfilesForProvider(authStore, "github-copilot")[0]; const profile = profileId ? 
authStore.profiles[profileId] : void 0;
		if (profile && profile.type === "token") selectedGithubToken = profile.token;
	}
	let baseUrl = DEFAULT_COPILOT_API_BASE_URL;
	// Exchange the GitHub token for the Copilot API endpoint; on failure keep the default.
	if (selectedGithubToken) try {
		baseUrl = (await resolveCopilotApiToken({ githubToken: selectedGithubToken, env })).baseUrl;
	} catch {
		baseUrl = DEFAULT_COPILOT_API_BASE_URL;
	}
	return { baseUrl, models: [] };
}
/**
 * Implicit Amazon Bedrock provider via runtime model discovery.
 * Disabled when `bedrockDiscovery.enabled` is false; when not explicitly
 * enabled it also requires AWS credentials detectable in the environment.
 * Returns null when disabled or when discovery yields no models.
 */
async function resolveImplicitBedrockProvider(params) {
	const env = params.env ?? process.env;
	const discoveryConfig = params.config?.models?.bedrockDiscovery;
	const enabled = discoveryConfig?.enabled;
	const hasAwsCreds = resolveAwsSdkEnvVarName(env) !== void 0;
	if (enabled === false) return null;
	if (enabled !== true && !hasAwsCreds) return null;
	// Region precedence: explicit config, then AWS env vars, then us-east-1.
	const region = discoveryConfig?.region ?? env.AWS_REGION ?? env.AWS_DEFAULT_REGION ?? "us-east-1";
	const models = await discoverBedrockModels({ region, config: discoveryConfig });
	if (models.length === 0) return null;
	return { baseUrl: `https://bedrock-runtime.${region}.amazonaws.com`, api: "bedrock-converse-stream", auth: "aws-sdk", models };
}
//#endregion
//#region src/agents/model-selection.ts
// Case-insensitive key used for model-alias lookups.
function normalizeAliasKey(value) {
	return value.trim().toLowerCase();
}
// Canonical "provider/model" key.
function modelKey(provider, model) {
	return `${provider}/${model}`;
}
// Maps legacy/alternate provider spellings onto their canonical ids.
function normalizeProviderId(provider) {
	const normalized = provider.trim().toLowerCase();
	if (normalized === "z.ai" || normalized === "z-ai") return "zai";
	if (normalized === "opencode-zen") return "opencode";
	if (normalized === "qwen") return "qwen-portal";
	if (normalized === "kimi-code") return "kimi-coding";
	return normalized;
}
// True for the built-in CLI-backed providers (claude-cli/codex-cli) and any
// provider listed under `agents.defaults.cliBackends` in the config.
function isCliProvider(provider, cfg) {
	const normalized = normalizeProviderId(provider);
	if (normalized === "claude-cli") return true;
	if (normalized === "codex-cli") return true;
	const backends = cfg?.agents?.defaults?.cliBackends ??
{};
	return Object.keys(backends).some((key) => normalizeProviderId(key) === normalized);
}
// Expands shorthand Anthropic model names ("opus-4.5"/"sonnet-4.5") to full ids.
function normalizeAnthropicModelId(model) {
	const trimmed = model.trim();
	if (!trimmed) return trimmed;
	const lower = trimmed.toLowerCase();
	if (lower === "opus-4.5") return "claude-opus-4-5";
	if (lower === "sonnet-4.5") return "claude-sonnet-4-5";
	return trimmed;
}
// Applies provider-specific model-id normalization.
function normalizeProviderModelId(provider, model) {
	if (provider === "anthropic") return normalizeAnthropicModelId(model);
	if (provider === "google") return normalizeGoogleModelId(model);
	return model;
}
// Parses "provider/model" (or a bare model id, using `defaultProvider`) into a
// normalized { provider, model } ref; returns null for empty/malformed input.
function parseModelRef(raw, defaultProvider) {
	const trimmed = raw.trim();
	if (!trimmed) return null;
	const slash = trimmed.indexOf("/");
	if (slash === -1) {
		const provider = normalizeProviderId(defaultProvider);
		return { provider, model: normalizeProviderModelId(provider, trimmed) };
	}
	const provider = normalizeProviderId(trimmed.slice(0, slash).trim());
	const model = trimmed.slice(slash + 1).trim();
	if (!provider || !model) return null;
	return { provider, model: normalizeProviderModelId(provider, model) };
}
// Indexes configured model aliases from `agents.defaults.models`:
// alias key -> { alias, ref }, and "provider/model" key -> list of aliases.
function buildModelAliasIndex(params) {
	const byAlias = /* @__PURE__ */ new Map();
	const byKey = /* @__PURE__ */ new Map();
	const rawModels = params.cfg.agents?.defaults?.models ?? {};
	for (const [keyRaw, entryRaw] of Object.entries(rawModels)) {
		const parsed = parseModelRef(String(keyRaw ?? ""), params.defaultProvider);
		if (!parsed) continue;
		const alias = String(entryRaw?.alias ?? "").trim();
		if (!alias) continue;
		const aliasKey = normalizeAliasKey(alias);
		byAlias.set(aliasKey, { alias, ref: parsed });
		const key = modelKey(parsed.provider, parsed.model);
		const existing = byKey.get(key) ??
[];
		existing.push(alias);
		byKey.set(key, existing);
	}
	return { byAlias, byKey };
}
// Resolves a user-supplied model string: bare strings are first checked against
// configured aliases, then parsed as "provider/model"; returns null if unparsable.
function resolveModelRefFromString(params) {
	const trimmed = params.raw.trim();
	if (!trimmed) return null;
	if (!trimmed.includes("/")) {
		const aliasKey = normalizeAliasKey(trimmed);
		const aliasMatch = params.aliasIndex?.byAlias.get(aliasKey);
		if (aliasMatch) return { ref: aliasMatch.ref, alias: aliasMatch.alias };
	}
	const parsed = parseModelRef(trimmed, params.defaultProvider);
	if (!parsed) return null;
	return { ref: parsed };
}
// Resolves the model configured under `agents.defaults.model` (string or
// { primary } object form), honoring aliases; a bare model id without a
// provider falls back to "anthropic/<id>" with a warning, and an absent or
// unparsable value falls back to the supplied defaults.
function resolveConfiguredModelRef(params) {
	const rawModel = (() => {
		const raw = params.cfg.agents?.defaults?.model;
		if (typeof raw === "string") return raw.trim();
		return raw?.primary?.trim() ?? "";
	})();
	if (rawModel) {
		const trimmed = rawModel.trim();
		const aliasIndex = buildModelAliasIndex({ cfg: params.cfg, defaultProvider: params.defaultProvider });
		if (!trimmed.includes("/")) {
			const aliasKey = normalizeAliasKey(trimmed);
			const aliasMatch = aliasIndex.byAlias.get(aliasKey);
			if (aliasMatch) return aliasMatch.ref;
			console.warn(`[openclaw] Model "${trimmed}" specified without provider. Falling back to "anthropic/${trimmed}". Please use "anthropic/${trimmed}" in your config.`);
			return { provider: "anthropic", model: trimmed };
		}
		const resolved = resolveModelRefFromString({ raw: trimmed, defaultProvider: params.defaultProvider, aliasIndex });
		if (resolved) return resolved.ref;
	}
	return { provider: params.defaultProvider, model: params.defaultModel };
}
// Default model for an agent: a non-empty per-agent override is spliced into a
// copy of the config as `agents.defaults.model.primary` before delegating to
// resolveConfiguredModelRef with the global provider/model defaults.
function resolveDefaultModelForAgent(params) {
	const agentModelOverride = params.agentId ? resolveAgentModelPrimary(params.cfg, params.agentId) : void 0;
	return resolveConfiguredModelRef({ cfg: agentModelOverride && agentModelOverride.length > 0 ? { ...params.cfg, agents: { ...params.cfg.agents, defaults: { ...params.cfg.agents?.defaults, model: { ...typeof params.cfg.agents?.defaults?.model === "object" ?
params.cfg.agents.defaults.model : void 0, primary: agentModelOverride } } } } : params.cfg, defaultProvider: DEFAULT_PROVIDER, defaultModel: DEFAULT_MODEL }); } function buildAllowedModelSet(params) { const rawAllowlist = (() => { const modelMap = params.cfg.agents?.defaults?.models ?? {}; return Object.keys(modelMap); })(); const allowAny = rawAllowlist.length === 0; const defaultModel = params.defaultModel?.trim(); const defaultKey = defaultModel && params.defaultProvider ? modelKey(params.defaultProvider, defaultModel) : void 0; const catalogKeys = new Set(params.catalog.map((entry) => modelKey(entry.provider, entry.id))); if (allowAny) { if (defaultKey) catalogKeys.add(defaultKey); return { allowAny: true, allowedCatalog: params.catalog, allowedKeys: catalogKeys }; } const allowedKeys = /* @__PURE__ */ new Set(); const configuredProviders = params.cfg.models?.providers ?? {}; for (const raw of rawAllowlist) { const parsed = parseModelRef(String(raw), params.defaultProvider); if (!parsed) continue; const key = modelKey(parsed.provider, parsed.model); const providerKey = normalizeProviderId(parsed.provider); if (isCliProvider(parsed.provider, params.cfg)) allowed