pyb-ts

PYB-CLI - Minimal AI Agent with multi-model support and CLI interface

var models_default = {
  openai: [
    { model: "gpt-4", max_tokens: 4096, max_input_tokens: 8192, max_output_tokens: 4096, input_cost_per_token: 3e-5, output_cost_per_token: 6e-5, provider: "openai", mode: "chat", supports_function_calling: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "gpt-4o", max_tokens: 16384, max_input_tokens: 128e3, max_output_tokens: 16384, input_cost_per_token: 25e-7, output_cost_per_token: 1e-5, input_cost_per_token_batches: 125e-8, output_cost_per_token_batches: 5e-6, cache_read_input_token_cost: 125e-8, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_response_schema: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "gpt-4.5-preview", max_tokens: 16384, max_input_tokens: 128e3, max_output_tokens: 16384, input_cost_per_token: 75e-6, output_cost_per_token: 15e-5, input_cost_per_token_batches: 375e-7, output_cost_per_token_batches: 75e-6, cache_read_input_token_cost: 375e-7, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_response_schema: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "gpt-4.5-preview-2025-02-27", max_tokens: 16384, max_input_tokens: 128e3, max_output_tokens: 16384, input_cost_per_token: 75e-6, output_cost_per_token: 15e-5, input_cost_per_token_batches: 375e-7, output_cost_per_token_batches: 75e-6, cache_read_input_token_cost: 375e-7, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_response_schema: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "gpt-4o-mini", max_tokens: 16384, max_input_tokens: 128e3, max_output_tokens: 16384, input_cost_per_token: 15e-8, output_cost_per_token: 6e-7, input_cost_per_token_batches: 75e-9, output_cost_per_token_batches: 3e-7, cache_read_input_token_cost: 75e-9, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_response_schema: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "gpt-4o-mini-2024-07-18", max_tokens: 16384, max_input_tokens: 128e3, max_output_tokens: 16384, input_cost_per_token: 15e-8, output_cost_per_token: 6e-7, input_cost_per_token_batches: 75e-9, output_cost_per_token_batches: 3e-7, cache_read_input_token_cost: 75e-9, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_response_schema: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "o1", max_tokens: 1e5, max_input_tokens: 2e5, max_output_tokens: 1e5, input_cost_per_token: 15e-6, output_cost_per_token: 6e-5, cache_read_input_token_cost: 75e-7, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_response_schema: true, supports_tool_choice: true, supports_reasoning_effort: true },
    { model: "o3-mini", max_tokens: 1e5, max_input_tokens: 2e5, max_output_tokens: 1e5, input_cost_per_token: 11e-7, output_cost_per_token: 44e-7, cache_read_input_token_cost: 55e-8, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: false, supports_vision: false, supports_prompt_caching: true, supports_response_schema: true, supports_tool_choice: true, supports_reasoning_effort: true },
    { model: "o3-mini-2025-01-31", max_tokens: 1e5, max_input_tokens: 2e5, max_output_tokens: 1e5, input_cost_per_token: 11e-7, output_cost_per_token: 44e-7, cache_read_input_token_cost: 55e-8, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: false, supports_vision: false, supports_prompt_caching: true, supports_response_schema: true, supports_tool_choice: true, supports_reasoning_effort: true },
    { model: "o1-2024-12-17", max_tokens: 1e5, max_input_tokens: 2e5, max_output_tokens: 1e5, input_cost_per_token: 15e-6, output_cost_per_token: 6e-5, cache_read_input_token_cost: 75e-7, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_response_schema: true, supports_tool_choice: true, supports_reasoning_effort: true },
    { model: "chatgpt-4o-latest", max_tokens: 4096, max_input_tokens: 128e3, max_output_tokens: 4096, input_cost_per_token: 5e-6, output_cost_per_token: 15e-6, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "gpt-4o-2024-05-13", max_tokens: 4096, max_input_tokens: 128e3, max_output_tokens: 4096, input_cost_per_token: 5e-6, output_cost_per_token: 15e-6, input_cost_per_token_batches: 25e-7, output_cost_per_token_batches: 75e-7, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "gpt-4o-2024-08-06", max_tokens: 16384, max_input_tokens: 128e3, max_output_tokens: 16384, input_cost_per_token: 25e-7, output_cost_per_token: 1e-5, input_cost_per_token_batches: 125e-8, output_cost_per_token_batches: 5e-6, cache_read_input_token_cost: 125e-8, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_response_schema: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "gpt-4o-2024-11-20", max_tokens: 16384, max_input_tokens: 128e3, max_output_tokens: 16384, input_cost_per_token: 25e-7, output_cost_per_token: 1e-5, input_cost_per_token_batches: 125e-8, output_cost_per_token_batches: 5e-6, cache_read_input_token_cost: 125e-8, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_response_schema: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "gpt-4-turbo", max_tokens: 4096, max_input_tokens: 128e3, max_output_tokens: 4096, input_cost_per_token: 1e-5, output_cost_per_token: 3e-5, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true },
    // GPT-5 Models
    { model: "gpt-5", max_tokens: 32768, max_input_tokens: 2e5, max_output_tokens: 32768, input_cost_per_token: 1e-5, output_cost_per_token: 5e-5, cache_read_input_token_cost: 5e-6, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true, supports_reasoning_effort: true, supports_responses_api: true, supports_custom_tools: true, supports_allowed_tools: true, supports_verbosity_control: true },
    { model: "gpt-5-mini", max_tokens: 16384, max_input_tokens: 128e3, max_output_tokens: 16384, input_cost_per_token: 1e-6, output_cost_per_token: 5e-6, cache_read_input_token_cost: 5e-7, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true, supports_reasoning_effort: true, supports_responses_api: true, supports_custom_tools: true, supports_allowed_tools: true, supports_verbosity_control: true },
    { model: "gpt-5-nano", max_tokens: 8192, max_input_tokens: 64e3, max_output_tokens: 8192, input_cost_per_token: 5e-7, output_cost_per_token: 2e-6, cache_read_input_token_cost: 25e-8, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_vision: false, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true, supports_reasoning_effort: true, supports_responses_api: true, supports_custom_tools: true, supports_allowed_tools: true, supports_verbosity_control: true },
    { model: "gpt-5-chat-latest", max_tokens: 32768, max_input_tokens: 2e5, max_output_tokens: 32768, input_cost_per_token: 1e-5, output_cost_per_token: 5e-5, cache_read_input_token_cost: 5e-6, provider: "openai", mode: "chat", supports_function_calling: true, supports_parallel_function_calling: true, supports_vision: true, supports_prompt_caching: true, supports_system_messages: true, supports_tool_choice: true, supports_reasoning_effort: true, supports_responses_api: false, supports_custom_tools: false, supports_allowed_tools: false, supports_verbosity_control: true, requires_chat_completions: true }
  ],
  mistral: [
    { model: "mistral-small", max_tokens: 8191, max_input_tokens: 32e3, max_output_tokens: 8191, input_cost_per_token: 1e-6, output_cost_per_token: 3e-6, provider: "mistral", supports_function_calling: true, mode: "chat", supports_assistant_prefill: true, supports_tool_choice: true },
    { model: "mistral-small-latest", max_tokens: 8191, max_input_tokens: 32e3, max_output_tokens: 8191, input_cost_per_token: 1e-6, output_cost_per_token: 3e-6, provider: "mistral", supports_function_calling: true, mode: "chat", supports_assistant_prefill: true, supports_tool_choice: true },
    { model: "mistral-large-latest", max_tokens: 128e3, max_input_tokens: 128e3, max_output_tokens: 128e3, input_cost_per_token: 2e-6, output_cost_per_token: 6e-6, provider: "mistral", mode: "chat", supports_function_calling: true, supports_assistant_prefill: true, supports_tool_choice: true },
    { model: "open-mixtral-8x7b", max_tokens: 8191, max_input_tokens: 32e3, max_output_tokens: 8191, input_cost_per_token: 7e-7, output_cost_per_token: 7e-7, provider: "mistral", mode: "chat", supports_function_calling: true, supports_assistant_prefill: true, supports_tool_choice: true },
    { model: "open-mixtral-8x22b", max_tokens: 8191, max_input_tokens: 65336, max_output_tokens: 8191, input_cost_per_token: 2e-6, output_cost_per_token: 6e-6, provider: "mistral", mode: "chat", supports_function_calling: true, supports_assistant_prefill: true, supports_tool_choice: true }
  ],
  deepseek: [
    { model: "deepseek-reasoner", max_tokens: 8192, max_input_tokens: 65536, max_output_tokens: 8192, input_cost_per_token: 55e-8, input_cost_per_token_cache_hit: 14e-8, output_cost_per_token: 219e-8, provider: "deepseek", mode: "chat", supports_function_calling: true, supports_assistant_prefill: true, supports_tool_choice: true, supports_prompt_caching: true },
    { model: "deepseek-chat", max_tokens: 8192, max_input_tokens: 65536, max_output_tokens: 8192, input_cost_per_token: 27e-8, input_cost_per_token_cache_hit: 7e-8, cache_read_input_token_cost: 7e-8, cache_creation_input_token_cost: 0, output_cost_per_token: 11e-7, provider: "deepseek", mode: "chat", supports_function_calling: true, supports_assistant_prefill: true, supports_tool_choice: true, supports_prompt_caching: true },
    { model: "deepseek-coder", max_tokens: 4096, max_input_tokens: 128e3, max_output_tokens: 4096, input_cost_per_token: 14e-8, input_cost_per_token_cache_hit: 14e-9, output_cost_per_token: 28e-8, provider: "deepseek", mode: "chat", supports_function_calling: true, supports_assistant_prefill: true, supports_tool_choice: true, supports_prompt_caching: true }
  ],
  xai: [
    { model: "grok-beta", max_tokens: 131072, max_input_tokens: 131072, max_output_tokens: 131072, input_cost_per_token: 5e-6, output_cost_per_token: 15e-6, provider: "xai", mode: "chat", supports_function_calling: true, supports_vision: true, supports_tool_choice: true }
  ],
  groq: [
    { model: "llama-3.3-70b-versatile", max_tokens: 8192, max_input_tokens: 128e3, max_output_tokens: 8192, input_cost_per_token: 59e-8, output_cost_per_token: 79e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama2-70b-4096", max_tokens: 4096, max_input_tokens: 4096, max_output_tokens: 4096, input_cost_per_token: 7e-7, output_cost_per_token: 8e-7, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama3-8b-8192", max_tokens: 8192, max_input_tokens: 8192, max_output_tokens: 8192, input_cost_per_token: 5e-8, output_cost_per_token: 8e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama-3.2-1b-preview", max_tokens: 8192, max_input_tokens: 8192, max_output_tokens: 8192, input_cost_per_token: 4e-8, output_cost_per_token: 4e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama-3.2-3b-preview", max_tokens: 8192, max_input_tokens: 8192, max_output_tokens: 8192, input_cost_per_token: 6e-8, output_cost_per_token: 6e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama-3.2-11b-text-preview", max_tokens: 8192, max_input_tokens: 8192, max_output_tokens: 8192, input_cost_per_token: 18e-8, output_cost_per_token: 18e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama-3.2-90b-text-preview", max_tokens: 8192, max_input_tokens: 8192, max_output_tokens: 8192, input_cost_per_token: 9e-7, output_cost_per_token: 9e-7, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama3-70b-8192", max_tokens: 8192, max_input_tokens: 8192, max_output_tokens: 8192, input_cost_per_token: 59e-8, output_cost_per_token: 79e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama-3.1-8b-instant", max_tokens: 8e3, max_input_tokens: 8e3, max_output_tokens: 8e3, input_cost_per_token: 5e-8, output_cost_per_token: 8e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama-3.1-70b-versatile", max_tokens: 8e3, max_input_tokens: 8e3, max_output_tokens: 8e3, input_cost_per_token: 59e-8, output_cost_per_token: 79e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama-3.1-405b-reasoning", max_tokens: 8e3, max_input_tokens: 8e3, max_output_tokens: 8e3, input_cost_per_token: 59e-8, output_cost_per_token: 79e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "mixtral-8x7b-32768", max_tokens: 32768, max_input_tokens: 32768, max_output_tokens: 32768, input_cost_per_token: 24e-8, output_cost_per_token: 24e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "gemma-7b-it", max_tokens: 8192, max_input_tokens: 8192, max_output_tokens: 8192, input_cost_per_token: 7e-8, output_cost_per_token: 7e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "gemma2-9b-it", max_tokens: 8192, max_input_tokens: 8192, max_output_tokens: 8192, input_cost_per_token: 2e-7, output_cost_per_token: 2e-7, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama3-groq-70b-8192-tool-use-preview", max_tokens: 8192, max_input_tokens: 8192, max_output_tokens: 8192, input_cost_per_token: 89e-8, output_cost_per_token: 89e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true },
    { model: "llama3-groq-8b-8192-tool-use-preview", max_tokens: 8192, max_input_tokens: 8192, max_output_tokens: 8192, input_cost_per_token: 19e-8, output_cost_per_token: 19e-8, provider: "groq", mode: "chat", supports_function_calling: true, supports_response_schema: true, supports_tool_choice: true }
  ],
  anthropic: [
    { model: "claude-3-5-haiku-latest", max_tokens: 8192, max_input_tokens: 2e5, max_output_tokens: 8192, input_cost_per_token: 8e-7, output_cost_per_token: 4e-6, cache_creation_input_token_cost: 125e-8, cache_read_input_token_cost: 1e-7, provider: "anthropic", mode: "chat", supports_function_calling: true, supports_vision: true, tool_use_system_prompt_tokens: 264, supports_assistant_prefill: true, supports_prompt_caching: true, supports_response_schema: true, deprecation_date: "2025-10-01", supports_tool_choice: true },
    { model: "claude-3-opus-latest", max_tokens: 4096, max_input_tokens: 2e5, max_output_tokens: 4096, input_cost_per_token: 15e-6, output_cost_per_token: 75e-6, cache_creation_input_token_cost: 1875e-8, cache_read_input_token_cost: 15e-7, provider: "anthropic", mode: "chat", supports_function_calling: true, supports_vision: true, tool_use_system_prompt_tokens: 395, supports_assistant_prefill: true, supports_prompt_caching: true, supports_response_schema: true, deprecation_date: "2025-03-01", supports_tool_choice: true },
    { model: "claude-3-7-sonnet-latest", max_tokens: 8192, max_input_tokens: 2e5, max_output_tokens: 8192, input_cost_per_token: 3e-6, output_cost_per_token: 15e-6, cache_creation_input_token_cost: 375e-8, cache_read_input_token_cost: 3e-7, provider: "anthropic", mode: "chat", supports_function_calling: true, supports_vision: true, tool_use_system_prompt_tokens: 159, supports_assistant_prefill: true, supports_prompt_caching: true, supports_response_schema: true, deprecation_date: "2025-06-01", supports_tool_choice: true }
  ],
  gemini: [
    { model: "gemini-2.0-flash", max_tokens: 8192, max_input_tokens: 1048576, max_output_tokens: 8192, max_images_per_prompt: 3e3, max_videos_per_prompt: 10, max_video_length: 1, max_audio_length_hours: 8.4, max_audio_per_prompt: 1, max_pdf_size_mb: 30, input_cost_per_audio_token: 7e-7, input_cost_per_token: 1e-7, output_cost_per_token: 4e-7, provider: "gemini", mode: "chat", rpm: 1e4, tpm: 1e7, supports_system_messages: true, supports_function_calling: true, supports_vision: true, supports_response_schema: true, supports_audio_output: true, supports_tool_choice: true, source: "https://ai.google.dev/pricing#2_0flash" },
    { model: "gemini-2.0-flash-lite", max_tokens: 8192, max_input_tokens: 1048576, max_output_tokens: 8192, max_images_per_prompt: 3e3, max_videos_per_prompt: 10, max_video_length: 1, max_audio_length_hours: 8.4, max_audio_per_prompt: 1, max_pdf_size_mb: 30, input_cost_per_audio_token: 75e-9, input_cost_per_token: 75e-9, output_cost_per_token: 3e-7, provider: "gemini", mode: "chat", rpm: 6e4, tpm: 1e7, supports_system_messages: true, supports_function_calling: true, supports_vision: true, supports_response_schema: true, supports_audio_output: false, supports_tool_choice: true, source: "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash-lite" },
    { model: "gemini-2.0-flash-thinking-exp", max_tokens: 8192, max_input_tokens: 1048576, max_output_tokens: 8192, max_images_per_prompt: 3e3, max_videos_per_prompt: 10, max_video_length: 1, max_audio_length_hours: 8.4, max_audio_per_prompt: 1, max_pdf_size_mb: 30, input_cost_per_image: 0, input_cost_per_video_per_second: 0, input_cost_per_audio_per_second: 0, input_cost_per_token: 0, input_cost_per_character: 0, input_cost_per_token_above_128k_tokens: 0, input_cost_per_character_above_128k_tokens: 0, input_cost_per_image_above_128k_tokens: 0, input_cost_per_video_per_second_above_128k_tokens: 0, input_cost_per_audio_per_second_above_128k_tokens: 0, output_cost_per_token: 0, output_cost_per_character: 0, output_cost_per_token_above_128k_tokens: 0, output_cost_per_character_above_128k_tokens: 0, provider: "gemini", mode: "chat", supports_system_messages: true, supports_function_calling: true, supports_vision: true, supports_response_schema: true, supports_audio_output: true, tpm: 4e6, rpm: 10, source: "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash", supports_tool_choice: true }
  ],
  kimi: [
    { model: "kimi-k2-0711-preview", max_tokens: 16384, max_input_tokens: 128e3, max_output_tokens: 16384, input_cost_per_token: 3e-6, output_cost_per_token: 15e-6, provider: "kimi", mode: "chat", supports_function_calling: true, supports_system_messages: true, supports_tool_choice: true }
  ],
  bigdream: [
    { model: "claude-sonnet-4-20250514", max_tokens: 8192, max_input_tokens: 2e5, max_output_tokens: 8192, input_cost_per_token: 3e-6, output_cost_per_token: 15e-6, provider: "bigdream", mode: "chat", supports_function_calling: true, supports_vision: true, supports_system_messages: true, supports_tool_choice: true, supports_prompt_caching: true }
  ],
  qwen: [],
  glm: [],
  minimax: [
    { model: "abab6.5s-chat", max_tokens: 8192, max_input_tokens: 245760, max_output_tokens: 8192, input_cost_per_token: 1e-6, output_cost_per_token: 3e-6, provider: "minimax", mode: "chat", supports_function_calling: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "abab6.5g-chat", max_tokens: 8192, max_input_tokens: 245760, max_output_tokens: 8192, input_cost_per_token: 2e-6, output_cost_per_token: 6e-6, provider: "minimax", mode: "chat", supports_function_calling: true, supports_system_messages: true, supports_tool_choice: true },
    { model: "abab5.5s-chat", max_tokens: 8192, max_input_tokens: 16384, max_output_tokens: 8192, input_cost_per_token: 5e-7, output_cost_per_token: 2e-6, provider: "minimax", mode: "chat", supports_function_calling: true, supports_system_messages: true, supports_tool_choice: true }
  ],
  "baidu-qianfan": [],
  siliconflow: [],
  ollama: [],
  burncloud: []
};

const providers = {
  kimi: { name: "Kimi (Moonshot)", baseURL: "https://api.moonshot.cn/v1" },
  anthropic: { name: "Claude", baseURL: "https://api.anthropic.com" },
  burncloud: { name: "BurnCloud (All models)", baseURL: "https://ai.burncloud.com/v1" },
  deepseek: { name: "DeepSeek", baseURL: "https://api.deepseek.com" },
  qwen: { name: "Qwen (Alibaba)", baseURL: "https://dashscope.aliyuncs.com/compatible-mode/v1" },
  openai: { name: "OpenAI", baseURL: "https://api.openai.com/v1" },
  ollama: { name: "Ollama", baseURL: "http://localhost:11434/v1" },
  gemini: { name: "Gemini", baseURL: "https://generativelanguage.googleapis.com/v1beta/openai" },
  bigdream: { name: "BigDream", baseURL: "https://api-key.info" },
  opendev: { name: "OpenDev", baseURL: "https://api.openai-next.com" },
  "custom-openai": { name: "Custom OpenAI-Compatible API", baseURL: "" }, // Will be configured by user
  openrouter: { name: "OpenRouter", baseURL: "https://openrouter.ai/api/v1" },
  minimax: { name: "MiniMax", baseURL: "https://api.minimaxi.com/v1" },
  siliconflow: { name: "SiliconFlow", baseURL: "https://api.siliconflow.cn/v1" },
  glm: { name: "GLM (Zhipu AI)", baseURL: "https://open.bigmodel.cn/api/paas/v4" },
  "baidu-qianfan": { name: "Baidu Qianfan", baseURL: "https://qianfan.baidubce.com/v2" },
  mistral: { name: "Mistral", baseURL: "https://api.mistral.ai/v1" },
  xai: { name: "xAI", baseURL: "https://api.x.ai/v1" },
  groq: { name: "Groq", baseURL: "https://api.groq.com/openai/v1" },
  azure: { name: "Azure OpenAI", baseURL: "" } // Will be dynamically constructed based on resource name
};

export { models_default as default, providers };
//# sourceMappingURL=models.js.map
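
The module above is plain data: each entry carries absolute per-token USD prices (e.g. 15e-8 = $0.00000015 per input token for gpt-4o-mini) alongside capability flags, and the companion providers map supplies the base URL for each provider key. The sketch below shows one way a consumer might resolve a model and estimate a request's cost from these fields. It is a minimal illustration only, assuming the file is imported as an ES module; findModel and estimateCostUSD are hypothetical helper names, not part of the pyb-ts API.

// cost-estimate.ts — illustrative consumer sketch; findModel and
// estimateCostUSD are hypothetical helpers, not part of pyb-ts.
import models, { providers } from "./models.js";

type ModelInfo = {
  model: string;
  provider: string;
  input_cost_per_token?: number;
  output_cost_per_token?: number;
  [key: string]: unknown;
};

// The registry is keyed by provider, so a lookup by model name
// scans every provider's list until it finds a match.
function findModel(name: string): ModelInfo | undefined {
  for (const list of Object.values(models) as ModelInfo[][]) {
    const hit = list.find((m) => m.model === name);
    if (hit) return hit;
  }
  return undefined;
}

// Prices are absolute USD per token, so cost is a straight
// multiply-and-add over prompt and completion token counts.
function estimateCostUSD(name: string, promptTokens: number, completionTokens: number): number | undefined {
  const m = findModel(name);
  if (!m || m.input_cost_per_token == null || m.output_cost_per_token == null) return undefined;
  return promptTokens * m.input_cost_per_token + completionTokens * m.output_cost_per_token;
}

const info = findModel("gpt-4o-mini");
if (info) {
  // The request's base URL comes from the companion providers map.
  console.log(providers[info.provider as keyof typeof providers]?.baseURL);
  // 1000 * 15e-8 + 500 * 6e-7 = 0.00045 USD
  console.log(estimateCostUSD("gpt-4o-mini", 1000, 500));
}

By these tables, 1,000 prompt tokens plus 500 completion tokens on gpt-4o-mini come to $0.00045; the same arithmetic applies to any entry, with cache_read_input_token_cost and the *_batches fields available for cached and batch pricing where a model defines them.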