recoder-shared
Shared types, utilities, and configurations for Recoder
/**
* Other provider types and constants for recoder.xyz
*/
import type { ModelInfo } from './api';
export type ChutesModelId = "gpt-4o" | "gpt-4o-mini" | "gpt-4-turbo" | "claude-3-5-sonnet-20241022" | "claude-3-5-haiku-20241022";
export declare const chutesDefaultModelId: ChutesModelId;
export declare const DEEP_SEEK_DEFAULT_TEMPERATURE = 0.7;
export declare const chutesModels: Record<ChutesModelId, ModelInfo>;
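// Usage example (not part of the generated declarations): a typed lookup that
// falls back to the default Chutes model when an arbitrary string id is not in
// the catalog. Importing from the package root assumes these symbols are
// re-exported there.
import { chutesModels, chutesDefaultModelId, type ChutesModelId } from "recoder-shared";

function resolveChutesModel(id?: string): ChutesModelId {
    // A string is a valid id only if it is a key of the generated model record.
    return id && id in chutesModels ? (id as ChutesModelId) : chutesDefaultModelId;
}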
export type ClaudeCodeModelId = "claude-3-5-sonnet-20241022" | "claude-3-5-haiku-20241022";
export declare const claudeCodeDefaultModelId = "claude-3-5-sonnet-20241022";
export declare const claudeCodeModels: Record<ClaudeCodeModelId, ModelInfo>;
export declare function getClaudeCodeModelId(modelId?: string): ClaudeCodeModelId;
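// Usage example (illustrative): getClaudeCodeModelId is the declared way to
// coerce a caller-supplied id into a valid ClaudeCodeModelId, e.g. when
// normalizing persisted settings before building a request. The `settings`
// object here is hypothetical.
import { getClaudeCodeModelId, claudeCodeModels } from "recoder-shared";

const settings: { apiModelId?: string } = { apiModelId: "claude-3-5-haiku-20241022" };
const claudeModelId = getClaudeCodeModelId(settings.apiModelId);
const claudeModelInfo = claudeCodeModels[claudeModelId];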
export type DeepSeekModelId = "deepseek-v3" | "deepseek-reasoner" | "deepseek-chat";
export declare const deepSeekDefaultModelId: DeepSeekModelId;
export declare const deepSeekModels: Record<DeepSeekModelId, ModelInfo>;
export declare const LITELLM_COMPUTER_USE_MODELS: string[];
export declare const lMStudioDefaultModelInfo: ModelInfo;
export declare const ollamaDefaultModelInfo: ModelInfo;
export declare const OPEN_ROUTER_COMPUTER_USE_MODELS: string[];
export declare const OPEN_ROUTER_REASONING_BUDGET_MODELS: string[];
export declare const OPEN_ROUTER_REQUIRED_REASONING_BUDGET_MODELS: string[];
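// Usage example (a sketch): the three OPEN_ROUTER_* lists gate per-model
// features, so a request builder can consult them before enabling computer use
// or attaching a reasoning budget. The helper name is made up.
import {
    OPEN_ROUTER_COMPUTER_USE_MODELS,
    OPEN_ROUTER_REASONING_BUDGET_MODELS,
    OPEN_ROUTER_REQUIRED_REASONING_BUDGET_MODELS,
} from "recoder-shared";

function openRouterCapabilities(modelId: string) {
    return {
        computerUse: OPEN_ROUTER_COMPUTER_USE_MODELS.includes(modelId),
        supportsReasoningBudget: OPEN_ROUTER_REASONING_BUDGET_MODELS.includes(modelId),
        requiresReasoningBudget: OPEN_ROUTER_REQUIRED_REASONING_BUDGET_MODELS.includes(modelId),
    };
}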
export declare function isModelParameter(key: string): boolean;
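// Usage example (hypothetical): isModelParameter is declared as a plain string
// predicate, which suggests filtering a mixed options bag down to the keys a
// provider accepts as model parameters. The `options` object is invented.
import { isModelParameter } from "recoder-shared";

const options: Record<string, unknown> = { temperature: 0.2, max_tokens: 1024, debug: true };
const modelParams = Object.fromEntries(
    Object.entries(options).filter(([key]) => isModelParameter(key)),
);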
export declare const GLAMA_DEFAULT_TEMPERATURE = 0.7;
export type GlamaModelId = "glama" | "glama-default";
export declare const glamaDefaultModelId: GlamaModelId;
export declare const glamaDefaultModelInfo: {
maxTokens: number;
contextWindow: number;
supportsImages: boolean;
supportsPromptCache: boolean;
description: string;
};
export declare const glamaModels: {
"glama-default": {
maxTokens: number;
contextWindow: number;
supportsImages: boolean;
supportsPromptCache: boolean;
description: string;
};
};
export type GroqModelId = "llama-3.1-70b-versatile" | "llama-3.1-8b-instant" | "llama-3.2-90b-text-preview" | "mixtral-8x7b-32768" | "gemma2-9b-it";
export declare const groqDefaultModelId: GroqModelId;
export declare const groqModels: Record<GroqModelId, ModelInfo>;
export declare const MISTRAL_DEFAULT_TEMPERATURE = 0.7;
export type MistralModelId = "mistral-large-latest" | "mistral-medium" | "mistral-small" | "codestral-latest";
export declare const mistralDefaultModelId: MistralModelId;
export declare const mistralModels: Record<MistralModelId, ModelInfo>;
export declare const litellmDefaultModelId = "gpt-3.5-turbo";
export declare const litellmDefaultModelInfo: {
maxTokens: number;
contextWindow: number;
supportsImages: boolean;
supportsPromptCache: boolean;
description: string;
};
export declare const LMSTUDIO_DEFAULT_TEMPERATURE = 0.7;
export declare const openAiModelInfoSaneDefaults: {
maxTokens: number;
contextWindow: number;
supportsImages: boolean;
supportsPromptCache: boolean;
description: string;
};
export declare const kilocodeDefaultModelId = "gpt-4";
export declare const OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0.7;
export type OpenAiNativeModelId = "gpt-4" | "gpt-4-turbo" | "gpt-4o" | "gpt-4o-mini" | "gpt-3.5-turbo";
export declare const openAiNativeDefaultModelId: OpenAiNativeModelId;
export declare const openAiNativeModels: Record<OpenAiNativeModelId, ModelInfo>;
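// Usage example (illustrative): the Record<OpenAiNativeModelId, ModelInfo>
// shape makes the catalog easy to enumerate, e.g. to populate a model picker.
// The label format is an assumption.
import { openAiNativeModels, type OpenAiNativeModelId } from "recoder-shared";

const pickerItems = (Object.keys(openAiNativeModels) as OpenAiNativeModelId[]).map((id) => ({
    id,
    label: `${id} (${openAiNativeModels[id].contextWindow}-token context)`,
}));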
export declare const azureOpenAiDefaultApiVersion = "2024-02-15-preview";
export declare const OPENAI_AZURE_AI_INFERENCE_PATH = "/v1/chat/completions";
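// Usage example (a sketch; the resource name is a placeholder): combining the
// declared inference path with the default api-version to build an Azure
// chat-completions URL.
import { azureOpenAiDefaultApiVersion, OPENAI_AZURE_AI_INFERENCE_PATH } from "recoder-shared";

const azureBaseUrl = "https://my-resource.openai.azure.com"; // hypothetical endpoint
const azureUrl = `${azureBaseUrl}${OPENAI_AZURE_AI_INFERENCE_PATH}?api-version=${azureOpenAiDefaultApiVersion}`;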
export declare const openRouterDefaultModelId = "openai/gpt-4o";
export declare const openRouterDefaultModelInfo: {
maxTokens: number;
contextWindow: number;
supportsImages: boolean;
supportsPromptCache: boolean;
description: string;
};
export declare const OPENROUTER_DEFAULT_PROVIDER_NAME = "OpenRouter";
export declare const OPEN_ROUTER_PROMPT_CACHING_MODELS: string[];
export declare const requestyDefaultModelId = "claude-3-5-sonnet-20241022";
export declare const requestyDefaultModelInfo: {
maxTokens: number;
contextWindow: number;
supportsImages: boolean;
supportsPromptCache: boolean;
description: string;
};
export declare const unboundDefaultModelId = "gpt-4o";
export declare const unboundDefaultModelInfo: {
maxTokens: number;
contextWindow: number;
supportsImages: boolean;
supportsPromptCache: boolean;
description: string;
};
export type XAIModelId = "grok-4" | "grok-3" | "grok-3-fast" | "grok-3-mini" | "grok-3-mini-fast" | "grok-2-1212" | "grok-2-vision-1212";
export declare const xaiDefaultModelId: XAIModelId;
export declare const xaiModels: Record<XAIModelId, ModelInfo>;
export type GeminiCliModelId = "gemini-1.5-pro-latest" | "gemini-1.5-flash-latest";
export declare const geminiCliDefaultModelId: GeminiCliModelId;
export declare const geminiCliModels: Record<GeminiCliModelId, ModelInfo>;
export type GeminiModelId = "gemini-1.5-pro" | "gemini-1.5-flash";
export declare const geminiDefaultModelId: GeminiModelId;
export declare const geminiModels: Record<GeminiModelId, ModelInfo>;
export declare const GEMINI_DEFAULT_TEMPERATURE = 0.7;
export declare const GEMINI_DEFAULT_MAX_TOKENS = 4096;
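// Usage example (a sketch): the Gemini defaults slot naturally into a request
// config; the generationConfig shape mirrors Google's API but is assumed here.
import {
    geminiDefaultModelId,
    GEMINI_DEFAULT_TEMPERATURE,
    GEMINI_DEFAULT_MAX_TOKENS,
} from "recoder-shared";

const geminiRequest = {
    model: geminiDefaultModelId,
    generationConfig: {
        temperature: GEMINI_DEFAULT_TEMPERATURE,
        maxOutputTokens: GEMINI_DEFAULT_MAX_TOKENS,
    },
};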
export declare const FIREWORKS_DEFAULT_TEMPERATURE = 0.7;
export declare const FIREWORKS_DEFAULT_MAX_TOKENS = 4096;
export declare const HUMAN_RELAY_DEFAULT_TEMPERATURE = 0.7;
export declare const VSCODE_LM_DEFAULT_TEMPERATURE = 0.7;
export declare const LITELLM_DEFAULT_TEMPERATURE = 0.7;
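// Usage example (hypothetical helper): every provider here ships the same 0.7
// default, so a single lookup table keeps request builders consistent. The
// provider keys are invented; only the imported constants come from the package.
import {
    DEEP_SEEK_DEFAULT_TEMPERATURE,
    MISTRAL_DEFAULT_TEMPERATURE,
    GEMINI_DEFAULT_TEMPERATURE,
    FIREWORKS_DEFAULT_TEMPERATURE,
    LITELLM_DEFAULT_TEMPERATURE,
} from "recoder-shared";

const DEFAULT_TEMPERATURES: Record<string, number> = {
    deepseek: DEEP_SEEK_DEFAULT_TEMPERATURE,
    mistral: MISTRAL_DEFAULT_TEMPERATURE,
    gemini: GEMINI_DEFAULT_TEMPERATURE,
    fireworks: FIREWORKS_DEFAULT_TEMPERATURE,
    litellm: LITELLM_DEFAULT_TEMPERATURE,
};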
//# sourceMappingURL=other-provider-types.d.ts.map