@ai-sdk/deepinfra
Version:
The **[DeepInfra provider](https://ai-sdk.dev/providers/ai-sdk-providers/deepinfra)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the DeepInfra API, giving you access to models like Llama 3, Mixtral, and other state-of-the-art models.
72 lines (64 loc) • 5.44 kB
text/typescript
import { ProviderV3, LanguageModelV3, ImageModelV3, EmbeddingModelV3 } from '@ai-sdk/provider';
import { FetchFunction } from '@ai-sdk/provider-utils';
export { OpenAICompatibleErrorData as DeepInfraErrorData } from '@ai-sdk/openai-compatible';
/**
 * Known chat model IDs accepted by the DeepInfra API.
 *
 * The trailing `(string & {})` member keeps the type open: any other
 * string is also accepted as a model ID, while editors still offer
 * autocompletion for the listed IDs.
 */
type DeepInfraChatModelId = '01-ai/Yi-34B-Chat' | 'Austism/chronos-hermes-13b-v2' | 'bigcode/starcoder2-15b-instruct-v0.1' | 'bigcode/starcoder2-15b' | 'codellama/CodeLlama-34b-Instruct-hf' | 'codellama/CodeLlama-70b-Instruct-hf' | 'cognitivecomputations/dolphin-2.6-mixtral-8x7b' | 'cognitivecomputations/dolphin-2.9.1-llama-3-70b' | 'databricks/dbrx-instruct' | 'deepinfra/airoboros-70b' | 'deepseek-ai/DeepSeek-V3' | 'google/codegemma-7b-it' | 'google/gemma-1.1-7b-it' | 'google/gemma-2-27b-it' | 'google/gemma-2-9b-it' | 'Gryphe/MythoMax-L2-13b-turbo' | 'Gryphe/MythoMax-L2-13b' | 'HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1' | 'KoboldAI/LLaMA2-13B-Tiefighter' | 'lizpreciatior/lzlv_70b_fp16_hf' | 'mattshumer/Reflection-Llama-3.1-70B' | 'meta-llama/Llama-2-13b-chat-hf' | 'meta-llama/Llama-2-70b-chat-hf' | 'meta-llama/Llama-2-7b-chat-hf' | 'meta-llama/Llama-3.2-11B-Vision-Instruct' | 'meta-llama/Llama-3.2-1B-Instruct' | 'meta-llama/Llama-3.2-3B-Instruct' | 'meta-llama/Llama-3.2-90B-Vision-Instruct' | 'meta-llama/Llama-3.3-70B-Instruct-Turbo' | 'meta-llama/Llama-3.3-70B-Instruct' | 'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8' | 'meta-llama/Llama-4-Scout-17B-16E-Instruct' | 'meta-llama/Meta-Llama-3-70B-Instruct' | 'meta-llama/Meta-Llama-3-8B-Instruct' | 'meta-llama/Meta-Llama-3.1-405B-Instruct' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-8B-Instruct' | 'microsoft/Phi-3-medium-4k-instruct' | 'microsoft/WizardLM-2-7B' | 'microsoft/WizardLM-2-8x22B' | 'mistralai/Mistral-7B-Instruct-v0.1' | 'mistralai/Mistral-7B-Instruct-v0.2' | 'mistralai/Mistral-7B-Instruct-v0.3' | 'mistralai/Mistral-Nemo-Instruct-2407' | 'mistralai/Mixtral-8x22B-Instruct-v0.1' | 'mistralai/Mixtral-8x22B-v0.1' | 'mistralai/Mixtral-8x7B-Instruct-v0.1' | 'NousResearch/Hermes-3-Llama-3.1-405B' | 'nvidia/Llama-3.1-Nemotron-70B-Instruct' | 'nvidia/Nemotron-4-340B-Instruct' | 
'openbmb/MiniCPM-Llama3-V-2_5' | 'openchat/openchat_3.5' | 'openchat/openchat-3.6-8b' | 'Phind/Phind-CodeLlama-34B-v2' | 'Qwen/Qwen2-72B-Instruct' | 'Qwen/Qwen2-7B-Instruct' | 'Qwen/Qwen2.5-72B-Instruct' | 'Qwen/Qwen2.5-7B-Instruct' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'Qwen/Qwen2.5-Coder-7B' | 'Qwen/QwQ-32B-Preview' | 'Sao10K/L3-70B-Euryale-v2.1' | 'Sao10K/L3-8B-Lunaris-v1' | 'Sao10K/L3.1-70B-Euryale-v2.2' | (string & {});
/**
 * Known embedding model IDs accepted by the DeepInfra API.
 *
 * The trailing `(string & {})` member keeps the type open to arbitrary
 * model ID strings while preserving autocompletion for the listed IDs.
 */
type DeepInfraEmbeddingModelId = 'BAAI/bge-base-en-v1.5' | 'BAAI/bge-large-en-v1.5' | 'BAAI/bge-m3' | 'intfloat/e5-base-v2' | 'intfloat/e5-large-v2' | 'intfloat/multilingual-e5-large' | 'sentence-transformers/all-MiniLM-L12-v2' | 'sentence-transformers/all-MiniLM-L6-v2' | 'sentence-transformers/all-mpnet-base-v2' | 'sentence-transformers/clip-ViT-B-32' | 'sentence-transformers/clip-ViT-B-32-multilingual-v1' | 'sentence-transformers/multi-qa-mpnet-base-dot-v1' | 'sentence-transformers/paraphrase-MiniLM-L6-v2' | 'shibing624/text2vec-base-chinese' | 'thenlper/gte-base' | 'thenlper/gte-large' | (string & {});
/**
 * Model IDs usable with the completion API. DeepInfra exposes the same
 * model catalog for completion as for chat, so this is an alias of
 * {@link DeepInfraChatModelId}.
 */
type DeepInfraCompletionModelId = DeepInfraChatModelId;
/**
 * Known image generation model IDs accepted by the DeepInfra API.
 *
 * The trailing `(string & {})` member keeps the type open to arbitrary
 * model ID strings while preserving autocompletion for the listed IDs.
 */
type DeepInfraImageModelId = 'stabilityai/sd3.5' | 'black-forest-labs/FLUX-1.1-pro' | 'black-forest-labs/FLUX-1-schnell' | 'black-forest-labs/FLUX-1-dev' | 'black-forest-labs/FLUX-pro' | 'black-forest-labs/FLUX.1-Kontext-dev' | 'black-forest-labs/FLUX.1-Kontext-pro' | 'stabilityai/sd3.5-medium' | 'stabilityai/sdxl-turbo' | (string & {});
/**
 * Configuration options for {@link createDeepInfra}.
 * All fields are optional; omitted fields fall back to provider defaults
 * (defaults are defined in the implementation, not visible here).
 */
interface DeepInfraProviderSettings {
    /**
  DeepInfra API key used to authenticate requests.
  NOTE(review): presumably falls back to an environment variable when
  omitted — confirm against the implementation.
  */
    apiKey?: string;
    /**
  Base URL for the API calls. Overrides the provider's default endpoint.
  */
    baseURL?: string;
    /**
  Custom headers merged into every request.
  */
    headers?: Record<string, string>;
    /**
  Custom fetch implementation. You can use it as a middleware to intercept requests,
  or to provide a custom fetch implementation for e.g. testing.
  */
    fetch?: FetchFunction;
}
/**
 * DeepInfra provider instance. Callable directly to create a language
 * model, with named factory methods for each model category.
 */
interface DeepInfraProvider extends ProviderV3 {
    /**
  Creates a language model for text generation (shorthand for
  {@link languageModel}).
  */
    (modelId: DeepInfraChatModelId): LanguageModelV3;
    /**
  Creates a chat model for text generation.
  */
    chatModel(modelId: DeepInfraChatModelId): LanguageModelV3;
    /**
  Creates a model for image generation (alias of {@link imageModel}).
  */
    image(modelId: DeepInfraImageModelId): ImageModelV3;
    /**
  Creates a model for image generation.
  */
    imageModel(modelId: DeepInfraImageModelId): ImageModelV3;
    /**
  Creates a language model for text generation.
  */
    languageModel(modelId: DeepInfraChatModelId): LanguageModelV3;
    /**
  Creates a completion model for text generation.
  */
    completionModel(modelId: DeepInfraCompletionModelId): LanguageModelV3;
    /**
  Creates an embedding model for generating text embeddings.
  */
    embeddingModel(modelId: DeepInfraEmbeddingModelId): EmbeddingModelV3;
    /**
  * @deprecated Use `embeddingModel` instead.
  */
    textEmbeddingModel(modelId: DeepInfraEmbeddingModelId): EmbeddingModelV3;
}
/**
 * Creates a DeepInfra provider instance with the given settings.
 *
 * @param options - Optional configuration (API key, base URL, headers, fetch).
 * @returns A configured {@link DeepInfraProvider}.
 */
declare function createDeepInfra(options?: DeepInfraProviderSettings): DeepInfraProvider;
/**
 * Default DeepInfra provider instance created with default settings.
 */
declare const deepinfra: DeepInfraProvider;
/**
 * Version string of this package.
 */
declare const VERSION: string;
export { type DeepInfraProvider, type DeepInfraProviderSettings, VERSION, createDeepInfra, deepinfra };