rawi

Rawi (راوي) is a developer-friendly AI CLI that brings 11 major AI providers directly to your terminal, with seamless shell integration, persistent conversations, and 200+ specialized prompt templates. Below are the package's provider-index TypeScript declarations: re-exports for each provider module, the provider registries, and the lookup helpers that make up its provider API.

export { AnthropicModelId, anthropicChatProvider, anthropicModels, anthropicProvider, generateWithAnthropic, streamWithAnthropic } from './anthropic.provider.js';
export { AzureModelId, azureChatProvider, azureModels, azureProvider, generateWithAzure, streamWithAzure } from './azure.provider.js';
export { BedrockModelId, bedrockChatProvider, bedrockModels, bedrockProvider, generateWithBedrock, streamWithBedrock } from './bedrock.provider.js';
export { CerebrasModelId, cerebrasChatProvider, cerebrasModelIds, cerebrasModels, cerebrasProvider, generateWithCerebras, streamWithCerebras } from './cerebras.provider.js';
export { DeepSeekModelId, deepSeekModelIds, deepSeekModels, deepSeekProvider, deepseekChatProvider, generateWithDeepSeek, streamWithDeepSeek } from './deepseek.provider.js';
export { GoogleModelId, generateWithGoogle, googleChatProvider, googleModels, googleProvider, streamWithGoogle } from './google.provider.js';
export { LMStudioModelId, generateWithLMStudio, lmstudioChatProvider, lmstudioModels, lmstudioProvider, streamWithLMStudio } from './lmstudio.provider.js';
export { MistralModelId, generateWithMistral, mistralChatProvider, mistralModelIds, mistralModels, mistralProvider, streamWithMistral } from './mistral.provider.js';
export { OllamaModelId, generateWithOllama, ollamaChatProvider, ollamaModels, ollamaProvider, streamWithOllama } from './ollama.provider.js';
export { OpenAIModelId, generateWithOpenAI, openaiChatProvider, openaiModels, openaiProvider, streamWithOpenAI } from './openai.provider.js';
import { ChatProvider } from './types.js';
export { ChatCredentials, ChatStreamOptions, ExecGenerationOptions, ExecGenerationResult } from './types.js';
export { StreamResponseOptions, generateWithProvider, processQuery, streamResponse } from './utils.js';
export { XaiModelId, generateWithXAI, streamWithXAI, xaiChatProvider, xaiModels, xaiProvider } from './xai.provider.js';
import { SupportedProvider, ProviderConfig } from '../shared/types.js';
import '@ai-sdk/anthropic';
import '@ai-sdk/azure';
import '@ai-sdk/amazon-bedrock';
import '@ai-sdk/cerebras';
import '@ai-sdk/deepseek';
import '@ai-sdk/google';
import '@ai-sdk/mistral';
import '../../libs/providers/ollama/ollama-provider.js';
import '../../libs/providers/ollama/ollama-chat-settings.js';
import '../../libs/providers/ollama/ollama-models-list.js';
import '@ai-sdk/provider';
import '../../libs/providers/ollama/ollama-embedding-settings.js';
import '@ai-sdk/openai';
import 'ai';
import '@ai-sdk/xai';

declare const askProviders: Record<SupportedProvider, ProviderConfig>;
declare const chatProviders: Record<string, ChatProvider>;
declare const execProviders: {
  readonly 'amazon-bedrock': "generateWithBedrock";
  readonly anthropic: "generateWithAnthropic";
  readonly azure: "generateWithAzure";
  readonly bedrock: "generateWithBedrock";
  readonly cerebras: "generateWithCerebras";
  readonly deepseek: "generateWithDeepSeek";
  readonly google: "generateWithGoogle";
  readonly lmstudio: "generateWithLMStudio";
  readonly mistral: "generateWithMistral";
  readonly ollama: "generateWithOllama";
  readonly openai: "generateWithOpenAI";
  readonly xai: "generateWithXAI";
};
type ExecProviderName = keyof typeof execProviders;

declare const getProvider: (name: SupportedProvider) => ProviderConfig;
declare const getAllProviders: () => ProviderConfig[];
declare const getProviderNames: () => SupportedProvider[];
declare const getChatProvider: (name: string) => ChatProvider;
declare const getChatProviderNames: () => string[];

export { ChatProvider, type ExecProviderName, askProviders, chatProviders, execProviders, getAllProviders, getChatProvider, getChatProviderNames, getProvider, getProviderNames };
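
For orientation, here is a minimal usage sketch based only on the signatures declared above. The 'rawi' import specifier is an assumption (the actual entry point or subpath that exposes these exports is not shown on this page), and the fields of ProviderConfig are not declared in this file.

// Usage sketch only: the 'rawi' import specifier is an assumption.
import {
  execProviders,
  getChatProvider,
  getProvider,
  getProviderNames,
  type ExecProviderName,
} from 'rawi';

// getProviderNames() is declared to return SupportedProvider[], so each
// name is a valid argument to getProvider().
for (const name of getProviderNames()) {
  const config = getProvider(name); // ProviderConfig (fields not declared here)
  console.log(name, config);
}

// execProviders maps each provider key to the *name* of its generate
// function as a string literal type, so fnName narrows to the literal
// "generateWithOpenAI" at compile time.
const key: ExecProviderName = 'openai';
const fnName = execProviders[key];
console.log(fnName);

// Chat providers are looked up by plain string.
const chat = getChatProvider('openai'); // ChatProvider (declared in ./types.js)

Note that the exec table declares both 'bedrock' and 'amazon-bedrock' as keys mapping to "generateWithBedrock", so either alias resolves to the same generator name.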