rawi

Rawi (راوي) is the developer-friendly AI CLI that brings the power of 11 major AI providers directly to your terminal, with seamless shell integration, persistent conversations, and 200+ specialized prompt templates.

{"version":3,"sources":["/home/mkabumattar/work/withrawi/rawi/dist/chunk-GBWNCRXL.cjs","../src/core/providers/ollama.provider.ts"],"names":["ollamaModelIds","ollamaModels","name","ollamaProvider","streamWithOllama","credentials","prompt","ollamaSettings","createOllama","result","streamText","error"],"mappings":"AAAA;AACA,wDAA+C,wDAAyC,wDAAyC,wBCA1F,IAwBjCA,CAAAA,CAAiBA,mBAAAA,CAEVC,CAAAA,aAA4BD,CAAAA,CAAe,GAAA,CAAKE,CAAAA,EAAAA,CAAU,CACrE,IAAA,CAAAA,CAAAA,CACA,WAAA,CAAaA,CACf,CAAA,CAAE,CAAA,CAEWC,CAAAA,aAAiB,CAC5B,IAAA,CAAM,QAAA,CACN,WAAA,CAAa,kBAAA,CACb,MAAA,CAAQF,CACV,CAAA,CAEaG,CAAAA,aAAmB,KAAA,CAC9BC,CAAAA,CACAC,CAAAA,CAAAA,EAC+B,CAC/B,GAAI,CACF,IAAMC,CAAAA,CAAsC,CAAC,CAAA,CAG3CF,CAAAA,CAAY,gBAAA,EACZ,SAAA,GAAaA,CAAAA,CAAY,gBAAA,EAAA,CAEzBE,CAAAA,CAAe,OAAA,CAAUF,CAAAA,CAAY,gBAAA,CAAiB,OAAA,CAAA,CAGxD,IAAMF,CAAAA,CAAiBK,iCAAAA,MACrB,CAAO,IAAA,CAAKD,CAAc,CAAA,CAAE,MAAA,CAAS,CAAA,CAAIA,CAAAA,CAAiB,KAAA,CAC5D,CAAA,CAEME,CAAAA,CAASC,4BAAAA,CACb,KAAA,CAAOP,CAAAA,CAAeE,CAAAA,CAAY,KAAK,CAAA,CACvC,MAAA,CAAAC,CAAAA,CACA,WAAA,CAAaD,CAAAA,CAAY,WAAA,EAAe,EAAA,CACxC,eAAA,CAAiBA,CAAAA,CAAY,SAAA,EAAa,IAC5C,CAAC,CAAA,CAED,MAAO,CACL,UAAA,CAAYI,CAAAA,CAAO,UAAA,CACnB,YAAA,CAAcA,CAAAA,CAAO,IACvB,CACF,CAAA,KAAA,CAASE,CAAAA,CAAO,CACd,MAAM,IAAI,KAAA,CACR,CAAA,oCAAA,EACEA,EAAAA,WAAiB,KAAA,CAAQA,CAAAA,CAAM,OAAA,CAAU,MAAA,CAAOA,CAAK,CACvD,CAAA,CAAA;ADrEkrB","file":"/home/mkabumattar/work/withrawi/rawi/dist/chunk-GBWNCRXL.cjs","sourcesContent":[null,"import type {ModelMessage} from 'ai';\nimport {generateText, streamText} from 'ai';\nimport {\n ollamaModelIds as _ollamaModelIds,\n createOllama,\n ollama,\n} from '../../libs/providers/ollama/index.js';\nimport {parseCommandFromResponse} from '../exec/parser.js';\nimport type {\n LooseToStrict,\n ModelInfo,\n RawiCredentials,\n StreamingResponse,\n} from '../shared/index.js';\nimport type {\n ChatCredentials,\n ChatProvider,\n ChatStreamOptions,\n ExecGenerationOptions,\n ExecGenerationResult,\n} from './types.js';\n\ntype LooseOllamaModelId = Parameters<typeof ollama>[0];\nexport type OllamaModelId = LooseToStrict<LooseOllamaModelId>;\n\nconst ollamaModelIds = _ollamaModelIds as readonly OllamaModelId[];\n\nexport const ollamaModels: ModelInfo[] = ollamaModelIds.map((name) => ({\n name,\n displayName: name,\n}));\n\nexport const ollamaProvider = {\n name: 'ollama' as const,\n displayName: '🟢 Ollama',\n models: ollamaModels,\n} as const;\n\nexport const streamWithOllama = async (\n credentials: RawiCredentials,\n prompt: string,\n): Promise<StreamingResponse> => {\n try {\n const ollamaSettings: Record<string, any> = {};\n\n if (\n credentials.providerSettings &&\n 'baseURL' in credentials.providerSettings\n ) {\n ollamaSettings.baseURL = credentials.providerSettings.baseURL;\n }\n\n const ollamaProvider = createOllama(\n Object.keys(ollamaSettings).length > 0 ? ollamaSettings : undefined,\n );\n\n const result = streamText({\n model: ollamaProvider(credentials.model),\n prompt,\n temperature: credentials.temperature || 0.7,\n maxOutputTokens: credentials.maxTokens || 2048,\n });\n\n return {\n textStream: result.textStream,\n fullResponse: result.text,\n };\n } catch (error) {\n throw new Error(\n `Error calling Ollama streaming API: ${\n error instanceof Error ? 
error.message : String(error)\n }`,\n );\n }\n};\n\nexport const ollamaChatProvider: ChatProvider = {\n name: 'ollama',\n displayName: '🟢 Ollama',\n\n async streamChat(\n credentials: ChatCredentials,\n messages: ModelMessage[],\n options: ChatStreamOptions = {},\n ): Promise<AsyncIterable<string>> {\n const ollamaSettings: Record<string, any> = {};\n\n if (\n credentials.providerSettings &&\n 'baseURL' in credentials.providerSettings\n ) {\n ollamaSettings.baseURL = credentials.providerSettings.baseURL;\n }\n\n const result = streamText({\n model: ollama(credentials.model, ollamaSettings),\n messages,\n temperature: credentials.temperature || options.temperature || 0.7,\n maxOutputTokens: credentials.maxTokens || options.maxTokens || 2048,\n });\n\n return result.textStream;\n },\n};\n\nexport const generateWithOllama = async (\n options: ExecGenerationOptions,\n): Promise<ExecGenerationResult> => {\n const startTime = Date.now();\n\n try {\n const ollamaSettings: Record<string, any> = {};\n\n if (\n options.credentials.providerSettings &&\n 'baseURL' in options.credentials.providerSettings\n ) {\n ollamaSettings.baseURL = options.credentials.providerSettings.baseURL;\n }\n\n const ollamaProvider = createOllama(\n Object.keys(ollamaSettings).length > 0 ? ollamaSettings : undefined,\n );\n\n const result = await generateText({\n model: ollamaProvider(options.credentials.model),\n system: options.systemPrompt,\n prompt: options.userPrompt,\n });\n\n const generationTime = Date.now() - startTime;\n\n const command = parseCommandFromResponse(result.text);\n\n return {\n command,\n generationTime,\n };\n } catch (error) {\n throw new Error(`Ollama exec generation failed: ${error}`);\n }\n};\n"]}
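
For reference, a minimal consumption sketch follows. It is not part of the package: the credentials object only fills in the fields the code above actually reads (model, temperature, maxTokens, providerSettings.baseURL); whatever else the real RawiCredentials type requires is unknown here, hence the cast, and the model id and base URL are placeholders for whatever a local Ollama instance serves.

// Hypothetical usage of streamWithOllama (sketch, not package code).
import {streamWithOllama} from './ollama.provider.js';

const credentials = {
  model: 'llama3.2', // assumed: any model already pulled into local Ollama
  temperature: 0.2,
  maxTokens: 1024,
  providerSettings: {baseURL: 'http://localhost:11434/api'}, // optional override
};

const {textStream, fullResponse} = await streamWithOllama(
  // Cast because the full RawiCredentials shape is not visible in this file.
  credentials as Parameters<typeof streamWithOllama>[0],
  'Summarize what a source map is in two sentences.',
);

for await (const chunk of textStream) {
  process.stdout.write(chunk); // chunks arrive incrementally
}
console.log('\ncomplete length:', (await fullResponse).length);

generateWithOllama follows the same pattern for exec mode, returning the parsed command and the generation time instead of a stream.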