rawi

Rawi (راوي) is a developer-friendly AI CLI that brings 11 major AI providers directly to your terminal, with shell integration, persistent conversations, and 200+ specialized prompt templates.

{"version":3,"sources":["/home/mkabumattar/work/withrawi/rawi/dist/chunk-TCDDN5PV.cjs","../src/cli/commands/ask/actions/provider-processing.ts"],"names":["streamAIResponse","credentials","sessionId","filteredQuery","dbManager","filteringEnabled","filterTypes","options","spinnerManager"],"mappings":"AAAA;AACA,wDAAwC,wDAAyC,wDAAyC,4ECDxG,IAMLA,CAAAA,CAAmB,KAAA,CAC9BC,CAAAA,CACAC,CAAAA,CACAC,CAAAA,CACAC,CAAAA,CACAC,CAAAA,CACAC,CAAAA,CACAC,CAAAA,CAAAA,EACkB,CAClB,MAAMH,CAAAA,CAAU,UAAA,CACdF,CAAAA,CACA,MAAA,CACAC,CAAAA,CACAF,CAAAA,CAAY,QAAA,CACZA,CAAAA,CAAY,KAAA,CACZA,CAAAA,CAAY,WAAA,CACZA,CAAAA,CAAY,SACd,CAAA,CAEIM,CAAAA,CAAQ,OAAA,EACVC,mBAAAA,CAAe,KAAA,CACb,YAAA,CACA,CAAA,yBAAA,EAA4BP,CAAAA,CAAY,QAAQ,CAAA,EAAA,EAAKA,CAAAA,CAAY,KAAK,CAAA,IAAA,CAAA,CACtE,CAAC,KAAA,CAAO,MAAM,CAChB,CAAA,CAGF,OAAA,CAAQ,GAAA,CAAI,CAAA,EAAA;AAuF8C,kBAAA;AAsBlC,oDAAA;AD1IqV","file":"/home/mkabumattar/work/withrawi/rawi/dist/chunk-TCDDN5PV.cjs","sourcesContent":[null,"import chalk from 'chalk';\nimport {ContentFilter} from '../../../../core/content-filter/content-filter.js';\nimport type {DatabaseManager} from '../../../../core/database/manager.js';\nimport {processQuery} from '../../../../core/providers/utils.js';\nimport {spinnerManager} from '../../../../core/shared/spinner.js';\n\nexport const streamAIResponse = async (\n credentials: any,\n sessionId: string,\n filteredQuery: string,\n dbManager: DatabaseManager,\n filteringEnabled: boolean,\n filterTypes: string[] | undefined,\n options: any,\n): Promise<void> => {\n await dbManager.addMessage(\n sessionId,\n 'user',\n filteredQuery,\n credentials.provider,\n credentials.model,\n credentials.temperature,\n credentials.maxTokens,\n );\n\n if (options.verbose) {\n spinnerManager.start(\n 'generation',\n `Streaming response using ${credentials.provider} (${credentials.model})...`,\n {color: 'cyan'},\n );\n }\n\n console.log(`${chalk.cyan('Result:')}`);\n\n let response: string;\n let hasStartedStreaming = false;\n\n try {\n let fullResponseBuffer = '';\n const needResponseStats =\n filteringEnabled && (options.verbose || options.showFiltered);\n\n response = await processQuery(credentials, filteredQuery, {\n streaming: true,\n filtering: filteringEnabled\n ? 
{\n enabled: true,\n types: filterTypes,\n showFiltered: false,\n }\n : undefined,\n onChunk: (chunk: string) => {\n if (!hasStartedStreaming) {\n if (options.verbose) {\n spinnerManager.succeed(\n 'generation',\n 'Starting to stream response...',\n );\n }\n hasStartedStreaming = true;\n }\n\n if (needResponseStats) {\n fullResponseBuffer += chunk;\n }\n\n process.stdout.write(chunk);\n },\n onComplete: () => {\n console.log();\n\n if (needResponseStats) {\n const highlightFilter = new ContentFilter({\n enabled: true,\n types: filterTypes,\n showFiltered: false,\n highlightFiltered: options.highlightFiltered || false,\n });\n\n const fullFilterResult =\n highlightFilter.filterContent(fullResponseBuffer);\n const filterCount = fullFilterResult.filterCount;\n\n if (Object.keys(filterCount).length > 0) {\n if (options.verbose) {\n console.log(chalk.yellow('📊 Filtering Summary:'));\n console.log(chalk.yellow('───────────────────'));\n\n const totalFiltered = Object.values(filterCount).reduce(\n (sum, count) => sum + count,\n 0,\n );\n console.log(\n chalk.yellow(`Total filtered items: ${totalFiltered}`),\n );\n\n console.log(chalk.yellow('\\nBreakdown by type:'));\n for (const [type, count] of Object.entries(filterCount)) {\n const percentage = Math.round((count / totalFiltered) * 100);\n const bar = '█'.repeat(\n Math.min(20, Math.floor(percentage / 5)),\n );\n console.log(\n chalk.yellow(\n ` ${type.padEnd(15)}: ${count.toString().padStart(3)} (${percentage}%) ${bar}`,\n ),\n );\n }\n } else {\n const totalFiltered = Object.values(filterCount).reduce(\n (sum, count) => sum + count,\n 0,\n );\n console.log(\n chalk.yellow(\n `📊 Filtered ${totalFiltered} sensitive items from response`,\n ),\n );\n\n const sortedTypes = Object.entries(filterCount)\n .sort(([, countA], [, countB]) => countB - countA)\n .slice(0, 3);\n\n if (sortedTypes.length > 0) {\n const typesList = sortedTypes\n .map(([type, count]) => `${type} (${count})`)\n .join(', ');\n console.log(chalk.yellow(` Most common: ${typesList}`));\n }\n\n console.log(\n chalk.dim(' Use --verbose for detailed filtering statistics'),\n );\n }\n\n if (options.highlightFiltered && fullFilterResult.highlightedText) {\n console.log(\n chalk.yellow('\\n🔍 Highlighted sensitive content in response:'),\n );\n console.log('─────────────────────────────────────────');\n console.log(fullFilterResult.highlightedText);\n console.log('─────────────────────────────────────────');\n }\n } else if (options.verbose) {\n console.log(\n chalk.dim('✅ No sensitive information detected in response'),\n );\n }\n }\n\n if (options.verbose) {\n console.log(chalk.dim('✅ Streaming completed'));\n }\n },\n onError: (error: Error) => {\n if (options.verbose) {\n spinnerManager.fail('generation', 'Failed to generate response');\n console.error(chalk.red(`❌ ${error.message}`));\n }\n },\n });\n\n await dbManager.addMessage(\n sessionId,\n 'assistant',\n response,\n credentials.provider,\n credentials.model,\n credentials.temperature,\n credentials.maxTokens,\n );\n\n if (options.verbose) {\n spinnerManager.succeed('generation', 'Response streamed successfully!');\n }\n } catch (error) {\n if (options.verbose) {\n spinnerManager.fail('generation', 'Failed to generate response');\n console.error(\n chalk.red(\n `❌ ${error instanceof Error ? error.message : String(error)}`,\n ),\n );\n }\n response = `Unable to generate response using ${credentials.provider} (${credentials.model}). Error: ${error instanceof Error ? 
error.message : String(error)}`;\n\n await dbManager.addMessage(\n sessionId,\n 'assistant',\n response,\n credentials.provider,\n credentials.model,\n credentials.temperature,\n credentials.maxTokens,\n {error: true},\n );\n }\n};\n"]}
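The options object passed to processQuery above implies a streaming-callback contract: a streaming flag, optional filtering settings, and onChunk/onComplete/onError handlers. Below is a minimal sketch of that contract and a toy driver; the interface names (FilteringOptions, StreamingQueryOptions) and the standalone demoStream helper are assumptions inferred from this one call site, not rawi's published API.

// Sketch of the streaming-options shape inferred from the call site above.
// All names here are assumptions based on this file, not documented API.
interface FilteringOptions {
  enabled: boolean;
  types?: string[];
  showFiltered: boolean;
}

interface StreamingQueryOptions {
  streaming: boolean;
  filtering?: FilteringOptions;
  onChunk?: (chunk: string) => void; // invoked for each streamed segment
  onComplete?: () => void;           // invoked once the stream finishes
  onError?: (error: Error) => void;  // invoked if generation fails
}

// Toy driver (hypothetical) demonstrating the same callback flow that
// streamAIResponse relies on: echo chunks immediately while buffering a
// copy for post-hoc inspection, mirroring the needResponseStats path.
async function demoStream(
  query: (q: string, opts: StreamingQueryOptions) => Promise<string>,
): Promise<string> {
  let buffer = '';
  return query('Explain source maps in one sentence.', {
    streaming: true,
    onChunk: (chunk) => {
      buffer += chunk;             // accumulate for later inspection
      process.stdout.write(chunk); // echo to the terminal as it arrives
    },
    onComplete: () => console.log(`\n[stream complete: ${buffer.length} chars]`),
    onError: (err) => console.error(`[stream failed] ${err.message}`),
  });
}

Note the design choice this mirrors: streamAIResponse buffers the full response only when filtering statistics will actually be displayed (the needResponseStats guard), so the common path writes chunks straight through without holding the whole reply in memory.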