UNPKG

gemini-code-flow

Version: (not captured)

AI-powered development orchestration for Gemini CLI - adapted from Claude Code Flow by ruvnet

152 lines 5.25 kB
"use strict"; /** * Gemini Client Integration * Adapted from Claude Code Flow by ruvnet */ Object.defineProperty(exports, "__esModule", { value: true }); exports.GeminiClient = void 0; const generative_ai_1 = require("@google/generative-ai"); class GeminiClient { genAI; model; config; constructor(config) { this.config = config; // Handle authentication method const authMethod = config.authMethod || 'google-account'; if (authMethod === 'api-key') { if (!config.apiKey) { throw new Error('API key is required when using api-key authentication method'); } this.genAI = new generative_ai_1.GoogleGenerativeAI(config.apiKey); } else { // For google-account method, let Gemini CLI handle authentication // This assumes the user has already authenticated via `gemini` command const apiKey = config.apiKey || process.env.GEMINI_API_KEY || ''; if (!apiKey) { console.warn('No API key provided. Ensure you are authenticated via Google account or set GEMINI_API_KEY'); } this.genAI = new generative_ai_1.GoogleGenerativeAI(apiKey); } this.model = this.genAI.getGenerativeModel({ model: config.model || 'gemini-1.5-pro', }); } /** * Execute a prompt with the Gemini model */ async execute(prompt, mode) { try { const generationConfig = { temperature: this.getModeTemperature(mode), maxOutputTokens: this.config.maxOutputTokens || 8192, }; const result = await this.model.generateContent({ contents: [{ role: 'user', parts: [{ text: prompt }] }], generationConfig, }); const response = await result.response; return response.text(); } catch (error) { throw new Error(`Gemini execution failed: ${error instanceof Error ? error.message : 'Unknown error'}`); } } /** * Execute with multimodal input (images, PDFs, etc.) 
*/ async executeMultimodal(prompt, files, mode) { try { const parts = [{ text: prompt }]; // Add file parts for (const file of files) { parts.push({ inlineData: { mimeType: file.mimeType, data: file.data.toString('base64'), }, }); } const generationConfig = { temperature: this.getModeTemperature(mode), maxOutputTokens: this.config.maxOutputTokens || 8192, }; const result = await this.model.generateContent({ contents: [{ role: 'user', parts }], generationConfig, }); const response = await result.response; return response.text(); } catch (error) { throw new Error(`Gemini multimodal execution failed: ${error instanceof Error ? error.message : 'Unknown error'}`); } } /** * Stream response for real-time output */ async *streamExecute(prompt, mode) { try { const generationConfig = { temperature: this.getModeTemperature(mode), maxOutputTokens: this.config.maxOutputTokens || 8192, }; const result = await this.model.generateContentStream({ contents: [{ role: 'user', parts: [{ text: prompt }] }], generationConfig, }); for await (const chunk of result.stream) { const chunkText = chunk.text(); if (chunkText) { yield chunkText; } } } catch (error) { throw new Error(`Gemini stream execution failed: ${error instanceof Error ? error.message : 'Unknown error'}`); } } /** * Get temperature setting for specific mode */ getModeTemperature(mode) { const modeTemperatures = { architect: 0.7, coder: 0.3, tester: 0.2, debugger: 0.1, security: 0.2, documentation: 0.5, integrator: 0.4, monitor: 0.2, optimizer: 0.3, ask: 0.8, devops: 0.3, tutorial: 0.6, database: 0.2, specification: 0.4, mcp: 0.3, orchestrator: 0.5, designer: 0.8, }; return modeTemperatures[mode] ?? this.config.temperature ?? 
0.5; } /** * Check model availability and quota */ async checkHealth() { try { const result = await this.model.generateContent({ contents: [{ role: 'user', parts: [{ text: 'Hello' }] }], generationConfig: { maxOutputTokens: 10 }, }); return !!result.response; } catch (error) { return false; } } } exports.GeminiClient = GeminiClient; //# sourceMappingURL=gemini-client.js.map