// ultimate-mcp-server — Google (Gemini) provider
// The definitive all-in-one Model Context Protocol server for AI-assisted coding across 30+ platforms
import { GoogleGenerativeAI } from "@google/generative-ai";
import { Logger } from "../utils/logger.js";
/**
 * LLM provider backed by Google's Generative AI SDK (Gemini models).
 *
 * Exposes two completion entry points — single-prompt `complete` and
 * multi-turn `completeWithContext` — both of which delegate to a shared
 * private request helper so the SDK call and error handling live in one place.
 */
export class GoogleProvider {
    /** Provider identifier used for model routing. */
    name = "google";
    /** Underlying `GoogleGenerativeAI` SDK client. */
    genAI;
    /** Scoped logger for this provider. */
    logger;
    /**
     * Models offered by this provider.
     * Pricing is per token (USD); contextLength is in tokens.
     */
    models = [
        {
            id: "gemini-pro",
            name: "Gemini Pro",
            contextLength: 32000,
            pricing: { input: 0.00000125, output: 0.00000375 },
            capabilities: ["coding", "reasoning", "analysis"],
            provider: "google",
        },
        {
            id: "gemini-pro-vision",
            name: "Gemini Pro Vision",
            contextLength: 32000,
            pricing: { input: 0.00000125, output: 0.00000375 },
            capabilities: ["coding", "reasoning", "vision"],
            provider: "google",
        },
    ];
    /**
     * @param {string} apiKey - Google Generative AI API key.
     */
    constructor(apiKey) {
        this.logger = new Logger("GoogleProvider");
        this.genAI = new GoogleGenerativeAI(apiKey);
    }
    /**
     * Generate a completion for a single user prompt.
     *
     * @param {string} prompt - The user prompt text.
     * @param {{ model?: string, temperature?: number, maxTokens?: number, topP?: number }} [options]
     * @returns {Promise<string>} The generated text.
     * @throws Rethrows any SDK error after logging it.
     */
    async complete(prompt, options) {
        // A single prompt is just a one-message conversation.
        return this.#generate([{ role: "user", parts: [{ text: prompt }] }], options, "Google completion failed:");
    }
    /**
     * Generate a completion for a multi-turn conversation.
     *
     * @param {Array<{ role: string, content: string }>} messages - Chat history;
     *   `assistant` messages are mapped to Gemini's `model` role, everything
     *   else (including `system`) to `user`.
     * @param {{ model?: string, temperature?: number, maxTokens?: number, topP?: number }} [options]
     * @returns {Promise<string>} The generated text.
     * @throws Rethrows any SDK error after logging it.
     */
    async completeWithContext(messages, options) {
        // Gemini only understands "user" and "model" roles.
        const contents = messages.map((msg) => ({
            role: msg.role === 'assistant' ? 'model' : 'user',
            parts: [{ text: msg.content }],
        }));
        return this.#generate(contents, options, "Google completion with context failed:");
    }
    /**
     * Shared request path for both public completion methods.
     *
     * @param {Array<{ role: string, parts: Array<{ text: string }> }>} contents - Gemini-format turns.
     * @param {{ model?: string, temperature?: number, maxTokens?: number, topP?: number }} [options]
     * @param {string} errorLabel - Log prefix identifying the calling entry point.
     * @returns {Promise<string>} The generated text.
     * @throws Rethrows any SDK error after logging it under `errorLabel`.
     */
    async #generate(contents, options, errorLabel) {
        try {
            const model = this.genAI.getGenerativeModel({
                model: options?.model || "gemini-pro",
            });
            const result = await model.generateContent({
                contents,
                generationConfig: {
                    temperature: options?.temperature ?? 0.7,
                    maxOutputTokens: options?.maxTokens,
                    topP: options?.topP,
                },
            });
            const response = await result.response;
            return response.text();
        }
        catch (error) {
            this.logger.error(errorLabel, error);
            throw error;
        }
    }
}
//# sourceMappingURL=google.js.map