@quantumai/quantum-cli-core
Version:
Quantum CLI Core - Multi-LLM Collaboration System
63 lines • 2.35 kB
JavaScript
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { BaseLLMProvider, } from './base-provider.js';
import OpenAI from 'openai';
/**
 * LLM provider backed by the OpenAI Chat Completions API.
 *
 * Reads its API key from the environment (variable name configurable via
 * `providerConfig.apiKeyEnvVar`, default `OPENAI_API_KEY`) and targets the
 * model named in `providerConfig.modelName` (default `gpt-4o`).
 */
export class OpenAIProvider extends BaseLLMProvider {
    id = 'openai';
    capabilities = ['text-generation', 'code-generation'];
    client;
    modelName;
    /**
     * @param {object} providerConfig - Provider configuration. Recognized keys:
     *   `apiKeyEnvVar` (name of the env var holding the API key) and
     *   `modelName` (OpenAI model identifier).
     * @throws {Error} If the API key environment variable is unset or empty.
     */
    constructor(providerConfig) {
        super(providerConfig);
        const envVar = providerConfig.apiKeyEnvVar || 'OPENAI_API_KEY';
        const apiKey = process.env[envVar];
        if (!apiKey) {
            throw new Error(`OpenAI API key not found in environment variable: ${envVar}`);
        }
        this.client = new OpenAI({ apiKey });
        this.modelName = providerConfig.modelName || 'gpt-4o';
    }
    /**
     * Build the Chat Completions request payload shared by generate() and
     * generateStream(). Sampling options are forwarded only when explicitly
     * provided, so the request is unchanged for callers that pass no options
     * (previously `options` was silently ignored).
     * NOTE(review): assumes the options object uses `temperature` and
     * `maxTokens` field names — confirm against the BaseLLMProvider contract.
     */
    #buildRequest(prompt, options) {
        return {
            messages: [{ role: 'user', content: prompt }],
            model: this.modelName,
            ...(options?.temperature !== undefined && { temperature: options.temperature }),
            ...(options?.maxTokens !== undefined && { max_tokens: options.maxTokens }),
        };
    }
    /**
     * Generate a single completion for `prompt`.
     * @param {string} prompt - User prompt sent as one user-role message.
     * @param {object} [options] - Optional sampling options (see #buildRequest).
     * @returns {Promise<object>} A provider response built via createResponse(),
     *   or an error response via createErrorResponse() — this method never throws.
     */
    async generate(prompt, options) {
        const startTime = Date.now();
        try {
            const chatCompletion = await this.client.chat.completions.create(this.#buildRequest(prompt, options));
            const content = chatCompletion.choices[0]?.message?.content ?? '';
            const latency = Date.now() - startTime;
            const tokens = chatCompletion.usage?.total_tokens ?? 0;
            // TODO: Calculate actual cost based on model and token usage
            const cost = 0;
            const confidence = 0.85; // Default confidence for OpenAI
            return this.createResponse(content, latency, tokens, cost, confidence);
        }
        catch (error) {
            return this.createErrorResponse(error);
        }
    }
    /**
     * Check that the configured API key is accepted by the OpenAI API.
     * @returns {Promise<boolean>} true if a lightweight authenticated request
     *   succeeds; false (with a logged reason) otherwise. Never throws.
     */
    async validateCredentials() {
        try {
            // Cheap authenticated call; succeeds iff the key is valid.
            await this.client.models.list();
            return true;
        }
        catch (error) {
            // Thrown values are not guaranteed to be Error instances; coerce
            // so we never log "undefined" as the failure reason.
            const message = error instanceof Error ? error.message : String(error);
            console.error(`OpenAI credential validation failed: ${message}`);
            return false;
        }
    }
    /**
     * Stream a completion for `prompt`, yielding text deltas as they arrive.
     * Unlike generate(), errors propagate to the caller of the iterator.
     * @param {string} prompt - User prompt sent as one user-role message.
     * @param {object} [options] - Optional sampling options (see #buildRequest).
     * @yields {string} Incremental content chunks (empty string for keep-alive
     *   chunks that carry no delta text).
     */
    async *generateStream(prompt, options) {
        const stream = await this.client.chat.completions.create({
            ...this.#buildRequest(prompt, options),
            stream: true,
        });
        for await (const chunk of stream) {
            yield chunk.choices[0]?.delta?.content ?? '';
        }
    }
}
//# sourceMappingURL=openai-provider.js.map