/**
 * deprecopilot — Automated dependency management with AI-powered codemods.
 * LLM client module: Gemini/OpenAI calls with on-disk response caching.
 */
import { promises as fs } from 'fs';
import { join } from 'path';
// API keys come from the environment; default to '' when unset so the
// fetch helpers below can fail fast with a clear "not set" error.
const GEMINI_API_KEY = process.env.GEMINI_API_KEY || '';
const OPENAI_API_KEY = process.env.OPENAI_API_KEY || '';
// REST endpoints for the two supported providers.
const GEMINI_API_URL = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent';
const OPENAI_API_URL = 'https://api.openai.com/v1/chat/completions';
// On-disk LLM response cache, kept in the current working directory.
const CACHE_PATH = join(process.cwd(), '.deprecopilot-llm-cache.json');
/**
 * Loads the on-disk LLM response cache.
 *
 * @returns {Promise<Object>} Parsed cache contents, or an empty object
 *   when the cache file is missing or cannot be parsed.
 */
export async function readCache() {
  let parsed = {};
  try {
    const raw = await fs.readFile(CACHE_PATH, 'utf-8');
    parsed = JSON.parse(raw);
  } catch {
    // Missing or corrupt cache file — start from an empty cache.
  }
  return parsed;
}
/**
 * Persists the LLM response cache to disk as pretty-printed JSON,
 * replacing any existing cache file.
 *
 * @param {Object} cache - key → response map to serialize.
 * @returns {Promise<void>}
 */
export async function writeCache(cache) {
  const serialized = JSON.stringify(cache, null, 2);
  await fs.writeFile(CACHE_PATH, serialized, { flag: 'w' });
}
/**
 * Builds a deterministic cache key from a prompt/context pair.
 *
 * @param {string} prompt - prompt text sent to the LLM.
 * @param {Object} [context] - optional request context; omitted from the
 *   key when undefined (JSON.stringify drops undefined properties).
 * @returns {string} JSON-encoded key.
 */
export function makeCacheKey(prompt, context) {
  const keyParts = { prompt, context };
  return JSON.stringify(keyParts);
}
/**
 * Sends a single prompt to the Gemini generateContent endpoint and returns
 * the first candidate's text.
 *
 * @param {string} prompt - user prompt text.
 * @param {Object} [context] - optional extra fields spread into the request
 *   body (shape not validated here — assumed to match the API; TODO confirm).
 * @returns {Promise<string>} generated text.
 * @throws {Error} when GEMINI_API_KEY is unset, the HTTP request fails, or
 *   the response contains no text.
 */
export async function fetchGemini(prompt, context) {
  if (!GEMINI_API_KEY)
    throw new Error('GEMINI_API_KEY not set');
  const body = {
    contents: [
      { parts: [{ text: prompt }] }
    ],
    ...(context ? { context } : {})
  };
  // Authenticate via the x-goog-api-key header rather than a ?key= query
  // parameter so the key does not leak into server/proxy access logs or
  // error messages that include the URL.
  const res = await fetch(GEMINI_API_URL, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-goog-api-key': GEMINI_API_KEY
    },
    body: JSON.stringify(body)
  });
  if (!res.ok) {
    const errText = await res.text();
    throw new Error(`Gemini API error: ${res.status} ${errText}`);
  }
  const data = await res.json();
  const text = data.candidates?.[0]?.content?.parts?.[0]?.text || '';
  if (!text)
    throw new Error('No response from Gemini API');
  return text;
}
/**
 * Sends a single codemod-generation prompt to the OpenAI chat completions
 * endpoint and returns the assistant's reply text.
 *
 * @param {string} prompt - user prompt text.
 * @param {Object} [context] - accepted for signature parity with fetchGemini;
 *   not used in the request.
 * @returns {Promise<string>} assistant message content.
 * @throws {Error} when OPENAI_API_KEY is unset, the HTTP request fails, or
 *   the response contains no content.
 */
export async function fetchOpenAI(prompt, context) {
  if (!OPENAI_API_KEY)
    throw new Error('OPENAI_API_KEY not set');
  const payload = {
    model: 'gpt-4',
    messages: [
      { role: 'system', content: 'You are a helpful assistant that generates jscodeshift codemods.' },
      { role: 'user', content: prompt }
    ],
    temperature: 0.1,
    max_tokens: 4000
  };
  const requestInit = {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${OPENAI_API_KEY}`
    },
    body: JSON.stringify(payload)
  };
  const res = await fetch(OPENAI_API_URL, requestInit);
  if (!res.ok) {
    const details = await res.text();
    throw new Error(`OpenAI API error: ${res.status} ${details}`);
  }
  const data = await res.json();
  const reply = data.choices?.[0]?.message?.content || '';
  if (!reply)
    throw new Error('No response from OpenAI API');
  return reply;
}
/**
 * Generates a codemod via an LLM, with on-disk caching and retries.
 *
 * Responses are keyed by (prompt, context); a cache hit returns immediately
 * with no network call. On failure the request is retried up to 3 times
 * with exponential backoff plus jitter (500 ms, 1 s, 2 s base).
 *
 * @param {string} prompt - prompt text for the LLM.
 * @param {Object} [context] - optional request context (provider-specific).
 * @param {string} [provider='gemini'] - 'openai' or 'gemini' (the default;
 *   any other value also falls through to Gemini).
 * @returns {Promise<string>} the generated codemod text.
 * @throws {Error} the last provider error after all attempts fail.
 */
export async function generateCodemod(prompt, context, provider = 'gemini') {
  const cache = await readCache();
  const key = makeCacheKey(prompt, context);
  if (cache[key])
    return cache[key];
  const maxAttempts = 3;
  let lastErr;
  for (let attempt = 0; attempt < maxAttempts; ++attempt) {
    try {
      const result = provider === 'openai'
        ? await fetchOpenAI(prompt, context)
        : await fetchGemini(prompt, context);
      cache[key] = result;
      await writeCache(cache);
      return result;
    } catch (err) {
      lastErr = err;
      // Back off only when another attempt remains. The previous version
      // also slept after the final failure, delaying the throw by up to
      // ~2.1 s for no benefit.
      if (attempt < maxAttempts - 1) {
        const delay = Math.pow(2, attempt) * 500 + Math.random() * 100;
        await new Promise((resolve) => setTimeout(resolve, delay));
      }
    }
  }
  if (lastErr instanceof Error) {
    process.stderr.write('LLM error: ' + lastErr.message + '\n');
    throw lastErr;
  }
  throw new Error('Unknown LLM error');
}
/** Default export: the LLM client facade exposing codemod generation. */
export default { generateCodemod };