@shayan-builds/use-every-llm
Version:
A unified API wrapper to use multiple LLMs like OpenAI and Gemini with ease.
65 lines (55 loc) • 1.54 kB
JavaScript
import {
GoogleGenAI,
createUserContent,
createPartFromUri,
} from "@google/genai";
import OpenAI from "openai";
import * as fs from "node:fs";
// Gemini client. The API key is read from the environment — never commit
// secrets to source control. (The key previously hard-coded here was
// exposed publicly and must be revoked/rotated.)
const gemini = new GoogleGenAI({
  apiKey: process.env.GEMINI_API_KEY,
});
// OpenAI-compatible client pointed at GitHub Models. The token is read
// from the environment — the `ghp_…` personal access token previously
// hard-coded here was exposed publicly and must be revoked/rotated.
const gpt = new OpenAI({
  baseURL: "https://models.github.ai/inference",
  apiKey: process.env.GITHUB_TOKEN,
});
/**
 * Send a prompt to an LLM, routed by model name.
 *
 * @param {object} opts
 * @param {string} opts.model - Model id; routed to OpenAI when it contains
 *   "gpt", to Gemini when it contains "gemini".
 * @param {string} opts.prompt - User prompt text.
 * @param {string} [opts.systemPrompt=""] - Optional system instruction.
 * @param {string} [opts.image=""] - Optional path to a local image file
 *   (Gemini branch only; read synchronously and base64-encoded).
 * @returns {Promise<string>} The model's text reply (also logged to stdout,
 *   preserving the original behavior).
 * @throws {Error} If the model name matches neither provider (previously a
 *   silent no-op).
 */
async function useLLM({ model, prompt, systemPrompt = "", image = "" }) {
  if (model.includes("gpt")) {
    const response = await gpt.chat.completions.create({
      messages: [
        { role: "system", content: systemPrompt },
        { role: "user", content: prompt },
      ],
      model,
    });
    const text = response.choices[0].message.content;
    console.log(text);
    return text;
  }

  if (model.includes("gemini")) {
    const contents = [{ text: prompt }];
    if (image) {
      // Derive the mime type from the file extension instead of always
      // claiming JPEG; unknown extensions fall back to the old default.
      const ext = image.slice(image.lastIndexOf(".") + 1).toLowerCase();
      const mimeTypes = {
        png: "image/png",
        jpg: "image/jpeg",
        jpeg: "image/jpeg",
        gif: "image/gif",
        webp: "image/webp",
      };
      // Image part goes first, matching the original ordering. (The original
      // also pushed an empty string "" into contents when no image was given;
      // that placeholder is now omitted entirely.)
      contents.unshift({
        inlineData: {
          mimeType: mimeTypes[ext] ?? "image/jpeg",
          data: fs.readFileSync(image, { encoding: "base64" }),
        },
      });
    }
    const response = await gemini.models.generateContent({
      model,
      contents,
      // Only attach a config when a system prompt was supplied.
      ...(systemPrompt ? { config: { systemInstruction: systemPrompt } } : {}),
    });
    console.log(response.text);
    return response.text;
  }

  // Fail loudly on an unrecognized model instead of silently doing nothing.
  throw new Error(`useLLM: unsupported model "${model}"`);
}
// Demo: ask Gemini to describe a local screenshot.
const demoRequest = {
  model: "gemini-2.5-flash",
  prompt: "describe the image",
  image: "Screenshot 2025-07-17 at 5.35.40 PM.png",
};
await useLLM(demoRequest);
// await useLLM("gpt-4.1", "What model is it?")