// aiwrapper — A Universal AI Wrapper for JavaScript & TypeScript
import { LangResultWithMessages, LanguageProvider, } from "../language-provider.js";
import { httpRequestWithRetry as fetch, } from "../../http-request.js";
import { processResponseStream } from "../../process-response-stream.js";
import { models } from 'aimodels';
import { calculateModelResponseTokens } from "../utils/token-calculator.js";
/**
 * Language provider for Google's Generative Language (Gemini) API,
 * streamed over SSE via `:streamGenerateContent`.
 */
export class GoogleLang extends LanguageProvider {
    /**
     * @param {object} options
     * @param {string} options.apiKey - Google Generative Language API key.
     * @param {string} [options.model="gemini-2.0-flash"] - Model id, looked up in the aimodels database.
     * @param {string} [options.systemPrompt=""] - Prepended as a system message by ask().
     * @param {number} [options.maxTokens] - Explicit output-token cap; derived from model info when omitted.
     */
    constructor(options) {
        const modelName = options.model || "gemini-2.0-flash";
        super(modelName);
        // Get model info from aimodels. Unknown models only log an error (no throw)
        // so callers can still attempt to use a model newer than the database.
        const modelInfo = models.id(modelName);
        if (!modelInfo) {
            console.error(`Invalid Google model: ${modelName}. Model not found in aimodels database.`);
        }
        this.modelInfo = modelInfo;
        this._apiKey = options.apiKey;
        this._model = modelName;
        this._systemPrompt = options.systemPrompt || "";
        this._maxTokens = options.maxTokens;
    }
    /**
     * Send a single prompt. Wraps it (plus the configured system prompt, if any)
     * into a message list and delegates to chat().
     *
     * @param {string} prompt - User prompt text.
     * @param {(result: LangResultWithMessages) => void} [onResult] - Streaming callback.
     * @returns {Promise<LangResultWithMessages>}
     */
    async ask(prompt, onResult) {
        const messages = [];
        if (this._systemPrompt) {
            messages.push({
                role: "system",
                content: this._systemPrompt,
            });
        }
        messages.push({
            role: "user",
            content: prompt,
        });
        return await this.chat(messages, onResult);
    }
    /**
     * Stream a chat completion from Google's API.
     *
     * @param {Array<{role: string, content: string}>} messages - Conversation so far.
     * @param {(result: LangResultWithMessages) => void} [onResult] - Invoked on each
     *   streamed chunk and once more when the stream finishes.
     * @returns {Promise<LangResultWithMessages>} The result, with `answer` and
     *   `messages` accumulated from the stream.
     * @throws {Error} On a 401 (invalid key) or 400 (bad request) response, or any
     *   other request failure.
     */
    async chat(messages, onResult) {
        const result = new LangResultWithMessages(messages);
        // Transform messages into Google's format. The `contents` array has no
        // "system" role, so system messages are sent as prefixed user turns.
        const contents = messages.map((msg) => {
            if (msg.role === "system") {
                return {
                    role: "user",
                    parts: [{ text: `System instruction: ${msg.content}` }],
                };
            }
            return {
                role: msg.role === "assistant" ? "model" : "user",
                parts: [{ text: msg.content }],
            };
        });
        // Honor an explicit cap; otherwise derive one from the model info.
        let maxOutputTokens = this._maxTokens;
        if (this.modelInfo && !maxOutputTokens) {
            maxOutputTokens = calculateModelResponseTokens(this.modelInfo, messages, this._maxTokens);
        }
        const requestBody = {
            contents,
            generationConfig: {
                maxOutputTokens,
                temperature: 0.7,
                topP: 0.8,
                topK: 40,
            },
        };
        const onData = (data) => {
            if (data.finished) {
                result.finished = true;
                onResult?.(result);
                return;
            }
            // Handle Google's streaming format: each SSE chunk carries the next
            // text fragment in candidates[0].content.parts[0].text.
            const text = data.candidates?.[0]?.content?.parts?.[0]?.text;
            if (text) {
                result.answer += text;
                result.messages = [
                    ...messages,
                    {
                        role: "assistant",
                        content: result.answer,
                    },
                ];
                onResult?.(result);
            }
        };
        const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/${this._model}:streamGenerateContent?alt=sse&key=${this._apiKey}`, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
            },
            body: JSON.stringify(requestBody),
            onNotOkResponse: async (res, decision) => {
                if (res.status === 401) {
                    decision.retry = false;
                    throw new Error("API key is invalid. Please check your API key and try again.");
                }
                if (res.status === 400) {
                    const data = await res.text();
                    decision.retry = false;
                    throw new Error(data);
                }
                return decision;
            },
        }).catch((err) => {
            // Rethrow the original Error: the previous `new Error(err)` stringified
            // it ("Error: Error: ...") and discarded its stack trace.
            if (err instanceof Error) {
                throw err;
            }
            throw new Error(String(err));
        });
        await processResponseStream(response, onData);
        return result;
    }
}
//# sourceMappingURL=google-lang.js.map