// aiwrapper
// Version:
// A Universal AI Wrapper for JavaScript & TypeScript
// 243 lines (242 loc) • 9.3 kB
// JavaScript
// Build-time helper shims (esbuild-style) implementing object spread/rest
// and class-field semantics for downlevel output.
var __defProp = Object.defineProperty;
var __defProps = Object.defineProperties;
var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
var __getOwnPropSymbols = Object.getOwnPropertySymbols;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __propIsEnum = Object.prototype.propertyIsEnumerable;
// Define `key` on `obj` as a plain enumerable/configurable/writable property.
// Existing keys go through defineProperty so a prior accessor is replaced,
// not invoked; fresh keys use plain assignment.
var __defNormalProp = (obj, key, value) => {
  if (key in obj) {
    return __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value });
  }
  return obj[key] = value;
};
// `{ ...a, ...b }` — copies b's own enumerable string keys and symbols onto a.
var __spreadValues = (a, b) => {
  if (!b) b = {};
  for (var key in b) {
    if (__hasOwnProp.call(b, key)) {
      __defNormalProp(a, key, b[key]);
    }
  }
  if (__getOwnPropSymbols) {
    for (var sym of __getOwnPropSymbols(b)) {
      if (__propIsEnum.call(b, sym)) {
        __defNormalProp(a, sym, b[sym]);
      }
    }
  }
  return a;
};
// Spread that preserves property descriptors (getters/setters) from b.
var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
// Rest pattern `const { x, ...rest } = source` — copies every own enumerable
// key (string or symbol) of `source` NOT listed in `exclude` into a new object.
var __objRest = (source, exclude) => {
  var target = {};
  for (var key in source) {
    if (!__hasOwnProp.call(source, key)) continue;
    if (exclude.indexOf(key) < 0) {
      target[key] = source[key];
    }
  }
  if (source != null && __getOwnPropSymbols) {
    for (var sym of __getOwnPropSymbols(source)) {
      if (exclude.indexOf(sym) < 0 && __propIsEnum.call(source, sym)) {
        target[sym] = source[sym];
      }
    }
  }
  return target;
};
// Class-field initializer: non-symbol keys are coerced to strings.
var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
import { LangResult, LanguageProvider } from "../language-provider.js";
import { LangMessages } from "../messages.js";
import { httpRequestWithRetry as fetch } from "../../http-request.js";
import { processServerEvents } from "../../process-server-events.js";
import { models } from "aimodels";
import { calculateModelResponseTokens } from "../utils/token-calculator.js";
/**
 * Language provider for a local Ollama server.
 *
 * Streams completions from `/api/generate` (ask) and `/api/chat` (chat),
 * accumulating the visible text and tracking `<think>…</think>` regions
 * emitted by reasoning models.
 */
class OllamaLang extends LanguageProvider {
  /**
   * @param {object} options
   * @param {string} [options.model="llama2:latest"] - Ollama model tag.
   * @param {string} [options.systemPrompt=""] - System prompt stored in config.
   * @param {number} [options.maxTokens] - Upper bound for response tokens.
   * @param {string} [options.url="http://localhost:11434"] - Server base URL.
   */
  constructor(options) {
    const modelName = options.model || "llama2:latest";
    super(modelName);
    this._config = {
      model: modelName,
      systemPrompt: options.systemPrompt || "",
      maxTokens: options.maxTokens,
      baseURL: options.url || "http://localhost:11434"
    };
    // May be undefined for models unknown to the aimodels registry; the
    // original build had an empty `if (!this.modelInfo)` branch (presumably
    // a stripped warning), so unknown models are tolerated silently.
    this.modelInfo = models.id(modelName);
  }
  /**
   * Translate a generic request body to Ollama's parameter names:
   * `max_tokens` becomes `context_length`. Returns the body unchanged
   * when `max_tokens` is absent/falsy.
   */
  transformBody(body) {
    if (body.max_tokens) {
      const { max_tokens, ...rest } = body;
      return { ...rest, context_length: max_tokens };
    }
    return body;
  }
  /**
   * Send a single-prompt completion request and stream the response.
   *
   * @param {string} prompt - User prompt.
   * @param {object} [options]
   * @param {AbortSignal} [options.signal] - Cancels the request.
   * @param {Function} [options.onResult] - Called with the latest message
   *   after each streamed chunk and once on completion.
   * @returns {Promise<LangResult>} finished result (marked `aborted` and
   *   attached to the error as `partialResult` on AbortError).
   */
  async ask(prompt, options) {
    const abortSignal = options?.signal;
    const messages = new LangMessages();
    messages.addUserMessage(prompt);
    const result = new LangResult(messages);
    // Cap the response size from model metadata when available.
    let requestMaxTokens = this._config.maxTokens;
    if (this.modelInfo) {
      requestMaxTokens = calculateModelResponseTokens(
        this.modelInfo,
        [{ role: "user", text: prompt }],
        this._config.maxTokens
      );
    }
    let visibleContent = "";
    let openThinkTagIndex = -1;
    let pendingThinkingContent = "";
    const onResult = options?.onResult;
    const onData = (data) => {
      if (data.done) {
        const extracted = this.extractThinking(visibleContent);
        // NOTE(review): branch body was stripped in this build — presumably
        // extracted.thinking should be stored on the result; verify upstream.
        if (extracted.thinking) {
        }
        result.finished = true;
        const last = result.length > 0 ? result[result.length - 1] : undefined;
        if (last) options?.onResult?.(last);
        return;
      }
      if (data.response) {
        const currentChunk = data.response;
        visibleContent += currentChunk;
        this.processChunkForThinking(currentChunk, visibleContent, result, openThinkTagIndex, pendingThinkingContent);
        // Track an unterminated <think> tag so partial thinking content can
        // be carried between chunks ("<think>".length === 7).
        openThinkTagIndex = visibleContent.lastIndexOf("<think>");
        if (openThinkTagIndex !== -1) {
          const closeTagIndex = visibleContent.indexOf("</think>", openThinkTagIndex);
          if (closeTagIndex === -1) {
            pendingThinkingContent = visibleContent.substring(openThinkTagIndex + 7);
          }
        }
      }
      const last = result.length > 0 ? result[result.length - 1] : undefined;
      if (last) options?.onResult?.(last);
    };
    try {
      const response = await fetch(`${this._config.baseURL}/api/generate`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json"
        },
        body: JSON.stringify({
          model: this._config.model,
          prompt,
          stream: true,
          ...(requestMaxTokens && { num_predict: requestMaxTokens })
        }),
        signal: abortSignal
        // FIX: rethrow the original error instead of `new Error(err)`;
        // wrapping stringified the error and destroyed its name, so the
        // AbortError check below could never match.
      }).catch((err) => {
        throw err;
      });
      await processServerEvents(response, onData, abortSignal);
    } catch (error) {
      if (error?.name === "AbortError") {
        result.aborted = true;
        // Expose whatever streamed before the abort to the caller.
        error.partialResult = result;
      }
      throw error;
    }
    if (!onResult) {
      const extracted = this.extractThinking(result.answer);
      // NOTE(review): stripped branch, same as in onData — see above.
      if (extracted.thinking) {
      }
    }
    return result;
  }
  /**
   * Send a multi-message chat request and stream the response.
   *
   * Multimodal messages (content arrays) are flattened: text parts are
   * joined with newlines, base64/data-URL images are collected into the
   * Ollama `images` field.
   *
   * @param {LangMessages} messages - Conversation history.
   * @param {object} [options] - Same shape as in `ask`.
   * @returns {Promise<LangResult>}
   */
  async chat(messages, options) {
    const abortSignal = options?.signal;
    const result = new LangResult(messages);
    let requestMaxTokens = this._config.maxTokens;
    if (this.modelInfo) {
      requestMaxTokens = calculateModelResponseTokens(
        this.modelInfo,
        messages,
        this._config.maxTokens
      );
    }
    let visibleContent = "";
    let openThinkTagIndex = -1;
    let pendingThinkingContent = "";
    const onResult = options?.onResult;
    const onData = (data) => {
      if (data.done) {
        const extracted = this.extractThinking(visibleContent);
        // NOTE(review): stripped branch — see ask().
        if (extracted.thinking) {
        }
        result.finished = true;
        const last = result.length > 0 ? result[result.length - 1] : undefined;
        if (last) options?.onResult?.(last);
        return;
      }
      if (data.message && data.message.content) {
        const currentChunk = data.message.content;
        visibleContent += currentChunk;
        this.processChunkForThinking(currentChunk, visibleContent, result, openThinkTagIndex, pendingThinkingContent);
        openThinkTagIndex = visibleContent.lastIndexOf("<think>");
        if (openThinkTagIndex !== -1) {
          const closeTagIndex = visibleContent.indexOf("</think>", openThinkTagIndex);
          if (closeTagIndex === -1) {
            pendingThinkingContent = visibleContent.substring(openThinkTagIndex + 7);
          }
        }
      }
      const last = result.length > 0 ? result[result.length - 1] : undefined;
      if (last) options?.onResult?.(last);
    };
    // Flatten multimodal content: collect inline images, keep text parts.
    const images = [];
    const mappedMessages = messages.map((m) => {
      if (Array.isArray(m.content)) {
        for (const part of m.content) {
          if (part.type === "image") {
            const img = part.image;
            if (img.kind === "base64") images.push(img.base64);
            if (img.kind === "url" && typeof img.url === "string" && img.url.startsWith("data:")) {
              const match = img.url.match(/^data:([^;]+);base64,(.*)$/);
              if (match) images.push(match[2]);
            }
          }
        }
        const text = m.content.filter((p) => p.type === "text").map((p) => p.text).join("\n");
        return { role: m.role, content: text };
      }
      return m;
    });
    try {
      const response = await fetch(`${this._config.baseURL}/api/chat`, {
        method: "POST",
        // FIX: this header was missing here while ask() set it — without it
        // some servers reject or misparse the JSON body.
        headers: {
          "Content-Type": "application/json"
        },
        body: JSON.stringify({
          model: this._config.model,
          messages: mappedMessages,
          ...(images.length > 0 ? { images } : {}),
          stream: true,
          ...(requestMaxTokens && { num_predict: requestMaxTokens })
        }),
        signal: abortSignal
        // FIX: rethrow the original error (see ask()).
      }).catch((err) => {
        throw err;
      });
      await processServerEvents(response, onData, abortSignal);
    } catch (error) {
      if (error?.name === "AbortError") {
        result.aborted = true;
        error.partialResult = result;
      }
      throw error;
    }
    if (!onResult) {
      const extracted = this.extractThinking(result.answer);
      // NOTE(review): stripped branch — see ask().
      if (extracted.thinking) {
      }
    }
    return result;
  }
  // Helper to extract thinking content from <think> tags.
  // Returns { thinking, answer }: all <think> bodies joined with newlines,
  // and the content with the tags (and their bodies) removed.
  extractThinking(content) {
    const thinkRegex = /<think>([\s\S]*?)<\/think>/g;
    const matches = content.match(thinkRegex);
    if (!matches || matches.length === 0) {
      return { thinking: "", answer: content };
    }
    const thinking = matches.map((match) => match.replace(/<think>|<\/think>/g, "").trim()).join("\n");
    const answer = content.replace(thinkRegex, "").trim();
    return { thinking, answer };
  }
  // Process a chunk for thinking content during streaming.
  // NOTE(review): body was stripped in this build — intentionally a no-op
  // here; the callers still compute and pass its would-be inputs.
  processChunkForThinking(currentChunk, fullContent, result, openTagIndex, pendingThinking) {
  }
}
export {
OllamaLang
};
//# sourceMappingURL=ollama-lang.js.map