// node-llama-cpp
// Run AI models locally on your machine with Node.js bindings for llama.cpp.
// Enforces a JSON schema on the model output at the generation level.
import os from "os";
import { getPlatform } from "./getPlatform.js";
import { getLinuxDistroInfo } from "./getLinuxDistroInfo.js";
/**
 * Resolves a human-readable name and version string for the platform the
 * process is running on.
 *
 * On Linux the distro name/version is looked up asynchronously via
 * `getLinuxDistroInfo()`; on macOS and Windows the kernel release from
 * `os.release()` is used as the version. Any unrecognized platform falls
 * back to `"Unknown"` with the kernel release as the version.
 *
 * @returns {Promise<{name: string, version: string}>}
 */
export async function getPlatformInfo() {
    const platform = getPlatform();
    switch (platform) {
        case "mac":
            return {
                name: "macOS",
                version: os.release()
            };
        case "linux": {
            const distroInfo = await getLinuxDistroInfo();
            return {
                name: distroInfo.name,
                version: distroInfo.version
            };
        }
        case "win":
            return {
                name: "Windows",
                version: os.release()
            };
        default:
            return {
                name: "Unknown",
                version: os.release()
            };
    }
}
//# sourceMappingURL=getPlatformInfo.js.map