gpt4all-cli
Version: 0.2.2
<h1 align="center">Welcome to gpt4all-cli 👋</h1>
154 lines (148 loc) • 4.41 kB
JavaScript
// Compiled async/await lowering (esbuild helper): drives a generator whose
// `yield`ed values are awaited, settling the returned Promise with the
// generator's return value or first uncaught error.
var __async = (thisArg, args, generator) => {
  return new Promise((resolve, reject) => {
    // Resume the iterator with an awaited value; a synchronous throw
    // inside the generator rejects the outer promise.
    const onFulfilled = (value) => {
      try {
        advance(iterator.next(value));
      } catch (err) {
        reject(err);
      }
    };
    // Route an awaited rejection back into the generator as a throw.
    const onRejected = (reason) => {
      try {
        advance(iterator.throw(reason));
      } catch (err) {
        reject(err);
      }
    };
    // Finish when the generator is done; otherwise await the yielded value.
    const advance = (result) =>
      result.done
        ? resolve(result.value)
        : Promise.resolve(result.value).then(onFulfilled, onRejected);
    const iterator = generator.apply(thisArg, args);
    advance(iterator.next());
  });
};
// src/index.ts
import { consola } from "consola";
import ora from "ora";
import { Command } from "commander";
import { join } from "path";
import { homedir } from "os";
import debug from "debug";
// src/utils.ts
import path from "path";
import fs from "fs";
import prettier from "prettier";
/**
 * Delete everything inside `nomicDir` except the gpt4all binaries
 * ("gpt4all" / "gpt4all.exe"), so the model cache can be rebuilt without
 * re-downloading the executable.
 *
 * @param {string} nomicDir - Path to the ~/.nomic directory.
 * @returns {string[]} Names of the entries that were kept.
 */
function reset(nomicDir) {
  // Nothing to clean when the directory was never created.
  if (!fs.existsSync(nomicDir)) {
    return [];
  }
  const keep = new Set(["gpt4all", "gpt4all.exe"]);
  const remainingFiles = [];
  for (const file of fs.readdirSync(nomicDir)) {
    if (keep.has(file)) {
      remainingFiles.push(file);
    } else {
      // force: tolerate entries that vanish between readdir and rm.
      fs.rmSync(path.join(nomicDir, file), { recursive: true, force: true });
    }
  }
  return remainingFiles;
}
/**
 * Pretty-print a model response as markdown when it contains fenced code
 * blocks; plain responses are returned untouched.
 *
 * @param {string} text - Raw response text from the model.
 * @returns {string} Formatted markdown, or the original text when there is
 *   nothing to format or prettier cannot parse it.
 */
function formatCodeBlocks(text) {
  if (!text.includes("```")) {
    return text;
  }
  try {
    return prettier.format(text, { parser: "markdown" });
  } catch (err) {
    // Model output is not guaranteed to be valid markdown; a parse failure
    // must not crash the interactive session — show the raw text instead.
    return text;
  }
}
// src/lib/gpt-factory.ts
import { GPT4All } from "gpt4all-ts-client";
/**
 * Build a GPT4All client for the requested model name.
 *
 * @param {string} model - Model identifier; must contain "gpt4all".
 * @returns {GPT4All} Client wrapping the requested model.
 * @throws {Error} When the name does not reference a gpt4all model.
 */
function gptFactory(model) {
  const name = String(model);
  if (!name.includes("gpt4all")) {
    throw new Error(`Unsupported model or model not specified!`);
  }
  return new GPT4All(model);
}
// package.json
var version = "0.2.2";

// src/index.ts
// Declare the CLI surface: model selection, cache reset, and debug logging.
var program = new Command();
program
  .version(version)
  .description("GPT4all CLI")
  .option(
    "-m, --model <value>",
    "Choose a model (default: gpt4all-lora-quantized)",
    ""
  )
  .option(
    "-r, --reset",
    "Reset the model by deleting the ~/.nomic folder",
    false
  )
  .option("--debug", "Enable debug mode", false)
  .helpOption("-h, --help", "Display help for command");
program.parse(process.argv);
/**
 * CLI entry point: handles the --reset flow, resolves the model (flag or
 * interactive select), then runs an interactive prompt loop against the
 * chosen gpt4all model. Compiled async/await: `yield` awaits via __async.
 *
 * Fixes over the original:
 * - A cancelled prompt (consola.prompt resolving to null/undefined) is
 *   treated as "exit" instead of crashing on `undefined.toLowerCase()`.
 * - The model is always closed via try/finally, even when generation throws.
 */
var main = () => __async(void 0, null, function* () {
  consola.info("Welcome to the GPT4all CLI!");
  const options = program.opts();
  if (options.debug) {
    debug.enable("gpt4all");
  }
  if (options.reset) {
    const nomicPath = join(homedir(), ".nomic");
    consola.warn(
      `This will delete ${nomicPath} and all its contents except gpt4all file.`
    );
    const confirm = yield consola.prompt("Are you sure?", {
      type: "confirm"
    });
    if (confirm) {
      consola.start(`Deleting ${nomicPath} ...`);
      reset(nomicPath);
      consola.success("Reset completed!");
    } else {
      consola.info("Reset cancelled.");
    }
    process.exit(0);
  }
  let model = options.model;
  const supportedModels = [
    "gpt4all-lora-quantized",
    "gpt4all-lora-unfiltered-quantized"
  ];
  if (model === "") {
    // No -m flag: let the user pick interactively.
    model = yield consola.prompt(
      "Choose a model (default: gpt4all-lora-quantized): ",
      {
        type: "select",
        options: [
          {
            label: "gpt4all-lora-quantized",
            value: "gpt4all-lora-quantized",
            hint: "Default model"
          },
          {
            label: "gpt4all-lora-unfiltered-quantized",
            value: "gpt4all-lora-unfiltered-quantized",
            hint: "Unfiltered model, may contain offensive content"
          }
        ]
      }
    );
  } else if (!supportedModels.includes(model)) {
    consola.error(`Invalid model option: ${model}`);
    process.exit(1);
  }
  const gpt4all = gptFactory(model);
  consola.start(`Initialize and download ${model} model if missing ...`);
  yield gpt4all.init();
  yield gpt4all.open();
  consola.ready("Model ready!");
  // A null/undefined prompt (input cancelled) counts as quitting, so the
  // loop condition never dereferences undefined.
  const isQuit = (p) => p == null || p.toLowerCase() === "exit" || p.toLowerCase() === "quit";
  try {
    let prompt = "";
    while (!isQuit(prompt)) {
      prompt = yield consola.prompt(
        'Enter your prompt (or type "exit" or "quit" to finish): ',
        {
          type: "text",
          default: "exit"
        }
      );
      if (!isQuit(prompt)) {
        const spinner = ora("Generating response...").start();
        const response = yield gpt4all.prompt(prompt);
        spinner.stop();
        consola.warn(formatCodeBlocks(response));
      }
    }
  } finally {
    // Release the model process even if generation threw mid-session.
    gpt4all.close();
  }
  consola.success("Thank you for using the GPT4all CLI!");
});
// Surface unexpected failures and exit nonzero so shells/scripts can detect
// them. The arrow keeps consola's `this` intact (a detached method reference
// could lose it) and sets the exit code, which the original did not.
main().catch((err) => {
  consola.error(err);
  process.exitCode = 1;
});