
nicechat


An extensible AI chat framework for OpenAI's models

"use strict"; var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } return new (P || (P = Promise))(function (resolve, reject) { function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } step((generator = generator.apply(thisArg, _arguments || [])).next()); }); }; var __asyncValues = (this && this.__asyncValues) || function (o) { if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined."); var m = o[Symbol.asyncIterator], i; return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i); function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; } function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); } }; var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); exports.chat = chat; const chalk_1 = __importDefault(require("chalk")); const nicechat_1 = require("../nicechat"); const openai_1 = require("./openai"); const vertexai_1 = require("@google-cloud/vertexai"); const MAX_OUTPUT_TOKENS = 8192; function chat(projectId, location, model, system) { return __awaiter(this, void 0, void 0, function* () { var _a, e_1, _b, _c; var _d, _e, _f, _g, _h; const vertexAI = new vertexai_1.VertexAI({ project: projectId, location }); const generativeModel = vertexAI.getGenerativeModel({ model, systemInstruction: { role: "system", parts: [{ text: system }], }, }); (0, openai_1.printStarter)("vertex", model, system); const history = []; // initial user input const input = yield (0, nicechat_1.readLine)(); history.push({ role: "user", parts: [{ text: input }], }); while (true) { const request = { contents: history, generationConfig: { maxOutputTokens: MAX_OUTPUT_TOKENS, temperature: 0.7, }, }; const streamingResult = yield generativeModel.generateContentStream(request); let responseText = ""; try { for (var _j = true, _k = (e_1 = void 0, __asyncValues(streamingResult.stream)), _l; _l = yield _k.next(), _a = _l.done, !_a; _j = true) { _c = _l.value; _j = false; const item = _c; const chunk = ((_h = (_g = (_f = (_e = (_d = item.candidates) === null || _d === void 0 ? void 0 : _d[0]) === null || _e === void 0 ? void 0 : _e.content) === null || _f === void 0 ? void 0 : _f.parts) === null || _g === void 0 ? void 0 : _g[0]) === null || _h === void 0 ? 
void 0 : _h.text) || ""; if (chunk) { process.stdout.write(chalk_1.default.greenBright(chunk)); responseText += chunk; } } } catch (e_1_1) { e_1 = { error: e_1_1 }; } finally { try { if (!_j && !_a && (_b = _k.return)) yield _b.call(_k); } finally { if (e_1) throw e_1.error; } } // Add assistant response to history history.push({ role: "model", parts: [{ text: responseText }], }); // ask user for next input console.log("\n"); const nextInput = yield (0, nicechat_1.readLine)(); history.push({ role: "user", parts: [{ text: nextInput }], }); } }); }
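
For reference, a minimal sketch of how this entry point might be driven from a caller's script. The module path, project ID, region, model name, and system prompt below are all placeholder assumptions, not values shipped with the package; adjust them to your setup. Note that chat loops forever reading user input, so the returned promise only settles on an error.

// usage-sketch.js -- hypothetical caller; every literal below is a placeholder
const { chat } = require("./vertex"); // assumed path to this module

chat(
    "my-gcp-project",               // GCP project ID (placeholder)
    "us-central1",                  // Vertex AI region (placeholder)
    "gemini-1.5-pro",               // model name (placeholder)
    "You are a helpful assistant."  // system instruction (placeholder)
).catch((err) => {
    // surfaces auth or streaming failures; the loop itself never resolves
    console.error(err);
    process.exit(1);
});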