gpt3rocket
Little helper utility for priming and transacting with the GPT-3 API
"use strict";
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.gpt3Endpoint = exports.GPT3Rocket = void 0;
var axios_1 = require("axios");
/**
* ## Opts: Samples & prefix
* Samples & a prefix string will prime your agent
*
* ### opts.samples (optional)
*
* *array of Samples*
*
* ```ts
* const samples = [['marco', 'polo'], ['marrrrrccccoo', 'pollllooo']];
* ```
* ### opts.prefix (optional)
* String to prepend to the top of the message as a "primer"
*
* *string*
*
*```ts
* const prefix = 'The following is an exchange between the user and an intelligent agent. The agent is friendly, prompt, and wants to help the user.';
* ```
*
* ## Transform (optional)
* An optional function to adjust how the prefix & samples are structured before being sent to the API
*
* Receives prompt, samples, prefix, inputString, outputString
* Without a custom function, the default template looks like the following:
*
* ```
* Prefix phrase ____
* input: aaa
* output: bbb
* input: ${user_prompt_goes_here}
*```
*
*
* ```ts
* const transform = (prompt, samples, prefix, inputString, outputString) => {
*   // Samples arrive as [input, output] pairs; flatten, then label alternating entries
*   const decoratedSamples = samples.flat().map((example, idx) => {
*     if (!(idx % 2)) {
*       return `${inputString}:${example}`;
*     } else {
*       return `${outputString}:${example}`;
*     }
*   });
*
*   return `${prefix}\n${decoratedSamples.join("\n")}\n${inputString}:${prompt}\n`;
* };
*
* ```
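*
* Pass it via the constructor config (`{ transform }`) or, assuming a
* `GPT3Rocket` instance named `rocket`, swap it in later (and restore the
* default with `resetTransformer`):
*
* ```ts
* rocket.changeTransformer(transform);
* ```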
*
* ## APIConfig
* ```
* engine:string; // The engine ID, defaults to davinci (ada, babbage, curie, davinci)
* prompt?:string; //One or more prompts to generate from. Can be a string, list of strings, a list of integers (i.e. a single prompt encoded as tokens), or list of lists of integers (i.e. many prompts encoded as integers).
* max_tokens?:number; //How many tokens to complete to. Can return fewer if a stop sequence is hit.
* temperature?:number; //What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend using this or top_p but not both.
* top_p?:number; //An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend using this or temperature but not both.
* n?:number; //How many choices to create for each prompt.
* stream?:boolean; //Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.
* logprobs?:number; //Include the log probabilities on the logprobs most likely tokens. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.
* stop?:string; //One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
* ```
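*
* ## Example (illustrative)
* A minimal end-to-end sketch, assuming `OPENAI_KEY` holds a valid OpenAI API
* key; everything else maps to the options documented above:
*
* ```ts
* import { GPT3Rocket } from "gpt3rocket";
*
* const rocket = new GPT3Rocket({
*   credential: process.env.OPENAI_KEY,
*   prefix: "The agent answers tersely.",
*   samples: [["ping", "pong"]],
* });
*
* // Per-call samples/prefix/APIFlags override the constructor defaults
* const reply = await rocket.ask("ping?", [], "", { max_tokens: 32, temperature: 0.7 });
* console.log(reply.text);
* ```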
*/
var GPT3Rocket = /** @class */ (function () {
function GPT3Rocket(configRef) {
// ["What are you?", "I am a helper agent here to answer your questions!"],
var defaults = {
samples: [],
prefix: "This is a conversation with a helpful agent. The agent is kind, clever and eager to help",
transform: this._transformer,
inputString: "input",
outputString: "output",
credential: "______________________-",
APIConfig: {
endpoint: "https://api.openai.com/v1/engines/davinci/completions",
full_response: false,
},
APIFlags: {
max_tokens: 20,
temperature: 0.3,
stop: "\n",
},
debug: false,
};
// Merge the nested APIFlags/APIConfig separately so user overrides don't drop the defaults
var mergedAPIFlags = Object.assign({}, defaults.APIFlags, configRef.APIFlags || {});
var mergedAPIConfig = Object.assign({}, defaults.APIConfig, configRef.APIConfig || {});
this.config = Object.assign({}, defaults, configRef, {
APIFlags: mergedAPIFlags,
APIConfig: mergedAPIConfig,
});
this.__debug("<gpt3-rocket> Root config:", this.config);
}
GPT3Rocket.prototype.buildQuery = function (prompt, samples, prefix) {
if (samples === void 0) { samples = []; }
if (prefix === void 0) { prefix = ""; }
var prefixRef = this.config.prefix || "";
if (prefix) {
prefixRef = prefix;
}
var sampleRef = this.config.samples || [];
if (samples && samples.length) {
// Q: merge samples?
sampleRef = samples;
}
if (typeof this.config.transform === "function") {
return this.config.transform(prompt, sampleRef, prefixRef, this.config.inputString, this.config.outputString);
}
else {
return this._transformer(prompt, sampleRef, prefixRef, this.config.inputString, this.config.outputString);
}
};
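// Illustrative: with the default transformer, a call like
//   buildQuery("marco", [["marco", "polo"]], "Echo game")
// yields the primed prompt string:
//   "Echo game\ninput:marco\noutput:polo\ninput:marco\n"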
GPT3Rocket.prototype.ask = function (prompt, samples, prefix, APIFlags, APIConfig) {
if (samples === void 0) { samples = []; }
if (prefix === void 0) { prefix = ""; }
if (APIFlags === void 0) { APIFlags = {}; }
if (APIConfig === void 0) { APIConfig = {}; }
return __awaiter(this, void 0, void 0, function () {
var query, mergedAPIConfig, mergedAPIFlags, endpoint, error, result, full_response, res, target;
var _this = this;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
query = prompt;
if (samples && samples.length) {
if (prefix) {
query = this.buildQuery(prompt, samples, prefix);
}
else if (this.config.prefix) {
query = this.buildQuery(prompt, samples, this.config.prefix);
}
}
else {
if (this.config.samples) {
query = this.buildQuery(prompt, this.config.samples, this.config.prefix);
}
else {
query = this.buildQuery(prompt, this.config.samples, this.config.prefix);
}
}
// Merge per-call options into copies so the shared instance config isn't mutated
mergedAPIConfig = Object.assign({}, this.config.APIConfig, APIConfig);
mergedAPIFlags = Object.assign({}, this.config.APIFlags, APIFlags);
this.__debug("<gpt3-rocket> Query: ", query);
endpoint = mergedAPIConfig.endpoint;
error = false;
return [4 /*yield*/, axios_1.default
.post(endpoint, __assign({ prompt: query }, mergedAPIFlags), {
headers: {
"Content-Type": "application/json",
Authorization: "Bearer " + this.config.credential,
},
})
.catch(function (e) {
_this.__debug("<gpt3-rocket> ERROR:", e.response);
if (e.response && e.response.status === 401 && e.response.data) {
console.log("\n\n<YOUR CREDENTIAL IS LIKELY INVALID>\n\n");
if (e.response.data.error) {
error = e.response.data.error;
}
}
})];
case 1:
result = _a.sent();
full_response = mergedAPIConfig.full_response;
if (!error) {
if (full_response && result) {
return [2 /*return*/, result.data];
}
else if (result) {
res = result.data.choices[0].text || "";
target = this.config.outputString + ":";
return [2 /*return*/, { text: res.replace(target, "") }];
}
}
else {
if (full_response) {
return [2 /*return*/, error];
}
else {
return [2 /*return*/, {
text: error.message || "There was a problem (your key might be invalid)",
}];
}
}
return [2 /*return*/];
}
});
});
};
GPT3Rocket.prototype.add = function (sample) {
var _a, _b;
if (!Array.isArray(sample) || sample.length !== 2) {
throw new Error("Sample should be exactly one input & one output");
}
//@ts-ignore
(_b = (_a = this === null || this === void 0 ? void 0 : this.config) === null || _a === void 0 ? void 0 : _a.samples) === null || _b === void 0 ? void 0 : _b.push(sample);
};
GPT3Rocket.prototype.addPrefix = function (prefix) {
this.config.prefix = prefix;
};
GPT3Rocket.prototype.changeTransformer = function (transformerFunction) {
if (typeof transformerFunction === "function") {
this.config.transform = transformerFunction;
}
};
GPT3Rocket.prototype.resetTransformer = function () {
this.config.transform = this._transformer;
};
GPT3Rocket.prototype.clear = function () {
this.clearSamples();
this.clearPrefix();
};
GPT3Rocket.prototype.clearSamples = function () {
this.config.samples = [];
};
GPT3Rocket.prototype.clearPrefix = function () {
this.config.prefix = "";
};
GPT3Rocket.prototype.updateCredential = function (credential) {
this.config.credential = credential;
};
GPT3Rocket.prototype.__debug = function () {
var payload = [];
for (var _i = 0; _i < arguments.length; _i++) {
payload[_i] = arguments[_i];
}
if (this.config.debug) {
console.log.apply(console, payload);
}
};
GPT3Rocket.prototype._transformer = function (prompt, samples, prefix, inputString, outputString) {
//@ts-ignore
// Flatten [input, output] pairs into one list, then label alternating entries
var decoratedSamples = [].concat.apply([], samples).map(function (example, idx) {
if (!(idx % 2)) {
return inputString + ":" + example;
}
else {
return outputString + ":" + example;
}
});
if (prefix && decoratedSamples.length) {
return prefix + "\n" + decoratedSamples.join("\n") + "\n" + inputString + ":" + prompt + "\n";
}
else {
return inputString + ":" + prompt + "\n";
}
};
return GPT3Rocket;
}());
exports.GPT3Rocket = GPT3Rocket;
/**
* ENDPOINT
* Express-style handler factory: reads prompt, samples, prefix, APIFlags &
* APIConfig from req.body and responds with the completion result.
*/
exports.gpt3Endpoint = function (config) {
var inst = new GPT3Rocket(config);
// TODO: req/res types, body-parser/no body-parser
return function (req, res, next) { return __awaiter(void 0, void 0, void 0, function () {
var _a, _b, samples, _c, prefix, _d, APIConfig, _e, APIFlags, prompt, result;
return __generator(this, function (_f) {
switch (_f.label) {
case 0:
_a = req.body, _b = _a.samples, samples = _b === void 0 ? [] : _b, _c = _a.prefix, prefix = _c === void 0 ? "" : _c, _d = _a.APIConfig, APIConfig = _d === void 0 ? {} : _d, _e = _a.APIFlags, APIFlags = _e === void 0 ? {} : _e, prompt = _a.prompt;
return [4 /*yield*/, inst.ask(prompt, samples, prefix, APIFlags, APIConfig)];
case 1:
result = _f.sent();
return [2 /*return*/, res.status(200).send(result)];
}
});
}); };
};
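/**
* ### Example (illustrative)
* A minimal sketch of mounting the handler in an Express app; assumes JSON
* body parsing is enabled and `OPENAI_KEY` holds a valid credential.
* ```ts
* import express from "express";
* import { gpt3Endpoint } from "gpt3rocket";
*
* const app = express();
* app.use(express.json()); // handler reads prompt/samples/prefix/APIFlags/APIConfig from req.body
* app.post("/chat", gpt3Endpoint({ credential: process.env.OPENAI_KEY }));
* app.listen(3000);
* ```
*/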
//# sourceMappingURL=index.js.map