// langschema — one-line LLM output parsers for JS/TS
// 327 lines (326 loc) • 14.5 kB • JavaScript
"use strict";
// TypeScript-emitted runtime helper: drives a generator-based coroutine to
// completion as a Promise (the down-leveled form of async/await). Reuses a
// previously defined global __awaiter if one exists.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    // Ensure every yielded value is wrapped in a Promise of the expected constructor.
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        // Resume the generator with the resolved value of the previous yield.
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        // Throw the rejection back into the generator so try/catch inside it works.
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        // Either settle the outer Promise (generator done) or await the next yield.
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.string = exports.list = exports.categorize = exports.bool = exports.asZodType = void 0;
const openai_1 = require("openai");
const zod_1 = require("zod");
const zod_to_json_schema_1 = require("zod-to-json-schema");
/**
 * Runs `fn`, retrying failures with exponentially growing delays.
 *
 * @param {number} retries - Total number of attempts allowed; the final failure is rethrown.
 * @param {Function} fn - The async operation to attempt.
 * @param {number} [delay=500] - Milliseconds to wait before the next attempt; doubles after each failure.
 * @returns {Promise<*>} Resolves with the result of the first successful attempt.
 */
async function backoff(retries, fn, delay = 500) {
    let attemptsLeft = retries;
    let wait = delay;
    for (;;) {
        try {
            return await fn();
        }
        catch (error) {
            // Out of attempts: surface the most recent failure to the caller.
            if (attemptsLeft === 1) {
                throw error;
            }
            await new Promise(resolve => setTimeout(resolve, wait));
            attemptsLeft -= 1;
            wait *= 2;
        }
    }
}
/**
 * Creates a thin OpenAI client wrapper whose chat-completion calls are
 * automatically retried with exponential backoff (up to 10 attempts,
 * starting at a 500 ms delay).
 *
 * Reads the API key from the OPENAI_API_KEY environment variable.
 */
function buildLLM() {
    const configuration = new openai_1.Configuration({ apiKey: process.env.OPENAI_API_KEY });
    const client = new openai_1.OpenAIApi(configuration);
    return {
        createChatCompletion: (request) => backoff(10, () => client.createChatCompletion(request), 500),
    };
}
/**
 * Builds the base request options shared by every completion call.
 *
 * Temperature is pinned to 0 for deterministic parsing; the model is
 * "gpt-4" when `promptOptions.gpt4` is truthy, otherwise "gpt-3.5-turbo".
 *
 * @param {GenericPromptOptions} [promptOptions] - Optional settings for the prompt.
 * @returns {{temperature: number, model: string}} Options to spread into the request.
 */
function buildLLMOptions(promptOptions) {
    const model = promptOptions?.gpt4 ? "gpt-4" : "gpt-3.5-turbo";
    return { temperature: 0, model };
}
/**
 * Parses the given prompt into a value of the given Zod type using the OpenAI
 * API. The target type can be ANY Zod type, not just an object - a boolean, a
 * number, an enum, etc. are all valid inputs. Because OpenAI function calling
 * only accepts object schemas, non-object types are transparently wrapped in
 * { value: <type> } and unwrapped before returning.
 *
 * @export
 * @param {string} prompt - The input to parse. An empty prompt short-circuits to zodType.parse("").
 * @param zodType - The Zod type to parse the response into.
 * @param {GenericPromptOptions} [promptOptions] - Optional settings for the prompt.
 * @returns {Promise<T>} A promise that resolves to the parsed value.
 *
 * @throws {ZodError} If the parsed response does not match the expected structure.
 * @throws {Error} If the model response contains no function-call arguments.
 *
 * @async
 */
async function asZodType(prompt, zodType, promptOptions) {
    if (!prompt) {
        return zodType.parse("");
    }
    const openai = buildLLM();
    const llmOptions = buildLLMOptions(promptOptions);
    // NOTE(review): relies on Zod's internal _def.typeName — verify when upgrading Zod.
    const shouldWrap = zodType._def.typeName !== "ZodObject";
    // Function calling requires an object schema, so wrap non-object types.
    const wrapperZod = shouldWrap ? zod_1.z.object({ value: zodType }) : zodType;
    const jsonSchema = zod_to_json_schema_1.zodToJsonSchema(wrapperZod, "wrapper").definitions?.wrapper;
    const result = await openai.createChatCompletion({
        ...llmOptions,
        messages: [
            {
                role: "system",
                content: "Follow the user's instructions exactly, and respond with ONLY what the user requests in valid JSON format. No extraneous information."
            },
            {
                role: "user",
                content: prompt
            }
        ],
        function_call: { name: "answer" },
        functions: [
            {
                name: "answer",
                description: "Answer the user's question",
                parameters: jsonSchema
            }
        ]
    });
    // Guard the response shape (consistent with bool/categorize/list) so a
    // missing function_call yields a clear error instead of an opaque TypeError.
    const rawArguments = result.data.choices[0].message?.function_call?.arguments;
    if (rawArguments === undefined) {
        throw new Error("Model response did not include function call arguments");
    }
    const evaluated = wrapperZod.parse(JSON.parse(rawArguments));
    // Unwrap the synthetic { value: ... } wrapper when one was added.
    return shouldWrap ? evaluated.value : evaluated;
}
exports.asZodType = asZodType;
/**
 * Asks the LLM a yes/no question and returns the answer as a boolean.
 *
 * An empty prompt short-circuits to `false` without calling the API. The
 * model is forced to answer via a function call whose single argument is a
 * boolean, and the response is validated with Zod before being returned.
 *
 * @export
 * @param {string} prompt - The question to send to the model.
 * @param {GenericPromptOptions} [promptOptions] - Optional settings for the prompt.
 * @returns {Promise<boolean>} A promise that resolves to the model's true/false answer.
 *
 * @throws {ZodError} If the parsed response does not match the expected structure.
 *
 * @async
 */
async function bool(prompt, promptOptions) {
    if (!prompt) {
        return false;
    }
    const openai = buildLLM();
    const llmOptions = buildLLMOptions(promptOptions);
    const request = {
        ...llmOptions,
        messages: [
            {
                role: "system",
                content: 'Answer the following question with a true or false.'
            },
            {
                role: "user",
                content: prompt
            }
        ],
        function_call: { name: "answer" },
        functions: [
            {
                name: "answer",
                description: "Answer the user's question",
                parameters: {
                    type: "object",
                    required: ["value"],
                    description: "An object containing a boolean value.",
                    properties: {
                        value: {
                            type: "boolean",
                            description: "The boolean value to return.",
                        },
                    },
                },
            }
        ]
    };
    const result = await openai.createChatCompletion(request);
    const rawArguments = result.data.choices[0].message?.function_call?.arguments;
    const answer = JSON.parse(rawArguments);
    const zBooleanAnswer = zod_1.z.object({ value: zod_1.z.boolean() });
    return zBooleanAnswer.parse(answer).value;
}
exports.bool = bool;
/**
 * Classifies a prompt into exactly one of the caller-supplied categories.
 *
 * The allowed values are embedded both in the system message and in the
 * function-call JSON schema (as an enum), and the response is validated
 * with Zod so the returned string is guaranteed to be one of them.
 *
 * @export
 * @param {string} prompt - The question to classify.
 * @param {AtLeastOne<string>} allowedValues - Array of allowable categorical values.
 * @param {GenericPromptOptions} [promptOptions] - Optional settings for the prompt.
 * @returns {Promise<string>} A promise that resolves to the selected category.
 *
 * @throws {Error} If no prompt is provided.
 * @throws {ZodError} If the parsed response does not match the expected structure or is not one of the allowed values.
 *
 * @async
 */
async function categorize(prompt, allowedValues, promptOptions) {
    if (!prompt) {
        throw new Error("Prompt is required");
    }
    const openai = buildLLM();
    const llmOptions = buildLLMOptions(promptOptions);
    const request = {
        ...llmOptions,
        messages: [
            {
                role: "system",
                content: `Answer the following question with one of the following allowed values: ${allowedValues.join(", ")}. You MUST use the exact spelling and capitalization of the values.`
            },
            {
                role: "user",
                content: prompt
            }
        ],
        function_call: { name: "answer" },
        functions: [
            {
                name: "answer",
                description: "Answer the user's question",
                parameters: {
                    type: "object",
                    required: ["value"],
                    properties: {
                        value: {
                            type: "string",
                            enum: allowedValues,
                            description: "The value to use, MUST be one of the allowed values",
                        },
                    },
                },
            }
        ]
    };
    const result = await openai.createChatCompletion(request);
    const rawArguments = result.data.choices[0].message?.function_call?.arguments;
    const returnedValue = JSON.parse(rawArguments);
    const zStringAnswer = zod_1.z.object({ value: zod_1.z.enum(allowedValues) });
    return zStringAnswer.parse(returnedValue).value;
}
exports.categorize = categorize;
/**
 * Asks the LLM to answer with a list of values and returns them as an array.
 *
 * The model must return between minValues and maxValues entries; when
 * allowedValues is provided, each entry is constrained (via schema enum and
 * Zod validation) to one of those values. Responses with too few entries
 * throw; responses with too many are truncated to maxValues.
 *
 * @export
 * @param {string} prompt - The prompt to send to the model. Empty input resolves to [].
 * @param {null | AtLeastOne<string>} allowedValues - Array of allowable values. Null indicates that any string is allowed.
 * @param {number} [minValues=1] - The minimum number of values required (may be 0).
 * @param {number} [maxValues=5] - The maximum number of values allowed.
 * @param {GenericPromptOptions} [promptOptions] - Optional settings for the prompt.
 * @returns {Promise<string[]>} A promise that resolves to the selected values.
 *
 * @throws {Error} If minValues is not less than maxValues, if minValues is negative, or if the model returns fewer than minValues entries.
 * @throws {ZodError} If the parsed response does not match the expected structure or is not one of the allowed values.
 *
 * @async
 */
async function list(prompt, allowedValues, minValues = 1, maxValues = 5, promptOptions) {
    if (minValues >= maxValues) {
        throw new Error("minValues must be less than maxValues");
    }
    if (minValues < 0) {
        // Fixed message: the check allows 0 (zeroMessage below explicitly
        // supports minValues === 0), so "greater than zero" was wrong.
        throw new Error("minValues must not be negative");
    }
    if (!prompt) {
        return [];
    }
    const llmOptions = buildLLMOptions(promptOptions);
    const zeroMessage = minValues === 0 ? "You may also answer with no values." : "";
    // NOTE(review): this looks like it should test maxValues > 1 — when
    // minValues > 1 multiple values are mandatory, not optional. Preserved
    // as-is; confirm intent before changing the prompt wording.
    const multipleMessage = minValues > 1 ? "You may also answer with multiple values." : "";
    const allowedValuesMessage = allowedValues ? ` of the following allowed values: ${allowedValues.join(", ")}. You MUST use the exact spelling and capitalization of the values` : "";
    // Constrain array items to the allowed values (if any) in the JSON schema.
    const itemsType = { type: "string" };
    if (allowedValues) {
        itemsType.enum = allowedValues;
    }
    const openai = buildLLM();
    const result = await openai.createChatCompletion({
        ...llmOptions,
        messages: [
            {
                role: "system",
                content: `Answer the following question with AT LEAST ${minValues} and AT MOST ${maxValues}${allowedValuesMessage}. ${multipleMessage}${zeroMessage}`
            },
            {
                role: "user",
                content: prompt
            }
        ],
        function_call: { name: "answer" },
        functions: [
            {
                name: "answer",
                description: "Answer the user's question",
                parameters: {
                    type: "object",
                    required: ["value"],
                    properties: {
                        value: {
                            type: "array",
                            description: "The values to use",
                            minItems: minValues,
                            maxItems: maxValues,
                            items: itemsType
                        },
                    },
                },
            }
        ]
    });
    const rawArguments = result.data.choices[0].message?.function_call?.arguments;
    const returnedValue = JSON.parse(rawArguments);
    const zStringArrayAnswer = zod_1.z.object({
        value: zod_1.z.array(allowedValues ? zod_1.z.enum(allowedValues) : zod_1.z.string())
    });
    const parsedValue = zStringArrayAnswer.parse(returnedValue).value;
    if (parsedValue.length < minValues) {
        throw new Error(`You must provide at least ${minValues} values`);
    }
    else if (parsedValue.length > maxValues) {
        // Too many values are truncated rather than rejected.
        parsedValue.splice(maxValues);
    }
    return parsedValue;
}
exports.list = list;
/**
 * Sends a free-form prompt to the LLM and returns the raw text response.
 * No output validation is performed; an empty prompt resolves to "".
 *
 * @export
 * @param {string} prompt - The instruction to send to the model.
 * @returns {Promise<string>} A promise that resolves to the content of the model's response.
 *
 * @throws {Error} If the LLM fails to create or call.
 *
 * @async
 */
async function string(prompt) {
    if (!prompt) {
        return "";
    }
    const openai = buildLLM();
    const llmOptions = buildLLMOptions();
    const request = {
        ...llmOptions,
        messages: [
            {
                role: "system",
                content: "You will follow the user's instructions exactly. You will respond with ONLY what the user requests, and NO extraneous information like 'Sure, here you go:', or 'That's a great question!', etc."
            },
            {
                role: "user",
                content: prompt
            }
        ]
    };
    const result = await openai.createChatCompletion(request);
    return result.data.choices[0].message.content;
}
exports.string = string;