json-gpt
Version:
Json-GPT permite interactuar de forma sencilla con el modelo GPT-3.5-turbo en formato JSON
179 lines (158 loc) • 6.41 kB
text/typescript
/**
* Module for solving requests using the OpenAI GPT-3 API.
* @module solve
*/
import dotenv from 'dotenv'
import { ChatCompletionRequestMessage, Configuration, CreateChatCompletionRequest, OpenAIApi } from 'openai'
// Load environment variables from .env before reading OPENAI_API_KEY below.
dotenv.config()
/**
 * Shared OpenAI API client, configured from the OPENAI_API_KEY environment
 * variable. Exported so consumers can reuse the same configured client.
 */
export const openai = new OpenAIApi(new Configuration({
apiKey: process.env.OPENAI_API_KEY
}))
/**
 * Type definition for the input request passed to the solve function.
 * Either a plain prompt string (which is wrapped into a single 'system'
 * message) or a pre-built array of ChatCompletionRequestMessage objects.
 */
export type SolveRequest = string | Array<ChatCompletionRequestMessage>
/**
 * Type definition for the options that can be passed to the solve function.
 * Accepts any CreateChatCompletionRequest field except 'model' and 'messages'
 * (both are set internally), plus exponential-backoff and verbosity settings.
 * @property {number} [initial_delay=4000] - Exponential backoff initial delay, in milliseconds.
 * @property {number} [max_retries=4] - Exponential backoff maximum number of retries.
 * @property {number} [delay_exponential=2] - Multiplier applied to the delay after each retry.
 * @property {boolean} [verbose=false] - Enables console logging of requests, responses and errors.
 */
export type SolveRequestOptions = Partial<Omit<CreateChatCompletionRequest, 'model' | 'messages'>> & {
initial_delay?: number;
max_retries?: number;
delay_exponential?: number;
verbose?: boolean;
}
/**
 * Interface for the response returned by the solve function.
 * It includes the status code and the data (string) generated by GPT-3.5-turbo,
 * or a JSON-encoded error payload when the call failed.
 * @property {number} status - Response status (200 on success, HTTP error code or 0 otherwise)
 * @property {string} data - Response data (model output, or a JSON error string on failure)
 */
export interface SolveResponse {
status: number;
data: string;
}
/**
 * Normalizes the input request into an array of ChatCompletionRequestMessage objects.
 * A plain string becomes a single 'system' message; an array is shallow-copied as-is.
 * @param {SolveRequest} request - The input request to be normalized.
 * @returns {Array<ChatCompletionRequestMessage>} - The messages to send to the API.
 * @throws A SolveResponse-shaped object (status 0) when the request is neither a string nor an array.
 */
function formatRequest(request: SolveRequest): Array<ChatCompletionRequestMessage> {
  if (Array.isArray(request)) {
    return [...request]
  }
  if (typeof request === 'string') {
    return [{ role: 'system', content: request }]
  }
  throw {
    status: 0,
    data: '{"error": "Invalid request format", "text": "void" }'
  }
}
/**
 * Function for formatting errors returned by the OpenAI API.
 * @function
 * @param {number} status - The status error code.
 * @param {string} error - The error name or code.
 * @param {string} text - The error message.
 * @returns {SolveResponse} - Formatted error object with status code and a JSON error string.
 */
function formatError(status: number, error: string, text: string): { status: number; data: string } {
  // Serialize with JSON.stringify instead of string interpolation: the old
  // template produced invalid JSON whenever `error` or `text` contained
  // quotes, backslashes or newlines (e.g. API messages quoting user input).
  return {
    status,
    data: JSON.stringify({ error, text })
  };
}
/**
 * Makes a single chat-completion request to the OpenAI GPT-3.5-turbo API.
 * The defaults below (temperature 0, max_tokens 2000, n 1, ...) can be
 * overridden by fields in `request`; 'model' is always gpt-3.5-turbo.
 * @function
 * @async
 * @param {Omit<CreateChatCompletionRequest, 'model'>} request - Request merged over the defaults and passed to createChatCompletion.
 * @returns {Promise<string>} - The generated message content, or '' when the response carries no content.
 */
async function callGPT(request: Omit<CreateChatCompletionRequest, 'model'>): Promise<string> {
  const completion = await openai.createChatCompletion({
    model: 'gpt-3.5-turbo',
    temperature: 0,
    top_p: 1,
    frequency_penalty: 0,
    presence_penalty: 0,
    max_tokens: 2000,
    n: 1,
    ...request
  })
  // Optional chaining instead of non-null assertions: an empty `choices`
  // array now yields '' instead of throwing a TypeError at runtime.
  return completion.data.choices[0]?.message?.content ?? ''
}
// Default exponential-backoff settings; each is overridable via SolveRequestOptions.
const MAX_RETRIES = 4 // maximum number of retries after a 429 (rate limit) response
const INITIAL_DELAY = 4000 // first retry delay, in milliseconds
const DELAY_EXPONENTIAL = 2 // multiplier applied to the delay after each retry
/**
 * Function for solving a request using the OpenAI GPT-3.5-turbo API.
 * Retries with exponential backoff on HTTP 429 (rate limit); any other
 * failure is returned immediately as a formatted error response.
 * @function
 * @async
 * @param {SolveRequest} request - The input request to be processed by GPT-3.5-turbo. It can be either a string or an array of ChatCompletionRequestMessage objects.
 * @param {SolveRequestOptions} options - Optional parameters for model configuration, exponential backoff and verbosity.
 * @returns {Promise<SolveResponse>} - Promise that resolves to a SolveResponse object containing the status code and the data (string) generated by GPT-3.5-turbo.
 */
export async function solve(request: SolveRequest, options?: SolveRequestOptions): Promise<SolveResponse> {
  const {
    max_retries = MAX_RETRIES,
    initial_delay = INITIAL_DELAY,
    delay_exponential = DELAY_EXPONENTIAL,
    verbose = false,
    ...apiOptions // everything else is forwarded to the chat-completion call
  } = options ?? {}
  let delay = initial_delay
  let messages: Array<ChatCompletionRequestMessage>
  try {
    messages = formatRequest(request)
  } catch (error: any) {
    // formatRequest throws a SolveResponse-shaped object for invalid input.
    return error as SolveResponse
  }
  if (verbose) console.log('Sending to GPT-3.5-turbo', messages, apiOptions)
  let retries = 0
  // Up to max_retries + 1 attempts in total (the first call plus the retries).
  while (retries <= max_retries) {
    try {
      const response = await callGPT({
        messages,
        ...apiOptions
      })
      if (verbose) console.log('GPT-3.5-turbo response: ', response)
      return {
        status: 200,
        data: response
      };
    } catch (error: any) {
      if (error.response?.status === 429) {
        // Rate limited: wait, grow the delay exponentially, and try again.
        await new Promise(resolve => setTimeout(resolve, delay));
        delay *= delay_exponential;
        retries++;
        if (verbose) console.log('Retrying...', retries)
      } else {
        // Guard every access behind `error.response?`: network-level failures
        // carry no HTTP response and previously crashed with a TypeError here.
        const err = formatError(
          error.response?.status ?? 0,
          'OpenAI API Error',
          error.response
            ? `${error.response.statusText}: ${error.response.data?.error?.message ?? ''}`
            : `${error.message ?? error}`
        )
        if (verbose) console.log(err)
        return err
      }
    }
  }
  // Every attempt hit a 429: report the backoff configuration that was exhausted.
  const err = formatError(
    429,
    'MAX RETRIES REACHED',
    `Exponential fail with initial delay: ${initial_delay}, max retries: ${max_retries} and delay exponential: ${delay_exponential}`
  )
  if (verbose) console.log(err)
  return err
}