// chatgpt-optimized-official — ChatGPT client using the official OpenAI API.
import axios from "axios";
import { randomUUID } from "crypto";
import { encode } from "gpt-3-encoder";
import Usage from "../models/chatgpt-usage.js";
import Options from "../models/chatgpt-options.js";
import Conversation from "../models/conversation.js";
import Message from "../models/chatgpt-message.js";
import MessageType from "../enums/message-type.js";
import AppDbContext from "./app-dbcontext.js";
import OpenAIKey from "../models/openai-key.js";
//import { Configuration, OpenAIApi } from "openai";
import { type } from "os";
/**
 * One piece of multimodal message content: either plain text or an image
 * reference, mirroring the OpenAI chat "content parts" schema.
 */
type ContentBlock =
    | { type: "text"; text: string }
    | {
        type: "image_url";
        image_url: {
            url: string;
            // Resolution hint for vision models; omitted means "auto".
            detail?: "low" | "high" | "auto";
        };
    };
/**
 * The result of executing one tool call, fed back to the model as a
 * role:"tool" message.
 */
type ToolOutput = {
    /** Name of the function/tool that was invoked. */
    name: string;
    /** Identifier of the tool call this output answers. */
    tool_call_id: string;
    /** Serialized output produced by the tool. */
    content: string;
};
/**
 * ChatGPT API client with multi-key rotation, per-conversation history,
 * token accounting, and both streaming (SSE) and non-streaming requests.
 */
class ChatGPT {
    /** Effective request options (caller-supplied values merged with defaults). */
    public options: Options;
    /** Persistent store for API keys and conversation history. */
    private db: AppDbContext;
    /** Optional hook invoked after every completed request with its token usage. */
    public onUsage: (usage: Usage) => void;

    /**
     * @param key     A single API key or a pool of keys to rotate between.
     * @param options Optional overrides for the defaults below.
     */
    constructor(key: string | string[], options?: Options) {
        this.db = new AppDbContext();
        // Register any keys not already persisted (fire-and-forget once the db loads).
        this.db.WaitForLoad().then(() => {
            const keys = typeof key === "string" ? [key] : Array.isArray(key) ? key : [];
            for (const k of keys) {
                if (this.db.keys.Any((x) => x.key === k)) continue;
                this.db.keys.Add({
                    key: k,
                    queries: 0,
                    balance: 0,
                    tokens: 0,
                });
            }
        });
        this.options = {
            model: options?.model || "gpt-3.5-turbo", // default model
            // Numeric options use ?? (not ||) so an explicit 0 is honored —
            // previously temperature: 0 silently became 0.7 and top_p: 0 became 0.9.
            temperature: options?.temperature ?? 0.7,
            max_tokens: options?.max_tokens ?? 100,
            top_p: options?.top_p ?? 0.9,
            frequency_penalty: options?.frequency_penalty ?? 0,
            presence_penalty: options?.presence_penalty ?? 0,
            instructions: options?.instructions || `You are ChatGPT, a language model developed by OpenAI. You are designed to respond to user input in a conversational manner, Answer as concisely as possible. Your training data comes from a diverse range of internet text and You have been trained to generate human-like responses to various questions and prompts. You can provide information on a wide range of topics, but your knowledge is limited to what was present in your training data, which has a cutoff date of 2021. You strive to provide accurate and helpful information to the best of your ability.\nKnowledge cutoff: 2021-09`,
            price: options?.price ?? 0.002, // USD per 1K tokens, used for key balance tracking
            max_conversation_tokens: options?.max_conversation_tokens ?? 4097,
            endpoint: options?.endpoint || "https://api.openai.com/v1/chat/completions",
            moderation: options?.moderation ?? false,
            functions: options?.functions || null,
            function_call: options?.function_call || null,
            tools: options?.tools || null,
            tool_choice: options?.tool_choice || 'auto',
            parallel_tool_calls: options?.parallel_tool_calls ?? false,
            response_format: options?.response_format || null,
        };
    }

    /** Pick the key with the lowest accumulated balance; throw if none exist. */
    private getOpenAIKey(): OpenAIKey {
        let key = this.db.keys.OrderBy((x) => x.balance).FirstOrDefault();
        if (key == null) {
            key = this.db.keys.FirstOrDefault();
        }
        if (key == null) {
            throw new Error("No keys available.");
        }
        return key;
    }

    /**
     * Re-assemble raw SSE chunks into complete "data: ..." lines.
     * Terminates the generator when the "[DONE]" sentinel arrives.
     */
    private async *chunksToLines(chunksAsync: any) {
        let previous = "";
        for await (const chunk of chunksAsync) {
            const bufferChunk = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
            previous += bufferChunk;
            let eolIndex;
            while ((eolIndex = previous.indexOf("\n")) >= 0) {
                // line includes the EOL
                const line = previous.slice(0, eolIndex + 1).trimEnd();
                // `return` (not `break`) so the generator stops for good: the old
                // `break` only exited the inner loop and left the sentinel in the
                // buffer, to be re-hit on every subsequent chunk.
                if (line === "data: [DONE]") return;
                if (line.startsWith("data: ")) yield line;
                previous = previous.slice(eolIndex + 1);
            }
        }
    }

    /** Strip the SSE "data: " prefix, yielding the raw JSON payload. */
    private async *linesToMessages(linesAsync: any) {
        for await (const line of linesAsync) {
            // Was sliced by "data :".length — only correct because that string
            // coincidentally has the same length (6) as the real prefix.
            const message = line.substring("data: ".length);
            yield message;
        }
    }

    /** Convenience pipeline: raw stream chunks -> SSE lines -> JSON payloads. */
    private async *streamCompletion(data: any) {
        yield* this.linesToMessages(this.chunksToLines(data));
    }

    /** Build the system prompt: base instructions plus current date/time/user. */
    private getInstructions(username: string): string {
        return `${this.options.instructions}
Current date: ${this.getToday()}
Current time: ${this.getTime()}${username !== "User" ? `\nName of the user talking to: ${username}` : ""}`;
    }

    /** Create and persist a new, empty conversation. */
    public addConversation(conversationId: string, userName: string = "User") {
        let conversation: Conversation = {
            id: conversationId,
            userName: userName,
            messages: [],
        };
        this.db.conversations.Add(conversation);
        return conversation;
    }

    /** Fetch a conversation by id, creating it on first use. */
    public getConversation(conversationId: string, userName: string = "User") {
        let conversation = this.db.conversations.Where((conversation) => conversation.id === conversationId).FirstOrDefault();
        if (!conversation) {
            conversation = this.addConversation(conversationId, userName);
        } else {
            conversation.lastActive = Date.now();
        }
        conversation.userName = userName;
        return conversation;
    }

    /** Clear a conversation's message history (keeps the conversation itself). */
    public resetConversation(conversationId: string) {
        let conversation = this.db.conversations.Where((conversation) => conversation.id === conversationId).FirstOrDefault();
        if (conversation) {
            conversation.messages = [];
            conversation.lastActive = Date.now();
        }
        return conversation;
    }

    /** Non-streaming convenience wrapper around askStream (callbacks are no-ops). */
    public async ask(prompt: string, conversationId: string = "default", userName: string = "User") {
        return await this.askStream(
            (data) => { },
            (data) => { },
            prompt,
            conversationId,
            userName,
        );
    }

    /**
     * Send `prompt` and stream the response via SSE.
     * @param data  Invoked with each content delta (or a serialized function call).
     * @param usage Invoked once with the request's token usage.
     * @returns The complete assistant response text.
     */
    public async askStream(data: (arg0: string) => void, usage: (usage: Usage) => void, prompt: string, conversationId: string = "default", userName: string = "User") {
        let oAIKey = this.getOpenAIKey();
        let conversation = this.getConversation(conversationId, userName);
        if (this.options.moderation) {
            let flagged = await this.moderate(prompt, oAIKey.key);
            if (flagged) {
                const refusal = "Your message was flagged as inappropriate and was not sent.";
                // for..of — the original for..in iterated the array *indices*, so
                // the callback received "0", "1", ... instead of the characters.
                for (const chunk of refusal.split("")) {
                    data(chunk);
                    await this.wait(100);
                }
                return refusal;
            }
        }
        let promptStr = this.generatePrompt(conversation, prompt);
        let prompt_tokens = this.countTokens(promptStr);
        try {
            const response = await axios.post(
                this.options.endpoint,
                {
                    model: this.options.model,
                    messages: promptStr,
                    temperature: this.options.temperature,
                    max_tokens: this.options.max_tokens,
                    top_p: this.options.top_p,
                    frequency_penalty: this.options.frequency_penalty,
                    presence_penalty: this.options.presence_penalty,
                    stream: true
                },
                {
                    responseType: "stream",
                    headers: {
                        Accept: "text/event-stream",
                        "Content-Type": "application/json",
                        Authorization: `Bearer ${oAIKey.key}`,
                    },
                },
            );
            let responseStr = "";
            let responseArg = "";
            let responseNameFunction = "";
            for await (const message of this.streamCompletion(response.data)) {
                try {
                    const parsed = JSON.parse(message);
                    const { delta, finish_reason } = parsed.choices[0];
                    const { content, function_call } = delta;
                    if (function_call) {
                        // name/arguments arrive piecewise and are undefined in most
                        // fragments; `?? ""` prevents appending the string "undefined".
                        responseNameFunction += function_call.name ?? "";
                        responseArg += function_call.arguments ?? "";
                    }
                    if (finish_reason === "function_call") {
                        // Surface the fully assembled call as one JSON string.
                        responseStr = JSON.stringify({ "name": responseNameFunction, "arguments": responseArg });
                        data(responseStr);
                    } else if (content) {
                        responseStr += content;
                        data(content);
                    }
                } catch (error) {
                    console.error("Could not JSON parse stream message", message, error);
                }
            }
            // Account token usage and key cost.
            let completion_tokens = encode(responseStr).length;
            let usageData = {
                key: oAIKey.key,
                prompt_tokens: prompt_tokens,
                completion_tokens: completion_tokens,
                total_tokens: prompt_tokens + completion_tokens,
            };
            usage(usageData);
            if (this.onUsage) this.onUsage(usageData);
            oAIKey.tokens += usageData.total_tokens;
            oAIKey.balance = (oAIKey.tokens / 1000) * this.options.price;
            oAIKey.queries++;
            conversation.messages.push({
                id: randomUUID(),
                content: responseStr,
                type: MessageType.Assistant,
                date: Date.now(),
            });
            return responseStr;
        } catch (error: any) {
            // A streamed error body must be drained before it can be parsed.
            if (error.response && error.response.data && error.response.headers["content-type"] === "application/json") {
                let errorResponseStr = "";
                for await (const message of error.response.data) {
                    errorResponseStr += message;
                }
                const errorResponseJson = JSON.parse(errorResponseStr);
                throw new Error(errorResponseJson.error.message);
            } else {
                throw new Error(error.message);
            }
        }
    }

    /** Non-streaming convenience wrapper around askPost (callbacks are no-ops). */
    public async askV1(
        prompt: string | ContentBlock[] | ToolOutput[],
        conversationId: string = "default",
        type: number = 1,
        function_name?: string,
        tool_call_id?: string,
        userName: string = "User",
        response_format?: any
    ) {
        return await this.askPost(
            (data) => { },
            (data) => { },
            prompt,
            conversationId,
            function_name,
            userName,
            type,
            tool_call_id,
            response_format
        );
    }

    /**
     * Send `prompt` as a single (non-streaming) chat-completion request, with
     * optional tools / function calling.
     * @param data  Invoked once with the serialized first choice.
     * @param usage Invoked once with the request's token usage.
     * @returns The full first choice object from the API response.
     */
    public async askPost(data: (arg0: string) => void,
        usage: (usage: Usage) => void,
        prompt: string | ContentBlock[] | ToolOutput[],
        conversationId: string = "default",
        function_name?: string,
        userName: string = "User",
        type: number = MessageType.User,
        tool_call_id?: string, response_format?: any) {
        let oAIKey = this.getOpenAIKey();
        let conversation = this.getConversation(conversationId, userName);
        // NOTE(review): moderation is intentionally skipped here (prompt may be
        // multimodal / tool output, which the moderation endpoint doesn't accept).
        let promptStr = this.generatePrompt(conversation, prompt, type, function_name, tool_call_id);
        let prompt_tokens = this.countTokens(promptStr);
        try {
            let auxOptions = {
                model: this.options.model,
                messages: promptStr,
                temperature: this.options.temperature,
                max_tokens: this.options.max_tokens,
                top_p: this.options.top_p,
                frequency_penalty: this.options.frequency_penalty,
                presence_penalty: this.options.presence_penalty,
                stream: false, // Note this
                tools: this.options.tools,
                tool_choice: this.options.tool_choice,
                parallel_tool_calls: this.options.parallel_tool_calls,
                response_format: response_format,
            }
            // Legacy function-calling API support.
            if (this.options.functions) {
                auxOptions["functions"] = this.options.functions;
                auxOptions["function_call"] = this.options.function_call ? this.options.function_call : "auto";
            }
            const response = await axios.post(
                this.options.endpoint,
                auxOptions,
                {
                    responseType: "json", // Note this
                    headers: {
                        Accept: "application/json", // Note this
                        "Content-Type": "application/json",
                        Authorization: `Bearer ${oAIKey.key}`,
                    },
                },
            );
            let completion_tokens = response.data.usage['completion_tokens'];
            let usageData = {
                key: oAIKey.key,
                prompt_tokens: prompt_tokens,
                completion_tokens: completion_tokens,
                total_tokens: prompt_tokens + completion_tokens,
            };
            // Fire the usage callback too — previously only onUsage was invoked,
            // inconsistent with askStream.
            usage(usageData);
            if (this.onUsage) this.onUsage(usageData);
            oAIKey.tokens += usageData.total_tokens;
            oAIKey.balance = (oAIKey.tokens / 1000) * this.options.price;
            oAIKey.queries++;
            // If the model requested tool calls, persist them so the follow-up
            // request can reference the call ids.
            if (response.data.choices[0].finish_reason == "tool_calls") {
                conversation.messages.push({
                    id: randomUUID(),
                    content: response.data.choices[0]['message']['content'],
                    type: MessageType.Assistant,
                    date: Date.now(),
                    name: function_name,
                    tool_calls: response.data.choices[0]['message']['tool_calls'],
                });
            } else if (response.data.choices[0]['message']['content']) {
                conversation.messages.push({
                    id: randomUUID(),
                    content: response.data.choices[0]['message']['content'] ? response.data.choices[0]['message']['content'] : "",
                    type: MessageType.Assistant,
                    date: Date.now(),
                });
            }
            data(JSON.stringify(response.data.choices[0]))
            return response.data.choices[0]; // return the full response
        } catch (error: any) {
            if (error.response && error.response.data && error.response.headers["content-type"] === "application/json") {
                throw new Error(error.response.data.error.message);
            } else {
                throw new Error(error.message);
            }
        }
    }

    /**
     * Content moderation hook. Currently disabled (always returns false); the
     * original OpenAI SDK implementation is preserved below for reference.
     */
    public async moderate(prompt: string, key: string) {
        // try {
        //     let openAi = new OpenAIApi(new Configuration({ apiKey: key }));
        //     let response = await openAi.createModeration({
        //         input: prompt,
        //     });
        //     return response.data.results[0].flagged;
        // } catch (error) {
        //     return false;
        // }
        return false;
    }

    /**
     * Append `prompt` to the conversation, then build the API message list,
     * trimming the oldest messages until the total (prompt + max response)
     * fits within max_conversation_tokens.
     */
    private generatePrompt(conversation: Conversation,
        prompt: string | ContentBlock[] | ToolOutput[],
        type: number = MessageType.User,
        function_name?: string,
        tool_call_id?: string): Message[] {
        // If the prompt is an array of tool results, record one Function
        // message per tool output.
        if (Array.isArray(prompt) && prompt.length > 0 && 'tool_call_id' in prompt[0]) {
            const toolOutputs = prompt as ToolOutput[];
            for (const toolOutput of toolOutputs) {
                conversation.messages.push({
                    id: randomUUID(),
                    content: toolOutput.content,
                    type: MessageType.Function,
                    date: Date.now(),
                    name: toolOutput.name,
                    tool_call_id: toolOutput.tool_call_id,
                });
            }
        } else {
            // Otherwise treat it as a normal user/assistant/function message.
            let message = {
                id: randomUUID(),
                content: prompt as string | ContentBlock[],
                type: type,
                date: Date.now(),
            };
            // Function messages carry the function name and tool-call id.
            if (type === MessageType.Function && function_name) {
                message["name"] = function_name;
                message["tool_call_id"] = tool_call_id;
            }
            conversation.messages.push(message);
        }
        // Build the wire-format message list and measure it in tokens.
        let messages = this.generateMessages(conversation);
        let promptEncodedLength = this.countTokens(messages);
        let totalLength = promptEncodedLength + this.options.max_tokens;
        // Evict oldest messages until the request fits the context window.
        while (totalLength > this.options.max_conversation_tokens) {
            conversation.messages.shift();
            messages = this.generateMessages(conversation);
            promptEncodedLength = this.countTokens(messages);
            totalLength = promptEncodedLength + this.options.max_tokens;
        }
        conversation.lastActive = Date.now();
        return messages;
    }

    /** Convert stored conversation messages into OpenAI wire-format messages. */
    private generateMessages(conversation: Conversation): Message[] {
        let messages: Message[] = [];
        messages.push({
            role: "system",
            content: this.getInstructions(conversation.userName),
        });
        for (let message of conversation.messages) {
            if (message.type === MessageType.Function) {
                messages.push({
                    tool_call_id: message.tool_call_id,
                    role: "tool",
                    name: message.name || "unknownFunction",
                    content: message.content,
                });
            } else if (message.type === MessageType.User) {
                messages.push({
                    role: "user",
                    content: message.content,
                });
            } else if (message.tool_calls) {
                messages.push({
                    role: "assistant",
                    content: message.content,
                    tool_calls: message.tool_calls,
                });
            } else {
                messages.push({
                    role: "assistant",
                    content: message.content,
                });
            }
        }
        return messages;
    }

    /**
     * Estimate the token count of a message list: exact BPE count for text,
     * fixed per-image estimates for image blocks.
     */
    private countTokens(messages: Message[]): number {
        let tokens: number = 0;
        for (let message of messages) {
            if (!message.content) continue;
            if (typeof message.content === "string") {
                tokens += encode(message.content).length;
            } else if (Array.isArray(message.content)) {
                for (let contentBlock of message.content) {
                    if (contentBlock.type === "text") {
                        tokens += encode(contentBlock.text).length;
                    } else if (contentBlock.type === "image_url") {
                        // Rough image-token estimates; "high" detail costs more.
                        // Adjust for actual image dimensions if they are known.
                        if (contentBlock.image_url.detail === "high") {
                            tokens += 765;
                        } else {
                            tokens += 85; // "low", "auto", or unspecified
                        }
                    }
                }
            }
        }
        return tokens;
    }

    /** Current date as YYYY-MM-DD (local time). */
    private getToday() {
        let today = new Date();
        let dd = String(today.getDate()).padStart(2, "0");
        let mm = String(today.getMonth() + 1).padStart(2, "0");
        let yyyy = today.getFullYear();
        return `${yyyy}-${mm}-${dd}`;
    }

    /** Current time as h:mm AM/PM (local time). */
    private getTime() {
        let today = new Date();
        let hours: any = today.getHours();
        let minutes: any = today.getMinutes();
        let ampm = hours >= 12 ? "PM" : "AM";
        hours = hours % 12;
        hours = hours ? hours : 12; // 0 o'clock displays as 12
        minutes = minutes < 10 ? `0${minutes}` : minutes;
        return `${hours}:${minutes} ${ampm}`;
    }

    /** Promise-based sleep. */
    private wait(ms: number) {
        return new Promise((resolve) => setTimeout(resolve, ms));
    }
}
export default ChatGPT;