// json-object-editor
// Version:
// JOE the Json Object Editor | Platform Edition
// 1,189 lines (1,104 loc) • 53.5 kB
// JavaScript
const OpenAI = require("openai");
const { google } = require('googleapis');
const path = require('path');
const MCP = require("../modules/MCP.js");
// const { name } = require("json-object-editor/server/webconfig");
// ChatGPT plugin constructor for JOE. All endpoints are attached to `this`
// (this.default, this.testPrompt, this.autofill, the widget* handlers, …).
function ChatGPT() {
// const fetch = (await import('node-fetch')).default;
//const openai = new OpenAI();
// Load the Google service-account key JSON file shipped next to the app.
// NOTE(review): the key filename is hard-coded; confirm it exists on deploys.
const serviceAccountKeyFile = path.join(__dirname, '../local-joe-239900-e9e3b447c70e.json');
// Read-only Google Docs scope; this auth is used by getGoogleDocContent below.
const google_auth = new google.auth.GoogleAuth({
keyFile: serviceAccountKeyFile,
scopes: ['https://www.googleapis.com/auth/documents.readonly'],
});
// Keep a stable reference to the plugin instance for inner functions.
var self = this;
this.async ={};
// Log a message prefixed with the colored "[chatgpt]" plugin tag.
function coloredLog(message) {
const tag = JOE.Utils.color('[chatgpt]', 'plugin', false);
console.log(tag, message);
}
//xx -setup and send a test prompt to chatgpt
//xx get the api key from joe settings
//get a prompt from id
//send the prompt to chatgpt
//++get the content of a file
//++send the content of a file to chatgpt
//++ structure data
//++ save the response to an ai_response
//create an ai_response
//store the content
//attach to the request
//store ids sent with the request
// Default endpoint: echo back the route params and posted data.
this.default = function (data, req, res) {
var payload;
try {
payload = {
params: req.params,
data: data
};
} catch (err) {
return { errors: 'plugin error: ' + err, failedat: 'plugin' };
}
return payload;
};
// Read the OpenAI API key from JOE settings; throws when unconfigured.
function getAPIKey() {
const key = JOE.Utils.Settings('OPENAI_API_KEY');
if (!key) {
throw new Error("Missing OPENAI_API_KEY setting");
}
return key;
}
// Look up the full and summary schema definitions for a schema name.
// A falsy name resolves to { full: null, summary: null } without touching JOE.
function getSchemaDef(name) {
if (!name) {
return { full: null, summary: null };
}
const schemas = JOE.Schemas;
const full = schemas && schemas.schema && schemas.schema[name];
const summary = schemas && schemas.summary && schemas.summary[name];
return { full, summary };
}
/**
* callMCPTool
*
* Small, well‑scoped helper to invoke a JOE MCP tool directly in‑process,
* without going over HTTP or worrying about POST size limits.
*
* Usage:
* const result = await callMCPTool('listSchemas', {}, { req });
*
* Notes:
* - `toolName` must exist on MCP.tools.
* - `params` should be a plain JSON-serializable object.
* - `ctx` is optional and can pass `{ req }` or other context that MCP
* tools might want (for auth, user, etc.).
*/
/**
 * Invoke a JOE MCP tool directly in-process (no HTTP, no POST size limits).
 * @param {string} toolName - key on MCP.tools
 * @param {object|string} params - plain object, or a JSON string (the
 *   Responses / tools API often returns arguments as a string) which is
 *   parsed before dispatch
 * @param {object} ctx - optional context (e.g. { req }) forwarded to the tool
 * @returns {Promise<*>} the tool's JSON-serializable result
 * @throws when MCP is unavailable, the tool is missing, or the tool fails
 */
async function callMCPTool(toolName, params = {}, ctx = {}) {
if (!MCP || !MCP.tools) {
throw new Error("MCP module not initialized; cannot call MCP tool");
}
if (!toolName || typeof toolName !== 'string') {
throw new Error("Missing or invalid MCP tool name");
}
const tool = MCP.tools[toolName];
if (typeof tool !== 'function') {
throw new Error(`MCP tool "${toolName}" not found`);
}
// Normalize string-encoded arguments to an object before dispatching.
let args = params;
if (typeof args === 'string') {
try {
args = JSON.parse(args);
} catch (parseErr) {
console.error(`[chatgpt] Failed to JSON-parse tool arguments for "${toolName}"`, parseErr, args);
// Leave the raw string in place so tools that expect it still work.
}
}
try {
return await tool(args || {}, ctx || {});
} catch (e) {
// Keep full details in the log; surface a clean error upstream.
console.error(`[chatgpt] MCP tool "${toolName}" error:`, e);
throw new Error(`MCP tool "${toolName}" failed: ${e && e.message || 'Unknown error'}`);
}
}
/**
* extractToolCalls
*
* Best-effort parser for tool calls from a Responses API result.
* The Responses output shape may evolve; this function looks for
* any "tool_call" typed content in response.output[*].content[*]
* and normalizes it into `{ name, arguments }` objects.
*/
// Best-effort extraction of tool calls from a Responses API result.
// Handles both top-level `function_call` output items and message-style
// items whose `content` array contains `function_call` parts; each match
// is normalized to { name, arguments }.
function extractToolCalls(response) {
const calls = [];
const output = response && response.output;
if (!Array.isArray(output)) { return calls; }
for (const item of output) {
if (!item) { continue; }
// Top-level shape: { type:'function_call', name, arguments }
if (item.type === 'function_call') {
calls.push({
name: item.name || item.function_name,
arguments: item.arguments || item.function_arguments || {}
});
}
// Message shape: item.content is an array of typed parts.
if (Array.isArray(item.content)) {
for (const part of item.content) {
if (!part) { continue; }
if (part.type === 'function_call') {
calls.push({
name: part.name || part.tool_name,
arguments: part.arguments || part.args || {}
});
}
}
}
}
return calls;
}
// Detect "request too large / token limit" style errors from the Responses API.
// Heuristic check for "request too large / token limit" errors from the
// Responses API: HTTP 429 or 400 plus token-related message phrasing.
function isTokenLimitError(err) {
if (!err || typeof err !== 'object') return false;
if (err.status !== 429 && err.status !== 400) return false;
const message = (err.error && err.error.message) || err.message || '';
if (!message) return false;
const needle = String(message).toLowerCase();
// Common OpenAI phrasings for context-window / TPM limits.
const phrases = [
'request too large',
'too many tokens',
'max tokens',
'maximum context length',
'tokens per min'
];
return phrases.some(function (p) { return needle.includes(p); });
}
// Create a compact representation of a JOE object for use in slim payloads.
// Compact a JOE object to the identifying fields used in slim payloads:
// _id, itemtype, a best-guess display name, and a short info string.
// Non-object inputs are passed through unchanged.
function slimJOEObject(item) {
if (!item || typeof item !== 'object') return item;
const displayName =
item.name || item.title || item.label || item.email || item.slug || item._id || '';
const shortInfo = item.info || item.description || item.summary || '';
return {
_id: item._id,
itemtype: item.itemtype,
name: displayName,
info: shortInfo
};
}
// Given an `understandObject` result, produce a slimmed version:
// - keep `object` as-is
// - keep `flattened` for the main object (depth-limited) if present
// - replace each related entry with { field, _id, itemtype, object:{_id,itemtype,name,info} }
// - preserve `schemas`, `tags`, `statuses`, and mark `slim:true`
// Produce a token-light version of an `understandObject` result:
// - keep `object` (and the main `flattened` view when present)
// - slim every `related` entry down to { field, _id, itemtype, object }
// - preserve `schemas`/`tags`/`statuses` and mark the payload `slim:true`
function slimUnderstandObjectResult(result) {
if (!result || typeof result !== 'object') return result;
const slimmed = {
_id: result._id,
itemtype: result.itemtype,
object: result.object,
// The main flattened view is typically much smaller than `related`,
// so keep it when available.
flattened: result.flattened || null,
schemas: result.schemas || {},
tags: result.tags || {},
statuses: result.statuses || {},
slim: true
};
slimmed.related = Array.isArray(result.related)
? result.related.map(function (rel) {
if (!rel) return rel;
const compact = slimJOEObject(rel.object || {});
return {
field: rel.field,
_id: compact && compact._id || rel._id,
itemtype: compact && compact.itemtype || rel.itemtype,
object: compact
};
})
: [];
return slimmed;
}
// Walk the messages array and, for any system message containing a JSON payload
// of the form { "tool": "understandObject", "result": {...} }, replace the
// result with a slimmed version to reduce token count. Returns a new array; if
// nothing was changed, returns the original array.
// Scan system messages for embedded `{ tool:'understandObject', result }`
// JSON payloads and swap each result for its slimmed form to cut tokens.
// Returns the original array (same identity) when nothing qualified.
function shrinkUnderstandObjectMessagesForTokens(messages) {
if (!Array.isArray(messages)) return messages;
let replacedAny = false;
const rewritten = messages.map(function (msg) {
if (!msg || msg.role !== 'system') return msg;
if (typeof msg.content !== 'string') return msg;
let parsed;
try {
parsed = JSON.parse(msg.content);
} catch (_e) {
return msg;
}
if (!parsed || parsed.tool !== 'understandObject' || !parsed.result) {
return msg;
}
try {
const slimmed = slimUnderstandObjectResult(parsed.result);
replacedAny = true;
return {
...msg,
content: JSON.stringify({ tool: 'understandObject', result: slimmed })
};
} catch (_err) {
return msg;
}
});
return replacedAny ? rewritten : messages;
}
/**
* runWithTools
*
* Single orchestration function for calling the OpenAI Responses API
* with optional tools (sourced from a JOE `ai_assistant`), handling
* tool calls via MCP, and issuing a follow-up model call with the
* tool results injected.
*
* Inputs (opts):
* - openai: OpenAI client instance
* - model: model name to use (e.g. "gpt-4.1-mini", "gpt-5.1")
* - systemText: string of system / instructions text
* - messages: array of { role, content } for the conversation so far
* - assistant: JOE `ai_assistant` object (may contain `tools`)
* - req: Express request (passed into MCP tools as context)
*
* Returns:
* - { response, finalText, messages, toolCalls }
* where `finalText` is the assistant-facing text (from output_text)
* and `messages` is the possibly-extended message list including
* any synthetic `tool` messages.
*/
async function runWithTools(opts) {
// Unpack options; `messages` is copied so the tool-result messages pushed
// below never mutate the caller's array.
const openai = opts.openai;
const model = opts.model;
const systemText = opts.systemText || "";
const messages = Array.isArray(opts.messages) ? opts.messages.slice() : [];
const assistant = opts.assistant || null;
const req = opts.req;
// Normalize tools: in many schemas tools may be stored as a JSON string;
// here we accept either an array or a JSON-stringified array.
let tools = null;
if (assistant && assistant.tools) {
if (Array.isArray(assistant.tools)) {
tools = assistant.tools;
} else if (typeof assistant.tools === 'string') {
try {
const parsed = JSON.parse(assistant.tools);
if (Array.isArray(parsed)) {
tools = parsed;
}
} catch (e) {
// Malformed tools JSON: log and continue with no tools rather than fail.
console.error('[chatgpt] Failed to parse assistant.tools JSON', e);
}
}
}
// Normalize tool definitions for the Responses API. The assistant UI
// uses the Assistants-style shape ({ type:'function', function:{...} }),
// but Responses expects the name/description/parameters at the top level:
// { type:'function', name:'x', description:'...', parameters:{...} }
if (Array.isArray(tools)) {
tools = tools.map(function (t) {
if (t && t.type === 'function' && t.function && !t.name) {
const fn = t.function || {};
return {
type: 'function',
name: fn.name,
description: fn.description,
parameters: fn.parameters || {}
};
}
return t;
});
}
// No tools configured – do a simple single Responses call.
if (!tools) {
const resp = await openai.responses.create({
model: model,
instructions: systemText,
input: messages
});
return {
response: resp,
finalText: resp.output_text || "",
messages: messages,
toolCalls: []
};
}
// Step 1: call the model with tools enabled.
const first = await openai.responses.create({
model: model,
instructions: systemText,
input: messages,
tools: tools,
tool_choice: "auto"
});
const toolCalls = extractToolCalls(first);
// If the model didn't decide to use tools, just return the first answer.
if (!toolCalls.length) {
return {
response: first,
finalText: first.output_text || "",
messages: messages,
toolCalls: []
};
}
// Step 2: execute each tool call via MCP and append tool results.
// Tool calls run sequentially, in the order the model emitted them; a
// failing tool is reported to the model as an { error } payload rather
// than aborting the turn.
for (let i = 0; i < toolCalls.length; i++) {
const tc = toolCalls[i];
try {
const result = await callMCPTool(tc.name, tc.arguments || {}, { req });
messages.push({
// Responses API does not support a "tool" role in messages.
// We inject tool outputs as a synthetic system message so
// the model can see the results without affecting the
// user/assistant turn structure.
role: "system",
content: JSON.stringify({ tool: tc.name, result: result })
});
} catch (e) {
console.error("[chatgpt] MCP tool error in runWithTools:", e);
messages.push({
role: "system",
content: JSON.stringify({
tool: tc.name,
error: e && e.message || "Tool execution failed"
})
});
}
}
// Step 3: ask the model again with tool outputs included.
// Note: only one round of tool-calling is performed — the second call is
// made without tools, so the model cannot request further tool calls here.
let finalMessages = messages;
let second;
try {
second = await openai.responses.create({
model: model,
instructions: systemText,
input: finalMessages
});
} catch (e) {
if (isTokenLimitError(e)) {
console.warn("[chatgpt] Responses token limit hit; shrinking understandObject payloads and retrying once");
const shrunk = shrinkUnderstandObjectMessagesForTokens(finalMessages);
// If nothing was shrunk, just rethrow the original error.
// (shrink… returns the same array identity when no message changed.)
if (shrunk === finalMessages) {
throw e;
}
finalMessages = shrunk;
// Retry once with the smaller payload; let any error bubble up.
second = await openai.responses.create({
model: model,
instructions: systemText,
input: finalMessages
});
} else {
throw e;
}
}
return {
response: second,
finalText: second.output_text || "",
messages: finalMessages,
toolCalls: toolCalls
};
}
// function newClient(){
// var key = getAPIKey();
// var c = new OpenAI({
// apiKey: key, // This is the default and can be omitted
// });
// if(!c || !c.apiKey){
// return { errors: 'No API key provided' };
// }
// return c;
// }
// Build an OpenAI SDK client using the key stored in JOE settings.
// NOTE(review): getAPIKey() throws when the key is missing, so callers that
// check `client.errors` never see that shape — the throw propagates instead.
function newClient() {
const apiKey = getAPIKey();
return new OpenAI({ apiKey });
}
// Smoke-test endpoint: send a fixed prompt to gpt-4o and return the reply.
// Returns { payload, chatCompletion, content } on success or { errors } on
// any failure (missing API key, quota, or other API errors).
this.testPrompt = async function (data, req, res) {
try {
var payload = {
params: req.params,
data: data
};
} catch (e) {
return { errors: 'plugin error: ' + e, failedat: 'plugin' };
}
// BUG FIX: newClient() throws (via getAPIKey) when no key is configured, so
// the old `client.errors` check was dead and a missing key escaped as an
// unhandled rejection. Convert the throw into the { errors } shape instead.
let client;
try {
client = newClient();
} catch (e) {
return { errors: e.message };
}
if (client.errors) {
return { errors: client.errors };
}
try {
const chatCompletion = await client.chat.completions.create({
messages: [{ role: 'user', content: 'Tell me a story about JOE: the json object editor in under 256 chars.' }],
model: 'gpt-4o',
});
coloredLog(chatCompletion);
return { payload, chatCompletion, content: chatCompletion.choices[0].message.content };
} catch (error) {
// 429 = quota / rate limit; surface a friendlier message for that case.
if (error.status === 429) {
return { errors: 'You exceeded your current quota, please check your plan and billing details.' };
} else {
return { errors: 'plugin error: ' + error.message, failedat: 'plugin' };
}
}
}
// Fetch a business's initial consult transcript (Google Doc), run it through
// the referenced ai_prompt's template via ChatGPT, and save the reply as an
// ai_response. Flow:
// 1. Resolve the ai_prompt and business objects from their ids.
// 2. Read the business's initial_transcript_url.
// 3. Fetch and clean the transcript content from Google Docs.
// 4. Send template + transcript to ChatGPT and persist the response.
this.sendInitialConsultTranscript = async function (data, req, res) {
coloredLog("sendInitialConsultTranscript");
try {
var payload = {
params: req.params,
data: data
};
} catch (e) {
return { errors: 'plugin error: ' + e, failedat: 'plugin' };
}
var businessOBJ = JOE.Data.business.find(b => b._id == data.business);
var promptOBJ = JOE.Data.ai_prompt.find(p => p._id == data.ai_prompt);
// BUG FIX: guard against unknown ids before dereferencing the objects below
// (previously a bad id caused an unhandled TypeError).
if (!businessOBJ) {
return res.jsonp({ error: 'Business not found' });
}
if (!promptOBJ) {
return res.jsonp({ error: 'AI prompt not found' });
}
// See if there is an initial_transcript_url property on that object
const transcriptUrl = businessOBJ.initial_transcript_url;
if (!transcriptUrl) {
return res.jsonp({ error: 'No initial transcript URL found' });
}
// Get the content of the file from Google Docs
const transcriptContent = await getGoogleDocContent(transcriptUrl);
if (!transcriptContent || transcriptContent.error) {
return res.jsonp({ error: (transcriptContent.error && transcriptContent.error.message) || 'Failed to fetch transcript content' });
}
const tokenCount = countTokens(`${promptOBJ.template}\n\n${transcriptContent}`);
payload.tokenCount = tokenCount;
coloredLog("token count: " + tokenCount);
//return res.jsonp({tokens:tokenCount,content:transcriptContent});
// Send the content to ChatGPT, with the template property of the prompt
// object (use the shared client factory for consistency with other endpoints).
const client = newClient();
const chatResponse = await client.chat.completions.create({
messages: [{ role: 'user', content: `${promptOBJ.template}\n\n${transcriptContent}` }],
model: 'gpt-4o',
});
// Get the response
const chatContent = chatResponse.choices[0].message.content;
const responseName = `${businessOBJ.name} - ${promptOBJ.name}`;
// Save the response (best-effort: saveAIResponse logs and swallows errors)
await saveAIResponse({
name: responseName,
business: data.business,
ai_prompt: data.ai_prompt,
response: chatContent,
payload,
prompt_method: req.params.method
});
coloredLog("response saved -" + responseName);
return {
payload,
businessOBJ,
promptOBJ,
chatContent,
responseName
};
}
/**
 * getGoogleDocContent
 * Fetch a Google Doc's plain-text body via the Docs API using the module's
 * service-account `google_auth`, with transcript-specific cleanup:
 * - speaker name "Euron Nicholson" → "[EN]"
 * - WebVTT-style timestamp ranges → "-ts-"
 * Returns the joined paragraph text, or { error } on failure.
 * Cleanup: removed an unused local GoogleAuth instance and an unused
 * GOOGLE_DOCS_API_KEY settings read — neither was referenced.
 */
async function getGoogleDocContent(docUrl) {
try {
const docs = google.docs({ version: 'v1', auth: google_auth });
const docId = extractDocIdFromUrl(docUrl);
const doc = await docs.documents.get({ documentId: docId });
// Flatten paragraph elements to text; non-paragraph elements become ''.
const content = doc.data.body.content.map(element => {
if (element.paragraph && element.paragraph.elements) {
return element.paragraph.elements.map(
e => e.textRun
? e.textRun.content
.replace(/Euron Nicholson/g, '[EN]')
.replace(/\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}/g, '-ts-')
: ''
).join('');
}
return '';
}).join('\n');
return content;
} catch (error) {
console.error('Error fetching Google Doc content:', error);
return { error };
}
}
// Count tokens for `text` under the given model's encoding.
// BUG FIX: `encoding_for_model` was referenced but never imported anywhere in
// this module, so every call threw a ReferenceError. Load it lazily from
// tiktoken and fall back to a ~4-chars-per-token estimate when unavailable.
function countTokens(text, model = 'gpt-4o') {
const s = String(text == null ? '' : text);
try {
const { encoding_for_model } = require('tiktoken');
const enc = encoding_for_model(model);
const tokens = enc.encode(s);
// tiktoken encoders hold WASM memory; release it when supported.
if (typeof enc.free === 'function') enc.free();
return tokens.length;
} catch (_e) {
// Rough heuristic (~4 characters per token for English text).
return Math.ceil(s.length / 4);
}
}
// Pull the document id out of a Google Docs URL ("/d/<id>").
// Returns null for non-matching or non-string input (previously threw a
// TypeError when called with null/undefined).
function extractDocIdFromUrl(url) {
if (typeof url !== 'string') return null;
const match = url.match(/\/d\/([a-zA-Z0-9-_]+)/);
return match ? match[1] : null;
}
// Persist a ChatGPT reply as an `ai_response` record via JOE.Storage.
// Errors are logged and swallowed deliberately — a failed save must not
// break the calling flow.
// NOTE(review): relies on a `cuid()` that is not imported in this file;
// presumably a global — verify.
async function saveAIResponse(data) {
try {
const record = {
name: data.name,
itemtype: 'ai_response',
business: data.business,
ai_prompt: data.ai_prompt,
response: data.response,
payload: data.payload,
prompt_method: data.prompt_method,
created: (new Date).toISOString(),
_id: cuid()
// Add any other fields you want to save
};
await new Promise((resolve, reject) => {
JOE.Storage.save(record, 'ai_response', function (err, result) {
if (err) {
coloredLog('Error saving AI response: ' + err);
reject(err);
return;
}
coloredLog('AI response saved successfully');
resolve(result);
});
});
} catch (error) {
coloredLog('Error in saveAIResponse: ' + error);
}
}
// Normalize model output that should contain JSON. Models often wrap JSON
// in markdown fences (```json ... ```), and may prepend/append prose. This
// helper strips fences and tries to isolate the first well-formed JSON
// object/array substring so JSON.parse has the best chance of succeeding.
// Normalize model output that should contain JSON: strip markdown fences
// (```json ... ```), then, if prose still surrounds the payload, slice from
// the first '{'/'[' through the last '}'/']' so JSON.parse has the best
// chance of succeeding. Falsy input yields ''.
function extractJsonText(raw) {
if (!raw) { return ''; }
let text = String(raw).trim();
// Prefer a ```json fence when present; otherwise any ``` fence.
const jsonFenceAt = text.indexOf('```json');
const fenceStart = jsonFenceAt !== -1 ? jsonFenceAt : text.indexOf('```');
if (fenceStart !== -1) {
const newlineAfterFence = text.indexOf('\n', fenceStart);
text = newlineAfterFence !== -1
? text.substring(newlineAfterFence + 1)
: text.substring(fenceStart + 3);
const closingFence = text.lastIndexOf('```');
if (closingFence !== -1) {
text = text.substring(0, closingFence);
}
text = text.trim();
}
// If there's extra prose around the JSON, slice from first {/[ to last }/]
if (text[0] !== '{' && text[0] !== '[') {
const braceAt = text.indexOf('{');
const bracketAt = text.indexOf('[');
let start;
if (braceAt === -1) { start = bracketAt; }
else if (bracketAt === -1) { start = braceAt; }
else { start = Math.min(braceAt, bracketAt); }
const end = Math.max(text.lastIndexOf('}'), text.lastIndexOf(']'));
if (start !== -1 && end !== -1 && end > start) {
text = text.slice(start, end + 1);
}
}
return text.trim();
}
// Autofill feature (Responses API; supports assistant_id or model)
/**
 * autofill
 * Ask the model to populate a requested set of fields on a JOE object and
 * return a strict-JSON patch filtered down to exactly those fields.
 * Inputs (data): object_id/_id or object; schema (or inferred itemtype);
 * fields (string or array); prompt; model; assistant_id; allow_web;
 * save_history/save_itemtype; _retry (internal retry guard).
 * Returns { success, patch, model, usage, saved, saved_item, elapsed_ms }
 * or { success:false, error, code }.
 */
this.autofill = async function (data, req, res) {
const startedAt = Date.now();
try {
const body = data || {};
const objectId = body.object_id || body._id;
// NOTE(review): $J.get is not awaited here (it is awaited elsewhere in
// this file) — confirm it is synchronous in this context.
const object = body.object || $J.get(objectId);
const schemaName = body.schema || (object && object.itemtype) || body.itemtype;
const { full: schemaFull, summary: schemaSummary } = getSchemaDef(schemaName);
const rawFields = body.fields || body.field;
const fields = Array.isArray(rawFields) ? rawFields : (rawFields ? [rawFields] : []);
const userPrompt = body.prompt || '';
const assistantId = body.assistant_id || null;
if (!object) {
return { success: false, error: 'Object not found', code: 'OBJECT_NOT_FOUND' };
}
if (!schemaName) {
return { success: false, error: 'Schema name not determined', code: 'SCHEMA_REQUIRED' };
}
if (!fields.length) {
return { success: false, error: 'No fields specified', code: 'FIELDS_REQUIRED' };
}
const flattened = JOE.Utils.flattenObject(object._id);
// BUG FIX: these rules were previously joined with '\\n' (a literal
// backslash-n sequence), embedding "\n" text in the system prompt
// instead of real newlines between the rules.
const systemText = [
'You are JOE (Json Object Editor) assistant.',
'Task: Populate only the requested fields according to the provided schema context and JOE conventions.',
'- Respect field types (text, number, arrays, enums, references).',
'- Do NOT invent IDs for reference fields; only return human text for text-like fields.',
'- If a field is an enum, choose the closest valid enum. If unsure, omit it from patch.',
'- If a field is an array, return an array of values.',
'- Never modify unrelated fields.',
'- Output MUST be strict JSON with a top-level key "patch" containing only populated fields.',
'- If you lack sufficient information, return an empty patch.'
].join('\n');
const schemaForContext = schemaSummary || schemaFull || {};
const userInput = JSON.stringify({
action: 'autofill_fields',
target_schema: schemaName,
requested_fields: fields,
user_prompt: userPrompt,
object_context: flattened,
schema_context: schemaForContext
}, null, ' ');
const openai = newClient();
const model = body.model || 'gpt-4o-mini';////'gpt-5-nano';
// For simplicity and robustness, use plain text output and instruct the
// model to return a strict JSON object. We previously attempted the
// Responses `json_schema` response_format, but the SDK shape can change
// and is harder to parse reliably; text + JSON.parse is sufficient here.
const requestBase = {
temperature: 0.2,
instructions: systemText,
input: userInput
};
// Optional web_search tool: if the caller sets allow_web truthy, expose
// the built-in web_search capability and let the model decide when to
// call it.
if (body.allow_web) {
coloredLog("allowing web search");
requestBase.tools = [{ type: 'web_search' }];
requestBase.tool_choice = 'auto';
}
let response;
if (assistantId) {
response = await openai.responses.create({ assistant_id: assistantId, ...requestBase });
} else {
response = await openai.responses.create({ model, ...requestBase });
}
let textOut = '';
try { textOut = response.output_text || ''; } catch (_e) {}
coloredLog("textOut: " + textOut);
// Fallback: dig the text out of message-style output items when
// output_text is absent.
if (!textOut && response && Array.isArray(response.output)) {
for (let i = 0; i < response.output.length; i++) {
const item = response.output[i];
if (item && item.type === 'message' && item.content && Array.isArray(item.content)) {
const textPart = item.content.find(function (c) { return c.type === 'output_text' || c.type === 'text'; });
if (textPart && (textPart.text || textPart.output_text)) {
textOut = textPart.text || textPart.output_text;
break;
}
}
}
}
let patch = {};
try {
const jsonText = extractJsonText(textOut);
const parsed = JSON.parse(jsonText || '{}');
patch = parsed.patch || {};
} catch (_e) {
console.warn('[chatgpt.autofill] Failed to parse JSON patch from model output', _e);
}
coloredLog("patch: " + JSON.stringify(patch));
// Only keep the fields the caller actually requested.
const filteredPatch = {};
fields.forEach(function (f) {
if (Object.prototype.hasOwnProperty.call(patch, f)) {
filteredPatch[f] = patch[f];
}
});
// If we got no fields back on the first attempt, retry once before
// giving up. Avoid infinite loops by marking a retry flag.
if (!Object.keys(filteredPatch).length && !body._retry) {
coloredLog('[autofill] empty patch, retrying once');
const retryBody = Object.assign({}, body, { _retry: true });
return await self.autofill(retryBody, req, res);
}
// Optional save of the autofill transaction as an ai_response (or the
// caller-specified itemtype, when that schema exists).
let savedItem = null;
if (body.save_history || body.save_itemtype) {
const targetItemtype = body.save_itemtype || 'ai_response';
if (JOE.Schemas && JOE.Schemas.schema && JOE.Schemas.schema[targetItemtype]) {
const saveObj = {
itemtype: targetItemtype,
name: `[${schemaName}] autofill → ${fields.join(', ')}`,
object_id: object._id,
target_schema: schemaName,
fields,
prompt: userPrompt,
patch: filteredPatch,
model,
raw: { response }
};
await new Promise(function (resolve) {
JOE.Storage.save(saveObj, targetItemtype, function (_err, saved) {
savedItem = saved || null;
resolve();
});
});
}
}
return {
success: true,
patch: filteredPatch,
model,
usage: response && response.usage,
saved: !!savedItem,
saved_item: savedItem,
elapsed_ms: Date.now() - startedAt
};
} catch (e) {
return { success: false, error: e && e.message || 'Unknown error' };
}
};
// Echo endpoint: replies via res.jsonp with a canned "response" string and
// signals JOE that the callback has already been used.
this.getResponse = function (data, req, res) {
try {
const prompt = data.prompt;
if (!prompt) {
return { error: 'No prompt provided' };
}
// Simulate a response from ChatGPT
res.jsonp({ response: `ChatGPT response to: ${prompt}` });
return { use_callback: true };
} catch (e) {
return { errors: 'plugin error: ' + e, failedat: 'plugin' };
}
};
// Render the default payload as loosely HTML-formatted JSON text.
this.html = function (data, req, res) {
const spacer = '\t\r\n <br/>';
return JSON.stringify(self.default(data, req), '', spacer);
};
/* NEW AI RESPONSE API*/
/**
 * executeJOEAiPrompt
 * Execute a stored JOE `ai_prompt` against the OpenAI Responses API:
 * resolve the prompt, pre-load any referenced content objects, optionally
 * run the prompt's helper `functions` module to build instructions/input,
 * call the model, and persist the result as an `ai_response`.
 * Input (data): { ai_prompt: <prompt id>, ...params referenced by the prompt }
 * Returns { success, ai_response_id, response, usage } or { error, message }.
 */
this.executeJOEAiPrompt = async function(data, req, res) {
const referencedObjectIds = []; // Track all objects touched during helper function
try {
const promptId = data.ai_prompt;
const params = data;
if (!promptId) {
return { error: "Missing prompt_id." };
}
const prompt = await $J.get(promptId); // Use $J.get for consistency
if (!prompt) {
return { error: "Prompt not found." };
}
let instructions = prompt.instructions || "";
let finalInstructions=instructions;
let finalInput='';
// Pre-load all content_objects if content_items exist
const contentObjects = {};
if (prompt.content_items && Array.isArray(prompt.content_items)) {
for (const content of prompt.content_items) {
if (params[content.reference]) {
// NOTE(review): not awaited here, unlike the $J.get above — confirm
// $J.get is synchronous in this context.
const obj = $J.get(params[content.reference]);
if (obj) {
contentObjects[content.itemtype] = obj;
// Pre-track referenced object
if (obj._id && !referencedObjectIds.includes(obj._id)) {
referencedObjectIds.push(obj._id);
}
}
}
}
}
// Execute any helper functions if present.
// SECURITY NOTE: requireFromString compiles and runs code stored on the
// prompt record — prompt.functions must be trusted-admin content only.
if (prompt.functions) {
const modFunc = JOE.Utils.requireFromString(prompt.functions, prompt._id);
const helperResult = await modFunc({
instructions,
params,
ai_prompt: prompt,
content_objects: contentObjects,
trackObject: (obj) => {
if (obj?._id && !referencedObjectIds.includes(obj._id)) {
referencedObjectIds.push(obj._id);
}
}
});
if (typeof helperResult === 'object' && helperResult.error) {
return { error: helperResult.error };
}
// Assume the result is { instructions, input }
finalInstructions = helperResult.instructions || instructions;
finalInput = helperResult.input;
}
const openai = newClient(); // however your OpenAI client is created
const payload = {
model: prompt.ai_model || "gpt-4o",
instructions: finalInstructions||instructions, // string only
input:finalInput||'',
// Default to the built-in web_search tool when the prompt defines none.
tools: prompt.tools || [{ "type": "web_search" }],
tool_choice: prompt.tool_choice || "auto",
temperature: prompt.temperature ? parseFloat(prompt.temperature) : 0.7,
//return_token_usage: true
//max_tokens: prompt.max_tokens ?? 1200
};
const response = await openai.responses.create(payload);
// const payload = createResponsePayload(prompt, params, instructions, data.user_prompt);
// const response = await openai.chat.completions.create(payload);
const saved = await saveAiResponseRefactor({
prompt,
ai_response_content: response.output_text || "",
user_prompt: payload.input,
params,
referenced_object_ids: referencedObjectIds,
response_id:response.id,
usage: response.usage || {}
});
return { success: true, ai_response_id: saved._id,response:response.output_text || "",usage:response.usage };
} catch (e) {
console.error('❌ executeJOEAiPrompt error:', e);
return { error: "Failed to execute AI prompt.",message: e.message };
}
};
// Build a Chat Completions payload from an ai_prompt record.
// NOTE(review): appears unused except by commented-out code in
// executeJOEAiPrompt — verify before removing. Uses `prompt.model`
// (not `ai_model` like the Responses path).
function createResponsePayload(prompt, params, instructions, user_prompt) {
const payload = {
model: prompt.model || "gpt-4o",
messages: [
{ role: "system", content: instructions },
{ role: "user", content: user_prompt || "" }
],
tools: prompt.tools || undefined,
tool_choice: prompt.tool_choice || "auto",
// ?? (not ||) so explicit 0 temperature / max_tokens are respected.
temperature: prompt.temperature ?? 0.7,
max_tokens: prompt.max_tokens ?? 1200
};
return payload;
}
// Persist an ai_response for the Responses-API flow. Parses the content as
// JSON (when possible) to index its top-level keys, saves via JOE.Storage,
// and returns the record that was written.
// NOTE(review): relies on a `cuid()` that is not imported in this file;
// presumably a global — verify.
async function saveAiResponseRefactor({ prompt, ai_response_content, user_prompt, params, referenced_object_ids, response_id, usage }) {
let response_keys = [];
try {
response_keys = Object.keys(JSON.parse(ai_response_content));
} catch (e) {
// Non-JSON responses are fine; we just skip the key index.
console.error('❌ Error parsing AI response content for keys:', e);
}
const record = {
name: `${prompt.name}`,
itemtype: 'ai_response',
ai_prompt: prompt._id,
prompt_name: prompt.name,
prompt_method: prompt.prompt_method,
response: ai_response_content,
response_keys: response_keys,
response_id: response_id || '',
user_prompt: user_prompt,
params_used: params,
usage: usage || {},
tags: prompt.tags || [],
model_used: prompt.ai_model || "gpt-4o",
referenced_objects: referenced_object_ids, // flexible array of referenced object ids
created: (new Date).toISOString(),
_id: cuid()
};
await new Promise((resolve, reject) => {
JOE.Storage.save(record, 'ai_response', function (err, result) {
if (err) {
console.error('❌ Error saving AI response:', err);
reject(err);
return;
}
console.log('✅ AI response saved successfully');
resolve(result);
});
});
return record;
}
// ---------- Widget chat endpoints (Responses API + optional assistants) ----------
// Coerce stored widget messages into the { role, content, created_at }
// shape the <joe-ai-widget> client expects. Non-array input becomes [].
// Missing timestamps fall back to `created`, then to "now".
function normalizeMessages(messages) {
if (!Array.isArray(messages)) { return []; }
return messages.map((m) => ({
role: m.role || 'assistant',
content: m.content || '',
created_at: m.created_at || m.created || new Date().toISOString()
}));
}
/**
* widgetStart
*
* Purpose:
* Create and persist a new `ai_widget_conversation` record for the
* external `<joe-ai-widget>` chat component. This is a lightweight
* conversation record that stores model, assistant, system text and
* messages for the widget.
*
* Inputs (data):
* - model (optional) override model for the widget
* - ai_assistant_id (optional) JOE ai_assistant cuid
* - system (optional) explicit system text
* - source (optional) freeform source tag, defaults to "widget"
*
* OpenAI calls:
* - None. This endpoint only touches storage.
*
* Output:
* - { success, conversation_id, model, assistant_id }
* where assistant_id is the OpenAI assistant_id (if present).
*/
this.widgetStart = async function (data, req, res) {
try {
const body = data || {};
// Default to a modern chat model; an assistant's ai_model (when one is
// supplied) overrides this below.
const model = body.model || "gpt-5.1";
const assistant = body.ai_assistant_id ? $J.get(body.ai_assistant_id) : null;
const system = body.system || (assistant && assistant.instructions) || "";
// Only trust explicit user fields sent by the client (the ai-widget-test
// page passes _joe.User fields); req.User is intentionally not consulted.
let user = null;
if (body.user_id || body.user_name || body.user_color) {
user = {
_id: body.user_id,
name: body.user_name,
fullname: body.user_name,
color: body.user_color
};
}
const user_color = (body.user_color) || (user && user.color) || null;
const convo = {
_id: (typeof cuid === 'function') ? cuid() : undefined,
itemtype: "ai_widget_conversation",
model: (assistant && assistant.ai_model) || model,
assistant: assistant && assistant._id,
assistant_id: assistant && assistant.assistant_id,
assistant_color: assistant && assistant.assistant_color,
user: user && user._id,
user_name: user && (user.fullname || user.name),
user_color: user_color,
system: system,
messages: [],
source: body.source || "widget",
created: new Date().toISOString(),
joeUpdated: new Date().toISOString()
};
const saved = await new Promise(function (resolve, reject) {
// Widget conversations are lightweight; skip full history diffs.
JOE.Storage.save(convo, "ai_widget_conversation", function (err, result) {
if (err) return reject(err);
resolve(result);
}, { history: false });
});
return {
success: true,
conversation_id: saved._id,
model: saved.model,
assistant_id: saved.assistant_id || null,
assistant_color: saved.assistant_color || null,
user_color: saved.user_color || user_color || null
};
} catch (e) {
console.error("[chatgpt] widgetStart error:", e);
return { success: false, error: e && e.message || "Unknown error" };
}
};
/**
* widgetHistory
*
* Purpose:
* Load an existing `ai_widget_conversation` and normalize its
* messages for use by `<joe-ai-widget>` on page load or refresh.
*
* Inputs (data):
* - conversation_id or _id: the widget conversation cuid
*
* OpenAI calls:
* - None. Purely storage + normalization.
*
* Output:
* - { success, conversation_id, model, assistant_id, messages }
*/
this.widgetHistory = async function (data, req, res) {
try {
const conversation_id = data.conversation_id || data._id;
if (!conversation_id) {
return { success: false, error: "Missing conversation_id" };
}
// Load the single conversation record by id.
const record = await new Promise(function (resolve, reject) {
JOE.Storage.load("ai_widget_conversation", { _id: conversation_id }, function (err, results) {
if (err) return reject(err);
resolve(results && results[0]);
});
});
if (!record) {
return { success: false, error: "Conversation not found" };
}
// Normalize stored messages into the widget's expected shape.
record.messages = normalizeMessages(record.messages);
return {
success: true,
conversation_id: record._id,
model: record.model,
assistant_id: record.assistant_id || null,
assistant_color: record.assistant_color || null,
user_color: record.user_color || null,
messages: record.messages
};
} catch (e) {
console.error("[chatgpt] widgetHistory error:", e);
return { success: false, error: e && e.message || "Unknown error" };
}
};
/**
* widgetMessage
*
* Purpose:
* Handle a single user turn for `<joe-ai-widget>`:
* - Append the user message to the stored conversation.
* - Call OpenAI Responses (optionally with tools from the selected
* `ai_assistant`, via runWithTools + MCP).
* - Append the assistant reply, persist the conversation, and return
* the full message history plus the latest assistant message.
*
* Inputs (data):
* - conversation_id or _id: cuid of the widget conversation
* - content: user text
* - role: user role, defaults to "user"
* - assistant_id: optional OpenAI assistant_id (used only to
* locate the JOE ai_assistant config)
* - model: optional model override
* - user_id / user_name / user_color: optional explicit user fields,
* used to backfill user metadata on older conversations
*
* OpenAI calls:
* - responses.create (once if no tools; twice when tools are present):
* * First call may include tools (assistant.tools) and `tool_choice:"auto"`.
* * Any tool calls are executed via MCP and injected as `tool` messages.
* * Second call is plain Responses with updated messages.
*
* Output:
* - { success, conversation_id, model, assistant_id, messages,
* last_message, usage }
*/
this.widgetMessage = async function (data, req, res) {
try {
var body = data || {};
var conversation_id = body.conversation_id || body._id;
var content = body.content;
var role = body.role || "user";
if (!conversation_id || !content) {
return { success: false, error: "Missing conversation_id or content" };
}
const convo = await new Promise(function (resolve, reject) {
JOE.Storage.load("ai_widget_conversation", { _id: conversation_id }, function (err, results) {
if (err) return reject(err);
resolve(results && results[0]);
});
});
if (!convo) {
return { success: false, error: "Conversation not found" };
}
convo.messages = normalizeMessages(convo.messages);
const nowIso = new Date().toISOString();
// Append user message
const userMsg = { role: role, content: content, created_at: nowIso };
convo.messages.push(userMsg);
// Backfill user metadata (id/name/color) on older conversations that
// were created before we started storing these fields. Prefer explicit
// body fields only; we no longer infer from req.User so that widget
// calls always have a single, explicit user source.
var u = null;
if (body.user_id || body.user_name || body.user_color) {
u = {
_id: body.user_id,
name: body.user_name,
fullname: body.user_name,
color: body.user_color
};
}
if (u) {
if (!convo.user && u._id) {
convo.user = u._id;
}
if (!convo.user_name && (u.fullname || u.name)) {
convo.user_name = u.fullname || u.name;
}
if (!convo.user_color && u.color) {
convo.user_color = u.color;
}
}
const assistantId = body.assistant_id || convo.assistant_id || null;
// NOTE: assistantId here is the OpenAI assistant_id, not the JOE cuid.
// We do NOT pass assistant_id to the Responses API (it is not supported in the
// version we are using); instead we look up the JOE ai_assistant by assistant_id
// and inject its configuration (model, instructions, tools) into the request.
var assistantObj = null;
if (assistantId && JOE && JOE.Data && Array.isArray(JOE.Data.ai_assistant)) {
assistantObj = JOE.Data.ai_assistant.find(function (a) {
return a && a.assistant_id === assistantId;
}) || null;
}
const openai = newClient();
const model = (assistantObj && assistantObj.ai_model) || convo.model || body.model || "gpt-5.1";
// Prefer explicit system text on the conversation, then assistant instructions.
const systemText = (convo.system && String(convo.system)) ||
(assistantObj && assistantObj.instructions) ||