json-object-editor
Version:
JOE the Json Object Editor | Platform Edition
1,156 lines (1,085 loc) • 104 kB
JavaScript
const OpenAI = require("openai");
const { google } = require('googleapis');
const path = require('path');
const os = require('os');
const fs = require('fs');
const MCP = require("../modules/MCP.js");
// const { name } = require("json-object-editor/server/webconfig");
function ChatGPT() {
// const fetch = (await import('node-fetch')).default;
//const openai = new OpenAI();
// Load the service account key JSON file
const serviceAccountKeyFile = path.join(__dirname, '../local-joe-239900-e9e3b447c70e.json');
const google_auth = new google.auth.GoogleAuth({
keyFile: serviceAccountKeyFile,
scopes: ['https://www.googleapis.com/auth/documents.readonly'],
});
var self = this;
this.async ={};
// Verbose plugin logger, suppressed in production so consoles stay clean
// while local/dev debugging keeps rich traces (assistant resolution, MCP
// config, systemText). Env resolution order: JOE.webconfig.env, NODE_ENV.
function coloredLog(message){
  try{
    var environment = null;
    if (typeof JOE !== 'undefined' && JOE && JOE.webconfig && JOE.webconfig.env){
      environment = JOE.webconfig.env;
    } else if (typeof process !== 'undefined' && process.env && process.env.NODE_ENV){
      environment = process.env.NODE_ENV;
    }
    var isProduction = !!environment && environment.toLowerCase() === 'production';
    if (isProduction){
      return;
    }
    console.log(JOE.Utils.color('[chatgpt]', 'plugin', false), message);
  }catch(_err){
    // Env detection (or JOE.Utils.color) failed — fall back to a plain log
    // so development debugging is never silently broken.
    try{
      console.log('[chatgpt]', message);
    }catch(__err){}
  }
}
//xx -setup and send a test prompt to chatgpt
//xx get the api key from joe settings
//get a prompt from id
//send the prompt to chatgpt
//++get the content of a file
//++send the content of a file to chatgpt
//++ structure data
//++ save the response to an ai_response
//create an ai_response
//store the content
//attach to the request
//store ids sent with the request
// Default plugin endpoint: echoes the request params and posted data back.
this.default = function(data, req, res) {
  try {
    return {
      params: req.params,
      data: data
    };
  } catch (e) {
    return { errors: 'plugin error: ' + e, failedat: 'plugin' };
  }
};
// Read the OpenAI API key from JOE settings; throw when unset so callers
// fail fast instead of issuing unauthenticated API calls.
function getAPIKey() {
  const key = JOE.Utils.Settings('OPENAI_API_KEY');
  if (!key) {
    throw new Error("Missing OPENAI_API_KEY setting");
  }
  return key;
}
// Look up a schema by name in the JOE registry.
// Returns { full, summary }; both are null when no name is given, and
// undefined when the registry does not contain the schema.
function getSchemaDef(name) {
  if (!name) {
    return { full: null, summary: null };
  }
  const registry = JOE.Schemas;
  const full = registry && registry.schema && registry.schema[name];
  const summary = registry && registry.summary && registry.summary[name];
  return { full: full, summary: summary };
}
// Resolve MCP tool definitions for an assistant/config object.
// Returns { tools, names }; `tools` is null when MCP is disabled or the
// lookup fails, so callers can treat null as "no MCP tools".
function buildMcpToolsFromConfig(cfg) {
  const disabled = !cfg || !cfg.mcp_enabled;
  if (disabled) {
    return { tools: null, names: [] };
  }
  try {
    const selected = Array.isArray(cfg.mcp_selected_tools) ? cfg.mcp_selected_tools : null;
    const toolNames = MCP.getToolNamesForToolset(cfg.mcp_toolset || 'read-only', selected);
    return { tools: MCP.getToolDefinitions(toolNames), names: toolNames };
  } catch (e) {
    // Never let a bad MCP config break the calling request.
    console.warn('[chatgpt] buildMcpToolsFromConfig failed', e);
    return { tools: null, names: [] };
  }
}
/**
 * callMCPTool
 *
 * Invoke a JOE MCP tool in-process — no HTTP hop, no POST size limits.
 *
 * @param {string} toolName - key on MCP.tools
 * @param {object|string} [params] - JSON-serializable arguments; JSON strings
 *   (as the Responses/tools API often produces) are parsed automatically.
 * @param {object} [ctx] - optional context (e.g. { req }) forwarded to the tool
 * @returns {Promise<*>} the tool's JSON-serializable result
 * @throws when MCP is unavailable, the tool name is unknown, or execution fails
 */
async function callMCPTool(toolName, params = {}, ctx = {}) {
  if (!MCP || !MCP.tools) {
    throw new Error("MCP module not initialized; cannot call MCP tool");
  }
  if (!toolName || typeof toolName !== 'string') {
    throw new Error("Missing or invalid MCP tool name");
  }
  const tool = MCP.tools[toolName];
  if (typeof tool !== 'function') {
    throw new Error(`MCP tool "${toolName}" not found`);
  }
  try {
    // The Responses/tools API frequently serializes arguments as a JSON
    // string; normalize to an object before invoking the tool.
    let args = params;
    if (typeof args === 'string') {
      try {
        args = JSON.parse(args);
      } catch (parseErr) {
        console.error(`[chatgpt] Failed to JSON-parse tool arguments for "${toolName}"`, parseErr, args);
        // Keep the raw string so tools that expect one still work.
      }
    }
    return await tool(args || {}, ctx || {});
  } catch (e) {
    // Keep full details in the server log; surface a clean error upstream.
    console.error(`[chatgpt] MCP tool "${toolName}" error:`, e);
    throw new Error(`MCP tool "${toolName}" failed: ${e && e.message || 'Unknown error'}`);
  }
}
/**
 * extractToolCalls
 *
 * Best-effort parser for tool calls in a Responses API result (the output
 * shape may evolve). Scans response.output for top-level `function_call`
 * items as well as `function_call` parts nested in message-style `content`
 * arrays, normalizing each into `{ name, arguments }`.
 */
function extractToolCalls(response) {
  const calls = [];
  const output = response && response.output;
  if (!Array.isArray(output)) {
    return calls;
  }
  for (const item of output) {
    if (!item) { continue; }
    // Top-level function_call item.
    if (item.type === 'function_call') {
      calls.push({
        name: item.name || item.function_name,
        arguments: item.arguments || item.function_arguments || {}
      });
    }
    // Message-style item: content is an array of parts.
    if (Array.isArray(item.content)) {
      for (const part of item.content) {
        if (part && part.type === 'function_call') {
          calls.push({
            name: part.name || part.tool_name,
            arguments: part.arguments || part.args || {}
          });
        }
      }
    }
  }
  return calls;
}
// Detect "request too large / token limit" style errors from the Responses
// API: HTTP 400/429 whose message matches a context-window or TPM phrasing.
function isTokenLimitError(err) {
  if (!err || typeof err !== 'object') { return false; }
  const limitStatus = err.status === 429 || err.status === 400;
  if (!limitStatus) { return false; }
  const message = (err.error && err.error.message) || err.message || '';
  if (!message) { return false; }
  const needle = String(message).toLowerCase();
  // Common OpenAI phrasings for context/TPM limits.
  const phrases = [
    'request too large',
    'too many tokens',
    'max tokens',
    'maximum context length',
    'tokens per min'
  ];
  return phrases.some(function (p) { return needle.includes(p); });
}
// Compact representation of a JOE object for slim payloads: keeps id/type
// plus a best-guess display name and a short info string. Non-objects are
// passed through unchanged.
function slimJOEObject(item) {
  if (!item || typeof item !== 'object') { return item; }
  const displayName =
    item.name || item.title || item.label || item.email || item.slug || item._id || '';
  const shortInfo = item.info || item.description || item.summary || '';
  return {
    _id: item._id,
    itemtype: item.itemtype,
    name: displayName,
    info: shortInfo
  };
}
// Slim an `understandObject` result for token-constrained retries:
// - keep `object` (and its depth-limited `flattened` view) intact
// - collapse each related entry to { field, _id, itemtype, object:{_id,itemtype,name,info} }
// - preserve `schemas`/`tags`/`statuses` and mark the payload `slim: true`
function slimUnderstandObjectResult(result) {
  if (!result || typeof result !== 'object') { return result; }
  const slimmed = {
    _id: result._id,
    itemtype: result.itemtype,
    object: result.object,
    // Main flattened view is typically small enough to keep.
    flattened: result.flattened || null,
    schemas: result.schemas || {},
    tags: result.tags || {},
    statuses: result.statuses || {},
    slim: true
  };
  slimmed.related = Array.isArray(result.related)
    ? result.related.map(function (rel) {
        if (!rel) { return rel; }
        const compact = slimJOEObject(rel.object || {});
        return {
          field: rel.field,
          _id: (compact && compact._id) || rel._id,
          itemtype: (compact && compact.itemtype) || rel.itemtype,
          object: compact
        };
      })
    : [];
  return slimmed;
}
// Reduce token usage: rewrite any system message whose content is a JSON
// payload { tool:'understandObject', result } so the result is slimmed via
// slimUnderstandObjectResult. Returns a new array when something changed,
// otherwise the ORIGINAL array (reference equality lets callers detect
// "nothing to shrink").
function shrinkUnderstandObjectMessagesForTokens(messages) {
  if (!Array.isArray(messages)) { return messages; }
  let didShrink = false;
  const rewritten = messages.map(function (msg) {
    if (!msg || msg.role !== 'system' || typeof msg.content !== 'string') {
      return msg;
    }
    try {
      const parsed = JSON.parse(msg.content);
      if (!parsed || parsed.tool !== 'understandObject' || !parsed.result) {
        return msg;
      }
      const content = JSON.stringify({
        tool: 'understandObject',
        result: slimUnderstandObjectResult(parsed.result)
      });
      didShrink = true;
      return { ...msg, content: content };
    } catch (_e) {
      // Non-JSON content (or any processing failure): keep the message as-is.
      return msg;
    }
  });
  return didShrink ? rewritten : messages;
}
/**
* runWithTools
*
* Single orchestration function for calling the OpenAI Responses API
* with optional tools (sourced from a JOE `ai_assistant`), handling
* tool calls via MCP, and issuing a follow-up model call with the
* tool results injected.
*
* Inputs (opts):
* - openai: OpenAI client instance
* - model: model name to use (e.g. "gpt-4.1-mini", "gpt-5.1")
* - systemText: string of system / instructions text
* - messages: array of { role, content } for the conversation so far
* - assistant: JOE `ai_assistant` object (may contain `tools`)
* - req: Express request (passed into MCP tools as context)
*
* Returns:
* - { response, finalText, messages, toolCalls }
* where `finalText` is the assistant-facing text (from output_text)
* and `messages` is the possibly-extended message list including
* any synthetic `tool` messages.
*/
async function runWithTools(opts) {
  const openai = opts.openai;
  const model = opts.model;
  const systemText = opts.systemText || "";
  // Copy so synthetic tool-result messages below never mutate the caller's array.
  const messages = Array.isArray(opts.messages) ? opts.messages.slice() : [];
  const assistant = opts.assistant || null;
  const req = opts.req;
  const attachmentsMode = opts.attachments_mode || null;
  const openaiFileIds = opts.openai_file_ids || null;
  // Debug/trace: log the effective system instructions going into this
  // Responses+tools call. This helps verify assistant + MCP instructions
  // wiring across prompts, assists, autofill, and widget chat.
  try{
    coloredLog('runWithTools systemText:\n' + systemText);
  }catch(_e){}
  // Normalize tools: manual assistant.tools plus optional MCP tools.
  // assistant.tools may arrive as an array or as a JSON string.
  let manualTools = null;
  if (assistant && assistant.tools) {
    if (Array.isArray(assistant.tools)) {
      manualTools = assistant.tools;
    } else if (typeof assistant.tools === 'string') {
      try {
        const parsed = JSON.parse(assistant.tools);
        if (Array.isArray(parsed)) {
          manualTools = parsed;
        }
      } catch (e) {
        console.error('[chatgpt] Failed to parse assistant.tools JSON', e);
      }
    }
  }
  // Flatten any Assistants-style function definitions
  // ({ type:'function', function:{...} }) into the flat Responses shape.
  if (Array.isArray(manualTools)) {
    manualTools = manualTools.map(function (t) {
      if (t && t.type === 'function' && t.function && !t.name) {
        const fn = t.function || {};
        return {
          type: 'function',
          name: fn.name,
          description: fn.description,
          parameters: fn.parameters || {}
        };
      }
      return t;
    });
  }
  // Merge manual tools with MCP tools (manual wins on name collisions).
  let tools = null;
  const mergedByName = {};
  const mcp = buildMcpToolsFromConfig(assistant || {});
  const mcpTools = Array.isArray(mcp.tools) ? mcp.tools : null;
  if (Array.isArray(manualTools) || Array.isArray(mcpTools)) {
    tools = [];
    (manualTools || []).forEach(function(t){
      if (!t) { return; }
      if (t.name) { mergedByName[t.name] = true; }
      tools.push(t);
    });
    (mcpTools || []).forEach(function(t){
      if (!t || !t.name) { return; }
      if (mergedByName[t.name]) { return; }
      tools.push(t);
    });
  }
  // No tools configured – do a simple single Responses call.
  if (!tools) {
    const resp = await openai.responses.create({
      model: model,
      instructions: systemText,
      input: messages
    });
    return {
      response: resp,
      finalText: resp.output_text || "",
      messages: messages,
      toolCalls: []
    };
  }
  // Step 1: call the model with tools enabled.
  let firstPayload = {
    model: model,
    instructions: systemText,
    input: messages,
    tools: tools,
    tool_choice: "auto"
  };
  // Attachments are best-effort: a failure logs and proceeds without files.
  if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
    try{
      firstPayload = await attachFilesToResponsesPayload(openai, firstPayload, {
        attachments_mode: attachmentsMode,
        openai_file_ids: openaiFileIds
      });
    }catch(e){
      console.warn('[chatgpt] runWithTools attachments failed; continuing without attachments', e && e.message || e);
    }
  }
  const first = await openai.responses.create(firstPayload);
  const toolCalls = extractToolCalls(first);
  // If the model didn't decide to use tools, just return the first answer.
  if (!toolCalls.length) {
    return {
      response: first,
      finalText: first.output_text || "",
      messages: messages,
      toolCalls: []
    };
  }
  // Step 2: execute each tool call via MCP and append tool results.
  for (let i = 0; i < toolCalls.length; i++) {
    const tc = toolCalls[i];
    try {
      const result = await callMCPTool(tc.name, tc.arguments || {}, { req });
      messages.push({
        // Responses API does not support a "tool" role in messages.
        // We inject tool outputs as a synthetic system message so
        // the model can see the results without affecting the
        // user/assistant turn structure.
        role: "system",
        content: JSON.stringify({ tool: tc.name, result: result })
      });
    } catch (e) {
      console.error("[chatgpt] MCP tool error in runWithTools:", e);
      // Surface the failure to the model as well, so it can react to it.
      messages.push({
        role: "system",
        content: JSON.stringify({
          tool: tc.name,
          error: e && e.message || "Tool execution failed"
        })
      });
    }
  }
  // Step 3: ask the model again with tool outputs included.
  let finalMessages = messages;
  let second;
  try {
    let secondPayload = {
      model: model,
      instructions: systemText,
      input: finalMessages
    };
    if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
      try{
        secondPayload = await attachFilesToResponsesPayload(openai, secondPayload, {
          attachments_mode: attachmentsMode,
          openai_file_ids: openaiFileIds
        });
      }catch(e){
        console.warn('[chatgpt] runWithTools second-call attachments failed; continuing without attachments', e && e.message || e);
      }
    }
    second = await openai.responses.create(secondPayload);
  } catch (e) {
    // Token-limit errors get one retry with slimmed understandObject payloads;
    // anything else propagates to the caller.
    if (isTokenLimitError(e)) {
      console.warn("[chatgpt] Responses token limit hit; shrinking understandObject payloads and retrying once");
      const shrunk = shrinkUnderstandObjectMessagesForTokens(finalMessages);
      // If nothing was shrunk, just rethrow the original error.
      if (shrunk === finalMessages) {
        throw e;
      }
      finalMessages = shrunk;
      // Retry once with the smaller payload; let any error bubble up.
      let retryPayload = {
        model: model,
        instructions: systemText,
        input: finalMessages
      };
      if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
        try{
          retryPayload = await attachFilesToResponsesPayload(openai, retryPayload, {
            attachments_mode: attachmentsMode,
            openai_file_ids: openaiFileIds
          });
        }catch(e2){
          console.warn('[chatgpt] runWithTools retry attachments failed; continuing without attachments', e2 && e2.message || e2);
        }
      }
      second = await openai.responses.create(retryPayload);
    } else {
      throw e;
    }
  }
  return {
    response: second,
    finalText: second.output_text || "",
    messages: finalMessages,
    toolCalls: toolCalls
  };
}
// function newClient(){
// var key = getAPIKey();
// var c = new OpenAI({
// apiKey: key, // This is the default and can be omitted
// });
// if(!c || !c.apiKey){
// return { errors: 'No API key provided' };
// }
// return c;
// }
// Build a fresh OpenAI client with the key from JOE settings.
// getAPIKey() throws when the setting is missing, so a returned client
// always has a key configured.
function newClient() {
  const apiKey = getAPIKey();
  return new OpenAI({ apiKey: apiKey });
}
// Call the Responses API tolerating models that reject sampling params.
// When the error message says `temperature`/`top_p` is "unsupported" or
// "unknown", strip both and retry exactly once; every other error (and a
// failed retry) rethrows the ORIGINAL error.
async function safeResponsesCreate(openai, payload){
  try{
    return await openai.responses.create(payload);
  }catch(err){
    try{
      const text = ((err && err.error && err.error.message) || (err && err.message) || '').toLowerCase();
      const mentionsRejectedParam = function(name){
        return (text.includes('unsupported parameter') || text.includes('unknown parameter')) && text.includes(name);
      };
      if (mentionsRejectedParam('temperature') || mentionsRejectedParam('top_p')){
        const retryPayload = Object.assign({}, payload);
        delete retryPayload.temperature;
        delete retryPayload.top_p;
        console.warn('[chatgpt] Retrying without temperature/top_p due to model rejection');
        return await openai.responses.create(retryPayload);
      }
    }catch(_e){ /* fall through and rethrow the original error */ }
    throw err;
  }
}
// Create a vector store and index the given OpenAI file ids into it.
// The store is ephemeral (one per run — reuse/persistence is a possible
// future optimization); at most 10 files are linked, link failures are
// logged and skipped, and we poll best-effort (<= 8s) for indexing to
// complete. Returns { vectorStoreId }.
async function ensureVectorStoreForFiles(fileIds = []){
  const openai = newClient();
  const store = await openai.vectorStores.create({ name: 'JOE Prompt Run '+Date.now() });
  const vectorStoreId = store.id;
  // Link each file id to the store (best-effort).
  for (const fileId of (fileIds||[]).slice(0,10)) {
    try{
      await openai.vectorStores.files.create(vectorStoreId, { file_id: fileId });
    }catch(e){
      console.warn('[chatgpt] vectorStores.files.create failed for', fileId, e && e.message || e);
    }
  }
  // Poll until every linked file reports "completed" or we hit the deadline.
  const deadline = Date.now() + 8000;
  try{
    while(Date.now() < deadline){
      const listed = await openai.vectorStores.files.list(vectorStoreId, { limit: 100 });
      const entries = (listed && listed.data) || [];
      const stillProcessing = entries.some(f => f.status && f.status !== 'completed');
      if(!stillProcessing){ break; }
      await new Promise(resolve => setTimeout(resolve, 500));
    }
  }catch(_e){ /* polling is non-fatal */ }
  return { vectorStoreId: vectorStoreId };
}
// ---------------- OpenAI Files helpers ----------------
/**
 * attachFilesToResponsesPayload
 *
 * Wire OpenAI file attachments into a `responses.create` payload; shared by
 * the MCP and non-MCP call paths. Mutates and returns `payload`.
 *
 * attachments_mode === 'file_search':
 *   - indexes the files into a temporary vector store
 *     (ensureVectorStoreForFiles)
 *   - adds a `file_search` tool to payload.tools when absent
 *   - points payload.tool_resources.file_search.vector_store_ids at the store
 *   - leaves payload.input untouched
 *
 * attachments_mode === 'direct' (default):
 *   - converts a string `input` (or JSON-flattened message array) into an
 *     `input_text` part, appends up to 10 `{ type:'input_file', file_id }`
 *     parts, and replaces payload.input with a single user message
 *
 * File-only by design: instructions and other payload fields are untouched.
 */
async function attachFilesToResponsesPayload(openai, payload, opts){
  const mode = (opts && opts.attachments_mode) || 'direct';
  const fileIds = (opts && opts.openai_file_ids) || [];
  if (!Array.isArray(fileIds) || !fileIds.length) {
    return payload; // nothing to attach
  }
  if (mode === 'file_search') {
    const ensured = await ensureVectorStoreForFiles(fileIds);
    payload.tools = payload.tools || [];
    const hasFileSearch = payload.tools.some(function(t){ return t && t.type === 'file_search'; });
    if (!hasFileSearch) {
      payload.tools.push({ type:'file_search' });
    }
    payload.tool_resources = Object.assign({}, payload.tool_resources, {
      file_search: { vector_store_ids: [ ensured.vectorStoreId ] }
    });
    return payload;
  }
  // Default: direct context stuffing with input_text + input_file parts.
  const parts = [];
  if (typeof payload.input === 'string' && payload.input.trim().length) {
    parts.push({ type:'input_text', text: String(payload.input) });
  } else if (Array.isArray(payload.input)) {
    // Caller already supplied messages: preserve them by flattening to text.
    try{
      const serialized = JSON.stringify(payload.input);
      if (serialized && serialized.length) {
        parts.push({ type:'input_text', text: serialized });
      }
    }catch(_e){}
  }
  for (const fileId of fileIds.slice(0, 10)) {
    if (fileId) {
      parts.push({ type:'input_file', file_id: fileId });
    }
  }
  payload.input = [{ role:'user', content: parts }];
  return payload;
}
/**
 * uploadFileFromBuffer
 *
 * Write `buffer` to a temp file and upload it to OpenAI Files.
 *
 * @param {Buffer} buffer - file bytes to upload
 * @param {string} [filename] - display name; only its basename is used so a
 *   caller-supplied name (e.g. "../x" or "a/b.txt") can never escape the
 *   temp directory
 * @param {string} [contentType] - currently unused (OpenAI infers the type
 *   from the file stream)
 * @param {string} [purpose] - OpenAI file purpose; defaults to 'assistants'
 * @returns {Promise<{id: string, purpose: string}>} created file id + purpose
 */
async function uploadFileFromBuffer(buffer, filename, contentType, purpose) {
  const openai = newClient();
  const usePurpose = purpose || 'assistants';
  const tmpDir = os.tmpdir();
  // Sanitize: strip directory components so the temp write stays in tmpDir.
  const safeName = path.basename(filename || ('upload_' + Date.now()));
  const tmpPath = path.join(tmpDir, safeName);
  await fs.promises.writeFile(tmpPath, buffer);
  try {
    // openai.files.create accepts a readable stream
    const fileStream = fs.createReadStream(tmpPath);
    const created = await openai.files.create({
      purpose: usePurpose,
      file: fileStream
    });
    return { id: created.id, purpose: usePurpose };
  } finally {
    // best-effort cleanup of the temp file
    fs.promises.unlink(tmpPath).catch(() => {});
  }
}
// In-process helper other plugins can call to push a Buffer straight to
// OpenAI Files (no HTTP round-trip). Throws on a missing/empty buffer.
this.filesUploadFromBufferHelper = async function ({ buffer, filename, contentType, purpose }) {
  if (!buffer || !buffer.length) {
    throw new Error('Missing buffer');
  }
  const usePurpose = purpose || 'assistants';
  return uploadFileFromBuffer(buffer, filename, contentType, usePurpose);
};
// Public endpoint: fetch a file from a URL (e.g. an S3 object URL) and
// re-upload it to OpenAI Files. Resolves to
// { success:true, openai_file_id, openai_purpose } or { success:false, error }.
this.filesRetryFromUrl = async function (data, req, res) {
  try {
    const { default: got } = await import('got');
    const url = data && (data.url || data.location);
    const filename = (data && data.filename) || (url && url.split('/').pop()) || ('upload_' + Date.now());
    const contentType = (data && data.contentType) || undefined;
    const purpose = 'assistants';
    if (!url) {
      return { success: false, error: 'Missing url' };
    }
    const fetched = await got(url, { responseType: 'buffer' });
    const created = await uploadFileFromBuffer(fetched.body, filename, contentType, purpose);
    return { success: true, openai_file_id: created.id, openai_purpose: created.purpose };
  } catch (e) {
    return { success: false, error: e && e.message || 'Retry upload failed' };
  }
};
// Smoke-test endpoint: sends a canned prompt to ChatGPT, persists the reply
// as an `ai_response` (best-effort), and returns the raw completion.
this.testPrompt= async function(data, req, res) {
  try {
    var payload = {
      params: req.params,
      data: data
    };
  } catch (e) {
    return { errors: 'plugin error: ' + e, failedat: 'plugin' };
  }
  const client = newClient();
  // NOTE(review): newClient() throws rather than returning { errors }; this
  // check is a leftover from an older client factory and never fires.
  if(client.errors){
    return { errors: client.errors };
  }
  try {
    const chatCompletion = await client.chat.completions.create({
      messages: [{ role: 'user', content: 'Tell me a story about JOE: the json object editor in under 256 chars.' }],
      model: 'gpt-4o',
    });
    coloredLog(chatCompletion);
    // Defensive chain: any missing piece yields an empty string.
    const text = chatCompletion.choices && chatCompletion.choices[0] && chatCompletion.choices[0].message && chatCompletion.choices[0].message.content || '';
    // Optionally persist as ai_response with parsed JSON when applicable
    const parsed = (function(){
      try {
        const jt = extractJsonText(text);
        return jt ? JSON.parse(jt) : null;
      } catch(_e){ return null; }
    })();
    try {
      // Attribute the saved response to the requesting user when available.
      var creator_type = null;
      var creator_id = null;
      try{
        var u = req && req.User;
        if (u && u._id){
          creator_type = 'user';
          creator_id = u._id;
        }
      }catch(_e){}
      const aiResponse = {
        itemtype: 'ai_response',
        name: 'Test Prompt → ChatGPT',
        response_type: 'testPrompt',
        response: text,
        response_json: parsed,
        response_id: chatCompletion.id || '',
        user_prompt: payload && payload.data && payload.data.prompt || 'Tell me a story about JOE: the json object editor in under 256 chars.',
        model_used: 'gpt-4o',
        created: (new Date()).toISOString(),
        creator_type: creator_type,
        creator_id: creator_id
      };
      // Fire-and-forget save; history disabled for test artifacts.
      JOE.Storage.save(aiResponse, 'ai_response', function(){}, { history: false, user: (req && req.User) || { name:'system' } });
    } catch(_e){ /* best-effort only */ }
    return {payload,chatCompletion,content:text};
  } catch (error) {
    if (error.status === 429) {
      return { errors: 'You exceeded your current quota, please check your plan and billing details.' };
    } else {
      return { errors: 'plugin error: ' + error.message, failedat: 'plugin' };
    }
  }
}
/**
 * sendInitialConsultTranscript
 *
 * Load a business's initial consult transcript from Google Docs, combine it
 * with an ai_prompt template, send it to ChatGPT (gpt-4o), and persist the
 * reply as an `ai_response`.
 *
 * Expects: data.business (business _id) and data.ai_prompt (ai_prompt _id);
 * the business object must carry `initial_transcript_url` (a Google Doc URL).
 */
this.sendInitialConsultTranscript= async function(data, req, res) {
  coloredLog("sendInitialConsultTranscript");
  try {
    var payload = {
      params: req.params,
      data: data
    };
  } catch (e) {
    return { errors: 'plugin error: ' + e, failedat: 'plugin' };
  }
  // Resolve the referenced records up front and fail with a clear error
  // instead of crashing on a missing one (previously an unguarded
  // `.initial_transcript_url` read on a possibly-undefined business).
  var businessOBJ = data && JOE.Data.business.find(b=>b._id == data.business);
  if (!businessOBJ) {
    return res.jsonp({ error: 'Business not found: ' + (data && data.business) });
  }
  var promptOBJ = JOE.Data.ai_prompt.find(p=>p._id == data.ai_prompt);
  if (!promptOBJ) {
    return res.jsonp({ error: 'AI prompt not found: ' + (data && data.ai_prompt) });
  }
  const transcriptUrl = businessOBJ.initial_transcript_url;
  if (!transcriptUrl) {
    return res.jsonp({ error: 'No initial transcript URL found' });
  }
  // Pull the transcript text out of the Google Doc.
  // getGoogleDocContent returns a string on success or { error } on failure.
  const transcriptContent = await getGoogleDocContent(transcriptUrl);
  if (!transcriptContent || transcriptContent.error) {
    // Guarded access: transcriptContent may be null/empty here.
    return res.jsonp({ error: (transcriptContent && transcriptContent.error && transcriptContent.error.message) || 'Failed to fetch transcript content' });
  }
  // Token count is informational only (logged and returned in payload).
  const tokenCount = countTokens(`${promptOBJ.template}\n\n${transcriptContent}`);
  payload.tokenCount = tokenCount;
  coloredLog("token count: "+tokenCount);
  // Send template + transcript to ChatGPT (shared client factory for
  // consistency with the other endpoints in this plugin).
  const client = newClient();
  const chatResponse = await client.chat.completions.create({
    messages: [{ role: 'user', content: `${promptOBJ.template}\n\n${transcriptContent}` }],
    model: 'gpt-4o',
  });
  const chatContent = chatResponse.choices[0].message.content;
  const responseName = `${businessOBJ.name} - ${promptOBJ.name}`;
  // Persist as ai_response (saveAIResponse logs and swallows its own errors).
  await saveAIResponse({
    name:responseName,
    business: data.business,
    ai_prompt: data.ai_prompt,
    response: chatContent,
    payload,
    prompt_method:req.params.method
  }, req && req.User);
  coloredLog("response saved -"+responseName);
  return {payload,
    businessOBJ,
    promptOBJ,
    chatContent,
    responseName
  };
}
/**
 * getGoogleDocContent
 *
 * Fetch a Google Doc (by share URL) using the module-level service-account
 * auth and flatten its paragraphs to plain text. The speaker name
 * 'Euron Nicholson' is masked to '[EN]' and WebVTT-style timestamp ranges
 * are collapsed to '-ts-' to save tokens.
 *
 * @param {string} docUrl - Google Docs URL containing "/d/<docId>/"
 * @returns {Promise<string|{error: Error}>} the text, or { error } on failure
 */
async function getGoogleDocContent(docUrl) {
  try {
    // Uses the module-level `google_auth` (service-account keyfile).
    // Removed dead code: a locally-created GoogleAuth instance and a
    // GOOGLE_DOCS_API_KEY settings read, neither of which was ever used.
    const docs = google.docs({ version: 'v1', auth: google_auth });
    const docId = extractDocIdFromUrl(docUrl);
    const doc = await docs.documents.get({ documentId: docId });
    const content = doc.data.body.content.map(element => {
      if (element.paragraph && element.paragraph.elements) {
        return element.paragraph.elements.map(
          e => e.textRun ? e.textRun.content.replace(/Euron Nicholson/g, '[EN]').replace(/\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}/g, '-ts-')
            : ''
        ).join('');
      }
      return '';
    }).join('\n');
    return content;
  } catch (error) {
    console.error('Error fetching Google Doc content:', error);
    return {error};
  }
}
/**
 * countTokens
 *
 * Approximate the token count of `text` for the given model.
 *
 * NOTE(review): the original body called `encoding_for_model` (tiktoken),
 * which is never imported anywhere in this file, so every call threw a
 * ReferenceError and broke sendInitialConsultTranscript. Until the tiktoken
 * import is wired up, fall back to the common ~4 chars/token heuristic;
 * when a tokenizer IS available, use it and release its memory.
 *
 * @param {string} text - text to measure (null/undefined counts as empty)
 * @param {string} [model='gpt-4o'] - model name for encoder selection
 * @returns {number} estimated token count
 */
function countTokens(text, model = 'gpt-4o') {
  if (typeof encoding_for_model !== 'function') {
    // Heuristic: OpenAI English text averages roughly 4 characters/token.
    return Math.ceil(String(text || '').length / 4);
  }
  const enc = encoding_for_model(model);
  try {
    return enc.encode(text).length;
  } finally {
    // tiktoken encodings hold WASM memory; free it when supported.
    if (typeof enc.free === 'function') enc.free();
  }
}
// Pull the document id out of a Google Docs URL ("/d/<id>/..."); returns
// null when the URL does not contain a "/d/<id>" segment.
function extractDocIdFromUrl(url) {
  const idPattern = /\/d\/([a-zA-Z0-9-_]+)/;
  const matched = url.match(idPattern);
  if (!matched) {
    return null;
  }
  return matched[1];
}
/**
 * saveAIResponse
 *
 * Persist a ChatGPT reply as an `ai_response` JOE object, attributed to
 * `user` when one is provided. Errors are logged via coloredLog and never
 * thrown — persistence is best-effort and must not fail the calling request.
 *
 * NOTE(review): `cuid()` is not imported in this file; presumably it is a
 * global provided by the JOE runtime — confirm, otherwise this throws a
 * ReferenceError and the outer catch silently swallows the save.
 */
async function saveAIResponse(data, user) {
  try {
    var creator_type = null;
    var creator_id = null;
    try{
      if (user && user._id){
        creator_type = 'user';
        creator_id = user._id;
      }
    }catch(_e){}
    const aiResponse = {
      name: data.name,
      itemtype: 'ai_response',
      business: data.business,
      ai_prompt: data.ai_prompt,
      response: data.response,
      payload: data.payload,
      prompt_method:data.prompt_method,
      created: (new Date).toISOString(),
      _id:cuid(),
      creator_type: creator_type,
      creator_id: creator_id
      // Add any other fields you want to save
    };
    // Wrap the callback-style Storage.save so callers can await completion.
    await new Promise((resolve, reject) => {
      JOE.Storage.save(aiResponse, 'ai_response', function(err, result) {
        if (err) {
          coloredLog('Error saving AI response: ' + err);
          reject(err);
        } else {
          coloredLog('AI response saved successfully');
          resolve(result);
        }
      });
    });
  } catch (error) {
    // Swallow intentionally: saving the response must not break the request.
    coloredLog('Error in saveAIResponse: ' + error);
  }
}
// Normalize model output that should contain JSON. Models often wrap JSON
// in markdown fences (```json ... ```), and may prepend/append prose. This
// helper strips fences and tries to isolate the first well-formed JSON
// object/array substring so JSON.parse has the best chance of succeeding.
// Handles cases where tool call logs are concatenated before the actual JSON.
// Returns '' for empty input or when no braces/brackets are present at all.
function extractJsonText(raw) {
  if (!raw) { return ''; }
  let t = String(raw).trim();
  // If there is any ```...``` fenced block, prefer its contents.
  const fenceIdx = t.indexOf('```json') !== -1 ? t.indexOf('```json') : t.indexOf('```');
  if (fenceIdx !== -1) {
    let start = fenceIdx;
    // Drop the fence marker line itself (```json or ```), keep what follows.
    const firstNewline = t.indexOf('\n', start);
    if (firstNewline !== -1) {
      t = t.substring(firstNewline + 1);
    } else {
      t = t.substring(start + 3);
    }
    const lastFence = t.lastIndexOf('```');
    if (lastFence !== -1) {
      t = t.substring(0, lastFence);
    }
    t = t.trim();
  }
  // Handle cases where tool call logs (small JSON objects like {"tool":"..."})
  // are concatenated before the actual response JSON (larger JSON object).
  // Find all JSON objects and pick the largest one that's not a tool log.
  const jsonCandidates = [];
  const firstBrace = t.indexOf('{');
  const firstBracket = t.indexOf('[');
  const lastBrace = Math.max(t.lastIndexOf('}'), t.lastIndexOf(']'));
  if (firstBrace === -1 && firstBracket === -1) {
    return '';
  }
  // Earliest opening delimiter of either kind.
  const startPos = (firstBrace === -1) ? firstBracket :
    ((firstBracket === -1) ? firstBrace : Math.min(firstBrace, firstBracket));
  if (startPos === -1 || lastBrace === -1 || lastBrace <= startPos) {
    return t.trim();
  }
  // Find all potential JSON objects: from each opening delimiter, walk a
  // string-aware (and escape-aware) depth counter to its matching close.
  for (let i = startPos; i <= lastBrace; i++) {
    if (t[i] !== '{' && t[i] !== '[') continue;
    // Find matching closing brace/bracket
    let depth = 0;
    let inString = false;
    let escape = false;
    let endPos = -1;
    for (let j = i; j <= lastBrace; j++) {
      const char = t[j];
      if (escape) {
        escape = false;
        continue;
      }
      if (char === '\\') {
        escape = true;
        continue;
      }
      if (char === '"') {
        inString = !inString;
        continue;
      }
      if (!inString) {
        if (char === '{' || char === '[') {
          depth++;
        } else if (char === '}' || char === ']') {
          depth--;
          if (depth === 0) {
            endPos = j;
            break;
          }
        }
      }
    }
    if (endPos !== -1) {
      const candidate = t.substring(i, endPos + 1);
      // Skip tool call logs - they match pattern {"tool":"..."}
      const isToolLog = /^\s*{\s*"tool"\s*:/.test(candidate);
      try {
        // Only keep substrings that actually parse as JSON.
        JSON.parse(candidate);
        jsonCandidates.push({
          text: candidate,
          length: candidate.length,
          isToolLog: isToolLog
        });
      } catch (e) {
        // Not valid JSON, skip
      }
    }
  }
  // Find the largest non-tool-log JSON object, or largest overall if all are tool logs
  if (jsonCandidates.length > 0) {
    // Filter out tool logs first
    const nonToolLogs = jsonCandidates.filter(c => !c.isToolLog);
    const candidatesToUse = nonToolLogs.length > 0 ? nonToolLogs : jsonCandidates;
    // Sort by length (descending) and return the largest
    candidatesToUse.sort((a, b) => b.length - a.length);
    return candidatesToUse[0].text.trim();
  }
  // Fallback: try simple first-to-last extraction
  if (t[0] !== '{' && t[0] !== '[') {
    const first = startPos;
    const last = lastBrace;
    if (first !== -1 && last !== -1 && last > first) {
      t = t.slice(first, last + 1);
    }
  }
  return t.trim();
}
// Autofill feature (Responses API; supports assistant_id or model)
this.autofill = async function (data, req, res) {
const startedAt = Date.now();
const progressToken = (data || {}).progress_token || null;
try {
const body = data || {};
const objectId = body.object_id || body._id;
const object = body.object || $J.get(objectId);
const schemaName = body.schema || (object && object.itemtype) || body.itemtype;
const { full: schemaFull, summary: schemaSummary } = getSchemaDef(schemaName);
const rawFields = body.fields || body.field;
const fields = Array.isArray(rawFields) ? rawFields : (rawFields ? [rawFields] : []);
const userPrompt = body.prompt || '';
const assistantId = body.assistant_id || null;
if (!object) {
return { success: false, error: 'Object not found', code: 'OBJECT_NOT_FOUND' };
}
if (!schemaName) {
return { success: false, error: 'Schema name not determined', code: 'SCHEMA_REQUIRED' };
}
if (!fields.length) {
return { success: false, error: 'No fields specified', code: 'FIELDS_REQUIRED' };
}
// Register job immediately at start
if (progressToken && objectId) {
const fieldId = fields.length === 1 ? fields[0] : fields.join(',');
registerAiJobIfToken(progressToken, {
objectId: objectId,
fieldId: fieldId,
status: 'starting',
message: 'Starting field autofill...',
progress: 0,
total: 100
});
}
const flattened = JOE.Utils.flattenObject(object._id);
const systemText = [
'You are JOE (Json Object Editor) assistant.',
'Task: Populate only the requested fields according to the provided schema context and JOE conventions.',
'- Respect field types (text, number, arrays, enums, references).',
'- Do NOT invent IDs for reference fields; only return human text for text-like fields.',
'- If a field is an enum, choose the closest valid enum. If unsure, omit it from patch.',
'- If a field is an array, return an array of values.',
'- Never modify unrelated fields.',
'- Output MUST be strict JSON with a top-level key "patch" containing only populated fields.',
'- If you lack sufficient information, return an empty patch.'
].join('\\n');
const schemaForContext = schemaSummary || schemaFull || {};
const userInput = JSON.stringify({
action: 'autofill_fields',
target_schema: schemaName,
requested_fields: fields,
user_prompt: userPrompt,
object_context: flattened,
schema_context: schemaForContext
}, null, ' ');
// Update progress before OpenAI call
if (progressToken && objectId) {
updateAiJobIfToken(progressToken, {
status: 'running',
message: 'Analyzing field content...',
progress: 10,
total: 100
});
}
const openai = newClient();
const model = body.model || 'gpt-4o-mini';////'gpt-5-nano';
// Normalize MCP options for autofill. By default, when mcp_enabled is
// true we expose the read-only toolset, which is safe for field
// suggestions. Callers can override toolset / selected tools.
const mcpEnabled = !!body.mcp_enabled;
const mcpToolset = body.mcp_toolset || 'read-only';
const mcpSelected = Array.isArray(body.mcp_selected_tools) ? body.mcp_selected_tools : null;
const mcpInstructionsMode = body.mcp_instructions_mode || 'auto';
let response;
let mcpToolCalls = [];
if (mcpEnabled) {
const toolNames = MCP.getToolNamesForToolset(mcpToolset, mcpSelected);
const toolsForModel = MCP.getToolDefinitions(toolNames);
const mcpText = MCP.buildToolInstructions(toolNames, mcpInstructionsMode);
const systemTextWithMcp = [systemText, mcpText || ''].join('\n').trim();
const messages = [{ role:'user', content:userInput }];
// Update progress before OpenAI API call
if (progressToken && objectId) {
updateAiJobIfToken(progressToken, {
status: 'running',
message: 'Generating field value...',
progress: 50,
total: 100
});
}
const runResult = await runWithTools({
openai: openai,
model: model,
systemText: systemTextWithMcp,
messages: messages,
assistant: { tools: toolsForModel },
req: req
});
response = runResult.response;
if (runResult && Array.isArray(runResult.toolCalls)) {
mcpToolCalls = runResult.toolCalls.map(function(tc){
return {
name: tc && (tc.name || tc.function_name || tc.tool_name),
arguments: tc && tc.arguments
};
}).filter(function(x){ return x && x.name; });
}
} else {
// For simplicity and robustness, use plain text output and instruct the
// model to return a strict JSON object. We previously attempted the
// Responses `json_schema` response_format, but the SDK shape can change
// and is harder to parse reliably; text + JSON.parse is sufficient here.
const requestBase = {
temperature: 0.2,
instructions: systemText,
input: userInput
};
// Optional web_search tool: if the caller sets allow_web truthy, expose
// the built-in web_search capability and let the model decide when to
// call it.
if (body.allow_web) {
coloredLog("allowing web search");
requestBase.tools = [{ type: 'web_search' }];
requestBase.tool_choice = 'auto';
}
// Update progress before OpenAI API call
if (progressToken && objectId) {
updateAiJobIfToken(progressToken, {
status: 'running',
message: 'Generating field value...',
progress: 50,
total: 100
});
}
if (assistantId) {