// openai-code
// An unofficial proxy layer that lets you use Anthropic Claude Code with any OpenAI API backend.
import express from "express"
import { existsSync, readFileSync } from "node:fs"
import { writeFile } from "node:fs/promises"
import { storeTempFile, readTempFile, safeReadFileSync } from "./fs.mjs"
import { OpenAIOptimizedPrompts } from "./prompts.mjs"
import { loadDotEnv, getEnv } from "./env.mjs"
import { sendOpenAIRequest, getReasoningModel, answer, decide } from "./openai.mjs"
import { resolve, join } from "node:path"
import { indexRelevantCodeFromContext, embedSingleDocument } from "./vectorindex.mjs"
import { addToIndex, updateByPath } from "./vectordb.mjs"
import { processAnswer } from "./response.mjs"
import { getTechsetConfigContext } from "./techset.mjs"
import { escapeContent } from "./prompts.mjs"
import { parseCommands } from "./command.mjs"
import { getVectorSearchTopK, vectorSearchCommand } from "./commands/vector-search.mjs"
import { getState, setState } from "./state.mjs"
import { isDeepThoughtActivated, getMaxDeepReasoningGraphDepth, defaultMaxReasoningGraphDepth } from "./commands/deep-thought.mjs"
import fg from "fast-glob"
import { skipDirectoryNames } from "./ignore.mjs"
import { isPerplexityActivated, perplexity } from "./commands/perplexity.mjs"
import { fetchStackOverflowAnswers, isStackOverflowActivated } from "./commands/stackoverflow.mjs"
const app = express()
const port = getEnv('OPENAI_CODE_PORT') || 6543
app.use(express.json({ limit: "500mb" }))
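// Builds an indented tree listing (one space per depth level, "- " bullets) from a flat
// fast-glob path list, e.g. ["src/index.mjs", "src/utils/fs.mjs"]. Used below to replace
// Claude Code's verbose <context name="directoryStructure"> block with a more
// token-efficient representation.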
const buildIndentedStructure = (fileList) => {
const indent = (level) => ' '.repeat(level);
const structure = [];
const pathStack = [];
fileList.forEach(filePath => {
const parts = filePath.split('/');
let level = 0;
// Find the common path prefix
while (level < pathStack.length && pathStack[level] === parts[level]) {
level++;
}
// Remove the differing parts from the stack
while (pathStack.length > level) {
pathStack.pop();
}
// Add new parts to the stack and structure
while (level < parts.length) {
// Note: files and directories currently use the same "- " marker.
structure.push(`${indent(level)}- ${parts[level]}`);
pathStack.push(parts[level]);
level++;
}
});
return structure.join('\n');
};
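// Anthropic-compatible Messages endpoint. Claude Code sends its requests here
// (model, messages, system, stream) and they are translated into chat completion
// requests against the configured OpenAI backend.
// Illustrative request (payload shape only; the model name is an example):
// curl -X POST http://127.0.0.1:6543/v1/messages -H "content-type: application/json" \
//   -d '{"model":"claude-3-5-sonnet","messages":[{"role":"user","content":"hello"}],"stream":false}'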
app.post("/v1/messages", async (req, res) => {
try {
const {
model,
messages,
system,
stream = false,
} = req.body
let promptType = "default"
// commands parsed from the user input
let userMessageCommands = {}
let nextTaskCommands = {}
let currentGoalCommands = {}
let context = ""
let techsetContext = null;
let workspaceRules = ""
let ignorePatterns = []
// agent state
let stackoverflowData = null
let perplexityData = null
let relevantCode = null;
const reasoningModel = getReasoningModel()
const formattedMessages = []
// current working directory (known for any default-type prompt; parsed from the context)
let workingDirectory = null
async function postProcessToolExecution(toolCall, toolId) {
console.log('Tool call:', toolCall)
storeTempFile(toolId, JSON.stringify(toolCall))
}
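// Claude Code's <policy_spec> bash-policy check is short-circuited here and answered
// with a no-op; the original policy evaluation is kept below, commented out, for reference.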
if (messages && messages.length === 1 && messages[0]?.role === "user" &&
messages[0]?.content && JSON.stringify(messages[0]?.content)?.indexOf('<policy_spec>') !== -1) {
/*
const commandToRun = messages[0]?.content
?.split('\n')
.map(line => line.match(/^Command: (.+)$/)?.[1])
.filter(Boolean)[0]
const policyDecision = await answer(OpenAIOptimizedPrompts.policyPrompt.split("<COMMAND>").join(commandToRun), "low")
const policySpec = policyDecision?.policy || "command_injection_detected" // secure default
*/
await processAnswer({
textMessage: "none",
useTools: [],
stream,
model,
res,
postProcessToolExecution
});
return;
}
// Bash command result processing
if (messages && messages.length === 1 && messages[0]?.role === "user" &&
messages[0]?.content && /Command: (.+)?\nOutput: /.test(messages[0]?.content)) {
await processAnswer({
textMessage: (() => {
const contentText = messages[0]?.content || "";
if (/error/i.test(contentText)) {
return "The command had an error to fix.";
}
if (/warning/i.test(contentText)) {
return "The command has a warning to consider.";
}
return "The output has been printed.";
})(),
useTools: [],
stream,
model,
res,
postProcessToolExecution
})
return;
}
if (system && messages.length === 1 && system[0]?.text?.startsWith('You are a command description generator.')) {
await processAnswer({
textMessage: "Skipped command docs. Don't know what it does? Choose 'No', ask, re-run.",
useTools: [],
stream,
model,
res,
postProcessToolExecution
})
return
}
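// Flatten the incoming system prompt(s) and drop Claude Code's own identity prompt
// ("You are Claude Code..."); the proxy substitutes its OpenAI-optimized prompts instead.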
let flattenedSystemMessages = []
if (system) {
if (Array.isArray(system)) {
flattenedSystemMessages = system.map(message => message.text)
} else {
flattenedSystemMessages = [system]
}
flattenedSystemMessages = flattenedSystemMessages.filter(prompt => !prompt.startsWith("You are Claude Code"))
}
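// Maps Claude Code's system prompts onto the proxy's own template: extracts <env> and
// <context> blocks, rebuilds the directory structure, loads per-project configuration
// (.env, CLAUDE_RULES.md, techset), and selects the prompt type
// (git-history, file-paths, new-topic, default).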
async function mapSystemPrompt(basePrompt, flattenedSystemPrompts) {
let systemPrompt = ""
for (const prompt of flattenedSystemPrompts) {
// Extract <env>...</env> if present
const envMatch = prompt.match(/<env>([\s\S]*?)<\/env>/)
if (envMatch) {
context += `${envMatch[0]}\n`
}
// Extract all <context name="[^"]+">...</context> blocks
const contextMatches = [...prompt.matchAll(/<context name="[^"]+">([\s\S]*?)<\/context>/g)]
let originalDirectoryStructure = null;
for (const match of contextMatches) {
const contextName = match[0].match(/name="([^"]+)"/)[1]; // get the name of the context
if (contextName === "directoryStructure") {
originalDirectoryStructure = match[0]
}
context += `${match[0]}\n`
}
if (context !== "") {
const workingDirMatch = context.match(/Working directory: ([^\n]+)/)
if (workingDirMatch) {
workingDirectory = workingDirMatch[1]
const additionalIgnorePatternsContents = safeReadFileSync(join(workingDirectory, '.claudeignore'))
if (additionalIgnorePatternsContents) {
ignorePatterns = additionalIgnorePatternsContents.split('\n').filter(Boolean);
}
if (workingDirectory) {
console.log("Using working directory:", workingDirectory)
if (originalDirectoryStructure) {
const flatList = await fg(["**/*"], {
cwd: workingDirectory,
onlyFiles: false,
ignore: [...ignorePatterns, ...skipDirectoryNames].map(n => `${n}/**`)
})
console.log("Better flat list", flatList)
const indentedStructure = buildIndentedStructure(flatList);
const structureContext = OpenAIOptimizedPrompts.directoryStructureContextPrompt.split('<DIRECTORY_STRUCTURE>').join(indentedStructure)
// replace bloated directory structure with a more concise one
context = context.replace(originalDirectoryStructure, structureContext)
}
if (messages.length === 1) { // start of workflow or when /clear was used
setState(workingDirectory, "currentGoal", null)
setState(workingDirectory, "nextTask", null)
console.log("Start of workflow or /clear has been executed!");
}
// index relevant code (partial indexing of the workspace)
await indexRelevantCodeFromContext(workingDirectory)
const envPath = resolve(workingDirectory, '.env')
if (existsSync(envPath)) {
try {
loadDotEnv(envPath)
console.log("Environment variables loaded from .env file: (ONLY FOR: OPENAI_CODE_API_KEY, OPENAI_CODE_BASE_URL, TOOLS_MODEL, REASONING_MODEL) NOT sent over wire!", envPath)
} catch (error) {
console.error("Error loading .env file:", error)
}
}
techsetContext = await getTechsetConfigContext(workingDirectory);
const claudeRulesPath = resolve(workingDirectory, 'CLAUDE_RULES.md')
if (existsSync(claudeRulesPath)) {
try {
workspaceRules = readFileSync(claudeRulesPath, 'utf-8')
console.log("CLAUDE_RULES.md content loaded successfully.")
// You can process the content of CLAUDE_RULES.md here if needed
} catch (error) {
console.error("Error reading CLAUDE_RULES.md:", error)
}
} else {
console.log("CLAUDE_RULES.md does not exist in the working directory.")
}
}
}
}
if (prompt.startsWith("You are an expert at analyzing git history.")) {
systemPrompt = prompt
promptType = "git-history"
} else if (prompt.startsWith("Extract any file paths that this command reads or modifies.")) {
systemPrompt = prompt
promptType = "file-paths"
} else if (prompt.startsWith("Analyze if this message indicates a new conversation topic.")) {
systemPrompt = OpenAIOptimizedPrompts.systemPromptAnalyzeNewTopic
promptType = "new-topic"
} else {
promptType = "default"
systemPrompt = `${basePrompt
.split('<CONTEXT>').join(escapeContent(context))
.split('<TOOLS>').join(escapeContent(JSON.stringify(OpenAIOptimizedPrompts.systemToolsPrompt)))
.split('<CUSTOM_RULES>').join(workspaceRules ? `User-defined rules:\n${escapeContent(workspaceRules)}\n` : '')
.split('<TECHSET_CONTEXT>').join(getTechsetContextFormatted())
.split(`Model: ${model}`).join(`Model: ${reasoningModel}`)}`
}
}
if (systemPrompt.toLowerCase().indexOf('json') === -1) {
systemPrompt += "\n\nMUST answer in parsable JSON format." // enforce JSON format, in case of arbitrary prompt
console.log("[WARN] System prompt was missing JSON format enforcement:", systemPrompt)
}
return systemPrompt
}
const getMessageHistoryFormatted = () => JSON.stringify(formattedMessages.slice(1), null, 2)
const getTechsetContextFormatted = () => techsetContext ? `<PROJECT_CONFIG>\n${escapeContent(JSON.stringify(techsetContext))}\n</PROJECT_CONFIG>` : ''
// Process system messages
if (flattenedSystemMessages.length > 0) {
formattedMessages.push({
role: "system",
content: await mapSystemPrompt(OpenAIOptimizedPrompts.systemPrompt, flattenedSystemMessages)
})
}
const lastFormattedMessage = formattedMessages[formattedMessages.length - 1]
const lastPromptIsGitHistoryPrompt =
lastFormattedMessage?.content && /You are an expert at analyzing git history./i.test(lastFormattedMessage.content)
const lastPromptHasFilesModifiedByUser =
lastFormattedMessage?.content && /Files modified by user:/i.test(lastFormattedMessage.content)
if (lastPromptIsGitHistoryPrompt && !lastPromptHasFilesModifiedByUser) {
await processAnswer({
textMessage: "You asked to analyze git history but no files modified by user were found.",
useTools: [],
stream,
model,
res,
postProcessToolExecution
})
return
}
const toolCallHistory = new Map()
let mostRecentUserMessage = null
let mostRecentAssistantMessage = null
let nextTask = null
let currentGoal = null;
let prevTask = null;
// role can be "user", "assistant"
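// Translates a single Anthropic content block (text, image, tool_use, tool_result) into
// an OpenAI-style content part, tracking tool state and the most recent user/assistant
// text along the way.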
const mapEvent = async(item, role) => {
if (typeof item === "string") {
if (role === "user") {
mostRecentUserMessage = item
}
if (role === "assistant") {
mostRecentAssistantMessage = item
}
return item
} else {
switch (item.type) {
case "text": {
if (role === "user") { // define new milestone goal
mostRecentUserMessage = item.text
// user changed the milestone goal
}
if (role ==="assistant") {
mostRecentAssistantMessage = item.text
}
return { type: "text", text: item.text }
}
case "image": {
return {
type: "image_url",
image_url: {
url: `data:${item.source.media_type};${item.source.type},${item.source.data}`,
detail: "high",
},
}
}
case "tool_result": {
await trackToolState(item)
const text = `Tool with id: ${item.tool_use_id} was executed. Result: ${JSON.stringify(item.content)}. ${item.is_error ?
OpenAIOptimizedPrompts.toolResultErrorPrompt :
OpenAIOptimizedPrompts.toolResultFollowupPrompt }`
mostRecentAssistantMessage = text
return {
type: "text",
text,
}
}
case "tool_use": {
await trackToolState(item)
const text = `Tool ${item.name} with id ${item.id} and parameters: ${JSON.stringify(item.input)} planned for use.`
mostRecentAssistantMessage = text
return {
type: "text",
text,
}
}
default:
console.error("Unsupported item type:", item.type)
return null
}
}
}
// Process messages and map their content.
if (messages && Array.isArray(messages)) {
for (const message of messages) {
let events = message.content
if (Array.isArray(events)) {
const mappedEvents = [];
for (const item of events) {
const event = await mapEvent(item, message.role);
if (event !== null) {
mappedEvents.push(event);
}
}
events = mappedEvents
} else {
// content is a single event (plain string or object)
events = await mapEvent(events, message.role)
}
if (events !== null) {
formattedMessages.push({ role: message.role, content: events })
}
}
}
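// Tracks the lifecycle of tool calls (planned -> executed/error) and keeps the vector
// database index in sync after Replace/Edit tool executions.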
async function trackToolState(event) {
if (event.type === "tool_use") {
// If a new tool_use event is received, mark any pending error as handled.
for (const [id, record] of toolCallHistory.entries()) {
if (record.state === "error") {
// Reset the error state to indicate recovery.
toolCallHistory.set(id, { ...record, state: "error_handled" })
}
}
toolCallHistory.set(event.id, { state: "planned", payload: event })
} else if (event.type === "tool_result") {
const toolCall = readTempFile(event.tool_use_id)
if (toolCall !== null) {
const toolCallParsed = JSON.parse(toolCall)
console.log("Tool called (finished execution):", toolCallParsed)
console.log('- Updating Vector Database index after tool use...')
if (toolCallParsed?.function?.name === "Replace") {
const args = JSON.parse(toolCallParsed.function.arguments);
const fileExists = existsSync(args.file_path);
if (fileExists) {
// If the file exists, update it with the new content
updateByPath(args.file_path, await embedSingleDocument(args.file_path, args.content));
} else {
// If the file does not exist, add the new content
addToIndex(args.file_path, args.content);
}
}
if (toolCallParsed?.function?.name === "Edit") {
const args = JSON.parse(toolCallParsed.function.arguments);
updateByPath(args.file_path, await embedSingleDocument(args.file_path));
}
console.log('- Finished updating Vector Database index after tool use.')
}
// replace with null to avoid re-processing the same tool result
storeTempFile(event.tool_use_id, JSON.stringify(null))
const toolId = event.tool_use_id
if (toolCallHistory.has(toolId)) {
toolCallHistory.get(toolId).state = event.is_error ? "error" : "executed"
} else {
toolCallHistory.set(toolId, { state: event.is_error ? "error" : "executed", payload: event })
}
}
}
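// Assembles the final system prompt for the reasoning model: resolves the current goal
// and next task (deep-thought mode), injects optional Perplexity (:p) and
// StackOverflow (:so) context, and appends vector-search results for :v<K> commands.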
const getSystemPrompt = async() => {
let systemPrompt = formattedMessages?.[0]?.content;
if (promptType === "default") {
userMessageCommands = parseCommands(mostRecentUserMessage)
currentGoal = getState(workingDirectory, "currentGoal")
if (!currentGoal && mostRecentUserMessage) {
currentGoal = mostRecentUserMessage;
currentGoalCommands = parseCommands(currentGoal)
setState(workingDirectory, "currentGoal", currentGoal)
}
/*
currentGoal = getState(workingDirectory, "currentGoal")
// conclude on a good starting/re-starting goal (after topic change)
if (!currentGoal && mostRecentUserMessage) {
// user responded to the assistant's answer, so we need to reinforce the goal
const concludedGoal = await answer(OpenAIOptimizedPrompts.goalConclusionPrompt
.split('<USER_MESSAGE>').join(mostRecentUserMessage)
.split('<ASSISTANT_MESSAGE>').join(mostRecentAssistantMessage || "(no assistant message yet)")
.split('<CONTEXT>').join(escapeContent(context))
.split('<TECHSET_CONTEXT>').join(techsetContext ? `<PROJECT_CONFIG>\n${escapeContent(JSON.stringify(techsetContext))}\n</PROJECT_CONFIG>` : '')
)
if (concludedGoal?.goal) {
currentGoal = concludedGoal.goal
setState(workingDirectory, "currentGoal", currentGoal)
console.log("Concluded a new goal based on USER_MESSAGE and ASSISTANT_MESSAGE", currentGoal)
}
}
*/
// define milestone goal if not yet defined
/*
milestoneGoal = getState(workingDirectory, "milestoneGoal")
if (!milestoneGoal) {
const definedMilestoneGoal = await answer(OpenAIOptimizedPrompts.defineMilestoneGoalPrompt
.split('<GOAL>').join(currentGoal)
.split('<CONTEXT>').join(escapeContent(context))
.split('<TECHSET_CONTEXT>').join(techsetContext ? `<PROJECT_CONFIG>\n${escapeContent(JSON.stringify(techsetContext))}\n</PROJECT_CONFIG>` : '')
)
if (definedMilestoneGoal?.milestoneGoal) {
milestoneGoal = definedMilestoneGoal.milestoneGoal
setState(workingDirectory, "milestoneGoal", milestoneGoal)
console.log("Set a a milestone goal:", milestoneGoal)
}
}*/
// something has been executed, entering auto-regressive goal-setting mode
// (reasoning on what to do next, based on mostRecentAssistantMessage, GOAL and MILESTONE_GOAL)
console.log("Most recent assistant message:", mostRecentAssistantMessage)
console.log("Most recent user message:", mostRecentUserMessage)
console.log("Current goal:", currentGoal)
if (isDeepThoughtActivated(userMessageCommands)) {
const nextTaskDecision = await answer(OpenAIOptimizedPrompts.defineNextTaskPrompt
.split('<USER_MESSAGE>').join(mostRecentUserMessage || "(no user message yet)")
.split('<PREVIOUS_TASK>').join(prevTask || "(no previous task yet)")
.split('<GOAL>').join(currentGoal) // most recent user message OR next current goal, auto-regressively determined by the model through reasoning
.split('<ASSISTANT_MESSAGE>').join(mostRecentAssistantMessage || "(no assistant message yet)")
.split('<CONTEXT>').join(escapeContent(context))
.split('<MESSAGE_HISTORY>').join(escapeContent(getMessageHistoryFormatted()))
.split('<TECHSET_CONTEXT>').join(getTechsetContextFormatted())
)
if (nextTaskDecision?.nextTask) {
prevTask = nextTask // save last task
nextTask = nextTaskDecision.nextTask // updates the actual goal to what the model "thinks" the dev actually needs now (to fulfill the original goal)
nextTaskCommands = parseCommands(nextTask)
setState(workingDirectory, "nextTask", nextTask)
console.log("Determined the next task to reach GOAL", currentGoal, "to be", nextTask)
}
if (nextTaskDecision?.newGoal) {
currentGoal = nextTaskDecision.newGoal
currentGoalCommands = parseCommands(currentGoal)
setState(workingDirectory, "currentGoal", currentGoal)
console.log("Set a new goal:", currentGoal)
}
} else {
setState(workingDirectory, "currentGoal", null) // reset the goal (shall not persist over turns)
}
// :p command for activating Perplexity agent; prevent double-fetching
if (isPerplexityActivated(userMessageCommands) && !perplexityData) {
perplexityData = await perplexity(nextTask || currentGoal)
const { text: pplxText, citations: pplxCitations } = perplexityData
if (pplxText && Array.isArray(pplxCitations)) {
systemPrompt = systemPrompt
.split('<PERPLEXITY_CONTEXT>').join(`<pplx_results><text>${pplxText}</text><citations>${pplxCitations.map((v, i) => `[${i}] ${v}`).join("\n")}</citations></pplx_results>`)
} else {
systemPrompt = systemPrompt
.split('<PERPLEXITY_CONTEXT>').join('')
}
} else {
systemPrompt = systemPrompt
.split('<PERPLEXITY_CONTEXT>').join('')
}
// :so command for activating StackOverflow agent
if (isStackOverflowActivated(userMessageCommands) && !stackoverflowData) {
stackoverflowData = await fetchStackOverflowAnswers(nextTask || currentGoal)
systemPrompt = systemPrompt
.split('<STACKOVERFLOW_CONTEXT>').join(`<stackoverflow_results>${JSON.stringify(stackoverflowData)}</stackoverflow_results>`)
} else {
systemPrompt = systemPrompt
.split('<STACKOVERFLOW_CONTEXT>').join('')
}
// when no new goal has been defined, use the current goal (most recent user message)
systemPrompt = systemPrompt
.split('<TASK>').join(escapeContent( nextTask || currentGoal ))
.split('<GOAL>').join(escapeContent(currentGoal))
.split('<MESSAGE_HISTORY>').join(escapeContent(getMessageHistoryFormatted()))
const userMessageVectorSearchK = getVectorSearchTopK(userMessageCommands) || 0
const nextTaskVectorSearchK = getVectorSearchTopK(nextTaskCommands) || 0
const currentGoalVectorSearchK = getVectorSearchTopK(currentGoalCommands) || 0
const maxK = Math.max(userMessageVectorSearchK, nextTaskVectorSearchK, currentGoalVectorSearchK)
if (maxK > 0 && !relevantCode) {
relevantCode = await vectorSearchCommand(nextTask || currentGoal, maxK)
}
if (maxK > 0) {
systemPrompt += relevantCode || `(no relevant code found for this task. Increase to :v${maxK + 1}, use a more specific prompt, or try searching with the LS or GlobTool)`
}
//console.log("System prompt:", systemPrompt)
}
return systemPrompt
}
let reReasoningRequired = false;
let reasoningResult = null;
let reasoningAttempts = 0;
let inputTokens = 0;
let outputTokens = 0;
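// Single reasoning pass: sends the assembled system prompt to the reasoning model as a
// JSON-only request, then validates the result and repairs common failure modes
// (unparsable JSON, schema violations, tool calls emitted as plain text).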
const runReasoning = async () => {
let useTools = null;
let textMessage = null;
const produceReasoningReturnState = () => ({
useTools,
textMessage,
})
console.log("")
console.log("--- Prompt ---")
console.log("")
const prompt = await getSystemPrompt()
console.log('Prompt:', prompt)
// adaptive reasoning effort
const determineReasoningMode = () => {
if (model.indexOf('haiku') !== -1) {
return "low"
}
if (reReasoningRequired || /think (hard(er)?|deep(er)?|more)/i.test(mostRecentUserMessage)) {
return "high"
}
return "medium"
}
const reasoningEffort = determineReasoningMode();
console.log(`Using ${reasoningModel} model with *${reasoningEffort}* reasoning effort for this request.`);
// actual reasoning request
const reasonCompletion = await sendOpenAIRequest({
messages: [{
role: "system",
content: prompt,
}],
stream: false,
model: reasoningModel,
tools: [],
reasoning_effort: reasoningEffort,
tool_choice: "none",
response_format: {
type: "json_object",
},
});
// add up tokens
inputTokens += reasonCompletion.usage.prompt_tokens;
outputTokens += reasonCompletion.usage.completion_tokens;
if (reasonCompletion.choices && reasonCompletion.choices.length > 0) {
const choice = reasonCompletion.choices[0];
const content = choice.message.content;
let lastReasoningResult
try {
lastReasoningResult = JSON.parse(content);
} catch (error) {
reReasoningRequired = true;
formattedMessages.push({
role: "user",
content: OpenAIOptimizedPrompts.jsonParseErrorCorrectionPrompt,
});
return produceReasoningReturnState();
}
// if none of these fields is defined, treat it as model misbehavior and tell the model to correct itself
if (typeof lastReasoningResult.isNewTopic === "undefined" &&
typeof lastReasoningResult.useTools === "undefined" &&
typeof lastReasoningResult.text === "undefined") {
reReasoningRequired = true;
formattedMessages.push({
role: "user",
content: OpenAIOptimizedPrompts.jsonSchemaCorrectionPrompt,
});
return produceReasoningReturnState();
}
// useTools must be structured data (an array of tool calls); if the model returned it as a string, ask it to re-format
if (lastReasoningResult.useTools && typeof lastReasoningResult.useTools !== "string") {
useTools = lastReasoningResult.useTools
reReasoningRequired = false
// TODO: check the validity of each tool against schema; for each tool, handle and fix model's misbehavior
return produceReasoningReturnState();
} else if (typeof lastReasoningResult.useTools === "string") {
reReasoningRequired = true;
formattedMessages.push({
role: "user",
content: OpenAIOptimizedPrompts.jsonSchemaCorrectionPrompt,
});
return produceReasoningReturnState();
}
// in this case (isNewTopic is set), the text message can be a JSON string
if (typeof lastReasoningResult.isNewTopic !== "undefined") {
if (lastReasoningResult.isNewTopic) {
console.log("New topic detected, resetting goals.")
}
textMessage = JSON.stringify(lastReasoningResult)
reReasoningRequired = false
return produceReasoningReturnState();
}
// standard text message response
if (typeof lastReasoningResult.text === "string") {
textMessage = lastReasoningResult.text
try {
// if the text message parses as JSON, it is most likely a mis-formatted tool call; try to recover or ask the model to re-format
const wrongToolCallStruct = JSON.parse(textMessage)
console.log('[ERROR] looks like a wrong tool call! Data:', wrongToolCallStruct)
// TODO: Try to quick-recover from the issue (file_path, content => Replace)
const filePath = wrongToolCallStruct.file_path || wrongToolCallStruct.filepath || wrongToolCallStruct.path
const content = wrongToolCallStruct.content || wrongToolCallStruct.contents || wrongToolCallStruct.text
if (filePath && content) {
console.log("[WARN] Attempting to recover from wrong tool call... (data present in text)", filePath, content)
reReasoningRequired = false
useTools = [{
name: "Replace",
parameters: { file_path: filePath, content }
}]
textMessage = null; // clear the text message
return produceReasoningReturnState();
} else {
reReasoningRequired = true;
formattedMessages.push({
role: "user",
content: OpenAIOptimizedPrompts.jsonSchemaCorrectionPrompt,
});
return produceReasoningReturnState();
}
} catch (error) {
// not JSON: this is the expected case for a plain text answer
}
reReasoningRequired = false
return produceReasoningReturnState();
} else {
// recovery fields are read from the root of the parsed reasoning result
const filePath = lastReasoningResult.file_path || lastReasoningResult.filepath || lastReasoningResult.path
const content = lastReasoningResult.content || lastReasoningResult.contents || lastReasoningResult.text
if (filePath && content) {
console.log("[WARN] Attempting to recover from wrong tool call... (data present in root)", filePath, content)
reReasoningRequired = false
useTools = [{
name: "Replace",
parameters: { file_path: filePath, content }
}]
return produceReasoningReturnState();
} else {
reReasoningRequired = true;
formattedMessages.push({
role: "user",
content: OpenAIOptimizedPrompts.jsonSchemaCorrectionPrompt,
});
return produceReasoningReturnState();
}
}
}
// no usable choices in the completion: treat as an error and require re-reasoning
reReasoningRequired = true;
return produceReasoningReturnState();
};
/*
if (promptType === "default" && isVectorSearchActivated(commands)) {
relevantCode = await vectorSearchCommand(mostRecentUserMessage, getVectorSearchTopK(commands))
//if (await decide(OpenAIOptimizedPrompts.judgeNeedProjectSearchPrompt.replace('<GOAL>', lastGoal))) {
//relevantCode = await getRelevantCodeForPrompt(lastGoal, codebaseSearchK)
//}
}*/
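// Reasoning loop: retries while re-reasoning is required, bounded by the configured
// maximum deep-reasoning graph depth.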
const maxDeepThoughtGraphDepth = getMaxDeepReasoningGraphDepth(userMessageCommands) || defaultMaxReasoningGraphDepth;
// reasoning attempt: run when there is no result yet, or when re-reasoning is required, up to maxDeepThoughtGraphDepth attempts
while (!reasoningResult || (reReasoningRequired && reasoningAttempts < maxDeepThoughtGraphDepth)) {
console.log(`Reasoning attempt #${reasoningAttempts + 1}...`)
reasoningResult = await runReasoning();
// workaround for an o3 quirk: the model sometimes answers with a plan in prose instead of emitting tool calls
// sentence starts that signal such a plan, e.g. "I will...", German "Ich schreibe jetzt..." ("I'm now writing..."), ...
const sentenceStartsWithPlan = [
"I", // English
"Let's", // English (inclusive)
"Ich", // German
"Lass", // German (inclusive)
"Je", // French
"On", // French (inclusive)
"Yo", // Spanish
"Vamos", // Spanish (inclusive)
"Eu", // Portuguese
"Vamos", // Portuguese (inclusive)
"Jeg", // Norwegian
"La", // Norwegian (inclusive)
"Ik", // Dutch
"Laten", // Dutch (inclusive)
"Minä", // Finnish
"Tehdään", // Finnish (inclusive)
"Jag", // Swedish
"Låt", // Swedish (inclusive)
"Aku", // Indonesian
"Mari", // Indonesian (inclusive)
"Mina", // Estonian
"Teeme", // Estonian (inclusive)
"Eg", // Icelandic
"Við", // Icelandic (inclusive)
"Ego", // Greek
"Ας", // Greek (inclusive)
"Mig", // Danish
"Lad", // Danish (inclusive)
"我", // Chinese (simplified)
"我们", // Chinese (simplified, inclusive)
"Я", // Russian
"Давайте", // Russian (inclusive)
"私は", // Japanese
"さあ", // Japanese (inclusive)
"나는", // Korean
"우리", // Korean (inclusive)
"أنا", // Arabic
"لن", // Arabic (inclusive)
"मैं", // Hindi
"चलो", // Hindi (inclusive)
];
if (reasoningResult.textMessage && promptType === "default" &&
sentenceStartsWithPlan.some(start => reasoningResult.textMessage.startsWith(start))) {
//const messageContainsAPlan = await decide(OpenAIOptimizedPrompts.decideSimpleFuture.split('<MESSAGE>').join(reasoningResult.textMessage))
console.log("[WARN] Assistant response looks like a plan to be done, but is hasn't been done yet. Instructing the model to execute...")
// it's a plan to do something ("I'll...")
//if (messageContainsAPlan) {
// model feeds a new goal to itself...
currentGoal = sentenceStartsWithPlan.reduce((text, start) => {
const regex = new RegExp(`^${start}\\b`, 'i');
return text.replace(regex, '');
}, reasoningResult.textMessage).trim(); // transforms "I will do..." -> "will do..."
currentGoalCommands = parseCommands(currentGoal)
setState(workingDirectory, "currentGoal", currentGoal)
console.log("Set a new goal:", currentGoal)
reReasoningRequired = true;
reasoningAttempts--; // this call does not count as a reasoning attempt
}
reasoningAttempts++; // increment reasoning attempts
}
if (reasoningAttempts >= maxDeepThoughtGraphDepth) {
console.log("Reasoning attempts exceeded, returning with error.")
await processAnswer({
usage: {
input_tokens: inputTokens,
output_tokens: outputTokens
},
textMessage: "The model failed to reason correctly. Please try again. Maybe use /clear to clear the conversation history.",
useTools: [],
stream, // answer as requested, stream or not
model, // act as if the original model was used
res,
postProcessToolExecution
});
return
}
await processAnswer({
usage: {
input_tokens: inputTokens,
output_tokens: outputTokens
},
textMessage: reasoningResult.textMessage,
useTools: reasoningResult.useTools,
stream, // answer as requested, stream or not
model, // act as if the original model was used
res,
postProcessToolExecution
});
} catch (error) {
console.error("Error processing request:", error)
if (!res.headersSent) {
res.status(500).json({
type: "error",
error: { type: "server_error", message: error.message },
})
} else {
try {
const err = { type: "error", error: { type: "server_error", message: error.message } }
res.end(JSON.stringify(err))
} catch (endError) {
console.error("Failed to end response after error:", endError)
}
}
}
})
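// Writes a minimal ~/.claude.json (random user ID, onboarding marked as completed,
// auto-updater disabled) so the Claude Code CLI skips its first-run onboarding when
// pointed at this proxy.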
async function initializeClaudeConfig() {
const homeDir = process.env.HOME
const configPath = `${homeDir}/.claude.json`
if (!existsSync(configPath)) {
const userID = Array.from({ length: 64 }, () => Math.random().toString(16)[2]).join("")
const configContent = {
numStartups: 184,
autoUpdaterStatus: "disabled",
userID,
hasCompletedOnboarding: true,
lastOnboardingVersion: "0.2.9",
projects: {},
}
await writeFile(configPath, JSON.stringify(configContent, null, 2))
}
}
async function run() {
await initializeClaudeConfig()
app.listen(port, "0.0.0.0", () => {
const serverUrl = `http://127.0.0.1:${port}`
console.log(`Claude Code OpenAI proxy running on: ${serverUrl}`)
console.log(`Run Claude Code through this proxy with: ANTHROPIC_BASE_URL="${serverUrl}" claude`)
console.log("Exit using Ctrl+C")
})
}
run()