/*
 * route-claudecode
 * Version: (unspecified)
 * Advanced routing and transformation system for Claude Code outputs to multiple AI providers
 * 789 lines • 32.1 kB
 * JavaScript
 */
;
/**
* Enhanced OpenAI Format Transformer
* Handles conversion between OpenAI API format and unified format
* Includes tool call processing and response handling
*
* 遵循零硬编码、零Fallback、零沉默失败原则
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAITransformer = void 0;
exports.createOpenAITransformer = createOpenAITransformer;
const logger_1 = require("@/utils/logger");
class OpenAITransformer {
name = 'openai';
/**
* 🎯 Convert BaseRequest (Anthropic format) to OpenAI API format
* 这是Provider调用的主要入口点
*/
transformBaseRequestToOpenAI(request) {
if (!request) {
throw new Error('BaseRequest is null or undefined - violates zero fallback principle');
}
// 🔧 检查Provider配置中的forceNonStreaming设置
const providerConfig = request.metadata?.providerConfig;
const forceNonStreaming = providerConfig?.forceNonStreaming;
if (forceNonStreaming) {
logger_1.logger.info('🔧 [OPENAI-TRANSFORMER] forceNonStreaming enabled, setting stream: false', {
requestId: request.metadata?.requestId,
providerId: request.metadata?.providerId
});
}
const openaiRequest = {
model: request.model,
messages: this.convertAnthropicMessagesToOpenAI(request.messages || []),
max_tokens: request.max_tokens || 131072,
temperature: request.temperature,
// 🔧 如果forceNonStreaming启用,强制设置为非流式
stream: forceNonStreaming ? false : (request.stream || false)
};
// 处理系统消息
if (request.system) {
openaiRequest.messages.unshift({
role: 'system',
content: request.system
});
}
// 🔧 处理工具定义转换
if (request.tools && Array.isArray(request.tools) && request.tools.length > 0) {
openaiRequest.tools = this.convertAnthropicToolsToOpenAI(request.tools);
// 处理工具选择 (如果存在)
const requestWithToolChoice = request;
if (requestWithToolChoice.tool_choice) {
openaiRequest.tool_choice = this.convertToolChoice(requestWithToolChoice.tool_choice);
}
}
console.log('🔄 [OPENAI-TRANSFORMER] BaseRequest -> OpenAI:', {
hasTools: !!(openaiRequest.tools && openaiRequest.tools.length > 0),
toolCount: openaiRequest.tools?.length || 0,
messageCount: openaiRequest.messages.length,
model: openaiRequest.model
});
// 🚨 DEBUG: 输出完整的转换后请求数据
console.log('🔍 [OPENAI-TRANSFORMER-DEBUG] Full converted request:', JSON.stringify({
model: openaiRequest.model,
messages: openaiRequest.messages,
tools: openaiRequest.tools,
max_tokens: openaiRequest.max_tokens,
temperature: openaiRequest.temperature,
stream: openaiRequest.stream
}, null, 2));
return openaiRequest;
}
/**
* 🎯 Convert OpenAI API response to BaseResponse (Anthropic format)
* 这是Provider调用的主要出口点
*/
transformOpenAIResponseToBase(response, originalRequest) {
if (!response) {
throw new Error('OpenAI response is null or undefined - silent failure detected');
}
// 🔍 [DEBUG] 详细记录transformer接收到的数据
const requestId = originalRequest.metadata?.requestId || 'unknown';
console.log('🔍 [TRANSFORMER-DEBUG] transformOpenAIResponseToBase received:', {
requestId,
hasResponse: !!response,
responseType: typeof response,
responseKeys: response ? Object.keys(response) : null,
hasChoices: !!response?.choices,
choicesType: typeof response?.choices,
choicesLength: response?.choices?.length || 0,
responseObject: response?.object,
responseId: response?.id
});
if (!response?.choices) {
console.log('🚨 [TRANSFORMER-DEBUG] Missing choices field, full response:', {
requestId,
fullResponse: JSON.stringify(response, null, 2)
});
}
const choice = response.choices?.[0];
if (!choice) {
// 🔧 [STREAMING-FIX] 对于流式响应,某些chunk可能没有choices字段
// 这是正常情况,不应该抛出错误,应该返回空响应或跳过
if (response.object === 'chat.completion.chunk') {
// 返回一个基本的空chunk响应
console.log('🔧 [TRANSFORMER-DEBUG] Returning empty chunk response for streaming', { requestId });
return {
id: response.id || `msg_${Date.now()}`,
content: [],
model: originalRequest.metadata?.originalModel || response.model || 'unknown',
role: 'assistant',
stop_reason: null,
stop_sequence: null,
usage: { input_tokens: 0, output_tokens: 0 },
type: 'message'
};
}
console.log('🚨 [TRANSFORMER-DEBUG] About to throw missing choices error', {
requestId,
responseObject: response?.object,
isStreamingChunk: response?.object === 'chat.completion.chunk'
});
throw new Error('OpenAI response missing choices - invalid response format');
}
// 🔧 处理工具调用转换
const content = this.convertOpenAIMessageToAnthropicContent(choice.message);
// 🎯 修复finish_reason映射
const finishReason = this.mapOpenAIFinishReasonToAnthropic(choice.finish_reason, this.hasToolCalls(choice.message));
const baseResponse = {
id: response.id || `msg_${Date.now()}`,
content,
model: originalRequest.metadata?.originalModel || response.model,
role: 'assistant',
stop_reason: finishReason,
stop_sequence: null,
usage: {
input_tokens: response.usage?.prompt_tokens || 0,
output_tokens: response.usage?.completion_tokens || 0
}
};
console.log('🔄 [OPENAI-TRANSFORMER] OpenAI -> BaseResponse:', {
hasTools: content.some((c) => c.type === 'tool_use'),
toolCount: content.filter((c) => c.type === 'tool_use').length,
stopReason: finishReason,
contentBlocks: content.length
});
return baseResponse;
}
/**
 * Transform an OpenAI streaming response into Anthropic SSE events.
 * Emits message_start once, a text content block at index 0, one tool_use
 * content block per streamed tool call at index+1, then closes all blocks and
 * emits message_delta/message_stop when a finish_reason arrives.
 *
 * @param {AsyncIterable} stream - Async iterable of OpenAI streaming chunks.
 * @param {object} originalRequest - Originating BaseRequest (metadata source).
 * @param {string} requestId - Correlation id used in error logging.
 * @yields {{event: string, data: object}} Anthropic-style SSE events.
 * @throws Re-throws any error raised while consuming the stream.
 */
async *transformOpenAIStreamToAnthropicSSE(stream, originalRequest, requestId) {
let messageId = `msg_${Date.now()}`;
let hasStarted = false;
let toolCallBuffer = new Map();
let textContent = '';
try {
for await (const chunk of stream) {
const choice = chunk.choices?.[0];
if (!choice)
continue;
// Emit message_start exactly once, on the first chunk that has a choice.
if (!hasStarted) {
yield {
event: 'message_start',
data: {
type: 'message_start',
message: {
id: messageId,
type: 'message',
role: 'assistant',
content: [],
model: originalRequest.metadata?.originalModel || chunk.model,
stop_reason: null,
stop_sequence: null,
usage: { input_tokens: 0, output_tokens: 0 }
}
}
};
hasStarted = true;
}
// Text deltas: open the index-0 text block on the first delta, then stream.
if (choice.delta?.content) {
if (textContent === '') {
// First text delta -> emit content_block_start for index 0.
yield {
event: 'content_block_start',
data: {
type: 'content_block_start',
index: 0,
content_block: {
type: 'text',
text: ''
}
}
};
}
textContent += choice.delta.content;
yield {
event: 'content_block_delta',
data: {
type: 'content_block_delta',
index: 0,
delta: {
type: 'text_delta',
text: choice.delta.content
}
}
};
}
// Tool-call deltas: one content block per tool call, shifted by +1
// because the text block owns index 0.
if (choice.delta?.tool_calls) {
for (const toolCall of choice.delta.tool_calls) {
const index = toolCall.index || 0;
if (!toolCallBuffer.has(index)) {
// A new tool call begins: buffer its id/name and open its block.
const toolId = toolCall.id || `tool_${Date.now()}_${index}`;
const toolName = toolCall.function?.name || 'unknown_tool';
toolCallBuffer.set(index, {
id: toolId,
name: toolName,
input: ''
});
yield {
event: 'content_block_start',
data: {
type: 'content_block_start',
index: index + 1, // text occupies index 0
content_block: {
type: 'tool_use',
id: toolId,
name: toolName,
input: {}
}
}
};
}
// Incremental tool arguments: accumulate locally and forward as
// an input_json_delta event.
if (toolCall.function?.arguments) {
const bufferedTool = toolCallBuffer.get(index);
bufferedTool.input += toolCall.function.arguments;
yield {
event: 'content_block_delta',
data: {
type: 'content_block_delta',
index: index + 1,
delta: {
type: 'input_json_delta',
partial_json: toolCall.function.arguments
}
}
};
}
}
}
// Completion: close every open content block, emit message_delta and
// message_stop, then stop consuming the stream.
if (choice.finish_reason) {
// Close the text block only if any text was streamed.
if (textContent) {
yield {
event: 'content_block_stop',
data: {
type: 'content_block_stop',
index: 0
}
};
}
for (const [index] of toolCallBuffer) {
yield {
event: 'content_block_stop',
data: {
type: 'content_block_stop',
index: index + 1
}
};
}
// Map finish_reason, forcing tool_use when tool calls were streamed.
const anthropicFinishReason = this.mapOpenAIFinishReasonToAnthropic(choice.finish_reason, toolCallBuffer.size > 0);
yield {
event: 'message_delta',
data: {
type: 'message_delta',
delta: {
stop_reason: anthropicFinishReason,
stop_sequence: null
},
// NOTE(review): output_tokens is hardcoded to 1 — real usage is not
// tracked across the stream; confirm downstream does not rely on it.
usage: {
output_tokens: 1
}
}
};
yield {
event: 'message_stop',
data: {
type: 'message_stop'
}
};
break;
}
}
}
catch (error) {
console.error('🚨 [OPENAI-TRANSFORMER] Stream processing failed:', {
error: error instanceof Error ? error.message : String(error),
requestId
});
throw error;
}
}
/**
* Convert OpenAI request to unified format
*/
transformRequestToUnified(request) {
const unified = {
messages: this.convertMessagesToUnified(request.messages || []),
model: request.model,
max_tokens: request.max_tokens,
temperature: request.temperature,
stream: request.stream || false
};
// Handle tools
if (request.tools && Array.isArray(request.tools)) {
unified.tools = request.tools.map((tool) => ({
type: 'function',
function: {
name: tool.function.name,
description: tool.function.description || '',
parameters: tool.function.parameters
}
}));
}
// Handle tool choice
if (request.tool_choice) {
if (typeof request.tool_choice === 'string') {
unified.tool_choice = request.tool_choice;
}
else if (request.tool_choice.function?.name) {
unified.tool_choice = request.tool_choice.function.name;
}
}
return unified;
}
/**
* Convert unified request to OpenAI format
*/
transformRequestFromUnified(request) {
const openaiRequest = {
model: request.model,
messages: this.convertMessagesFromUnified(request.messages),
max_tokens: request.max_tokens || 131072,
temperature: request.temperature,
stream: request.stream || false
};
// Add system message if present
if (request.system) {
openaiRequest.messages.unshift({
role: 'system',
content: request.system
});
}
// Handle tools
if (request.tools && request.tools.length > 0) {
openaiRequest.tools = request.tools.map(tool => ({
type: 'function',
function: {
name: tool.function.name,
description: tool.function.description,
parameters: tool.function.parameters
}
}));
// Handle tool choice
if (request.tool_choice) {
if (request.tool_choice === 'auto' || request.tool_choice === 'none') {
openaiRequest.tool_choice = request.tool_choice;
}
else {
openaiRequest.tool_choice = {
type: 'function',
function: { name: request.tool_choice }
};
}
}
}
return openaiRequest;
}
/**
* Convert OpenAI response to unified format
*/
transformResponseToUnified(response) {
// finish_reason修正现在在预处理器中处理
const choice = response.choices?.[0];
if (!choice) {
throw new Error('No choices in OpenAI response');
}
return {
id: response.id,
object: 'chat.completion',
created: response.created || Math.floor(Date.now() / 1000),
model: response.model,
choices: [{
index: 0,
message: {
role: choice.message.role,
content: choice.message.content,
tool_calls: choice.message.tool_calls
},
finish_reason: choice.finish_reason
}],
usage: {
prompt_tokens: response.usage?.prompt_tokens || 0,
completion_tokens: response.usage?.completion_tokens || 0,
total_tokens: response.usage?.total_tokens || 0
}
};
}
/**
 * Convert a unified response to OpenAI format.
 * The unified format is already OpenAI-shaped, so this is an identity
 * pass-through (the same object is returned, not a copy).
 *
 * @param {object} response - Unified response.
 * @returns {object} The same response object, unchanged.
 */
transformResponseFromUnified(response) {
return response; // Already in OpenAI format
}
/**
* Convert OpenAI streaming chunk to unified format
* 保留finish_reason,传递给下游处理
*/
/**
* Convert OpenAI streaming chunk to unified format
* 🔧 修复finish_reason映射,确保工具调用正确处理
*/
transformStreamChunk(chunk) {
if (!chunk.choices?.[0]) {
return null;
}
const choice = chunk.choices[0];
// 🔧 修正finish_reason:如果有工具调用但finish_reason是stop,修正为tool_calls
if (choice.finish_reason === 'stop' && choice.delta?.tool_calls?.length > 0) {
choice.finish_reason = 'tool_calls';
}
return {
id: chunk.id,
object: 'chat.completion.chunk',
created: chunk.created || Math.floor(Date.now() / 1000),
model: chunk.model,
choices: [{
index: 0,
delta: choice.delta,
finish_reason: choice.finish_reason // 传递修正后的finish_reason
}]
};
}
/**
 * Convert OpenAI messages into the unified format.
 * Two passes: first index every role:'tool' result by its tool_call_id, then
 * emit non-tool messages in order, re-attaching each tool result directly
 * after the assistant message that issued the corresponding tool call.
 *
 * @param {Array} messages - OpenAI-format messages.
 * @returns {Array} Unified messages with tool results re-ordered.
 */
convertMessagesToUnified(messages) {
const unifiedMessages = [];
const toolResultMap = new Map();
// First pass: collect tool results, keyed by tool_call_id (a call may have several).
messages.forEach(msg => {
if (msg.role === 'tool' && msg.tool_call_id) {
if (!toolResultMap.has(msg.tool_call_id)) {
toolResultMap.set(msg.tool_call_id, []);
}
toolResultMap.get(msg.tool_call_id).push({
role: 'tool',
content: msg.content,
tool_call_id: msg.tool_call_id
});
}
});
// Second pass: process messages and merge tool results back in order.
for (let i = 0; i < messages.length; i++) {
const msg = messages[i];
if (msg.role === 'tool') {
continue; // Skip, will be handled in assistant messages
}
const unifiedMsg = {
role: msg.role,
content: msg.content || ''
};
// Handle tool calls in assistant messages.
if (msg.role === 'assistant' && msg.tool_calls && msg.tool_calls.length > 0) {
unifiedMsg.tool_calls = msg.tool_calls.map((toolCall) => ({
id: toolCall.id,
type: 'function',
function: {
name: toolCall.function.name,
arguments: toolCall.function.arguments
}
}));
// If content is null but we have tool calls, set empty content.
// NOTE(review): this branch appears unreachable — content was already
// coerced to '' by the `msg.content || ''` default above, so it can
// never be null at this point.
if (unifiedMsg.content === null && unifiedMsg.tool_calls && unifiedMsg.tool_calls.length > 0) {
unifiedMsg.content = '';
}
}
unifiedMessages.push(unifiedMsg);
// Add tool results immediately after the assistant message that called them.
if (msg.role === 'assistant' && msg.tool_calls) {
msg.tool_calls.forEach((toolCall) => {
const toolResults = toolResultMap.get(toolCall.id);
if (toolResults) {
toolResults.forEach((result) => {
unifiedMessages.push({
role: 'tool',
content: result.content,
tool_call_id: result.tool_call_id
});
});
}
else {
// Add placeholder tool result if missing.
// NOTE(review): this fabricates a successful tool result for calls that
// never received one — a silent fallback that conflicts with the file's
// stated zero-fallback principle; confirm this is intentional.
unifiedMessages.push({
role: 'tool',
content: JSON.stringify({
success: true,
message: 'Tool call executed successfully',
tool_call_id: toolCall.id
}),
tool_call_id: toolCall.id
});
}
});
}
}
return unifiedMessages;
}
/**
* Convert messages from unified format
*/
convertMessagesFromUnified(messages) {
return messages.map(msg => {
const openaiMsg = {
role: msg.role,
content: msg.content
};
// Handle tool calls
if (msg.tool_calls && msg.tool_calls.length > 0) {
openaiMsg.tool_calls = msg.tool_calls;
// OpenAI expects null content when there are tool calls
if (!msg.content || msg.content === '') {
openaiMsg.content = null;
}
}
// Handle tool results
if (msg.role === 'tool' && msg.tool_call_id) {
openaiMsg.tool_call_id = msg.tool_call_id;
}
return openaiMsg;
});
}
/**
 * Convert Anthropic messages into OpenAI wire messages (1:1 mapping).
 * String content passes through; content-block arrays are flattened into a
 * text string plus tool_calls via convertAnthropicContentToOpenAI.
 *
 * @param {Array} messages - Anthropic-format messages.
 * @returns {Array} OpenAI-format messages.
 * @throws {Error} When messages is not an array or an entry is not an object.
 */
convertAnthropicMessagesToOpenAI(messages) {
if (!Array.isArray(messages)) {
throw new Error('Messages must be an array - violates zero fallback principle');
}
return messages.map(msg => {
if (!msg || typeof msg !== 'object') {
throw new Error('Invalid message object - violates zero fallback principle');
}
const openaiMsg = {
role: msg.role,
content: null
};
// Convert the message content (string or block array).
if (msg.content) {
if (typeof msg.content === 'string') {
openaiMsg.content = msg.content;
}
else if (Array.isArray(msg.content)) {
// Complex content blocks: flatten text and lift tool_use blocks into tool_calls.
// NOTE(review): block types other than text/tool_use (e.g. tool_result)
// are dropped by convertAnthropicContentToOpenAI — confirm acceptable.
const { content, toolCalls } = this.convertAnthropicContentToOpenAI(msg.content);
openaiMsg.content = content;
if (toolCalls.length > 0) {
openaiMsg.tool_calls = toolCalls;
}
}
}
return openaiMsg;
});
}
/**
 * Flatten Anthropic content blocks into OpenAI message parts.
 * 'text' blocks are concatenated; 'tool_use' blocks become OpenAI tool_calls
 * with JSON-stringified arguments.
 *
 * NOTE(review): any other block type (e.g. 'tool_result', 'image') is silently
 * skipped here — confirm whether that loss is acceptable given the file's
 * stated zero-silent-failure principle.
 *
 * @param {Array} content - Anthropic content blocks.
 * @returns {{content: (string|null), toolCalls: Array}} Flattened text (null
 *   when only tool calls are present, '' when nothing usable) and tool calls.
 * @throws {Error} When a tool_use block lacks an id or name.
 */
convertAnthropicContentToOpenAI(content) {
let textContent = '';
const toolCalls = [];
for (const block of content) {
// Skip malformed (non-object) entries.
if (!block || typeof block !== 'object') {
continue;
}
if (block.type === 'text') {
textContent += block.text || '';
}
else if (block.type === 'tool_use') {
if (!block.id || !block.name) {
throw new Error('Tool use block missing id or name - violates zero fallback principle');
}
toolCalls.push({
id: block.id,
type: 'function',
function: {
name: block.name,
arguments: JSON.stringify(block.input || {})
}
});
}
}
return {
// null content signals "tool calls only" to OpenAI; otherwise the text (possibly '').
content: textContent || (toolCalls.length > 0 ? null : ''),
toolCalls
};
}
/**
* 🔧 Convert Anthropic tools to OpenAI format
*/
convertAnthropicToolsToOpenAI(tools) {
if (!Array.isArray(tools)) {
throw new Error('Tools must be an array - violates zero fallback principle');
}
return tools.map(tool => {
if (!tool || typeof tool !== 'object') {
throw new Error('Invalid tool object - violates zero fallback principle');
}
// 🔧 支持两种格式:Anthropic格式和已预处理的OpenAI格式
const isAnthropicFormat = tool.name && tool.input_schema && !tool.function;
const isOpenAIFormat = tool.function && tool.function.name && !tool.name;
if (isAnthropicFormat) {
// ✅ Anthropic格式:{ name, description, input_schema }
return {
type: 'function',
function: {
name: tool.name,
description: tool.description || '',
parameters: tool.input_schema || {}
}
};
}
else if (isOpenAIFormat) {
// ✅ 已经是OpenAI格式:{ type: 'function', function: { name, description, parameters } }
return {
type: tool.type || 'function',
function: {
name: tool.function.name,
description: tool.function.description || '',
parameters: tool.function.parameters || {}
}
};
}
else {
// ❌ 未知格式或格式不完整
const toolName = tool.name || tool.function?.name;
if (!toolName) {
throw new Error(`Tool missing name - violates zero fallback principle. Tool: ${JSON.stringify(tool)}`);
}
// 尽力处理混合格式
return {
type: 'function',
function: {
name: toolName,
description: tool.description || tool.function?.description || '',
parameters: tool.input_schema || tool.function?.parameters || {}
}
};
}
});
}
/**
* 🔧 Convert tool choice
*/
convertToolChoice(toolChoice) {
if (!toolChoice) {
return undefined;
}
if (typeof toolChoice === 'string') {
if (toolChoice === 'auto' || toolChoice === 'none') {
return toolChoice;
}
// 具体工具名
return {
type: 'function',
function: { name: toolChoice }
};
}
return toolChoice;
}
/**
* 🔧 Convert OpenAI message to Anthropic content blocks
*/
convertOpenAIMessageToAnthropicContent(message) {
const content = [];
// 处理文本内容
if (message.content && typeof message.content === 'string') {
content.push({
type: 'text',
text: message.content
});
}
// 处理工具调用
if (message.tool_calls && Array.isArray(message.tool_calls)) {
for (const toolCall of message.tool_calls) {
if (!toolCall.id || !toolCall.function?.name) {
console.warn('🚨 [OPENAI-TRANSFORMER] Invalid tool call, skipping:', toolCall);
continue;
}
let input = {};
if (toolCall.function.arguments) {
try {
input = JSON.parse(toolCall.function.arguments);
}
catch (error) {
console.warn('🚨 [OPENAI-TRANSFORMER] Failed to parse tool arguments:', {
arguments: toolCall.function.arguments,
error: error instanceof Error ? error.message : String(error)
});
input = {}; // 不使用fallback,使用空对象
}
}
content.push({
type: 'tool_use',
id: toolCall.id,
name: toolCall.function.name,
input
});
}
}
// 如果没有内容,添加空文本块
if (content.length === 0) {
content.push({
type: 'text',
text: ''
});
}
return content;
}
/**
* 🎯 Map OpenAI finish_reason to Anthropic stop_reason
* 修复跨节点耦合问题 - finish reason映射应在Transformer内部处理
*/
mapOpenAIFinishReasonToAnthropic(finishReason, hasToolCalls) {
// 🔧 消除跨节点耦合 - 直接在Transformer中实现映射逻辑
if (!finishReason) {
throw new Error('finish_reason is required - violates zero fallback principle');
}
// 🎯 标准OpenAI finish_reason到Anthropic stop_reason映射
const finishReasonMap = {
'stop': 'end_turn',
'length': 'max_tokens',
'tool_calls': 'tool_use',
'content_filter': 'stop_sequence',
'function_call': 'tool_use' // 兼容旧版OpenAI API
};
const mappedReason = finishReasonMap[finishReason];
if (!mappedReason) {
logger_1.logger.error('Unknown OpenAI finish_reason', {
finishReason,
hasToolCalls,
supportedReasons: Object.keys(finishReasonMap)
});
throw new Error(`Unknown OpenAI finish_reason: ${finishReason} - violates zero fallback principle`);
}
// 🔧 Critical Fix: 如果有工具调用但映射不是tool_use,强制返回tool_use
if (hasToolCalls && mappedReason !== 'tool_use') {
logger_1.logger.info('Correcting finish_reason for tool calls', {
originalFinishReason: finishReason,
originalMappedReason: mappedReason,
correctedReason: 'tool_use',
hasToolCalls
});
return 'tool_use';
}
return mappedReason;
}
/**
* 🔧 Check if message has tool calls
*/
hasToolCalls(message) {
return !!(message?.tool_calls && Array.isArray(message.tool_calls) && message.tool_calls.length > 0);
}
}
exports.OpenAITransformer = OpenAITransformer;
/**
 * Factory helper: create a fresh OpenAITransformer instance.
 *
 * @returns {OpenAITransformer} A new transformer.
 */
function createOpenAITransformer() {
return new OpenAITransformer();
}
//# sourceMappingURL=openai.js.map