route-claudecode
Advanced routing and transformation system for Claude Code outputs to multiple AI providers
JavaScript
"use strict";
/**
* Pipeline Coordinator - Clean Architecture
* Unified pipeline coordinator that enforces a clean separation of layers
* Project owner: Jason Zhang
*
* Architecture Principles:
* Input → Preprocessing → Transformer → Provider → Third-party API
*
* Responsibilities:
* 1. Coordinate the data flow between layers
* 2. Enforce one-way dependencies
* 3. Centralize transformation and patching logic
*/
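/*
 * Stage-to-method mapping (derived from the class below):
 *   Input       -> processInput(request, provider, model)
 *   Transformer -> processTransform(request, targetFormat)
 *   Provider    -> prepareForProvider(transformedData, metadata, originalModel)
 *   Response    -> processResponse(providerResponse, originalFormat, originalModel)
 */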
Object.defineProperty(exports, "__esModule", { value: true });
exports.PipelineCoordinator = void 0;
exports.getPipelineCoordinator = getPipelineCoordinator;
exports.createPipelineCoordinator = createPipelineCoordinator;
const logger_1 = require("../utils/logger");
const gemini_1 = require("../transformers/gemini");
const unified_patch_preprocessor_1 = require("./unified-patch-preprocessor");
/**
* Pipeline coordinator
* Manages the Input → Preprocessing → Transformer → Provider data flow
*/
class PipelineCoordinator {
geminiTransformer;
patchPreprocessor;
constructor(port) {
this.geminiTransformer = new gemini_1.GeminiTransformer();
this.patchPreprocessor = new unified_patch_preprocessor_1.UnifiedPatchPreprocessor(port);
}
/**
* Input stage - Input Layer
* Unified entry point for request preprocessing
*/
async processInput(request, provider, model) {
const context = {
requestId: request.metadata?.requestId || this.generateRequestId(),
provider,
model,
stage: 'input',
timestamp: Date.now()
};
logger_1.logger.debug('PipelineCoordinator: Processing input stage', {
provider,
model,
hasTools: !!request.tools
}, context.requestId, 'pipeline-coordinator');
// Run the input through the unified preprocessor
const processedInput = await this.patchPreprocessor.preprocessInput(request, provider, model, context.requestId);
return processedInput;
}
/**
* Transform stage - Transformer Layer
* Format conversion only; no provider-specific logic
*/
async processTransform(request, targetFormat) {
const context = {
requestId: request.metadata?.requestId || this.generateRequestId(),
provider: request.metadata?.provider || 'unknown',
model: request.model,
stage: 'transform',
timestamp: Date.now()
};
logger_1.logger.debug('PipelineCoordinator: Processing transform stage', {
targetFormat,
model: request.model
}, context.requestId, 'pipeline-coordinator');
switch (targetFormat) {
case 'gemini':
const geminiResult = this.geminiTransformer.transformAnthropicToGemini(request);
return {
transformedData: geminiResult.geminiRequest,
metadata: {
...geminiResult.metadata,
geminiFormat: true,
geminiRequest: geminiResult.geminiRequest
}
};
case 'openai':
// TODO: Implement OpenAI transformer
throw new Error('OpenAI transformer not yet implemented in clean architecture');
case 'anthropic':
// Pass through - already in Anthropic format
return {
transformedData: request,
metadata: { anthropicFormat: true }
};
default:
throw new Error(`Unsupported target format: ${targetFormat}`);
}
}
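/*
 * Shape of the 'gemini' transform result returned above. The nested values are
 * illustrative placeholders; only the field names come from this method.
 *
 *   {
 *     transformedData: <geminiRequest>,     // converted Gemini request
 *     metadata: {
 *       ...<transformer metadata>,
 *       geminiFormat: true,
 *       geminiRequest: <geminiRequest>      // duplicated so later stages can reach it via metadata
 *     }
 *   }
 */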
/**
* Provider preparation stage - Provider Layer Input
* Packages the transformed data into a format the provider can consume directly
*/
async prepareForProvider(transformedData, metadata, originalModel) {
const context = {
requestId: metadata.requestId || this.generateRequestId(),
provider: metadata.provider || 'unknown',
model: originalModel,
stage: 'provider',
timestamp: Date.now()
};
logger_1.logger.debug('PipelineCoordinator: Preparing data for provider', {
hasMetadata: !!metadata,
originalModel
}, context.requestId, 'pipeline-coordinator');
// Build a request object the provider can use directly
const providerRequest = {
model: originalModel,
messages: [], // The provider does not need the original messages; they have already been transformed
metadata: {
...metadata,
requestId: context.requestId,
stage: 'provider-ready'
},
// Placeholder values; the actual payload is carried in metadata
max_tokens: 1000,
temperature: 0.7
};
return providerRequest;
}
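/*
 * Illustrative shape of the provider-ready request built above. The model name is
 * a hypothetical placeholder; max_tokens and temperature are the fixed placeholder
 * values set in this method, and the real payload travels in metadata.
 *
 *   {
 *     model: 'claude-sonnet-4',
 *     messages: [],
 *     metadata: { ...transformMetadata, requestId: 'req_pipeline_...', stage: 'provider-ready' },
 *     max_tokens: 1000,
 *     temperature: 0.7
 *   }
 */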
/**
* Response stage - Response Layer
* Converts the provider's raw response back into the unified format
*/
async processResponse(providerResponse, originalFormat, originalModel) {
const context = {
requestId: providerResponse.metadata?.requestId || this.generateRequestId(),
provider: providerResponse.metadata?.provider || 'unknown',
model: originalModel,
stage: 'response',
timestamp: Date.now()
};
logger_1.logger.debug('PipelineCoordinator: Processing provider response', {
originalFormat,
hasMetadata: !!providerResponse.metadata
}, context.requestId, 'pipeline-coordinator');
// Raw provider responses need to be converted back
if (providerResponse.metadata?.geminiResponse) {
const geminiResponse = providerResponse.metadata.geminiResponse;
// Convert back to the Anthropic format using the transformer
const convertedResponse = this.geminiTransformer.transformGeminiToAnthropic(geminiResponse, originalModel, context.requestId);
// Apply final fixes through the patch system
const finalResponse = await this.patchPreprocessor.preprocessResponse(convertedResponse, context.provider, originalModel, context.requestId);
return finalResponse;
}
// Other formats go straight through the patch pipeline
const processedResponse = await this.patchPreprocessor.preprocessResponse(providerResponse, context.provider, originalModel, context.requestId);
return processedResponse;
}
/**
* Generate a unique request ID
*/
generateRequestId() {
const timestamp = Date.now();
const random = Math.random().toString(36).slice(2, 11);
return `req_pipeline_${timestamp}_${random}`;
}
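// Example of a generated ID (illustrative): "req_pipeline_1736640000000_k3f9x2a1b",
// i.e. the millisecond timestamp followed by nine base-36 characters.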
/**
* Get performance statistics
*/
getStats() {
return {
patchPreprocessorStats: this.patchPreprocessor.getPerformanceMetrics(),
// patchManagerStats: removed - patch system no longer exists
};
}
}
exports.PipelineCoordinator = PipelineCoordinator;
/**
* Global PipelineCoordinator instance management
*/
const coordinatorInstances = new Map();
function getPipelineCoordinator(port) {
const key = port?.toString() || 'default';
if (!coordinatorInstances.has(key)) {
coordinatorInstances.set(key, new PipelineCoordinator(port));
}
return coordinatorInstances.get(key);
}
function createPipelineCoordinator(port) {
const key = port?.toString() || 'default';
const coordinator = new PipelineCoordinator(port);
coordinatorInstances.set(key, coordinator);
return coordinator;
}
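/*
 * Illustrative usage sketch (calls shown as if inside an async function).
 * anthropicRequest and providerResponse are caller-supplied objects, and the port
 * and model values are hypothetical; only the exported functions and coordinator
 * methods are taken from this module.
 *
 *   const { getPipelineCoordinator } = require('./pipeline-coordinator');
 *
 *   // getPipelineCoordinator reuses one cached instance per port;
 *   // createPipelineCoordinator always builds a fresh instance and replaces the cached one.
 *   const coordinator = getPipelineCoordinator(3456);
 *
 *   const input = await coordinator.processInput(anthropicRequest, 'gemini', 'gemini-2.5-pro');
 *   const { transformedData, metadata } = await coordinator.processTransform(input, 'gemini');
 *   const providerRequest = await coordinator.prepareForProvider(transformedData, metadata, anthropicRequest.model);
 *   // ...the provider layer sends providerRequest upstream, then:
 *   const finalResponse = await coordinator.processResponse(providerResponse, 'gemini', anthropicRequest.model);
 */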
//# sourceMappingURL=pipeline-coordinator.js.map