ai-platform-converter

Lossless API parameter conversion between multiple AI platforms (OpenAI, Anthropic, Gemini, DeepSeek, Wenwen, Vertex AI, Huawei, BigModel)

"use strict"; /** * Convert OpenAI request to BigModel (GLM) format */ Object.defineProperty(exports, "__esModule", { value: true }); exports.convertOpenAIRequestToBigModel = convertOpenAIRequestToBigModel; exports.convertBigModelRequestToOpenAI = convertBigModelRequestToOpenAI; const openai_compatible_1 = require("../universal/openai-compatible"); const common_1 = require("../../types/common"); /** * Convert OpenAI request to BigModel format * BigModel uses OpenAI-compatible API with GLM specific features */ function convertOpenAIRequestToBigModel(request, options) { // Use the universal converter since BigModel is OpenAI-compatible const convertedRequest = (0, openai_compatible_1.convertOpenAIRequestToCompatible)(request, common_1.Platform.BigModel, options); // Add BigModel-specific extensions if (convertedRequest._extensions) { convertedRequest._extensions.bigmodel = { ...convertedRequest._extensions.bigmodel, // BigModel-specific model handling model: normalizeBigModelModel(request.model), // Add GLM specific configurations ...(request._extensions?.bigmodel || {}) }; } return convertedRequest; } /** * Normalize model name for BigModel API */ function normalizeBigModelModel(model) { // BigModel model name mapping const modelMapping = { 'gpt-3.5-turbo': 'glm-4', 'gpt-4': 'glm-4', 'gpt-4-turbo': 'glm-4', 'text-davinci-003': 'glm-3-turbo', 'code-davinci-002': 'glm-4' }; // If it's already a BigModel model, return as-is if (model.startsWith('glm-')) { return model; } // Return mapped model or default return modelMapping[model] || 'glm-4'; } /** * Convert BigModel request to OpenAI format */ function convertBigModelRequestToOpenAI(request, options) { // Use the universal converter for reverse conversion const convertedRequest = (0, openai_compatible_1.convertOpenAIRequestToCompatible)(request, common_1.Platform.OpenAI, options); return convertedRequest; }