/*
 * prompt-version-manager — compiled output (openai.js)
 * Centralized prompt management system for Human Behavior AI agents
 * (original listing: 141 lines • 5.81 kB • JavaScript)
 */
;
/**
* OpenAI provider implementation for TypeScript.
*/
// tsc interop helper: wrap a CommonJS export so it can be consumed as an
// ES-module default import. Reuses a pre-existing helper on `this` if one
// was already installed by another compiled file.
var __importDefault = (this && this.__importDefault) || function (mod) {
    if (mod && mod.__esModule) {
        // Already a transpiled ES module — pass through unchanged.
        return mod;
    }
    // Plain CommonJS export — expose it under a synthetic `default` key.
    return { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIProvider = void 0;
const openai_1 = __importDefault(require("openai"));
const exceptions_1 = require("../core/exceptions");
const base_1 = require("./base");
/**
 * OpenAI chat-completion provider.
 *
 * Wraps the official `openai` SDK client, normalizes responses into the
 * provider-agnostic shape used by the rest of the system, and estimates
 * request cost from a static per-model price table.
 */
class OpenAIProvider extends base_1.BaseProvider {
    client;
    // OpenAI pricing per 1K tokens, keyed by model id (USD).
    // Entries marked "estimated" in the comments below are not official prices.
    static PRICING = {
        // GPT-3.5 series
        'gpt-3.5-turbo': { input: 0.001, output: 0.002 },
        'gpt-3.5-turbo-1106': { input: 0.001, output: 0.002 },
        'gpt-3.5-turbo-0125': { input: 0.0005, output: 0.0015 },
        // GPT-4 legacy series
        'gpt-4': { input: 0.03, output: 0.06 },
        'gpt-4-turbo': { input: 0.01, output: 0.03 },
        'gpt-4-turbo-preview': { input: 0.01, output: 0.03 },
        'gpt-4-1106-preview': { input: 0.01, output: 0.03 },
        'gpt-4-0125-preview': { input: 0.01, output: 0.03 },
        // GPT-4o series
        'gpt-4o': { input: 0.005, output: 0.015 },
        'gpt-4o-mini': { input: 0.00015, output: 0.0006 },
        // GPT-4.1 series (estimated pricing)
        'gpt-4.1': { input: 0.006, output: 0.018 },
        'gpt-4.1-mini': { input: 0.0002, output: 0.0008 },
        'gpt-4.1-nano': { input: 0.0001, output: 0.0004 },
        // o1 reasoning models (higher pricing due to reasoning compute)
        'o1': { input: 0.015, output: 0.06 },
        'o1-pro': { input: 0.06, output: 0.24 },
        // o3 and o4 deep research models (estimated pricing)
        'o3-deep-research': { input: 0.1, output: 0.4 },
        'o4-mini-deep-research': { input: 0.02, output: 0.08 }
    };
    /**
     * @param {string} apiKey - OpenAI API key; required.
     * @param {object} [config] - Provider configuration forwarded to the base class.
     * @throws {APIKeyError} When no API key is available after base-class setup.
     */
    constructor(apiKey, config = {}) {
        super(apiKey, config);
        if (!this.apiKey) {
            throw new exceptions_1.APIKeyError('OpenAI API key is required');
        }
        this.client = new openai_1.default({ apiKey: this.apiKey });
    }
    /** @returns {string} Canonical provider identifier. */
    get name() {
        return 'openai';
    }
    /** @returns {string[]} Model ids this provider can price (keys of PRICING). */
    get supportedModels() {
        return Object.keys(OpenAIProvider.PRICING);
    }
    /**
     * Execute a chat completion against the OpenAI API.
     *
     * @param {string} model - Model id; must appear in `supportedModels`.
     * @param {Array<{role: string, content: string}>} messages - Conversation history.
     * @param {object} [options] - `temperature`, `maxTokens`, `responseFormat`,
     *   plus any extra keys passed through verbatim to the SDK request.
     * @returns {Promise<object>} Normalized response: content, tokens, model,
     *   provider, latency (ms), cost (USD), timestamp, finishReason, rawResponse.
     * @throws {ProviderError} On unsupported model, API errors, or malformed responses.
     * @throws {RateLimitError} When OpenAI reports a rate-limit error.
     */
    async chatCompletion(model, messages, options = {}) {
        if (!this.supportsModel(model)) {
            const available = this.supportedModels.join(', ');
            throw new exceptions_1.ProviderError(`Model ${model} not supported by OpenAI. Available: ${available}`);
        }
        try {
            // Convert PVM messages to OpenAI format
            const openaiMessages = messages.map(msg => ({
                role: msg.role,
                content: msg.content
            }));
            // Build request parameters
            const requestParams = {
                model,
                messages: openaiMessages
            };
            if (options.temperature !== undefined) {
                requestParams.temperature = options.temperature;
            }
            if (options.maxTokens !== undefined) {
                requestParams.max_tokens = options.maxTokens;
            }
            // Handle response format
            if (options.responseFormat?.type === 'json_object') {
                requestParams.response_format = { type: 'json_object' };
            }
            // Pass through any additional options verbatim (keys already
            // handled above are skipped so they are not sent twice).
            Object.keys(options).forEach(key => {
                if (!['temperature', 'maxTokens', 'responseFormat'].includes(key)) {
                    requestParams[key] = options[key];
                }
            });
            // Make API call, timing it for the latency field
            const startTime = new Date();
            const response = await this.client.chat.completions.create(requestParams);
            const endTime = new Date();
            const latency = endTime.getTime() - startTime.getTime();
            // Extract response data; guard against an empty choices array so we
            // raise a descriptive ProviderError instead of a TypeError.
            const choice = response.choices?.[0];
            if (!choice) {
                throw new exceptions_1.ProviderError('No choices returned from OpenAI');
            }
            const content = choice.message.content || '';
            const finishReason = choice.finish_reason;
            // Usage is required for token accounting and cost estimation
            const usage = response.usage;
            if (!usage) {
                throw new exceptions_1.ProviderError('No usage information returned from OpenAI');
            }
            const tokenUsage = {
                input: usage.prompt_tokens,
                output: usage.completion_tokens,
                total: usage.total_tokens
            };
            // Calculate cost
            const cost = this.calculateCost(model, tokenUsage);
            return {
                content,
                tokens: tokenUsage,
                model,
                provider: this.name,
                latency,
                cost,
                timestamp: startTime,
                finishReason,
                rawResponse: response
            };
        }
        catch (error) {
            // Re-throw errors we raised ourselves (missing usage / empty choices)
            // instead of double-wrapping them as "Unexpected error" below.
            if (error instanceof exceptions_1.ProviderError) {
                throw error;
            }
            if (error?.type === 'rate_limit_exceeded') {
                throw new exceptions_1.RateLimitError(`OpenAI rate limit exceeded: ${error.message}`);
            }
            if (error?.type === 'invalid_request_error' || error?.status >= 400) {
                throw new exceptions_1.ProviderError(`OpenAI API error: ${error.message}`);
            }
            throw new exceptions_1.ProviderError(`Unexpected error calling OpenAI: ${error.message}`);
        }
    }
    /**
     * Estimate request cost in USD from the static price table.
     *
     * @param {string} model - Model id; unknown models cost 0.0.
     * @param {{input: number, output: number}} tokens - Token counts.
     * @returns {number} Estimated cost in USD (prices are per 1K tokens).
     */
    calculateCost(model, tokens) {
        const pricing = OpenAIProvider.PRICING[model];
        if (!pricing) {
            return 0.0;
        }
        const inputCost = (tokens.input / 1000) * pricing.input;
        const outputCost = (tokens.output / 1000) * pricing.output;
        return inputCost + outputCost;
    }
}
exports.OpenAIProvider = OpenAIProvider;
//# sourceMappingURL=openai.js.map