prompt-version-manager
Centralized prompt management system for Human Behavior AI agents
JavaScript
"use strict";
/**
* Unified model interface with automatic tracking and chaining for TypeScript
*/
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
var desc = Object.getOwnPropertyDescriptor(m, k);
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
desc = { enumerable: true, get: function() { return m[k]; } };
}
Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
var ownKeys = function(o) {
ownKeys = Object.getOwnPropertyNames || function (o) {
var ar = [];
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
return ar;
};
return ownKeys(o);
};
return function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
__setModuleDefault(result, mod);
return result;
};
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.model = exports.ModelResponse = void 0;
const fs = __importStar(require("fs"));
const path = __importStar(require("path"));
const crypto = __importStar(require("crypto"));
const registry_1 = require("../providers/registry");
const operations_1 = require("../versioning/operations");
const structured_output_1 = require("./structured-output");
const execution_tracker_1 = require("../storage/execution-tracker");
const prompts_1 = require("./prompts");
/**
* ModelResponse implementation that enables automatic chaining
*/
class ModelResponse {
content;
model;
promptHash;
executionId;
metadata;
rawResponse;
_isPvmResponse = true;
constructor(params) {
this.content = params.content;
this.model = params.model;
this.promptHash = params.promptHash;
this.executionId = params.executionId;
this.metadata = params.metadata;
this.rawResponse = params.rawResponse;
}
/**
* String representation returns the content
*/
toString() {
if (typeof this.content === 'object') {
return JSON.stringify(this.content, null, 2);
}
return String(this.content);
}
/**
* Convert response to prompt string for chaining
*/
toPrompt() {
return this.toString();
}
}
exports.ModelResponse = ModelResponse;
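// Example (hypothetical usage; the model names, tags, and import path are
// illustrative, not part of this module). Passing a ModelResponse as the next
// prompt is what complete() detects via _isPvmResponse to chain executions:
//
//   const { model } = require('prompt-version-manager');
//   const draft = await model.complete('gpt-4', 'Draft a welcome email.', 'draft-email');
//   const polished = await model.complete('claude-3-opus', draft, 'polish-email');
//   // polished.metadata.chainId is created on the chained call and
//   // polished links back to draft via its parent execution ID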
/**
* Unified model interface with automatic tracking
*/
class Model {
repoPath;
versioning;
executionTracker;
registry;
// private activeChains: Map<string, string> = new Map();
constructor() {
this.repoPath = this.findPvmDirectory();
if (this.repoPath) {
this.versioning = new operations_1.VersioningEngine(this.repoPath);
this.executionTracker = new execution_tracker_1.ExecutionTracker(this.repoPath);
}
else {
this.versioning = null;
this.executionTracker = null;
}
// Initialize provider registry
this.registry = new registry_1.ProviderRegistry();
}
/**
* Execute model with automatic tracking and optional structured output
*/
async complete(modelName, prompt, tag, jsonOutput, options = {}) {
// Detect chaining
let parentExecutionId = options?.parentExecutionId;
let chainId;
let promptStr;
if (this.isModelResponse(prompt)) {
// This is a chained call
parentExecutionId = prompt.executionId;
chainId = prompt.metadata.chainId;
promptStr = prompt.toPrompt();
}
else {
promptStr = prompt;
// Only use detected parent ID if this prompt came from prompts() function
// getCurrentPromptFile() is only set while a prompts()-loaded prompt is active
if (!parentExecutionId && (0, prompts_1.getCurrentPromptFile)()) {
const detectedParentId = (0, prompts_1.getParentExecutionId)();
if (detectedParentId) {
parentExecutionId = detectedParentId;
}
}
}
// Capture prompt file before clearing context
const promptFile = (0, prompts_1.getCurrentPromptFile)();
// Clear prompt context to prevent it from affecting subsequent calls
(0, prompts_1.clearPromptContext)();
// Detect provider from model name
const providerName = this.detectProvider(modelName);
// Get provider
const provider = this.registry.getProvider(providerName);
if (!provider) {
throw new Error(`Provider not found: ${providerName}`);
}
// Prepare structured output if needed
let structuredConfig = {};
if (jsonOutput) {
const handler = new structured_output_1.StructuredOutputHandler();
if (providerName === 'openai') {
structuredConfig = handler.prepareOpenAIStructured(jsonOutput);
}
else if (providerName === 'google') {
structuredConfig = handler.prepareGeminiStructured(jsonOutput);
}
else if (providerName === 'anthropic') {
structuredConfig = handler.prepareClaudeStructured(jsonOutput);
}
}
// Execute with tracking
const startTime = Date.now();
try {
// Create messages
const messages = [
{ role: 'user', content: promptStr }
];
// Add system prompt if provided
if (options.systemPrompt) {
messages.unshift({ role: 'system', content: options.systemPrompt });
}
// Handle structured output for different providers
let response;
if (providerName === 'openai' && jsonOutput && structuredConfig.useBeta) {
// Call the OpenAI SDK directly so the structured-output response_format is applied
const OpenAI = require('openai');
const client = new OpenAI({ apiKey: provider.apiKey });
// Strip PVM-internal and camelCase options the OpenAI API would reject
// (systemPrompt is already in messages; maxTokens is passed as max_tokens)
const { parentExecutionId: _, systemPrompt: _sp, maxTokens: _mt, ...cleanOptions } = options;
response = await client.chat.completions.create({
model: modelName,
messages: messages.map(m => ({ role: m.role, content: m.content })),
response_format: structuredConfig.response_format,
temperature: options.temperature,
max_tokens: options.maxTokens,
...cleanOptions
});
}
else {
// Regular API call
// Filter out parentExecutionId from provider options
const { parentExecutionId: _, ...cleanOptions } = options;
const providerOptions = {
...cleanOptions,
...structuredConfig
};
response = await provider.chatCompletion(modelName, messages, providerOptions);
}
// Parse structured output if needed
let content;
if (jsonOutput) {
const handler = new structured_output_1.StructuredOutputHandler();
if (providerName === 'openai') {
content = handler.parseOpenAIResponse(response, jsonOutput);
}
else if (providerName === 'google') {
content = handler.parseGeminiResponse(response, jsonOutput);
}
else if (providerName === 'anthropic') {
content = handler.parseClaudeResponse(response, jsonOutput);
}
else {
content = this.extractContent(response);
}
}
else {
// Regular text response
content = this.extractContent(response);
}
// Calculate metrics
const endTime = Date.now();
const latencyMs = endTime - startTime;
// Extract token usage
const tokens = this.extractTokens(response, providerName);
console.debug('Token extraction:', { provider: providerName, response: response.usage, tokens });
// Track execution
const promptHash = crypto.createHash('sha256').update(promptStr).digest('hex');
const executionId = crypto.createHash('sha256')
.update(`${promptHash}_${new Date().toISOString()}`)
.digest('hex');
// Create or update chain
if (!chainId && parentExecutionId) {
chainId = crypto.createHash('sha256')
.update(`${tag}_${new Date().toISOString()}`)
.digest('hex');
}
// Track execution if tracker available
if (this.executionTracker) {
// Use the captured prompt file (already captured before clearing context)
const cost = this.calculateCost(tokens, modelName, providerName);
await this.executionTracker.trackExecution(executionId, modelName, promptHash, tag, tokens, latencyMs, cost, chainId || null, parentExecutionId || null, {
provider: providerName,
structured_output: jsonOutput !== undefined,
prompt_file: promptFile
});
// Auto-commit if not in active chain
if (!parentExecutionId && this.versioning) {
try {
await this.versioning.commit(`Model execution: ${tag} with ${modelName}`, 'PVM Auto-Tracker');
}
catch (commitError) {
// Ignore commit errors for executions - they're not critical
console.debug('Auto-commit skipped:', commitError.message);
}
}
}
// Create response object
return new ModelResponse({
content,
model: modelName,
promptHash,
executionId,
metadata: {
tag,
provider: providerName,
chainId,
tokens,
latencyMs,
structured: jsonOutput !== undefined
},
rawResponse: response
});
}
catch (error) {
throw new Error(`Model execution failed: ${error.message}`, { cause: error });
}
}
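// Example (hypothetical usage): passing a schema as jsonOutput routes the call
// through StructuredOutputHandler; the schema shape shown here is an assumption
// about what the handler accepts, not a documented contract.
//
//   const res = await model.complete(
//     'gpt-4', "Extract the user's name and age.", 'extract-user',
//     { type: 'object', properties: { name: { type: 'string' }, age: { type: 'number' } } }
//   );
//   // res.content holds the parsed output; res.metadata.structured === true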
/**
* Check if value is a ModelResponse
*/
isModelResponse(value) {
return Boolean(value && value._isPvmResponse === true);
}
/**
* Find the .pvm directory by searching up from current directory
*/
findPvmDirectory() {
let current = process.cwd();
while (current !== path.dirname(current)) {
const pvmDir = path.join(current, '.pvm');
if (fs.existsSync(pvmDir) && fs.statSync(pvmDir).isDirectory()) {
return pvmDir;
}
current = path.dirname(current);
}
// Check the filesystem root, which the loop above exits before testing
const pvmDir = path.join(current, '.pvm');
if (fs.existsSync(pvmDir) && fs.statSync(pvmDir).isDirectory()) {
return pvmDir;
}
return null;
}
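// Example: from /repo/packages/app the search tests /repo/packages/app/.pvm,
// then /repo/packages/.pvm, then /repo/.pvm, and finally the filesystem root,
// returning the first .pvm directory found or null.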
/**
* Detect provider from model name
*/
detectProvider(modelName) {
const modelLower = modelName.toLowerCase();
if (['gpt', 'o1', 'davinci', 'curie', 'babbage', 'ada'].some(x => modelLower.includes(x))) {
return 'openai';
}
else if (['gemini', 'palm', 'bison'].some(x => modelLower.includes(x))) {
return 'google';
}
else if (['claude', 'anthropic'].some(x => modelLower.includes(x))) {
return 'anthropic';
}
else {
// Default to OpenAI for unknown models
return 'openai';
}
}
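// Example mappings: 'gpt-4o' → 'openai', 'gemini-1.5-pro' → 'google',
// 'claude-3-opus' → 'anthropic'; an unrecognized name such as 'mistral-large'
// falls through to the 'openai' default.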
/**
* Extract content from provider response
*/
extractContent(response) {
if (response.content) {
return response.content;
}
else if (response.choices && response.choices[0]?.message?.content) {
return response.choices[0].message.content;
}
else if (response.text) {
return response.text;
}
else {
return String(response);
}
}
/**
* Extract token usage from provider response
*/
extractTokens(response, provider) {
// The response from providers already has the tokens in the correct format
if (response.tokens) {
return {
prompt: response.tokens.input || 0,
completion: response.tokens.output || 0,
total: response.tokens.total || 0
};
}
// Fallback for raw responses
const tokens = { prompt: 0, completion: 0, total: 0 };
if (provider === 'openai') {
if (response.usage) {
tokens.prompt = response.usage.prompt_tokens || 0;
tokens.completion = response.usage.completion_tokens || 0;
tokens.total = response.usage.total_tokens || 0;
}
}
else if (provider === 'google') {
// Gemini raw responses report usage under usageMetadata
if (response.usageMetadata) {
tokens.prompt = response.usageMetadata.promptTokenCount || 0;
tokens.completion = response.usageMetadata.candidatesTokenCount || 0;
tokens.total = response.usageMetadata.totalTokenCount || (tokens.prompt + tokens.completion);
}
}
else if (provider === 'anthropic') {
if (response.usage) {
tokens.prompt = response.usage.input_tokens || 0;
tokens.completion = response.usage.output_tokens || 0;
tokens.total = tokens.prompt + tokens.completion;
}
}
return tokens;
}
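// Example: a raw OpenAI response carrying
//   { usage: { prompt_tokens: 120, completion_tokens: 45, total_tokens: 165 } }
// normalizes to { prompt: 120, completion: 45, total: 165 }; a wrapped response
// with { tokens: { input: 120, output: 45, total: 165 } } yields the same object.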
/**
* Calculate estimated cost based on tokens and model
*/
calculateCost(tokens, model, provider) {
// Simplified pricing (would need full pricing table)
const pricing = {
openai: {
'gpt-4': { input: 0.03, output: 0.06 },
'gpt-3.5-turbo': { input: 0.001, output: 0.002 }
},
anthropic: {
'claude-3-opus': { input: 0.015, output: 0.075 },
'claude-3-sonnet': { input: 0.003, output: 0.015 }
},
google: {
'gemini-pro': { input: 0.001, output: 0.002 }
}
};
// Get provider pricing
const providerPricing = pricing[provider] || {};
let modelPricing = null;
// Find matching model pricing
for (const modelKey of Object.keys(providerPricing)) {
if (model.toLowerCase().includes(modelKey)) {
modelPricing = providerPricing[modelKey];
break;
}
}
if (!modelPricing) {
return 0.0;
}
// Calculate cost (per 1K tokens)
const inputCost = (tokens.prompt / 1000) * modelPricing.input;
const outputCost = (tokens.completion / 1000) * modelPricing.output;
return inputCost + outputCost;
}
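// Worked example against the table above: 'gpt-4' with 2,000 prompt tokens and
// 500 completion tokens costs (2000/1000)*0.03 + (500/1000)*0.06 = $0.09.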
}
// Create singleton instance
exports.model = new Model();
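// Example (hypothetical import path; adjust to where this compiled file lives):
// the exported singleton is shared, so all calls are tracked by one instance.
//
//   const { model } = require('./core/model');
//   const answer = await model.complete('gpt-3.5-turbo', 'What is PVM?', 'faq');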
//# sourceMappingURL=model.js.map