// credl-parser-evaluator
// TypeScript-based CREDL Parser and Evaluator that processes CREDL files and
// outputs complete Intermediate Representations (IR).
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.createBenchmarkCommand = createBenchmarkCommand;
exports.executeBenchmarkCommand = executeBenchmarkCommand;
const commander_1 = require("commander");
const index_js_1 = require("../../api/index.js");
const fs_1 = require("fs");
const path_1 = require("path");
const perf_hooks_1 = require("perf_hooks");
// Error exit codes
var ExitCode;
(function (ExitCode) {
ExitCode[ExitCode["SUCCESS"] = 0] = "SUCCESS";
ExitCode[ExitCode["BENCHMARK_FAILURE"] = 1] = "BENCHMARK_FAILURE";
ExitCode[ExitCode["FILE_NOT_FOUND"] = 2] = "FILE_NOT_FOUND";
ExitCode[ExitCode["INVALID_OPTIONS"] = 3] = "INVALID_OPTIONS";
ExitCode[ExitCode["THRESHOLD_EXCEEDED"] = 4] = "THRESHOLD_EXCEEDED";
ExitCode[ExitCode["COMPARISON_FAILED"] = 5] = "COMPARISON_FAILED";
})(ExitCode || (ExitCode = {}));
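// Note: thrown Errors currently all surface through the catch block in
// executeBenchmarkCommand and exit with BENCHMARK_FAILURE; FILE_NOT_FOUND,
// INVALID_OPTIONS, and COMPARISON_FAILED are reserved codes that are not yet
// wired to their specific failure paths.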
// Predefined benchmark suites
const BENCHMARK_SUITES = {
quick: {
name: 'Quick Benchmark',
description: 'Fast benchmark for development',
tests: [
{
name: 'Small Office (10 spaces)',
generator: () => generateCREDLFile(10, 'office'),
expectedSpaces: 10,
threshold: 50
},
{
name: 'Medium Mixed-Use (25 spaces)',
generator: () => generateCREDLFile(25, 'mixed'),
expectedSpaces: 25,
threshold: 100
}
]
},
standard: {
name: 'Standard Benchmark',
description: 'Comprehensive benchmark for CI/CD',
tests: [
{
name: 'Small Office (10 spaces)',
generator: () => generateCREDLFile(10, 'office'),
expectedSpaces: 10,
threshold: 50
},
{
name: 'Medium Office (25 spaces)',
generator: () => generateCREDLFile(25, 'office'),
expectedSpaces: 25,
threshold: 100
},
{
name: 'Large Office (50 spaces)',
generator: () => generateCREDLFile(50, 'office'),
expectedSpaces: 50,
threshold: 200
},
{
name: 'Mixed-Use Portfolio (75 spaces)',
generator: () => generateCREDLFile(75, 'mixed'),
expectedSpaces: 75,
threshold: 300
},
{
name: 'Enterprise Portfolio (100 spaces)',
generator: () => generateCREDLFile(100, 'mixed'),
expectedSpaces: 100,
threshold: 500
}
]
},
comprehensive: {
name: 'Comprehensive Benchmark',
description: 'Exhaustive benchmark for performance validation',
tests: [
{
name: 'Tiny (5 spaces)',
generator: () => generateCREDLFile(5, 'office'),
expectedSpaces: 5,
threshold: 25
},
{
name: 'Small (10 spaces)',
generator: () => generateCREDLFile(10, 'office'),
expectedSpaces: 10,
threshold: 50
},
{
name: 'Medium (25 spaces)',
generator: () => generateCREDLFile(25, 'office'),
expectedSpaces: 25,
threshold: 100
},
{
name: 'Large (50 spaces)',
generator: () => generateCREDLFile(50, 'office'),
expectedSpaces: 50,
threshold: 200
},
{
name: 'Very Large (75 spaces)',
generator: () => generateCREDLFile(75, 'mixed'),
expectedSpaces: 75,
threshold: 300
},
{
name: 'Enterprise (100 spaces)',
generator: () => generateCREDLFile(100, 'mixed'),
expectedSpaces: 100,
threshold: 500
},
{
name: 'Complex Mixed-Use (150 spaces)',
generator: () => generateCREDLFile(150, 'mixed'),
expectedSpaces: 150,
threshold: 750
},
{
name: 'Portfolio Scale (200 spaces)',
generator: () => generateCREDLFile(200, 'mixed'),
expectedSpaces: 200,
threshold: 1000
}
]
}
};
function createBenchmarkCommand() {
const benchmarkCommand = new commander_1.Command('benchmark');
benchmarkCommand
.description('Run performance benchmarks on CREDL files and processing')
.option('-f, --file <path>', 'Benchmark a specific CREDL file')
.option('-o, --output <path>', 'Output benchmark results to file')
.option('--format <format>', 'Output format: table, json, csv', 'table')
        // commander calls the coercion function as fn(value, previousOrDefault), so a bare
        // parseInt would receive the default value as its radix (e.g. base 3 for --warmup);
        // wrap it to force base 10.
        .option('-i, --iterations <number>', 'Number of benchmark iterations', (value) => parseInt(value, 10), 10)
        .option('-w, --warmup <number>', 'Number of warmup iterations', (value) => parseInt(value, 10), 3)
.option('-v, --verbose', 'Verbose benchmark output')
.option('-q, --quiet', 'Suppress output except results')
.option('-c, --compare <path>', 'Compare with previous benchmark results')
.option('--history', 'Save results to benchmark history')
.option('-s, --suite <suite>', 'Run predefined benchmark suite: quick, standard, comprehensive', 'standard')
        .option('-t, --threshold <ms>', 'Performance threshold in milliseconds', (value) => parseInt(value, 10))
.option('--memory', 'Include memory usage analysis')
        .option('--concurrent <number>', 'Run concurrent benchmark tests', (value) => parseInt(value, 10))
.option('--profile', 'Enable detailed profiling')
.action(async (options) => {
await executeBenchmarkCommand(options);
});
return benchmarkCommand;
}
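// Illustrative invocations (the `credl` binary name is an assumption; adjust to
// however this subcommand is registered on the root program):
//   credl benchmark --suite quick --iterations 5
//   credl benchmark --file portfolio.credl --memory --format json -o results.json
//   credl benchmark --compare .credl/benchmarks/benchmark-<timestamp>.json --quiet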
async function executeBenchmarkCommand(options) {
try {
if (!options.quiet) {
            console.log('🚀 CREDL Performance Benchmark Suite');
console.log('');
}
// Validate options
validateBenchmarkOptions(options);
// Initialize history if requested
if (options.history) {
initializeBenchmarkHistory();
}
let results = [];
if (options.file) {
// Benchmark a specific file
const result = await benchmarkFile(options.file, options);
results.push(result);
}
else {
// Run benchmark suite
const suite = BENCHMARK_SUITES[options.suite || 'standard'];
if (!suite) {
throw new Error(`Unknown benchmark suite: ${options.suite}`);
}
if (!options.quiet) {
                console.log(`📊 Running ${suite.name}`);
console.log(` ${suite.description}`);
console.log(` ${suite.tests.length} tests, ${options.iterations} iterations each`);
console.log('');
}
// Run concurrent benchmarks if specified
if (options.concurrent && options.concurrent > 1) {
results = await runConcurrentBenchmarks(suite, options);
}
else {
results = await runSequentialBenchmarks(suite, options);
}
}
// Compare with previous results if requested
if (options.compare) {
await compareResults(results, options.compare, options);
}
// Save to history if requested
if (options.history) {
await saveBenchmarkHistory(results);
}
// Output results
await outputResults(results, options);
// Check thresholds
const thresholdFailures = checkThresholds(results, options);
if (thresholdFailures.length > 0) {
            console.error('\n❌ Performance threshold exceeded:');
thresholdFailures.forEach(failure => {
console.error(` ⢠${failure.name}: ${failure.actual.toFixed(2)}ms > ${failure.threshold}ms`);
});
process.exit(ExitCode.THRESHOLD_EXCEEDED);
}
if (!options.quiet) {
            console.log('\n✅ Benchmark completed successfully');
}
}
catch (error) {
        console.error('❌ Benchmark failed:');
if (error instanceof Error) {
console.error(` ${error.message}`);
if (options.verbose && error.stack) {
                console.error('\n📋 Stack trace:');
console.error(error.stack);
}
}
process.exit(ExitCode.BENCHMARK_FAILURE);
}
}
async function benchmarkFile(filePath, options) {
if (!(0, fs_1.existsSync)(filePath)) {
throw new Error(`File not found: ${filePath}`);
}
if (!options.quiet) {
        console.log(`📁 Benchmarking file: ${filePath}`);
}
// Read file content
const content = (0, fs_1.readFileSync)(filePath, 'utf8');
// Parse to get metadata
const ir = await (0, index_js_1.processFile)(content);
return await runBenchmark((0, path_1.basename)(filePath, '.credl'), () => content, ir.spaces.length, options, filePath);
}
async function runSequentialBenchmarks(suite, options) {
const results = [];
    for (let i = 0; i < suite.tests.length; i++) {
        // The loop condition bounds-checks the index, so `test` is always defined here.
        const test = suite.tests[i];
        if (!options.quiet) {
            console.log(`📊 [${i + 1}/${suite.tests.length}] ${test.name}`);
        }
        const result = await runBenchmark(test.name, test.generator, test.expectedSpaces, options);
results.push(result);
if (!options.quiet && options.verbose) {
            console.log(`   ✅ ${result.results.mean.toFixed(2)}ms average`);
}
}
return results;
}
async function runConcurrentBenchmarks(suite, options) {
if (!options.quiet) {
        console.log(`🔄 Running ${suite.tests.length} benchmarks concurrently (${options.concurrent} workers)`);
}
// Split tests into batches
const batchSize = options.concurrent || 2;
const batches = [];
for (let i = 0; i < suite.tests.length; i += batchSize) {
batches.push(suite.tests.slice(i, i + batchSize));
}
const results = [];
for (const batch of batches) {
const batchPromises = batch.map(test => runBenchmark(test.name, test.generator, test.expectedSpaces, options));
const batchResults = await Promise.all(batchPromises);
results.push(...batchResults);
}
return results;
}
async function runBenchmark(name, contentGenerator, expectedSpaces, options, filePath) {
const iterations = options.iterations || 10;
const warmupIterations = options.warmup || 3;
const times = [];
let memoryUsage;
    // Warmup runs (untimed) let JIT compilation and module caches settle before measurement
for (let i = 0; i < warmupIterations; i++) {
const content = contentGenerator();
await (0, index_js_1.processFile)(content);
}
// Benchmark runs
for (let i = 0; i < iterations; i++) {
const content = contentGenerator();
        // Measure memory on the first iteration only; a single-run heap delta is
        // approximate and can be perturbed by garbage collection mid-measurement
if (options.memory && i === 0) {
const memBefore = process.memoryUsage();
const startTime = perf_hooks_1.performance.now();
await (0, index_js_1.processFile)(content);
const endTime = perf_hooks_1.performance.now();
const memAfter = process.memoryUsage();
times.push(endTime - startTime);
memoryUsage = {
heapUsed: (memAfter.heapUsed - memBefore.heapUsed) / 1024 / 1024,
heapTotal: (memAfter.heapTotal - memBefore.heapTotal) / 1024 / 1024,
rss: (memAfter.rss - memBefore.rss) / 1024 / 1024
};
}
else {
const startTime = perf_hooks_1.performance.now();
await (0, index_js_1.processFile)(content);
const endTime = perf_hooks_1.performance.now();
times.push(endTime - startTime);
}
}
    // Calculate statistics on a sorted copy (Array.prototype.sort mutates in place)
    const sortedTimes = [...times].sort((a, b) => a - b);
const mean = times.reduce((a, b) => a + b, 0) / times.length;
const variance = times.reduce((sum, time) => sum + Math.pow(time - mean, 2), 0) / times.length;
const stddev = Math.sqrt(variance);
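    // Nearest-rank percentile indices; with few iterations (e.g. the default 10),
    // p95 and p99 both resolve to the slowest observed run.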
const p95Index = Math.floor(sortedTimes.length * 0.95);
const p99Index = Math.floor(sortedTimes.length * 0.99);
const result = {
name,
spaces: expectedSpaces,
assumptions: 0, // Would need to parse to get actual count
models: 0, // Would need to parse to get actual count
iterations,
results: {
min: sortedTimes[0] || 0,
max: sortedTimes[sortedTimes.length - 1] || 0,
mean,
median: sortedTimes[Math.floor(sortedTimes.length / 2)] || 0,
stddev,
p95: sortedTimes[p95Index] || 0,
p99: sortedTimes[p99Index] || 0
},
memory: memoryUsage,
metadata: {
timestamp: new Date().toISOString(),
version: '1.0.0',
nodeVersion: process.version,
platform: process.platform
}
};
if (filePath) {
result.file = filePath;
}
return result;
}
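// Illustrative shape of a returned result (values are examples, not measurements):
// {
//   name: 'Small Office (10 spaces)', spaces: 10, assumptions: 0, models: 0, iterations: 10,
//   results: { min: 12.1, max: 18.4, mean: 14.2, median: 13.9, stddev: 1.7, p95: 18.4, p99: 18.4 },
//   memory: { heapUsed: 3.2, heapTotal: 4.1, rss: 5.6 },   // present only when --memory is set
//   metadata: { timestamp: '2025-06-21T12:00:00.000Z', version: '1.0.0', nodeVersion: 'v20.0.0', platform: 'linux' }
// }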
function generateCREDLFile(spaceCount, propertyType) {
const spaces = [];
const assumptions = [];
// Generate spaces
for (let i = 1; i <= spaceCount; i++) {
const spaceType = propertyType === 'mixed' ?
(i % 3 === 0 ? 'retail' : i % 2 === 0 ? 'residential' : 'office') :
'office';
const areaSize = spaceType === 'office' ? 2500 + (i * 50) :
spaceType === 'retail' ? 1500 + (i * 30) :
800 + (i * 20);
spaces.push(` - id: "space-${i}"
parent_building: "building-${Math.floor((i - 1) / 20) + 1}"
type: "${spaceType}"
area_sf: ${areaSize}
lease:
status: "${i % 4 === 0 ? 'vacant' : 'leased'}"
tenant: "Tenant ${i}"
rent_psf: ${25 + (i % 20)}
lease_type: "${i % 2 === 0 ? 'NNN' : 'gross'}"
start_date: "2024-01-01"
end_date: "2029-01-01"`);
}
// Generate assumptions proportional to space count
const assumptionCount = Math.min(Math.floor(spaceCount / 5), 30);
for (let i = 1; i <= assumptionCount; i++) {
assumptions.push(` - name: "assumption_${i}"
type: "${i % 4 === 0 ? 'distribution' : 'fixed'}"
${i % 4 === 0 ?
`distribution: "normal"
parameters:
mean: ${0.02 + (i * 0.005)}
stddev: ${0.01 + (i * 0.002)}` :
`value: ${0.05 + (i * 0.01)}`}
scope: "${i % 3 === 0 ? 'global' : 'space_type:office'}"
tags: ["revenue", "benchmark"]`);
}
const buildingCount = Math.ceil(spaceCount / 20);
const buildings = Array.from({ length: buildingCount }, (_, i) => ` - id: "building-${i + 1}"
name: "Building ${i + 1}"
floors: ${Math.ceil(spaceCount / buildingCount / 10)}
total_area_sf: ${Math.floor(spaceCount * 2000 / buildingCount)}`).join('\n');
return `metadata:
version: 0.1
name: "Benchmark Test - ${spaceCount} Spaces"
description: "Generated CREDL file for performance benchmarking"
created_date: "2025-06-21"
assets:
- id: "asset-1"
name: "Benchmark Property"
property_type: "${propertyType === 'mixed' ? 'Mixed Use' : 'Office'}"
location: "Benchmark City, ST"
total_area_sf: ${spaceCount * 2000}
buildings:
${buildings}
spaces:
${spaces.join('\n\n')}
assumptions:
${assumptions.join('\n\n')}
models:
- name: "benchmark_model"
type: "deterministic"
duration_years: 10
steps_per_year: 12
inputs: [${assumptions.slice(0, 5).map((_, i) => `"assumption_${i + 1}"`).join(', ')}]
outputs: ["IRR", "NPV", "Cash_Flow"]
description: "Benchmark DCF model"
simulation:
type: "monte_carlo"
iterations: 1000
processes: {}
outputs:
summary_metrics: ["IRR", "NPV"]
outputs:
format: "json"
metrics: ["IRR", "NPV", "Cash_Flow"]`;
}
function validateBenchmarkOptions(options) {
if (options.iterations && options.iterations < 1) {
throw new Error('Iterations must be at least 1');
}
if (options.warmup && options.warmup < 0) {
throw new Error('Warmup iterations cannot be negative');
}
if (options.concurrent && options.concurrent < 1) {
throw new Error('Concurrent workers must be at least 1');
}
if (options.threshold && options.threshold < 0) {
        throw new Error('Threshold cannot be negative');
}
if (options.format && !['table', 'json', 'csv'].includes(options.format)) {
throw new Error('Format must be one of: table, json, csv');
}
if (options.suite && !BENCHMARK_SUITES[options.suite]) {
throw new Error(`Unknown benchmark suite: ${options.suite}. Available: ${Object.keys(BENCHMARK_SUITES).join(', ')}`);
}
}
function initializeBenchmarkHistory() {
const historyDir = (0, path_1.join)(process.cwd(), '.credl', 'benchmarks');
if (!(0, fs_1.existsSync)(historyDir)) {
(0, fs_1.mkdirSync)(historyDir, { recursive: true });
}
}
async function saveBenchmarkHistory(results) {
const historyDir = (0, path_1.join)(process.cwd(), '.credl', 'benchmarks');
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
const historyFile = (0, path_1.join)(historyDir, `benchmark-${timestamp}.json`);
(0, fs_1.writeFileSync)(historyFile, JSON.stringify(results, null, 2));
}
async function compareResults(results, compareFile, options) {
if (!(0, fs_1.existsSync)(compareFile)) {
throw new Error(`Comparison file not found: ${compareFile}`);
}
const previousResults = JSON.parse((0, fs_1.readFileSync)(compareFile, 'utf8'));
if (!options.quiet) {
        console.log('\n📊 Performance Comparison:');
console.log('='.repeat(60));
}
for (const result of results) {
const previous = previousResults.find(p => p.name === result.name);
if (previous) {
            // Positive improvement means the current run is faster than the previous one
            const improvement = ((previous.results.mean - result.results.mean) / previous.results.mean) * 100;
            const icon = improvement > 0 ? '⬆️' : improvement < -5 ? '⬇️' : '➡️';
console.log(`${icon} ${result.name}:`);
console.log(` Current: ${result.results.mean.toFixed(2)}ms`);
console.log(` Previous: ${previous.results.mean.toFixed(2)}ms`);
console.log(` Change: ${improvement > 0 ? '+' : ''}${improvement.toFixed(1)}%`);
console.log('');
}
}
}
function checkThresholds(results, options) {
const failures = [];
for (const result of results) {
let threshold = options.threshold;
// Use suite-specific thresholds if no global threshold set
if (!threshold) {
const suite = BENCHMARK_SUITES[options.suite || 'standard'];
const test = suite?.tests.find(t => t.name === result.name);
threshold = test?.threshold;
}
if (threshold && result.results.mean > threshold) {
failures.push({
name: result.name,
actual: result.results.mean,
threshold
});
}
}
return failures;
}
async function outputResults(results, options) {
const output = formatResults(results, options.format || 'table', options);
if (options.output) {
// Ensure output directory exists
const outputDir = (0, path_1.dirname)(options.output);
if (!(0, fs_1.existsSync)(outputDir)) {
(0, fs_1.mkdirSync)(outputDir, { recursive: true });
}
(0, fs_1.writeFileSync)(options.output, output);
if (!options.quiet) {
            console.log(`\n💾 Results saved to: ${options.output}`);
}
}
else {
console.log(output);
}
}
function formatResults(results, format, options) {
switch (format) {
case 'json':
return JSON.stringify(results, null, 2);
        case 'csv': {
            const headers = ['Name', 'Spaces', 'Iterations', 'Min (ms)', 'Max (ms)', 'Mean (ms)', 'Median (ms)', 'Std Dev (ms)', 'P95 (ms)', 'P99 (ms)'];
            if (options.memory) {
                headers.push('Heap Used (MB)', 'RSS (MB)');
            }
            const rows = results.map(r => {
                const row = [
                    // Quote the name so commas in test names can't break the CSV columns
                    `"${r.name.replace(/"/g, '""')}"`,
                    r.spaces.toString(),
                    r.iterations.toString(),
                    r.results.min.toFixed(2),
                    r.results.max.toFixed(2),
                    r.results.mean.toFixed(2),
                    r.results.median.toFixed(2),
                    r.results.stddev.toFixed(2),
                    r.results.p95.toFixed(2),
                    r.results.p99.toFixed(2)
                ];
                if (options.memory && r.memory) {
                    row.push(r.memory.heapUsed.toFixed(2), r.memory.rss.toFixed(2));
                }
                return row;
            });
            return [headers.join(','), ...rows.map(row => row.join(','))].join('\n');
        }
case 'table':
default:
return formatTableResults(results, options);
}
}
function formatTableResults(results, _options) {
const lines = [];
    lines.push('\n📊 Benchmark Results:');
lines.push('='.repeat(80));
results.forEach(result => {
        lines.push(`\n🎯 ${result.name}`);
lines.push(` Spaces: ${result.spaces}`);
lines.push(` Iterations: ${result.iterations}`);
lines.push(' ');
lines.push(' Performance:');
lines.push(` Min: ${result.results.min.toFixed(2)}ms`);
lines.push(` Max: ${result.results.max.toFixed(2)}ms`);
lines.push(` Mean: ${result.results.mean.toFixed(2)}ms`);
lines.push(` Median: ${result.results.median.toFixed(2)}ms`);
lines.push(` Std Dev: ${result.results.stddev.toFixed(2)}ms`);
lines.push(` P95: ${result.results.p95.toFixed(2)}ms`);
lines.push(` P99: ${result.results.p99.toFixed(2)}ms`);
if (result.memory) {
lines.push(' ');
lines.push(' Memory Usage:');
lines.push(` Heap Used: ${result.memory.heapUsed.toFixed(2)}MB`);
lines.push(` RSS: ${result.memory.rss.toFixed(2)}MB`);
}
lines.push(' ');
lines.push(' Efficiency:');
lines.push(` Per Space: ${(result.results.mean / result.spaces).toFixed(3)}ms`);
// Performance rating
        const rating = result.results.mean < 50 ? '🟢 Excellent' :
            result.results.mean < 100 ? '🟡 Good' :
                result.results.mean < 200 ? '🟠 Fair' : '🔴 Needs Optimization';
lines.push(` Rating: ${rating}`);
});
// Summary statistics
if (results.length > 1) {
        lines.push('\n📈 Summary:');
lines.push('-'.repeat(40));
const totalSpaces = results.reduce((sum, r) => sum + r.spaces, 0);
const avgTime = results.reduce((sum, r) => sum + r.results.mean, 0) / results.length;
const avgPerSpace = results.reduce((sum, r) => sum + (r.results.mean / r.spaces), 0) / results.length;
lines.push(` Total Spaces Tested: ${totalSpaces}`);
lines.push(` Average Time: ${avgTime.toFixed(2)}ms`);
lines.push(` Average Per Space: ${avgPerSpace.toFixed(3)}ms`);
const fastest = results.reduce((min, r) => r.results.mean < min.results.mean ? r : min);
const slowest = results.reduce((max, r) => r.results.mean > max.results.mean ? r : max);
lines.push(` Fastest: ${fastest.name} (${fastest.results.mean.toFixed(2)}ms)`);
lines.push(` Slowest: ${slowest.name} (${slowest.results.mean.toFixed(2)}ms)`);
}
return lines.join('\n');
}
//# sourceMappingURL=benchmark.js.map