
@yipe/dice

A high-performance dice probability engine for D&D 5e DPR calculations. Powers dprcalc.com.
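
A minimal usage sketch (hypothetical): parse() and the DiceQuery methods shown here appear in the bundled source below, but the import path and exact export names are assumptions.

  import { parse } from "@yipe/dice";

  // One attack: d20 + 5 to hit vs AC 15, dealing 2d6 + 3 on a hit
  const attack = parse("(d20 + 5 AC 15) * (2d6 + 3)");

  attack.mean();                          // average damage per round
  attack.probAtLeastOne("hit");           // chance the attack lands
  attack.percentiles([0.25, 0.5, 0.75]);  // damage quartiles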

'use strict'; var __defProp = Object.defineProperty; var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value; var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value); // src/common/lru-cache.ts var LRUCache = class { constructor(maxSize = 1e3) { this.maxSize = maxSize; __publicField(this, "cache", /* @__PURE__ */ new Map()); } get(key) { const value = this.cache.get(key); if (value === void 0) return void 0; this.cache.delete(key); this.cache.set(key, value); return value; } delete(key) { this.cache.delete(key); } set(key, value) { if (this.cache.size >= this.maxSize && !this.cache.has(key)) { const oldestKey = this.cache.keys().next().value; this.cache.delete(oldestKey); } this.cache.delete(key); this.cache.set(key, value); return this; } clear() { this.cache.clear(); } get size() { return this.cache.size; } has(key) { return this.cache.has(key); } keys() { return this.cache.keys(); } values() { return this.cache.values(); } }; // src/common/types.ts var EPS = 1e-12; // src/pmf/query.ts var _DiceQuery = class _DiceQuery { constructor(singles, combined, eps = EPS) { __publicField(this, "singles"); __publicField(this, "combined"); __publicField(this, "_combinedWithAttr"); this.singles = Array.isArray(singles) ? singles : [singles]; if (this.singles.some((s) => s === void 0)) { throw new Error("DiceQuery contains undefined singles"); } const c = combined ?? PMF.convolveMany(this.singles); this.combined = Math.abs(c.mass() - 1) <= eps ? c : c.normalize(); } /** * Returns a new PMF with damage attribution metadata populated. * * This method computes attribution on-demand for builder-generated PMFs, * enabling them to work with damage attribution charts. The `attr` field * tracks how much damage each outcome type contributes at each damage value. * * For each bin at damage D: sum(attr.values()) ≈ D × P(damage = D) * * Performance: Cached after first call. Adds minimal overhead vs `combined`. * * @returns PMF with attr field populated for damage attribution charts * * @example * const attack = d20.plus(5).ac(15).onHit(d(2,6).plus(3)).onCrit(d(2,6)) * const query = attack.toQuery() * const pmf = query.combinedWithAttribution() * // Now pmf can be used with toDamageAttributionChartSeries() */ combinedWithAttribution() { if (this._combinedWithAttr) { return this._combinedWithAttr; } const singlesWithAttr = this.singles.map((pmf) => pmf.withAttribution()); const combined = PMF.convolveMany(singlesWithAttr, this.combined.epsilon); const normalized = Math.abs(combined.mass() - 1) <= this.combined.epsilon ? combined : combined.normalize(); this._combinedWithAttr = normalized; return normalized; } /** * Returns the expected damage across all possible outcomes. * * Example: `query.mean()` → 12.5 * Use case: "What's my average damage per round?" */ mean() { let totalSum = 0; for (const [damageValue, probabilityBin] of this.combined) { totalSum += damageValue * probabilityBin.p; } return totalSum; } /** * Returns the variance of the damage distribution. * * Example: `query.variance()` → 45.2 * Use case: "How much does my damage vary from the average?" * High variance means higher risk/reward. Lower variance means more consistent damage. 
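 *
 * @example
 * // Hypothetical comparison; assumes parse() returns a DiceQuery, as the
 * // other examples in this file suggest.
 * const steady = parse("(d20 + 7 AC 16) * (1d8 + 5)");
 * const swingy = parse("(d20 + 7 AC 16) * (3d12)");
 * swingy.variance() > steady.variance(); // bigger dice pool, wider spread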
*/ variance() { const meanValue = this.mean(); let varianceSum = 0; for (const [damageValue, probabilityBin] of this.combined) { const deviationFromMean = damageValue - meanValue; varianceSum += deviationFromMean * deviationFromMean * probabilityBin.p; } return varianceSum; } /** * Returns the standard deviation of the damage distribution. * * Example: `query.stdev()` → 6.7 * Use case: "What's the typical spread around my average damage?" * Used to determine how consistent the damage is. */ stddev() { return Math.sqrt(this.variance()); } /** * Returns the Cumulative Distribution Function. */ cdf(x) { return this.probTotalAtMost(x); } /** * Returns the probability of dealing X damage or less. * In statistics, this is called the cumulative distribution function (CDF). * Example: `query.cdf(20)` → 0.75 * Use case: "What's the chance I deal 20 damage or less?" */ probTotalAtMost(x) { let cumulativeProbability = 0; for (const [damageValue, probabilityBin] of this.combined) { if (damageValue <= x) { cumulativeProbability += probabilityBin.p; } } return cumulativeProbability; } /** * Returns the Complementary Cumulative Distribution Function. */ ccdf(x) { return this.probTotalAtLeast(x); } /** * Returns the probability of dealing at least X damage. * * Example: `query.probTotalAtLeast(25)` → 0.35 * Use case: "What's the chance I deal at least 25 damage to finish the enemy?" */ probTotalAtLeast(threshold) { let probabilitySum = 0; for (const [damageValue, probabilityBin] of this.combined) { if (damageValue >= threshold) { probabilitySum += probabilityBin.p; } } return probabilitySum; } /** * Returns damage values at specific percentiles. * * Example: `query.percentiles([0.25, 0.5, 0.75])` → [8, 12, 18] * Use case: "What are my 25th, 50th, and 75th percentile damage values?" */ percentiles(percentileValues) { const sortedDamageValues = this.combined.support(); if (sortedDamageValues.length === 0) return percentileValues.map(() => 0); const cumulativeProbabilities = []; let runningProbabilitySum = 0; for (const damageValue of sortedDamageValues) { runningProbabilitySum += this.combined.map.get(damageValue).p; cumulativeProbabilities.push(runningProbabilitySum); } return percentileValues.map((targetPercentile) => { let leftBound = 0; let rightBound = cumulativeProbabilities.length - 1; while (leftBound <= rightBound) { const middleIndex = Math.floor((leftBound + rightBound) / 2); if (cumulativeProbabilities[middleIndex] >= targetPercentile) { rightBound = middleIndex - 1; } else { leftBound = middleIndex + 1; } } return leftBound < sortedDamageValues.length ? sortedDamageValues[leftBound] : sortedDamageValues[sortedDamageValues.length - 1]; }); } /** * Returns the minimum possible damage. * * Example: `query.min()` → 0 * Use case: "What's the worst-case damage if everything misses?" */ min() { return this.combined.min(); } /** * Returns the maximum possible damage. * * Example: `query.max()` → 56 * Use case: "What's the best-case damage if everything crits and rolls max?" */ max() { return this.combined.max(); } singleProb(diceIndex, label) { let probabilitySum = 0; for (const [, probabilityBin] of this.singles[diceIndex]) { probabilitySum += probabilityBin.count[label] || 0; } return probabilitySum; } probAtLeastK(labels, k) { const L = Array.isArray(labels) ? 
[...new Set(labels)] : [labels]; const n = this.singles.length; if (k <= 0) return 1; if (k > n) return 0; let tail = 0; for (let i = k; i <= n; i++) { tail += this.probExactlyK(L, i); } if (tail < 0) return 0; if (tail > 1) return 1; return tail; } /** * Returns the probability that at least one attack has the specified outcome(s). * - This is the complement of probAtMostK(labels, 0) * * Examples: * - `query.probAtLeastOne('hit')` → 0.88 (88% chance at least one attack hits) * - `query.probAtLeastOne(['hit', 'crit'])` → 0.96 (96% chance at least one succeeds) * * Use cases: * - "What's the chance at least one of my attacks connects?" * * Note: * * - You have to pass in an array of labels to avoid double-counting if you are * using multiple labels. You cannot just add them. */ probAtLeastOne(labels) { if (typeof labels === "string") { labels = [labels]; } let productOfNonOccurrence = 1; for (let diceIndex = 0; diceIndex < this.singles.length; diceIndex++) { let combinedProbability = 0; for (const label of labels) { combinedProbability += this.singleProb(diceIndex, label); } productOfNonOccurrence *= 1 - combinedProbability; } return 1 - productOfNonOccurrence; } /** * Computes binomial probabilities for exactly 0, 1, 2, ..., maxK occurrences of a label. * * Uses dynamic programming to efficiently calculate the probability distribution * of how many attacks will have the specified outcome, accounting for different * success probabilities across individual attacks. * * Example: For 3 attacks with 50% hit chance each, returns: * [0.125, 0.375, 0.375, 0.125] = [P(0 hits), P(1 hit), P(2 hits), P(3 hits)] * * @param label - The outcome type to count * @param maxK - Maximum number of occurrences to calculate (usually number of attacks) * @returns Array where index K contains P(exactly K attacks have the label) */ computeBinomialProbabilities(label, maxK) { const individualProbabilities = this.singles.map( (_, diceIndex) => this.singleProb(diceIndex, label) ); const binomialProbs = new Array(maxK + 1).fill(0); binomialProbs[0] = 1; for (const singleProbability of individualProbabilities) { for (let outcomeCount = maxK; outcomeCount >= 1; outcomeCount--) { binomialProbs[outcomeCount] = binomialProbs[outcomeCount] * (1 - singleProbability) + binomialProbs[outcomeCount - 1] * singleProbability; } binomialProbs[0] *= 1 - singleProbability; } return binomialProbs; } /** * Returns the probability that exactly K attacks result in the specified outcome(s). * * Single label examples: * - probExactlyK('hit', 2) = probability exactly 2 attacks hit * - probExactlyK('crit', 1) = probability exactly 1 attack crits * - probExactlyK('crit', 0) = probability no attacks crit * * Array examples: * - probExactlyK(['hit', 'crit'], 2) = probability exactly 2 attacks succeed * - probExactlyK(['hit', 'crit'], 1) = probability exactly 1 attack succeeds * - probExactlyK(['miss', 'missNone'], 0) = probability no attacks miss * * Use cases: * - "What's the chance exactly one of my attacks hits?" * - "How likely am I to get exactly 2 successes out of 3 attacks?" * - "What's the probability that exactly half my attacks succeed?" * * Note: For arrays, an attack counts as a "success" if it has any of the specified labels. * This is different from probAtMostK, which counts an attack as a "success" if it has ALL of the specified labels. 
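 *
 * @example
 * // Hypothetical turn of three attacks, each built as in the convolve() example below.
 * const turn = mainAttack.convolve(offhandAttack).convolve(bonusAttack);
 * turn.probExactlyK(["hit", "crit"], 2); // P(exactly 2 of the 3 attacks land)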
*/ probExactlyK(labels, k) { if (typeof labels === "string") { const probabilityArray = this.computeBinomialProbabilities(labels, k); return probabilityArray[k]; } const successProbabilities = this.singles.map((single) => { const singleQuery = new _DiceQuery([single]); return singleQuery.probabilityOf(labels); }); const binomialProbs = new Array(k + 1).fill(0); binomialProbs[0] = 1; for (const successProb of successProbabilities) { for (let outcomeCount = k; outcomeCount >= 1; outcomeCount--) { binomialProbs[outcomeCount] = binomialProbs[outcomeCount] * (1 - successProb) + binomialProbs[outcomeCount - 1] * successProb; } binomialProbs[0] *= 1 - successProb; } return binomialProbs[k]; } /** * Returns the probability that AT MOST K attacks result in the specified outcome(s). * * Single label examples: * - probAtMostK('hit', 1) = probability 0 or 1 attacks hit (at most 1) * - probAtMostK('crit', 0) = probability no attacks crit * - probAtMostK('miss', 2) = probability at most 2 attacks miss * * Array examples: * - probAtMostK(['hit', 'crit'], 1) = probability at most 1 attack succeeds * - probAtMostK(['hit', 'crit'], 0) = probability no attacks succeed (all miss) * * Use cases: * - "What's the chance that at most one attack hits?" (rest miss) * - "How likely am I to have mostly failures?" (at most 1 success) * - "What's the probability of a really bad turn?" (at most 0 successes) * */ probAtMostK(labels, k) { if (typeof labels === "string") { const probabilityArray = this.computeBinomialProbabilities(labels, k); let cumulativeSum2 = 0; for (let outcomeCount = 0; outcomeCount <= k; outcomeCount++) { cumulativeSum2 += probabilityArray[outcomeCount]; } return cumulativeSum2; } let cumulativeSum = 0; for (let outcomeCount = 0; outcomeCount <= k; outcomeCount++) { cumulativeSum += this.probExactlyK(labels, outcomeCount); } return cumulativeSum; } /** * Returns the expected damage attributed to specific outcome types. * * Single label examples: * - expectedDamageFrom('hit') = expected damage from hit components * - expectedDamageFrom('crit') = expected damage from crit components * * Array examples: * - expectedDamageFrom(['hit', 'crit']) = expected damage from any success * - expectedDamageFrom(['missDamage', 'missNone']) = expected damage from misses * * Use cases: * - "How much damage do I expect from successful attacks?" * - "What's the damage contribution from critical hits specifically?" * - "How much damage comes from miss effects (like save-for-half spells)?" */ expectedDamageFrom(labels) { const wanted = Array.isArray(labels) ? labels : [labels]; let total = 0; for (const single of this.singles) { for (const [dmg, bin] of single) { let p = 0; for (const label of wanted) p += bin.count[label] ?? 0; total += dmg * p; } } return total; } /** * Returns damage statistics for scenarios where AT LEAST ONE attack results in * the specified outcome(s). * * This method answers "What happens when things go reasonably well?" rather than * "What's the theoretical maximum?" It includes mixed scenarios which are more * common and tactically relevant than pure scenarios. 
* * Single label examples: * - damageStatsFrom('hit') = damage range when at least one attack hits * - damageStatsFrom('crit') = damage range when at least one attack crits * * Array examples: * - damageStatsFrom(['hit', 'crit']) = damage range when at least one attack succeeds * - damageStatsFrom(['miss', 'missNone']) = damage range when at least one attack misses * * Tactical Use Cases: * - "Given that I don't completely whiff (99% of turns), what damage should I expect?" * - "When planning to kill a 60 HP enemy, what's my damage range on successful turns?" * - "Should I use this risky spell if it has good damage when it works?" * - "What's my damage potential when something goes right?" (vs pure failure) * * Combat Planning Examples: * - 4 attacks with 90% hit chance: "96% of the time you'll do 25-150 damage, avg 52" * (Much more useful than "You average 50 damage including complete misses") * - Risk assessment: "80% of successful turns do 40-80 damage, but 20% do 80-150" * - Resource management: "If I hit anything, I'll likely finish this enemy" * * Statistical Note: * This includes mixed scenarios (2 hits + 1 crit, 3 hits + 1 miss, etc.) which * occur far more frequently than pure scenarios. For pure scenarios, use combinedDamageStats. * * @example * // High-level tactical planning * const successStats = query.damageStatsFrom('hit') * const successChance = query.probAtLeastOne('hit') * console.log(`${(successChance*100).toFixed(1)}% chance to do ${successStats.min}-${successStats.max} damage`) */ damageStatsFrom(labels) { const labelArray = typeof labels === "string" ? [labels] : labels; let minDamage = Infinity; let maxDamage = -Infinity; let totalDamage = 0; let totalCount = 0; for (const [damage, probabilityBin] of this.combined) { let binHasAnyLabel = false; let binContribution = 0; for (const label of labelArray) { const count = probabilityBin.count[label]; if (count && count > 0) { binHasAnyLabel = true; binContribution += count; } } if (damage > 0 && binHasAnyLabel) { minDamage = Math.min(minDamage, damage); maxDamage = Math.max(maxDamage, damage); const weightToUse = labelArray.length === 1 ? binContribution : probabilityBin.p; totalDamage += damage * weightToUse; totalCount += weightToUse; } } return { min: minDamage === Infinity ? 0 : minDamage, max: maxDamage === -Infinity ? 0 : maxDamage, avg: totalCount > 0 ? totalDamage / totalCount : 0, count: totalCount }; } /** * Returns damage statistics for scenarios where ALL attacks result in the specified * outcome, calculated by leveraging the pure partition of singles. * * This method answers "What's the theoretical best/worst case?" and "What are the * clean mathematical boundaries?" It provides pure scenarios without mixing outcomes. * * Examples: * - combinedDamageStats('hit') = damage range when all attacks hit (none crit, none miss) * - combinedDamageStats('crit') = damage range when all attacks crit (none just hit) * * UI and Display Use Cases: * - Statistics panels showing "MAX Hit Damage" (users expect pure hits, not mixed) * - "Best case scenario" vs "worst case scenario" analysis * - Mathematical verification: "Does our hit damage calculation match manual math?" * - Clean damage type attribution: "How much comes from base hits vs crits?" * * Design and Balance Use Cases: * - Game designers: "What's the damage ceiling if someone gets lucky?" * - Character optimization: "What's my absolute maximum potential?" * - Ability comparison: "Which build has higher crit ceiling?" 
* - Minimum guaranteed damage: "What's the worst I can do if everything hits?" * * Mathematical Use Cases: * - Validating complex calculations against simple manual math * - Understanding damage component contributions in isolation * - Separating luck (crit variance) from consistency (hit variance) * - Building intuition about damage sources * * When to Use This vs damageStatsFrom(): * - Use THIS for: UI max/min displays, theoretical limits, clean comparisons * - Use damageStatsFrom() for: tactical planning, realistic expectations, mixed scenarios * * Statistical Note: * Pure scenarios (all hits, all crits) are rare but represent clear mathematical * boundaries. These stats help understand the "shape" of your damage potential. * * @example * // UI display logic * const pureHitMax = query.combinedDamageStats('hit').max // Clean "MAX Hit Damage: 90" * const pureCritMax = query.combinedDamageStats('crit').max // Clean "MAX Crit Damage: 168" * * // vs tactical planning (use damageStatsFrom instead) * const realisticRange = query.damageStatsFrom('hit') // Includes mixed scenarios */ combinedDamageStats(targetLabel) { const singleStats = this.singles.map( (single) => new _DiceQuery([single]).damageStatsFrom(targetLabel) ); if (singleStats.some((stats) => stats.count === 0)) { return { min: 0, max: 0, avg: 0, count: 0 }; } const combinedMin = singleStats.reduce((sum, stats) => sum + stats.min, 0); const combinedMax = singleStats.reduce((sum, stats) => sum + stats.max, 0); const combinedAvg = singleStats.reduce((sum, stats) => sum + stats.avg, 0); const combinedProb = singleStats.reduce( (product, stats) => product * stats.count, 1 ); return { min: combinedMin, max: combinedMax, avg: combinedAvg, count: combinedProb }; } /** * Returns the probability that a result includes ANY of the specified labels. * * Examples: * - `query.probabilityOf('hit')` → 0.88 (probability at least one hit occurs) * - `query.probabilityOf(['hit', 'crit'])` → 0.96 (probability of any success) * * Use cases: * - "What's the chance my resolution includes a success label?" * - "How likely am I to get any hits or crits across all attacks?" */ probabilityOf(labels) { if (typeof labels === "string") { labels = [labels]; } let totalProbability = 0; for (const [, probabilityBin] of this.combined) { let binHasAnyLabel = false; for (const label of labels) { if (probabilityBin.count[label] && probabilityBin.count[label] > 0) { binHasAnyLabel = true; break; } } if (binHasAnyLabel) { totalProbability += probabilityBin.p; } } return totalProbability; } /** * Returns the probability of missing (any type of miss). * * Example: `query.missChance()` → 0.04 * Use case: "What's the chance I miss completely this turn?" */ missChance() { return this.probabilityOf(["missDamage", "missNone"]); } /** * Returns data formatted for plotting damage probability distribution. * * Example: `query.toChartSeries()` → [{x: 0, y: 0.04}, {x: 6, y: 0.1}, ...] * Use case: "I want to visualize my damage distribution in a chart." */ toChartSeries() { return this.combined.support().map((damageValue) => ({ x: damageValue, y: this.combined.map.get(damageValue).p })); } /** * Returns tabular data showing damage values and their probability breakdowns. * * Example: `query.toLabeledTable(['hit', 'crit'])` → * [{damage: 6, total: 0.01, hit: 0.008, crit: 0}, ...] * * Use case: "I want to see exactly how hit/crit probabilities contribute to each damage value." 
*/ toLabeledTable(labels = []) { return this.combined.support().map((damageValue) => { const probabilityBin = this.combined.map.get(damageValue); const tableRow = { damage: damageValue, total: probabilityBin.p }; for (const outcomeLabel of labels) { tableRow[outcomeLabel] = probabilityBin.count[outcomeLabel] || 0; } return tableRow; }); } /** * Returns data for stacked charts with unconditional per-label probability mass per damage. * * - Each dataset value equals the unconditional probability mass for that label at that damage * (i.e., `bin.count[label]`). * - Column sums may be less than the total probability `bin.p` when you omit labels or when * there is unlabeled mass. Include all relevant outcome labels if you need the sum to match. * - This behavior matches tests that expect raw per-label mass (not proportional scaling). * - NOTE: This implementation may break dprcalc.com chart binning at large n, need to test it more. * * @example * query.toStackedChartData(['hit', 'crit']) * // → {labels: [0, 6, 12, ...], datasets: [{label: 'hit', data: [0, 0.03, ...]}, ...]} */ toStackedChartData(labels = [], epsilon = EPS) { const damageValues = this.combined.support(); damageValues.map((dmg) => { const bin = this.combined.map.get(dmg); return labels.reduce((sum, lab) => sum + (bin.count[lab] || 0), 0); }); const datasets = labels.map((outcomeLabel) => ({ label: outcomeLabel, data: damageValues.map((dmg) => { const bin = this.combined.map.get(dmg); const v = bin ? bin.count[outcomeLabel] || 0 : 0; return v <= epsilon ? 0 : v; }) })); return { labels: damageValues, datasets }; } /** * Returns pure mathematical data for attribution charts showing outcome contributions. * * Automatically discovers all outcome types present in the PMF, applies filtering rules, * and returns proportional data suitable for stacked visualization. 
* * @param options Configuration options * @param options.stackOrder Preferred order for outcome types (unknowns placed at end) * @param options.filterRules Function to determine if outcome should be included for a given damage value * @param options.asPercentages Whether to return percentages (0-100) or probabilities (0-1) * @returns Pure data structure with support, outcomes, and proportional data * * @example * query.toAttributionChartSeries() * // → {support: [0, 6, 12], outcomes: ['hit', 'crit'], data: {hit: [5.2, 8.1, ...], crit: [0, 2.3, ...]}} */ toAttributionChartSeries(options = {}) { const { stackOrder = [ "missNone", "missDamage", "saveFail", "saveHalf", "pc", "hit", "crit" ], filterRules = (outcome, damage) => !(outcome === "missNone" && damage !== 0), asPercentages = true } = options; const originalSupport = this.combined.support(); if (originalSupport.length === 0) { return { support: [], outcomes: [], data: {} }; } const minDamage = Math.min(...originalSupport); const maxDamage = Math.max(...originalSupport); const support = Array.from( { length: maxDamage - minDamage + 1 }, (_, i) => minDamage + i ); const allOutcomeTypes = /* @__PURE__ */ new Set(); for (const [, bin] of this.combined.map) { for (const outcomeType in bin.count) { if (bin.count[outcomeType] && bin.count[outcomeType] > 0) { allOutcomeTypes.add(outcomeType); } } } const existingOutcomes = Array.from(allOutcomeTypes).sort((a, b) => { const indexA = stackOrder.indexOf(a); const indexB = stackOrder.indexOf(b); if (indexA >= 0 && indexB >= 0) return indexA - indexB; if (indexA >= 0) return -1; if (indexB >= 0) return 1; return a.localeCompare(b); }); if (existingOutcomes.length === 0) { return { support, outcomes: [], data: {} }; } const data = {}; for (const outcome of existingOutcomes) { data[outcome] = support.map((damage) => { const bin = this.combined.map.get(damage); if (!bin) return 0; if (!filterRules(outcome, damage)) { return 0; } const outcomeCount = bin.count[outcome] || 0; let totalChartableCount = 0; for (const [outcomeName, count] of Object.entries(bin.count)) { if (filterRules(outcomeName, damage)) { totalChartableCount += count || 0; } } if (totalChartableCount === 0) return 0; const outcomeFraction = outcomeCount / totalChartableCount; const outcomeProbability = bin.p * outcomeFraction; return asPercentages ? outcomeProbability * 100 : outcomeProbability; }); } return { support, outcomes: existingOutcomes, data }; } /** * Returns pure mathematical data for damage attribution charts showing damage contribution * from each outcome type at each damage value. * * Similar to toAttributionChartSeries() but uses bin.attr (damage attribution) instead of * bin.count (probability attribution). 
* * @param options Configuration options * @param options.stackOrder Preferred order for outcome types (unknowns placed at end) * @param options.filterRules Function to determine if outcome should be included for a given damage value * @param options.asPercentages Whether to return percentages (0-100) or raw damage values (0+) * @returns Pure data structure with support, outcomes, and damage attribution data * * @example * query.toDamageAttributionChartSeries() * // → {support: [0, 6, 12], outcomes: ['hit', 'crit'], data: {hit: [3.2, 5.1, ...], crit: [0, 1.8, ...]}} */ toDamageAttributionChartSeries(options = {}) { const { stackOrder = [ "missNone", "missDamage", "saveFail", "saveHalf", "pc", "hit", "crit" ], filterRules = (outcome, damage) => !(outcome === "missNone" && damage !== 0), asPercentages = true } = options; const originalSupport = this.combined.support(); if (originalSupport.length === 0) { return { support: [], outcomes: [], data: {} }; } const minDamage = Math.min(...originalSupport); const maxDamage = Math.max(...originalSupport); const support = Array.from( { length: maxDamage - minDamage + 1 }, (_, i) => minDamage + i ); const allOutcomeTypes = /* @__PURE__ */ new Set(); for (const [, bin] of this.combined.map) { if (bin.attr) { for (const outcomeType in bin.attr) { if (bin.attr[outcomeType] && bin.attr[outcomeType] > 0) { allOutcomeTypes.add(outcomeType); } } } } const existingOutcomes = Array.from(allOutcomeTypes).sort((a, b) => { const indexA = stackOrder.indexOf(a); const indexB = stackOrder.indexOf(b); if (indexA >= 0 && indexB >= 0) return indexA - indexB; if (indexA >= 0) return -1; if (indexB >= 0) return 1; return a.localeCompare(b); }); if (existingOutcomes.length === 0) { return { support, outcomes: [], data: {} }; } const data = {}; for (const outcome of existingOutcomes) { data[outcome] = support.map((damage) => { const bin = this.combined.map.get(damage); if (!bin || !bin.attr) return 0; if (!filterRules(outcome, damage)) { return 0; } const outcomeDamageAttribution = bin.attr[outcome] || 0; if (asPercentages) { let totalDamageAttribution = 0; for (const [outcomeName, damageAttr] of Object.entries(bin.attr)) { if (filterRules(outcomeName, damage)) { totalDamageAttribution += damageAttr || 0; } } if (totalDamageAttribution === 0) return 0; const damagePercentage = outcomeDamageAttribution / totalDamageAttribution * 100; return damagePercentage * bin.p * 100; } else { return outcomeDamageAttribution; } }); } return { support, outcomes: existingOutcomes, data }; } /** * Returns pure mathematical data for outcome attribution charts showing which * attack outcome combinations can produce each damage value. * * Unlike toDamageAttributionChartSeries() which tracks damage sources, this tracks * outcome combinations - answering "what attack outcomes produced this damage?" 
* * @param options Configuration options * @param options.stackOrder Preferred order for outcome types (unknowns placed at end) * @param options.filterRules Function to determine if outcome should be included for a given damage value * @param options.asPercentages Whether to return percentages (0-100) or probabilities (0-1) * @returns Pure data structure with support, outcomes, and outcome combination probabilities * * @example * query.toOutcomeAttributionChartSeries() * // → {support: [0, 6, 12], outcomes: ['all_miss', 'mixed', 'all_hit'], data: {all_miss: [15, 0, 0], mixed: [60, 80, 20], all_hit: [25, 20, 80]}} */ toOutcomeAttributionChartSeries(options = {}) { const { stackOrder = [ "missNone", "missDamage", "saveFail", "saveHalf", "pc", "hit", "crit" ], filterRules = (outcome, damage) => !(outcome === "missNone" && damage !== 0), asPercentages = true } = options; const originalSupport = this.combined.support(); if (originalSupport.length === 0) { return { support: [], outcomes: [], data: {} }; } const minDamage = Math.min(...originalSupport); const maxDamage = Math.max(...originalSupport); const support = Array.from( { length: maxDamage - minDamage + 1 }, (_, i) => minDamage + i ); const allOutcomeTypes = /* @__PURE__ */ new Set(); for (const [, bin] of this.combined.map) { for (const outcomeType in bin.count) { if (bin.count[outcomeType] && bin.count[outcomeType] > 0) { allOutcomeTypes.add(outcomeType); } } } const existingOutcomes = Array.from(allOutcomeTypes).sort((a, b) => { const indexA = stackOrder.indexOf(a); const indexB = stackOrder.indexOf(b); if (indexA >= 0 && indexB >= 0) return indexA - indexB; if (indexA >= 0) return -1; if (indexB >= 0) return 1; return a.localeCompare(b); }); if (existingOutcomes.length === 0) { return { support, outcomes: [], data: {} }; } const data = {}; for (const outcome of existingOutcomes) { data[outcome] = support.map((damage) => { const bin = this.combined.map.get(damage); if (!bin) return 0; if (!filterRules(outcome, damage)) { return 0; } if (outcome === "missNone") { const outcomeCount = bin.count[outcome] || 0; if (outcomeCount === 0) return 0; if (asPercentages) { let totalChartableCount = 0; for (const [outcomeName, count] of Object.entries(bin.count)) { if (filterRules(outcomeName, damage)) { totalChartableCount += count || 0; } } if (totalChartableCount === 0) return 0; const outcomeFraction = outcomeCount / totalChartableCount; return outcomeFraction * bin.p * 100; } else { return outcomeCount; } } if (!bin.attr) return 0; const outcomeDamageContribution = bin.attr[outcome] || 0; if (asPercentages) { let totalDamageAttribution = 0; for (const [, damageAttr] of Object.entries(bin.attr)) { totalDamageAttribution += damageAttr || 0; } if (totalDamageAttribution === 0) return 0; const outcomeFraction = outcomeDamageContribution / totalDamageAttribution; return outcomeFraction * bin.p * 100; } else { return outcomeDamageContribution; } }); } return { support, outcomes: existingOutcomes, data }; } /** * Returns pure mathematical data for cumulative distribution function (CDF). * Shows P(X ≤ x) - the probability of getting at most x damage. 
* * @param asPercentages Whether to return percentages (0-100) or probabilities (0-1) * @returns Pure data structure with support and cumulative probabilities * * @example * query.toCDFSeries() * // → {support: [0, 6, 12], data: [5.2, 18.3, 45.1]} */ toCDFSeries(asPercentages = true) { const originalSupport = this.combined.support(); if (originalSupport.length === 0) { return { support: [], data: [] }; } const minDamage = Math.min(...originalSupport); const maxDamage = Math.max(...originalSupport); const support = Array.from( { length: maxDamage - minDamage + 1 }, (_, i) => minDamage + i ); let cumulativeProbability = 0; const cdfData = []; for (const damage of support) { const bin = this.combined.map.get(damage); if (bin) { cumulativeProbability += bin.p; } cdfData.push( asPercentages ? cumulativeProbability * 100 : cumulativeProbability ); } return { support, data: cdfData }; } /** * Returns pure mathematical data for complementary cumulative distribution function (CCDF). * Shows P(X ≥ x) - the probability of getting at least x damage. * * @param asPercentages Whether to return percentages (0-100) or probabilities (0-1) * @returns Pure data structure with support and complementary cumulative probabilities * * @example * query.toCCDFSeries() * // → {support: [0, 6, 12], data: [100, 94.8, 81.7]} */ toCCDFSeries(asPercentages = true) { const originalSupport = this.combined.support(); if (originalSupport.length === 0) { return { support: [], data: [] }; } const minDamage = Math.min(...originalSupport); const maxDamage = Math.max(...originalSupport); const support = Array.from( { length: maxDamage - minDamage + 1 }, (_, i) => minDamage + i ); let cumulativeProbability = 0; const ccdfData = []; for (const damage of support) { const ccdf = 1 - cumulativeProbability; ccdfData.push(asPercentages ? ccdf * 100 : ccdf); const bin = this.combined.map.get(damage); if (bin) { cumulativeProbability += bin.p; } } return { support, data: ccdfData }; } /* Statistics snapshot of the query. */ /** Probability of doing strictly more than threshold damage (default >0). */ probDamageGreaterThan(threshold = 0) { let acc = 0; for (const [x, bin] of this.combined.map) if (x > threshold) acc += bin.p; return acc; } /** All outcome keys actually present (typed & ordered if you pass an order). */ outcomeKeys(order) { const found = /* @__PURE__ */ new Set(); for (const [, bin] of this.combined.map) { for (const k in bin.count) if (bin.count[k] && bin.count[k] > 0) found.add(k); } if (found.size === 0) ["hit", "crit", "missNone"].forEach((k) => found.add(k)); const keys = Array.from(found).filter( (k) => order?.includes(k) ?? true ); if (order && order.length) keys.sort((a, b) => order.indexOf(a) + 999 - (order.indexOf(b) + 999)); return keys; } /** Total probability per outcome across the PMF. */ outcomeTotals(outcomes = this.outcomeKeys()) { const totals = /* @__PURE__ */ new Map(); outcomes.forEach((o) => totals.set(o, 0)); for (const [, row] of this.combined.map) { for (const o of outcomes) { const p = row.count[o] || 0; totals.set(o, (totals.get(o) || 0) + p); } } return totals; } /** Conditional damage range per outcome (min/avg/max of X | outcome). 
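 *
 * @example
 * // Hypothetical query; returns a Map keyed by outcome label with { min, avg, max }
 * // values, each conditional on that outcome occurring.
 * query.outcomeDamageRanges(["hit", "crit"]);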
*/ outcomeDamageRanges(outcomes = this.outcomeKeys()) { const table = this.toLabeledTable(outcomes); const ranges = /* @__PURE__ */ new Map(); outcomes.forEach((o) => ranges.set(o, { sum: 0, mass: 0 })); for (const row of table) { const dmg = row.damage; for (const o of outcomes) { const p = row[o] || 0; if (p > 0) { const r = ranges.get(o); r.sum += dmg * p; r.mass += p; if (r.min === void 0 || dmg < r.min) r.min = dmg; if (r.max === void 0 || dmg > r.max) r.max = dmg; } } } const out = /* @__PURE__ */ new Map(); for (const o of outcomes) { const r = ranges.get(o); const avg = r.mass > 0 ? r.sum / r.mass : 0; out.set(o, { min: r.min ?? 0, avg, max: r.max ?? 0 }); } return out; } /** * Snapshot of the distribution in the exact shape the UI consumes. * - outcome probabilities are "at least one" (and equal to "all" for a single PMF) * - damageRange is conditional on the outcome occurring */ snapshot(order) { const discovered = /* @__PURE__ */ new Set(); for (const [, bin] of this.combined.map) { for (const k in bin.count) { if (bin.count[k] && bin.count[k] > 0) discovered.add(k); } } if (discovered.size === 0) { for (const k of _DiceQuery.DEFAULT_OUTCOMES) discovered.add(k); } let outcomes = Array.from(discovered); if (order && order.length) { const inOrder = new Set(order); outcomes = outcomes.filter((k) => inOrder.has(k)); const rank = new Map(order.map((k, i) => [k, i])); outcomes.sort( (a, b) => (rank.get(a) ?? 999) - (rank.get(b) ?? 999) ); } const rows = this.toLabeledTable(outcomes); const totals = /* @__PURE__ */ new Map(); const rangeAcc = /* @__PURE__ */ new Map(); for (const ot of outcomes) { totals.set(ot, 0); rangeAcc.set(ot, { sum: 0, mass: 0 }); } for (const row of rows) { const dmg = row.damage; for (const ot of outcomes) { const p = row[ot] || 0; if (p <= 0) continue; totals.set(ot, (totals.get(ot) || 0) + p); const r = rangeAcc.get(ot); r.sum += dmg * p; r.mass += p; if (r.min === void 0 || dmg < r.min) r.min = dmg; if (r.max === void 0 || dmg > r.max) r.max = dmg; } } const outcomeMap = /* @__PURE__ */ new Map(); for (const ot of outcomes) { const total = totals.get(ot) || 0; const r = rangeAcc.get(ot); const avg = r.mass > 0 ? r.sum / r.mass : 0; outcomeMap.set(ot, { atLeastOneProbability: total, allProbability: total, // single aggregate PMF: same value damageRange: { min: r.min ?? 0, avg, max: r.max ?? 0 } }); } const averageDPR = this.mean(); let damageChance = 0; for (const [x, bin] of this.combined.map) if (x > 0) damageChance += bin.p; const { support, data } = this.toCDFSeries(false); const quantile = (p) => { if (support.length === 0) return 0; for (let i = 0; i < support.length; i++) if (data[i] >= p) return support[i]; return support[support.length - 1]; }; const percentiles = { p25: quantile(0.25), p50: quantile(0.5), p75: quantile(0.75) }; return { averageDPR, damageChance, percentiles, outcomes: outcomeMap }; } /** * PMF Transformation Methods * * These methods provide a fluent API for transforming dice queries by wrapping * the underlying PMF transformation methods. All operations work on the combined * PMF and return new DiceQuery instances. */ /** * Returns a new DiceQuery with normalized probabilities (ensuring they sum to 1.0). * * @returns New DiceQuery with normalized combined PMF */ normalize() { return new _DiceQuery([this.combined.normalize()]); } /** * Returns a new DiceQuery with low-probability outcomes removed. 
* * @param eps Minimum probability threshold (defaults to PMF epsilon) * @param keepFinalBin Whether to keep the highest damage bin regardless of probability * @returns New DiceQuery with compacted combined PMF */ compact(eps, keepFinalBin) { return new _DiceQuery([this.combined.compact(eps, keepFinalBin)]); } /** * Returns a new DiceQuery with an additional scaled branch added. * Useful for conditional outcomes like "30% chance of opportunity attack". * * @param branch DiceQuery to add as a scaled branch * @param probability Probability of the branch occurring (0-1) * @returns New DiceQuery combining this query with the scaled branch * * @example * const baseAttack = parse("(d20 + 5 AC 15) * (2d6 + 3)"); * const opportunityAttack = parse("(d20 + 5 AC 15) * (1d8 + 3)"); * const withOpportunity = baseAttack.addScaled(opportunityAttack, 0.3); */ addScaled(branch, probability) { return new _DiceQuery([ this.combined.addScaled(branch.combined, probability) ]); } /** * Returns a new DiceQuery with all probabilities scaled by a factor. * Used for conditional scenarios where the entire outcome has reduced probability. * * @param factor Scaling factor for probabilities * @returns New DiceQuery with scaled probabilities * * @example * const fullAttack = parse("(d20 + 5 AC 15) * (2d6 + 3)"); * const conditionalAttack = fullAttack.scaleMass(0.3); // 30% chance scenario */ scaleMass(factor) { return new _DiceQuery([this.combined.scaleMass(factor)]); } totalMass() { return this.combined.mass(); } /** * Returns a new DiceQuery with damage values transformed by a function. * Useful for applying modifiers, resistances, or other damage transformations. * * @param damageTransformFunction Function to transform each damage value * @returns New DiceQuery with transformed damage values * * @example * const baseAttack = parse("2d6 + 3"); * const withResistance = baseAttack.mapDamage(dmg => Math.floor(dmg / 2)); // Half damage * const withBonus = baseAttack.mapDamage(dmg => dmg + 5); // +5 damage */ mapDamage(damageTransformFunction) { return new _DiceQuery([this.combined.mapDamage(damageTransformFunction)]); } /** * Returns a new DiceQuery with damage values scaled by a factor. * Convenient wrapper around mapDamage for multiplicative scaling. * * @param factor Scaling factor for damage values * @param rounding Rounding method: "floor" (default), "round", or "ceil" * @returns New DiceQuery with scaled damage values * * @example * const baseAttack = parse("2d6 + 3"); * const doubled = baseAttack.scaleDamage(2); // Double damage * const halfDamage = baseAttack.scaleDamage(0.5, "round"); // Half damage, rounded */ scaleDamage(factor, rounding = "floor") { return new _DiceQuery([this.combined.scaleDamage(factor, rounding)]); } /** * Returns a new DiceQuery combining this query with another via convolution. * Equivalent to rolling both queries independently and adding results. * It is important to use this rather than combing()ing the PMFs directly! * This method maintains the provenance of the PMFs which is needed for damage attribution. * Combining the .combined PMFs directly is still valid for DPR calculations but * is not statistically sound for queries. 
* * @param other DiceQuery to combine with * @param eps Optional epsilon for precision control * @returns New DiceQuery representing the combined outcome * * @example * const mainAttack = parse("(d20 + 5 AC 15) * (2d6 + 3)"); * const bonusAttack = parse("(d20 + 3 AC 15) * (1d6 + 1)"); * const bothAttacks = mainAttack.convolve(bonusAttack); */ convolve(other) { const singles = [...this.singles, ...other.singles]; return new _DiceQuery(singles); } /** * First-success split over an ordered list of DISTINCT single-swing PMFs. * Each PMF may have different success/subset probabilities (from labels). * * successOutcome: e.g., ["success"] or ["hit", "crit"] * subsetOutcome: e.g., ["subset"] or ["crit"] where subset ⊆ success * * Returns tuple: [pFirstNonSubset, pFirstSubset, pAnySuccess, pNone] */ firstSuccessSplit(successOutcome, subsetOutcome, eps = EPS) { const pmfs = this.singles; if (!pmfs.length) { throw new Error("firstSuccessSplitFromPMFs: pmfs must be non-empty"); } const toArr = (x) => Array.isArray(x) ? x : [x]; const clamp01 = (x) => Math.max(0, Math.min(1, x)); const tol = Math.max(eps, 8 * Number.EPSILON); const per = pmfs.map((pmf) => { const dq = new _DiceQuery([pmf]); const pS = dq.probAtLeastOne(toArr(successOutcome)); const pB = dq.probAtLeastOne(toArr(subsetOutcome)); if (pB - pS > eps) { throw new Error( "firstSuccessSplitFromPMFs: P(subset) > P(success) for an event. Ensure subset \u2286 success." ); } return { pS, pB }; }); let missSoFar = 1; let pFirstSubset = 0; let pFirstNonSubset = 0; let pNone = 1; for (const { pS, pB } of per) { pFirstSubset += missSoFar * pB; pFirstNonSubset += missSoFar * (pS - pB); const miss = 1 - pS; missSoFar *= miss; pNone *= miss; } const pAny = 1 - pNone; const a = clamp01(pFirstNonSubset); const b = clamp01(pFirstSubset); const any = clamp01(pAny); const none = clamp01(pNone); if (Math.abs(a + b - any) > tol * Math.max(1, any)) { throw new Error( `firstSuccessSplitFromPMFs: parts do not sum to pAny. got a+b=${a + b}, pAny=${any}` ); } return [a, b, any, none]; } }; __publicField(_DiceQuery, "DEFAULT_OUTCOMES", [ "hit", "crit", "missNone" ]); var DiceQuery = _DiceQuery; var pmfCache = new LRUCache(1e3); var _PMF = class _PMF { constructor(map = /* @__PURE__ */ new Map(), epsilon = EPS, normalized = false, identifier = `anon#${_PMF.__anonIdCounter++}`, _preservedProvenance = true) { this.map = map; this.epsilon = epsilon; this.normalized = normalized; this.identifier = identifier; this._preservedProvenance = _preservedProvenance; // Cached computed values __publicField(this, "_support"); __publicField(this, "_min"); __publicField(this, "_max"); __publicField(this, "_totalMass"); __publicField(this, "_mean"); __publicField(this, "_variance"); __publicField(this, "_stdev"); } static empty(epsilon = EPS, identifier = "empty") { return new _PMF(/* @__PURE__ */ new Map(), epsilon, false, identifier); } // This has a single bin at value 0, mass of 1 static zero(epsilon = EPS) { const m = /* @__PURE__ */ new Map(); m.set(0, { p: 1, count: { miss: 1 }, attr: {} }); return new _PMF(m, epsilon, false, "zero"); } static delta(value, epsilon = EPS) { return _PMF.fromMap(/* @__PURE__ */ new Map([[value, 1]]), epsilon); } // This creates a single bin at value 0, but with weight 0. static emptyMass() { return _PMF.zero().scaleMass(0); } // Makes PMF iterable over [damage, bin] pairs. 
[Symbol.iterator]() { return this.map[Symbol.iterator](); } static clearCache() { pmfCache.clear(); } /** * Creates a conditional PMF from two branches (success and failure) and a probability. * This is the core logic for modeling any probabilistic event where there are two * distinct outcomes. */ static branch(successPMF, failurePMF, successProbability) {