// @limetech/lime-elements
// Version:
// 1,163 lines (1,154 loc) • 80.2 kB
// JavaScript
import { r as registerInstance, h, H as Host, a as getElement } from './index-DBTJNfo7.js';
import { t as translate } from './translations-DVRaJQvC.js';
/**
 * Base diffing engine implementing Myers's O(ND) difference algorithm
 * (this is bundled jsdiff code). Subclasses customise behaviour by
 * overriding `castInput`, `tokenize`, `equals`, `join` and `postProcess`.
 */
class Diff {
    /**
     * Diff two inputs and return an array of change objects, each of shape
     * { count, added, removed, value }. When a callback is supplied (either
     * in place of `options` or as `options.callback`) the diff runs
     * asynchronously and this method returns undefined.
     */
    diff(oldStr, newStr,
    // Type below is not accurate/complete - see above for full possibilities - but it compiles
    options = {}) {
        let callback;
        // A bare function in the options position means "async mode with this callback".
        if (typeof options === 'function') {
            callback = options;
            options = {};
        }
        else if ('callback' in options) {
            callback = options.callback;
        }
        // Allow subclasses to massage the input prior to running
        const oldString = this.castInput(oldStr, options);
        const newString = this.castInput(newStr, options);
        const oldTokens = this.removeEmpty(this.tokenize(oldString, options));
        const newTokens = this.removeEmpty(this.tokenize(newString, options));
        return this.diffWithOptionsObj(oldTokens, newTokens, options, callback);
    }
    /**
     * Core of the algorithm: runs Myers's algorithm over the two token
     * arrays. Honours `options.maxEditLength` (give up beyond that many
     * edits) and `options.timeout` (give up after that many milliseconds);
     * in both give-up cases the result delivered is undefined.
     */
    diffWithOptionsObj(oldTokens, newTokens, options, callback) {
        var _a;
        // Finalise a result: post-process it, then either deliver it via the
        // callback (async mode) or return it directly (sync mode).
        const done = (value) => {
            value = this.postProcess(value, options);
            if (callback) {
                setTimeout(function () { callback(value); }, 0);
                return undefined;
            }
            else {
                return value;
            }
        };
        const newLen = newTokens.length, oldLen = oldTokens.length;
        let editLength = 1;
        let maxEditLength = newLen + oldLen;
        if (options.maxEditLength != null) {
            maxEditLength = Math.min(maxEditLength, options.maxEditLength);
        }
        // Downlevelled form of `options.timeout ?? Infinity`.
        const maxExecutionTime = (_a = options.timeout) !== null && _a !== void 0 ? _a : Infinity;
        const abortAfterTimestamp = Date.now() + maxExecutionTime;
        // bestPath[k] holds the furthest-reaching path on diagonal k as an
        // { oldPos, lastComponent } pair; lastComponent is a reversed linked
        // list of change components (see buildValues).
        const bestPath = [{ oldPos: -1, lastComponent: undefined }];
        // Seed editLength = 0, i.e. the content starts with the same values
        let newPos = this.extractCommon(bestPath[0], newTokens, oldTokens, 0, options);
        if (bestPath[0].oldPos + 1 >= oldLen && newPos + 1 >= newLen) {
            // Identity per the equality and tokenizer
            return done(this.buildValues(bestPath[0].lastComponent, newTokens, oldTokens));
        }
        // Once we hit the right edge of the edit graph on some diagonal k, we can
        // definitely reach the end of the edit graph in no more than k edits, so
        // there's no point in considering any moves to diagonal k+1 any more (from
        // which we're guaranteed to need at least k+1 more edits).
        // Similarly, once we've reached the bottom of the edit graph, there's no
        // point considering moves to lower diagonals.
        // We record this fact by setting minDiagonalToConsider and
        // maxDiagonalToConsider to some finite value once we've hit the edge of
        // the edit graph.
        // This optimization is not faithful to the original algorithm presented in
        // Myers's paper, which instead pointlessly extends D-paths off the end of
        // the edit graph - see page 7 of Myers's paper which notes this point
        // explicitly and illustrates it with a diagram. This has major performance
        // implications for some common scenarios. For instance, to compute a diff
        // where the new text simply appends d characters on the end of the
        // original text of length n, the true Myers algorithm will take O(n+d^2)
        // time while this optimization needs only O(n+d) time.
        let minDiagonalToConsider = -Infinity, maxDiagonalToConsider = Infinity;
        // Main worker method. checks all permutations of a given edit length for acceptance.
        const execEditLength = () => {
            for (let diagonalPath = Math.max(minDiagonalToConsider, -editLength); diagonalPath <= Math.min(maxDiagonalToConsider, editLength); diagonalPath += 2) {
                let basePath;
                const removePath = bestPath[diagonalPath - 1], addPath = bestPath[diagonalPath + 1];
                if (removePath) {
                    // No one else is going to attempt to use this value, clear it
                    // @ts-expect-error - perf optimisation. This type-violating value will never be read.
                    bestPath[diagonalPath - 1] = undefined;
                }
                let canAdd = false;
                if (addPath) {
                    // what newPos will be after we do an insertion:
                    const addPathNewPos = addPath.oldPos - diagonalPath;
                    canAdd = addPath && 0 <= addPathNewPos && addPathNewPos < newLen;
                }
                const canRemove = removePath && removePath.oldPos + 1 < oldLen;
                if (!canAdd && !canRemove) {
                    // If this path is a terminal then prune
                    // @ts-expect-error - perf optimisation. This type-violating value will never be read.
                    bestPath[diagonalPath] = undefined;
                    continue;
                }
                // Select the diagonal that we want to branch from. We select the prior
                // path whose position in the old string is the farthest from the origin
                // and does not pass the bounds of the diff graph
                if (!canRemove || (canAdd && removePath.oldPos < addPath.oldPos)) {
                    basePath = this.addToPath(addPath, true, false, 0, options);
                }
                else {
                    basePath = this.addToPath(removePath, false, true, 1, options);
                }
                newPos = this.extractCommon(basePath, newTokens, oldTokens, diagonalPath, options);
                if (basePath.oldPos + 1 >= oldLen && newPos + 1 >= newLen) {
                    // If we have hit the end of both strings, then we are done
                    return done(this.buildValues(basePath.lastComponent, newTokens, oldTokens)) || true;
                }
                else {
                    bestPath[diagonalPath] = basePath;
                    if (basePath.oldPos + 1 >= oldLen) {
                        maxDiagonalToConsider = Math.min(maxDiagonalToConsider, diagonalPath - 1);
                    }
                    if (newPos + 1 >= newLen) {
                        minDiagonalToConsider = Math.max(minDiagonalToConsider, diagonalPath + 1);
                    }
                }
            }
            editLength++;
        };
        // Performs the length of edit iteration. Is a bit fugly as this has to support the
        // sync and async mode which is never fun. Loops over execEditLength until a value
        // is produced, or until the edit length exceeds options.maxEditLength (if given),
        // in which case it will return undefined.
        if (callback) {
            (function exec() {
                setTimeout(function () {
                    if (editLength > maxEditLength || Date.now() > abortAfterTimestamp) {
                        return callback(undefined);
                    }
                    if (!execEditLength()) {
                        exec();
                    }
                }, 0);
            }());
        }
        else {
            while (editLength <= maxEditLength && Date.now() <= abortAfterTimestamp) {
                const ret = execEditLength();
                if (ret) {
                    return ret;
                }
            }
        }
    }
    /**
     * Extend `path` by one added or removed token, merging into the previous
     * component when it has the same added/removed flags (unless
     * options.oneChangePerToken forces one component per token).
     */
    addToPath(path, added, removed, oldPosInc, options) {
        const last = path.lastComponent;
        if (last && !options.oneChangePerToken && last.added === added && last.removed === removed) {
            return {
                oldPos: path.oldPos + oldPosInc,
                lastComponent: { count: last.count + 1, added: added, removed: removed, previousComponent: last.previousComponent }
            };
        }
        else {
            return {
                oldPos: path.oldPos + oldPosInc,
                lastComponent: { count: 1, added: added, removed: removed, previousComponent: last }
            };
        }
    }
    /**
     * Follow the "snake": advance the path along the diagonal for as long as
     * the next tokens of both sequences are equal, recording the common run
     * as an unchanged component. Mutates basePath and returns the new newPos.
     */
    extractCommon(basePath, newTokens, oldTokens, diagonalPath, options) {
        const newLen = newTokens.length, oldLen = oldTokens.length;
        let oldPos = basePath.oldPos, newPos = oldPos - diagonalPath, commonCount = 0;
        while (newPos + 1 < newLen && oldPos + 1 < oldLen && this.equals(oldTokens[oldPos + 1], newTokens[newPos + 1], options)) {
            newPos++;
            oldPos++;
            commonCount++;
            if (options.oneChangePerToken) {
                basePath.lastComponent = { count: 1, previousComponent: basePath.lastComponent, added: false, removed: false };
            }
        }
        if (commonCount && !options.oneChangePerToken) {
            basePath.lastComponent = { count: commonCount, previousComponent: basePath.lastComponent, added: false, removed: false };
        }
        basePath.oldPos = oldPos;
        return newPos;
    }
    // Token equality; a custom options.comparator wins, otherwise strict
    // equality with optional case folding.
    equals(left, right, options) {
        if (options.comparator) {
            return options.comparator(left, right);
        }
        else {
            return left === right
                || (!!options.ignoreCase && left.toLowerCase() === right.toLowerCase());
        }
    }
    // Drop falsy tokens (e.g. empty strings produced by tokenizers).
    removeEmpty(array) {
        const ret = [];
        for (let i = 0; i < array.length; i++) {
            if (array[i]) {
                ret.push(array[i]);
            }
        }
        return ret;
    }
    // Hook for subclasses to normalise inputs before tokenization; identity here.
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    castInput(value, options) {
        return value;
    }
    // Default tokenizer: one token per Unicode code point.
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    tokenize(value, options) {
        return Array.from(value);
    }
    join(chars) {
        // Assumes ValueT is string, which is the case for most subclasses.
        // When it's false, e.g. in diffArrays, this method needs to be overridden (e.g. with a no-op)
        // Yes, the casts are verbose and ugly, because this pattern - of having the base class SORT OF
        // assume tokens and values are strings, but not completely - is weird and janky.
        return chars.join('');
    }
    // Hook for subclasses to tweak the final change objects; identity here.
    postProcess(changeObjects,
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    options) {
        return changeObjects;
    }
    // When true, unchanged components keep the longer of the old/new token
    // (used by whitespace-ignoring subclasses).
    get useLongestToken() {
        return false;
    }
    /**
     * Convert the reversed linked list of components produced by the search
     * into a forward-ordered array of change objects with `value` strings.
     */
    buildValues(lastComponent, newTokens, oldTokens) {
        // First we convert our linked list of components in reverse order to an
        // array in the right order:
        const components = [];
        let nextComponent;
        while (lastComponent) {
            components.push(lastComponent);
            nextComponent = lastComponent.previousComponent;
            delete lastComponent.previousComponent;
            lastComponent = nextComponent;
        }
        components.reverse();
        const componentLen = components.length;
        let componentPos = 0, newPos = 0, oldPos = 0;
        for (; componentPos < componentLen; componentPos++) {
            const component = components[componentPos];
            if (!component.removed) {
                if (!component.added && this.useLongestToken) {
                    let value = newTokens.slice(newPos, newPos + component.count);
                    value = value.map(function (value, i) {
                        const oldValue = oldTokens[oldPos + i];
                        return oldValue.length > value.length ? oldValue : value;
                    });
                    component.value = this.join(value);
                }
                else {
                    component.value = this.join(newTokens.slice(newPos, newPos + component.count));
                }
                newPos += component.count;
                // Common case
                if (!component.added) {
                    oldPos += component.count;
                }
            }
            else {
                component.value = this.join(oldTokens.slice(oldPos, oldPos + component.count));
                oldPos += component.count;
            }
        }
        return components;
    }
}
// Returns the longest string that is a prefix of both str1 and str2.
function longestCommonPrefix(str1, str2) {
    const limit = Math.min(str1.length, str2.length);
    let end = limit;
    for (let idx = 0; idx < limit; idx++) {
        if (str1[idx] != str2[idx]) {
            end = idx;
            break;
        }
    }
    return str1.slice(0, end);
}
// Returns the longest string that is a suffix of both str1 and str2.
function longestCommonSuffix(str1, str2) {
    // Unlike longestCommonPrefix we must special-case every way of returning
    // the empty string up front, because str1.slice(-0) would return the
    // entire string rather than ''.
    if (!str1 || !str2 || str1[str1.length - 1] != str2[str2.length - 1]) {
        return '';
    }
    const limit = Math.min(str1.length, str2.length);
    let matched = limit;
    for (let idx = 0; idx < limit; idx++) {
        if (str1[str1.length - 1 - idx] != str2[str2.length - 1 - idx]) {
            matched = idx;
            break;
        }
    }
    // The guard above guarantees matched >= 1 here, so slice(-matched) is safe.
    return str1.slice(-matched);
}
// Swap a known prefix of `string` for `newPrefix`; throws if the prefix is absent.
function replacePrefix(string, oldPrefix, newPrefix) {
    if (!string.startsWith(oldPrefix)) {
        throw Error(`string ${JSON.stringify(string)} doesn't start with prefix ${JSON.stringify(oldPrefix)}; this is a bug`);
    }
    return newPrefix + string.slice(oldPrefix.length);
}
// Swap a known suffix of `string` for `newSuffix`; throws if the suffix is absent.
function replaceSuffix(string, oldSuffix, newSuffix) {
    // An empty oldSuffix must be handled separately: slice(-0) would return
    // the whole string and break the check below.
    if (!oldSuffix) {
        return string + newSuffix;
    }
    if (!string.endsWith(oldSuffix)) {
        throw Error(`string ${JSON.stringify(string)} doesn't end with suffix ${JSON.stringify(oldSuffix)}; this is a bug`);
    }
    return string.slice(0, string.length - oldSuffix.length) + newSuffix;
}
// Strip `oldPrefix` from the start of `string`; throws (via replacePrefix) if absent.
function removePrefix(string, oldPrefix) {
    return replacePrefix(string, oldPrefix, '');
}
// Strip `oldSuffix` from the end of `string`; throws (via replaceSuffix) if absent.
function removeSuffix(string, oldSuffix) {
    return replaceSuffix(string, oldSuffix, '');
}
// Returns the longest prefix of `string2` that is also a suffix of `string1`.
function maximumOverlap(string1, string2) {
    return string2.slice(0, overlapCount(string1, string2));
}
// Nicked from https://stackoverflow.com/a/60422853/1709587
// Returns the length of the longest suffix of `a` that is also a prefix of
// `b`, using a KMP-style failure table built over `b`.
function overlapCount(a, b) {
    // Deal with cases where the strings differ in length
    let startA = 0;
    if (a.length > b.length) {
        startA = a.length - b.length;
    }
    let endB = b.length;
    if (a.length < b.length) {
        endB = a.length;
    }
    // Create a back-reference for each index
    // that should be followed in case of a mismatch.
    // We only need B to make these references:
    const map = Array(endB);
    let k = 0; // Index that lags behind j
    map[0] = 0;
    for (let j = 1; j < endB; j++) {
        if (b[j] == b[k]) {
            map[j] = map[k]; // skip over the same character (optional optimisation)
        }
        else {
            map[j] = k;
        }
        while (k > 0 && b[j] != b[k]) {
            k = map[k];
        }
        if (b[j] == b[k]) {
            k++;
        }
    }
    // Phase 2: use these references while iterating over A
    // (k now tracks how much of b's prefix matches the scanned suffix of a).
    k = 0;
    for (let i = startA; i < a.length; i++) {
        while (k > 0 && a[i] != b[k]) {
            k = map[k];
        }
        if (a[i] == b[k]) {
            k++;
        }
    }
    return k;
}
// Returns the run of whitespace at the end of `string` ('' if none).
function trailingWs(string) {
    // Deliberately NOT implemented as string.match(/\s*$/)[0]:
    // 1. that regex is quadratic in the worst case (a long run of
    //    NON-trailing whitespace) - see
    //    https://markamery.com/blog/quadratic-time-regexes/ - and
    // 2. the negative-lookbehind fix breaks old Safari versions
    //    (https://github.com/kpdecker/jsdiff/pull/550).
    // So we scan backwards explicitly instead.
    let start = string.length;
    while (start > 0 && (/\s/).test(string[start - 1])) {
        start--;
    }
    return string.substring(start);
}
// Returns the run of whitespace at the start of `string` ('' if none).
// The quadratic-regex trap described in trailingWs doesn't apply to an
// anchored /^\s*/ scan, but a simple forward loop keeps the two symmetric.
function leadingWs(string) {
    let end = 0;
    while (end < string.length && (/\s/).test(string[end])) {
        end++;
    }
    return string.slice(0, end);
}
// Based on https://en.wikipedia.org/wiki/Latin_script_in_Unicode
//
// Chars/ranges counted as "word" characters by this regex are as follows:
//
// + U+00AD Soft hyphen
// + 00C0–00FF (letters with diacritics from the Latin-1 Supplement), except:
// - U+00D7 × Multiplication sign
// - U+00F7 ÷ Division sign
// + Latin Extended-A, 0100–017F
// + Latin Extended-B, 0180–024F
// + IPA Extensions, 0250–02AF
// + Spacing Modifier Letters, 02B0–02FF, except:
// - U+02C7 ˇ ˇ Caron
// - U+02D8 ˘ ˘ Breve
// - U+02D9 ˙ ˙ Dot Above
// - U+02DA ˚ ˚ Ring Above
// - U+02DB ˛ ˛ Ogonek
// - U+02DC ˜ ˜ Small Tilde
// - U+02DD ˝ ˝ Double Acute Accent
// + Latin Extended Additional, 1E00–1EFF
// Character class body (for use inside [...]) matching "word" characters;
// see the Unicode-range commentary immediately above.
const extendedWordChars = 'a-zA-Z0-9_\\u{AD}\\u{C0}-\\u{D6}\\u{D8}-\\u{F6}\\u{F8}-\\u{2C6}\\u{2C8}-\\u{2D7}\\u{2DE}-\\u{2FF}\\u{1E00}-\\u{1EFF}';
// Each token is one of the following:
// - A punctuation mark plus the surrounding whitespace
// - A word plus the surrounding whitespace
// - Pure whitespace (but only in the special case where the entire text
// is just whitespace)
//
// We have to include surrounding whitespace in the tokens because the two
// alternative approaches produce horribly broken results:
// * If we just discard the whitespace, we can't fully reproduce the original
// text from the sequence of tokens and any attempt to render the diff will
// get the whitespace wrong.
// * If we have separate tokens for whitespace, then in a typical text every
// second token will be a single space character. But this often results in
// the optimal diff between two texts being a perverse one that preserves
// the spaces between words but deletes and reinserts actual common words.
// See https://github.com/kpdecker/jsdiff/issues/160#issuecomment-1866099640
// for an example.
//
// Keeping the surrounding whitespace of course has implications for .equals
// and .join, not just .tokenize.
// This regex does NOT fully implement the tokenization rules described above.
// Instead, it gives runs of whitespace their own "token". The tokenize method
// then handles stitching whitespace tokens onto adjacent word or punctuation
// tokens.
// The 'u' flag is required for the \u{...} code-point escapes above; 'g'
// lets String.prototype.match return every token at once.
const tokenizeIncludingWhitespace = new RegExp(`[${extendedWordChars}]+|\\s+|[^${extendedWordChars}]`, 'ug');
/**
 * Word-level diff (backs jsdiff's diffWords). Tokens are words or
 * punctuation marks together with their surrounding whitespace - see the
 * commentary on `tokenizeIncludingWhitespace` above for why whitespace is
 * attached to tokens rather than tokenized separately.
 */
class WordDiff extends Diff {
    // Tokens carry their surrounding whitespace, so equality ignores
    // leading/trailing whitespace and, optionally, case.
    equals(left, right, options) {
        if (options.ignoreCase) {
            left = left.toLowerCase();
            right = right.toLowerCase();
        }
        return left.trim() === right.trim();
    }
    /**
     * Tokenize into words/punctuation with attached whitespace. An
     * options.intlSegmenter (word-granularity Intl.Segmenter) may be used
     * instead of the built-in regex for locale-aware word splitting.
     */
    tokenize(value, options = {}) {
        let parts;
        if (options.intlSegmenter) {
            const segmenter = options.intlSegmenter;
            if (segmenter.resolvedOptions().granularity != 'word') {
                throw new Error('The segmenter passed must have a granularity of "word"');
            }
            // We want `parts` to be an array whose elements alternate between being
            // pure whitespace and being pure non-whitespace. This is ALMOST what the
            // segments returned by a word-based Intl.Segmenter already look like,
            // and therefore we can ALMOST get what we want by simply doing...
            //     parts = Array.from(segmenter.segment(value), segment => segment.segment);
            // ... but not QUITE, because there's of one annoying special case: every
            // newline character gets its own segment, instead of sharing a segment
            // with other surrounding whitespace. We therefore need to manually merge
            // consecutive segments of whitespace into a single part:
            parts = [];
            for (const segmentObj of Array.from(segmenter.segment(value))) {
                const segment = segmentObj.segment;
                if (parts.length && (/\s/).test(parts[parts.length - 1]) && (/\s/).test(segment)) {
                    parts[parts.length - 1] += segment;
                }
                else {
                    parts.push(segment);
                }
            }
        }
        else {
            parts = value.match(tokenizeIncludingWhitespace) || [];
        }
        // Stitch each whitespace part onto the word/punctuation tokens on
        // either side of it (both sides get a copy, matching .equals above).
        const tokens = [];
        let prevPart = null;
        parts.forEach(part => {
            if ((/\s/).test(part)) {
                if (prevPart == null) {
                    // Leading whitespace: starts its own token for now.
                    tokens.push(part);
                }
                else {
                    // Append the whitespace to the preceding token.
                    tokens.push(tokens.pop() + part);
                }
            }
            else if (prevPart != null && (/\s/).test(prevPart)) {
                // Previous part was whitespace; prepend it to this token too.
                if (tokens[tokens.length - 1] == prevPart) {
                    tokens.push(tokens.pop() + part);
                }
                else {
                    tokens.push(prevPart + part);
                }
            }
            else {
                tokens.push(part);
            }
            prevPart = part;
        });
        return tokens;
    }
    join(tokens) {
        // Tokens being joined here will always have appeared consecutively in the
        // same text, so we can simply strip off the leading whitespace from all the
        // tokens except the first (and except any whitespace-only tokens - but such
        // a token will always be the first and only token anyway) and then join them
        // and the whitespace around words and punctuation will end up correct.
        return tokens.map((token, i) => {
            if (i == 0) {
                return token;
            }
            else {
                return token.replace((/^\s+/), '');
            }
        }).join('');
    }
    // Walk the change objects and deduplicate the whitespace that adjacent
    // changes share (because tokens carry their surrounding whitespace).
    postProcess(changes, options) {
        if (!changes || options.oneChangePerToken) {
            return changes;
        }
        let lastKeep = null;
        // Change objects representing any insertion or deletion since the last
        // "keep" change object. There can be at most one of each.
        let insertion = null;
        let deletion = null;
        changes.forEach(change => {
            if (change.added) {
                insertion = change;
            }
            else if (change.removed) {
                deletion = change;
            }
            else {
                if (insertion || deletion) { // May be false at start of text
                    dedupeWhitespaceInChangeObjects(lastKeep, deletion, insertion, change);
                }
                lastKeep = change;
                insertion = null;
                deletion = null;
            }
        });
        if (insertion || deletion) {
            dedupeWhitespaceInChangeObjects(lastKeep, deletion, insertion, null);
        }
        return changes;
    }
}
// Shared stateless instance backing diffWords.
const wordDiff = new WordDiff();
/**
 * Diff two strings word-by-word, returning jsdiff change objects
 * ({ count, added, removed, value }).
 */
function diffWords(oldStr, newStr, options) {
    return wordDiff.diff(oldStr, newStr, options);
}
/**
 * Tidy the leading/trailing whitespace of the change objects sitting
 * between two "keep" changes, so whitespace shared by adjacent tokens is
 * not rendered twice. Mutates the change objects' `value`s in place.
 *
 * @param startKeep - preceding unchanged change object, or null at the start of the text
 * @param deletion - pending "removed" change object, or null
 * @param insertion - pending "added" change object, or null
 * @param endKeep - following unchanged change object, or null at the end of the text
 */
function dedupeWhitespaceInChangeObjects(startKeep, deletion, insertion, endKeep) {
    // Before returning, we tidy up the leading and trailing whitespace of the
    // change objects to eliminate cases where trailing whitespace in one object
    // is repeated as leading whitespace in the next.
    // Below are examples of the outcomes we want here to explain the code.
    // I=insert, K=keep, D=delete
    // 1. diffing 'foo bar baz' vs 'foo baz'
    //    Prior to cleanup, we have K:'foo ' D:' bar ' K:' baz'
    //    After cleanup, we want:   K:'foo ' D:'bar ' K:'baz'
    //
    // 2. Diffing 'foo bar baz' vs 'foo qux baz'
    //    Prior to cleanup, we have K:'foo ' D:' bar ' I:' qux ' K:' baz'
    //    After cleanup, we want K:'foo ' D:'bar' I:'qux' K:' baz'
    //
    // 3. Diffing 'foo\nbar baz' vs 'foo baz'
    //    Prior to cleanup, we have K:'foo ' D:'\nbar ' K:' baz'
    //    After cleanup, we want K'foo' D:'\nbar' K:' baz'
    //
    // 4. Diffing 'foo baz' vs 'foo\nbar baz'
    //    Prior to cleanup, we have K:'foo\n' I:'\nbar ' K:' baz'
    //    After cleanup, we ideally want K'foo' I:'\nbar' K:' baz'
    //    but don't actually manage this currently (the pre-cleanup change
    //    objects don't contain enough information to make it possible).
    //
    // 5. Diffing 'foo   bar baz' vs 'foo  baz'
    //    Prior to cleanup, we have K:'foo  ' D:'   bar ' K:'  baz'
    //    After cleanup, we want K:'foo  ' D:' bar ' K:'baz'
    //
    // Our handling is unavoidably imperfect in the case where there's a single
    // indel between keeps and the whitespace has changed. For instance, consider
    // diffing 'foo\tbar\nbaz' vs 'foo baz'. Unless we create an extra change
    // object to represent the insertion of the space character (which isn't even
    // a token), we have no way to avoid losing information about the texts'
    // original whitespace in the result we return. Still, we do our best to
    // output something that will look sensible if we e.g. print it with
    // insertions in green and deletions in red.
    // Between two "keep" change objects (or before the first or after the last
    // change object), we can have either:
    // * A "delete" followed by an "insert"
    // * Just an "insert"
    // * Just a "delete"
    // We handle the three cases separately.
    if (deletion && insertion) {
        const oldWsPrefix = leadingWs(deletion.value);
        const oldWsSuffix = trailingWs(deletion.value);
        const newWsPrefix = leadingWs(insertion.value);
        const newWsSuffix = trailingWs(insertion.value);
        if (startKeep) {
            const commonWsPrefix = longestCommonPrefix(oldWsPrefix, newWsPrefix);
            startKeep.value = replaceSuffix(startKeep.value, newWsPrefix, commonWsPrefix);
            deletion.value = removePrefix(deletion.value, commonWsPrefix);
            insertion.value = removePrefix(insertion.value, commonWsPrefix);
        }
        if (endKeep) {
            const commonWsSuffix = longestCommonSuffix(oldWsSuffix, newWsSuffix);
            endKeep.value = replacePrefix(endKeep.value, newWsSuffix, commonWsSuffix);
            deletion.value = removeSuffix(deletion.value, commonWsSuffix);
            insertion.value = removeSuffix(insertion.value, commonWsSuffix);
        }
    }
    else if (insertion) {
        // The whitespaces all reflect what was in the new text rather than
        // the old, so we essentially have no information about whitespace
        // insertion or deletion. We just want to dedupe the whitespace.
        // We do that by having each change object keep its trailing
        // whitespace and deleting duplicate leading whitespace where
        // present.
        if (startKeep) {
            const ws = leadingWs(insertion.value);
            insertion.value = insertion.value.substring(ws.length);
        }
        if (endKeep) {
            const ws = leadingWs(endKeep.value);
            endKeep.value = endKeep.value.substring(ws.length);
        }
        // otherwise we've got a deletion and no insertion
    }
    else if (startKeep && endKeep) {
        const newWsFull = leadingWs(endKeep.value), delWsStart = leadingWs(deletion.value), delWsEnd = trailingWs(deletion.value);
        // Any whitespace that comes straight after startKeep in both the old and
        // new texts, assign to startKeep and remove from the deletion.
        const newWsStart = longestCommonPrefix(newWsFull, delWsStart);
        deletion.value = removePrefix(deletion.value, newWsStart);
        // Any whitespace that comes straight before endKeep in both the old and
        // new texts, and hasn't already been assigned to startKeep, assign to
        // endKeep and remove from the deletion.
        const newWsEnd = longestCommonSuffix(removePrefix(newWsFull, newWsStart), delWsEnd);
        deletion.value = removeSuffix(deletion.value, newWsEnd);
        endKeep.value = replacePrefix(endKeep.value, newWsFull, newWsEnd);
        // If there's any whitespace from the new text that HASN'T already been
        // assigned, assign it to the start:
        startKeep.value = replaceSuffix(startKeep.value, newWsFull, newWsFull.slice(0, newWsFull.length - newWsEnd.length));
    }
    else if (endKeep) {
        // We are at the start of the text. Preserve all the whitespace on
        // endKeep, and just remove whitespace from the end of deletion to the
        // extent that it overlaps with the start of endKeep.
        const endKeepWsPrefix = leadingWs(endKeep.value);
        const deletionWsSuffix = trailingWs(deletion.value);
        const overlap = maximumOverlap(deletionWsSuffix, endKeepWsPrefix);
        deletion.value = removeSuffix(deletion.value, overlap);
    }
    else if (startKeep) {
        // We are at the END of the text. Preserve all the whitespace on
        // startKeep, and just remove whitespace from the start of deletion to
        // the extent that it overlaps with the end of startKeep.
        const startKeepWsSuffix = trailingWs(startKeep.value);
        const deletionWsPrefix = leadingWs(deletion.value);
        const overlap = maximumOverlap(startKeepWsSuffix, deletionWsPrefix);
        deletion.value = removePrefix(deletion.value, overlap);
    }
}
/**
 * Line-level diff (backs jsdiff's diffLines). Tokenization is delegated to
 * the standalone `tokenize$1` so it can be shared with other diff variants.
 */
class LineDiff extends Diff {
    constructor() {
        super(...arguments);
        this.tokenize = tokenize$1;
    }
    equals(left, right, options) {
        // If we're ignoring whitespace, we need to normalise lines by stripping
        // whitespace before checking equality. (This has an annoying interaction
        // with newlineIsToken that requires special handling: if newlines get their
        // own token, then we DON'T want to trim the *newline* tokens down to empty
        // strings, since this would cause us to treat whitespace-only line content
        // as equal to a separator between lines, which would be weird and
        // inconsistent with the documented behavior of the options.)
        if (options.ignoreWhitespace) {
            if (!options.newlineIsToken || !left.includes('\n')) {
                left = left.trim();
            }
            if (!options.newlineIsToken || !right.includes('\n')) {
                right = right.trim();
            }
        }
        else if (options.ignoreNewlineAtEof && !options.newlineIsToken) {
            // Strip a single trailing newline from each side so that the final
            // line of a file compares equal whether or not it ends with '\n'.
            if (left.endsWith('\n')) {
                left = left.slice(0, -1);
            }
            if (right.endsWith('\n')) {
                right = right.slice(0, -1);
            }
        }
        return super.equals(left, right, options);
    }
}
// Shared stateless instance backing diffLines.
const lineDiff = new LineDiff();
/**
 * Diff two strings line-by-line, returning jsdiff change objects
 * ({ count, added, removed, value }).
 */
function diffLines(oldStr, newStr, options) {
    return lineDiff.diff(oldStr, newStr, options);
}
// Exported standalone so it can be used from jsonDiff too.
// Splits `value` into line tokens. By default each token is a line plus its
// trailing newline; with options.newlineIsToken the separators become tokens
// of their own.
function tokenize$1(value, options) {
    if (options.stripTrailingCr) {
        // Remove one \r before \n to match GNU diff's --strip-trailing-cr behavior.
        value = value.replace(/\r\n/g, '\n');
    }
    // Splitting on a capture group keeps the separators, so the result
    // alternates content / separator / content / ...
    const parts = value.split(/(\n|\r\n)/);
    // Drop the empty trailing element produced when the string ends with a newline.
    if (!parts[parts.length - 1]) {
        parts.pop();
    }
    const tokens = [];
    parts.forEach((part, index) => {
        const isSeparator = index % 2 === 1;
        if (isSeparator && !options.newlineIsToken) {
            // Glue each newline onto the line content that precedes it.
            tokens[tokens.length - 1] += part;
        }
        else {
            tokens.push(part);
        }
    });
    return tokens;
}
/**
 * Compute a structured diff between two strings.
 *
 * @param oldText - the "before" text
 * @param newText - the "after" text
 * @param contextLines - number of unchanged lines to show around each change
 * @returns a DiffResult with hunks, additions, and deletions counts
 */
function computeDiff(oldText, newText, contextLines = 3) {
    return groupIntoHunks(buildDiffLines(oldText, newText), contextLines);
}
/**
 * Build paired rows for split (side-by-side) view from flat diff lines.
 * Context lines appear on both sides. Adjacent removed+added lines
 * are paired into the same row.
 *
 * @param lines - flat diff lines
 * @returns paired rows for split rendering
 */
function buildSplitLines(lines) {
    const rows = [];
    for (let i = 0; i < lines.length;) {
        const current = lines[i];
        if (current.type === 'context') {
            // Unchanged lines render identically on both sides.
            rows.push({ left: current, right: current });
            i++;
        }
        else {
            // Delegate the run of removed/added lines to the pairing helper,
            // which returns the index of the first unconsumed line.
            i = collectAndPairChanges(lines, i, rows);
        }
    }
    return rows;
}
/**
 * Collect consecutive removed then added lines starting at `index`,
 * pair them into split rows, and return the index of the first line
 * that was not consumed.
 * @param lines - flat diff lines
 * @param index - starting index
 * @param rows - output array to push paired rows into
 */
function collectAndPairChanges(lines, index, rows) {
    // Consume the run of lines of the given type starting at `index`.
    const takeRun = (type) => {
        const run = [];
        while (index < lines.length && lines[index].type === type) {
            run.push(lines[index]);
            index++;
        }
        return run;
    };
    const removed = takeRun('removed');
    const added = takeRun('added');
    // Pair removed[j] with added[j]; the shorter run pads with undefined.
    const rowCount = Math.max(removed.length, added.length);
    for (let j = 0; j < rowCount; j++) {
        rows.push({
            left: removed[j],
            right: added[j],
        });
    }
    return index;
}
/**
 * Normalize values for diffing. Objects are always pretty-printed with
 * sorted keys; strings are additionally parsed and re-stringified (sorted
 * keys, 4-space indent) when `reformatJson` is true. Everything else is
 * coerced to a string, with null/undefined becoming ''.
 * @param value
 * @param reformatJson
 */
function normalizeForDiff(value, reformatJson = false) {
    if (value !== null && typeof value === 'object') {
        return JSON.stringify(sortKeysDeep(value), null, 4);
    }
    if (reformatJson && typeof value === 'string') {
        try {
            return JSON.stringify(sortKeysDeep(JSON.parse(value)), null, 4);
        }
        catch (_a) {
            // Not valid JSON - diff the raw string instead.
            return value;
        }
    }
    return String(value !== null && value !== void 0 ? value : '');
}
// Recursively rebuild `obj` so that every plain object has its keys in
// locale-compare order; arrays keep their order, primitives pass through.
function sortKeysDeep(obj) {
    if (Array.isArray(obj)) {
        return obj.map(sortKeysDeep);
    }
    if (obj === null || typeof obj !== 'object') {
        return obj;
    }
    const result = {};
    for (const key of Object.keys(obj).sort((a, b) => a.localeCompare(b))) {
        result[key] = sortKeysDeep(obj[key]);
    }
    return result;
}
/**
 * Build a flat list of DiffLines from two text strings. Each entry carries
 * its type ('added' | 'removed' | 'context'), content, and the 1-based
 * line numbers it occupies in the old and/or new text.
 * @param oldText
 * @param newText
 */
function buildDiffLines(oldText, newText) {
    const result = [];
    let oldNum = 1;
    let newNum = 1;
    for (const change of diffLines(oldText, newText)) {
        for (const content of splitIntoLines(change.value)) {
            if (change.added) {
                result.push({
                    type: 'added',
                    content,
                    newLineNumber: newNum++,
                });
            }
            else if (change.removed) {
                result.push({
                    type: 'removed',
                    content,
                    oldLineNumber: oldNum++,
                });
            }
            else {
                result.push({
                    type: 'context',
                    content,
                    oldLineNumber: oldNum++,
                    newLineNumber: newNum++,
                });
            }
        }
    }
    // Annotate paired removed/added lines with word-level segments in place.
    addWordLevelHighlighting(result);
    return result;
}
/**
 * Split a string into lines, handling the trailing newline
 * that jsdiff includes in each change value.
 * @param text
 */
function splitIntoLines(text) {
    if (!text) {
        return [];
    }
    const lines = text.split('\n');
    // jsdiff includes a trailing newline, producing an empty last element;
    // split() always yields at least one element, so no length guard needed.
    if (lines[lines.length - 1] === '') {
        lines.pop();
    }
    return lines;
}
/**
 * Pair adjacent removed+added lines and compute word-level diffs
 * to highlight only the specific segments that changed. Mutates the
 * paired lines in place by attaching a `segments` array to each.
 * @param lines
 */
function addWordLevelHighlighting(lines) {
    let i = 0;
    while (i < lines.length) {
        // Find consecutive removed lines
        const removedStart = i;
        while (i < lines.length && lines[i].type === 'removed') {
            i++;
        }
        const removedEnd = i;
        // Find consecutive added lines right after
        const addedStart = i;
        while (i < lines.length && lines[i].type === 'added') {
            i++;
        }
        const addedEnd = i;
        const removedCount = removedEnd - removedStart;
        const addedCount = addedEnd - addedStart;
        // Pair them up for word-level highlighting; pairing is positional,
        // so surplus lines in the longer run get no segments.
        if (removedCount > 0 && addedCount > 0) {
            const pairCount = Math.min(removedCount, addedCount);
            for (let j = 0; j < pairCount; j++) {
                const removedLine = lines[removedStart + j];
                const addedLine = lines[addedStart + j];
                const [removedSegments, addedSegments] = computeWordSegments(removedLine.content, addedLine.content);
                removedLine.segments = removedSegments;
                addedLine.segments = addedSegments;
            }
        }
        // Skip context lines
        // (i === removedStart means neither inner loop consumed anything, so
        // the current line is a context line; advance past it to avoid spinning.)
        if (i === removedStart) {
            i++;
        }
    }
}
/**
 * Compute word-level diff segments for a pair of lines. Returns a
 * two-element tuple: segments for the removed line and segments for the
 * added line; unchanged words appear in both with type 'equal'.
 * @param oldContent
 * @param newContent
 */
function computeWordSegments(oldContent, newContent) {
    const removedSegments = [];
    const addedSegments = [];
    diffWords(oldContent, newContent).forEach((change) => {
        if (change.added) {
            addedSegments.push({ value: change.value, type: 'added' });
        }
        else if (change.removed) {
            removedSegments.push({ value: change.value, type: 'removed' });
        }
        else {
            // Unchanged text: give each side its own segment object so
            // later mutation of one cannot leak into the other.
            removedSegments.push({ value: change.value, type: 'equal' });
            addedSegments.push({ value: change.value, type: 'equal' });
        }
    });
    return [removedSegments, addedSegments];
}
/**
 * Group a flat list of diff lines into hunks with context. Returns
 * { hunks, additions, deletions, collapsedAfter?, allLines }; each hunk
 * records its lines, start index, and the number of lines collapsed
 * before it.
 * @param lines
 * @param contextLines
 */
function groupIntoHunks(lines, contextLines) {
    const additions = lines.filter((line) => line.type === 'added').length;
    const deletions = lines.filter((line) => line.type === 'removed').length;
    // No input, or no changes at all: nothing to group.
    if (lines.length === 0 || (additions === 0 && deletions === 0)) {
        return { hunks: [], additions: 0, deletions: 0, allLines: lines };
    }
    // Indices of every changed (non-context) line.
    const changeIndices = lines
        .map((line, index) => (line.type === 'context' ? -1 : index))
        .filter((index) => index !== -1);
    const boundaries = buildHunkBoundaries(changeIndices, lines.length, contextLines);
    const hunks = [];
    let previousEnd = 0;
    for (const { start, end } of boundaries) {
        const hidden = start - previousEnd;
        hunks.push({
            lines: lines.slice(start, end),
            collapsedBefore: hidden > 0 ? hidden : undefined,
            startIndex: start,
        });
        previousEnd = end;
    }
    // Lines collapsed after the final hunk, if any.
    const finalEnd = boundaries[boundaries.length - 1].end;
    const collapsedAfter = finalEnd < lines.length
        ? lines.length - finalEnd
        : undefined;
    return { hunks, additions, deletions, collapsedAfter, allLines: lines };
}
/**
 * Build start/end boundaries for each hunk from the indices of the
 * changed lines. Ranges that overlap or touch are merged into one hunk.
 *
 * @param changeIndices - ascending indices of changed lines
 * @param totalLines - total number of lines in the diff
 * @param contextLines - context lines to include on each side
 * @returns list of `{ start, end }` ranges (`end` is exclusive)
 */
function buildHunkBoundaries(changeIndices, totalLines, contextLines) {
    const boundaries = [];
    for (const index of changeIndices) {
        const start = Math.max(0, index - contextLines);
        const end = Math.min(totalLines, index + contextLines + 1);
        const current = boundaries.at(-1);
        if (current && start <= current.end) {
            // Touching or overlapping: extend the current hunk
            current.end = Math.max(current.end, end);
        }
        else {
            boundaries.push({ start, end });
        }
    }
    return boundaries;
}
/**
 * Tokenize a text fragment for syntax highlighting.
 * Unsupported languages — and empty input — yield the original text
 * as a single plain token.
 *
 * @param text - the text to tokenize
 * @param language - the language identifier (e.g. "json")
 * @returns array of syntax tokens
 */
function tokenize(text, language) {
    const canHighlight = language === 'json' && text.length > 0;
    if (canHighlight) {
        return tokenizeJson(text);
    }
    return [{ value: text, type: 'plain' }];
}
// ─── JSON tokenizer ─────────────────────────────────────────────────
/**
 * Regex-based JSON tokenizer.
 * Handles partial lines (individual lines of a JSON document).
 */
// Ordered [pattern, tokenType] pairs. Order matters: the combined regex
// tries alternatives left to right, so e.g. strings are consumed before
// the punctuation pattern could see their quotes.
const JSON_PATTERNS = [
    // String literals (keys and values)
    [/"(?:[^"\\]|\\.)*"/, 'string'],
    // Numbers
    [/-?\d+(?:\.\d+)?(?:[eE][+-]?\d+)?/, 'number'],
    // Booleans
    [/\b(?:true|false)\b/, 'boolean'],
    // Null
    [/\bnull\b/, 'null'],
    // Punctuation
    [/[{}[\]:,]/, 'punctuation'],
];
// Each pattern is wrapped in its own capture group so the matched
// alternative can be identified by group index (see getMatchedTokenType).
// The "g" flag makes this regex stateful: callers must reset lastIndex
// before use (tokenizeJson does).
const JSON_REGEX = new RegExp(JSON_PATTERNS.map(([re]) => `(${re.source})`).join('|'), 'g');
/**
 * Tokenize a single line (or fragment) of JSON text.
 *
 * Text not matched by any JSON pattern (e.g. whitespace) is emitted as
 * a plain token, so the concatenated token values always reproduce the
 * input exactly.
 *
 * @param text - the JSON fragment to tokenize
 * @returns array of syntax tokens
 */
function tokenizeJson(text) {
    const tokens = [];
    let cursor = 0;
    // JSON_REGEX is a shared stateful "g" regex; reset before iterating
    JSON_REGEX.lastIndex = 0;
    for (const match of text.matchAll(JSON_REGEX)) {
        // Emit any unmatched text before this match as plain
        if (match.index > cursor) {
            tokens.push({
                value: text.slice(cursor, match.index),
                type: 'plain',
            });
        }
        const value = match[0];
        let type = getMatchedTokenType(match);
        if (type === 'string') {
            // Distinguish JSON keys from string values: a key is a
            // string followed by optional whitespace and a colon
            const rest = text.slice(match.index + value.length);
            type = /^\s*:/.test(rest) ? 'key' : 'string';
        }
        tokens.push({ value, type });
        cursor = match.index + value.length;
    }
    // Trailing plain text after the last match
    if (cursor < text.length) {
        tokens.push({ value: text.slice(cursor), type: 'plain' });
    }
    return tokens;
}
/**
 * Map a combined-regex match to its token type by locating the capture
 * group that participated in the match. Group order mirrors the order
 * of JSON_PATTERNS (group 1 is the first pattern).
 *
 * @param match - the regex match result from JSON_REGEX
 * @returns the token type of the matched pattern, or 'plain'
 */
function getMatchedTokenType(match) {
    let group = 1;
    for (const [, type] of JSON_PATTERNS) {
        if (match[group] !== undefined) {
            return type;
        }
        group++;
    }
    return 'plain';
}
/**
* Pure utility functions for search-within-diff functionality.
*/
/**
 * Escape special regex characters in a search term so it can be used
 * as a literal pattern in a RegExp constructor.
 *
 * @param term - the raw search string
 * @returns regex-safe string
 */
function escapeRegex(term) {
    // "$&" inserts the matched character, preceded by a backslash
    return term.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}
/**
 * Build a case-insensitive, global regex whose single capture group
 * matches the search term literally.
 * Returns `null` when the term is empty.
 *
 * @param term - the raw search string
 * @returns a RegExp with a single capture group, or null
 */
function buildSearchRegex(term) {
    if (!term) {
        return null;
    }
    // Escape regex metacharacters so the term matches literally
    const literal = term.replaceAll(/[.*+?^${}()|[\]\\]/g, String.raw `\$&`);
    return new RegExp(`(${literal})`, 'gi');
}
/**
 * Step the current match index forward or backward, wrapping around at
 * either end of the match list.
 *
 * @param currentIndex - current zero-based match index
 * @param direction - +1 for next, -1 for previous
 * @param total - total number of matches
 * @returns the new match index
 */
function navigateMatchIndex(currentIndex, direction, total) {
    // Guard against modulo-by-zero when there are no matches
    return total === 0 ? 0 : (currentIndex + direction + total) % total;
}
/**
* Pure utility functions for extracting text content from diff structures.
*/
/**
 * Extract the text content of removed lines from a unified change block.
 *
 * @param lines - consecutive changed lines from a unified diff hunk
 * @returns the removed lines joined by newlines, or empty string if none
 */
function extractRemovedContent(lines) {
    const removed = [];
    for (const line of lines) {
        if (line.type === 'removed') {
            removed.push(line.content);
        }
    }
    return removed.join('\n');
}
/**
 * Extract the text content of removed lines from a split change block.
 *
 * @param rows - consecutive changed rows from a split diff hunk
 * @returns the removed lines joined by newlines, or empty string if none
 */
function extractRemovedContentFromSplit(rows) {
    const removed = [];
    for (const row of rows) {
        const left = row.left;
        // Rows with no left cell (pure additions) are skipped
        if (left != null && left.type === 'removed') {
            removed.push(left.content);
        }
    }
    return removed.join('\n');
}
const codeDiffCss = () => `@charset "UTF-8";*,*::before,*::after{box-sizing:border-box;min-width:0;min-height:0}:host(limel-code-diff){--diff-added-bg:rgb(var(--color-green-default), 0.1);--diff-added-bg-hover:rgb(var(--color-green-default), 0.3);--diff-added-highlight-bg:rgb(var(--color-green-default), 0.3);--diff-removed-bg:rgb(var(--color-red-default), 0.1);--diff-removed-bg-hover:rgb(var(--color-red-default), 0.3);--diff-removed-highlight-bg:rgb(var(--color-red-default), 0.3);--diff-context-bg:transparent;--diff-indicator-added-color:rgb(var(--color-green-default));--diff-indicator-removed-color:rgb(var(--color-red-default));--diff-stat-added-color:rgb(var(--color-green-default));--diff-stat-removed-color:rgb(var(--color-red-default));--search-match-bg:rgb(var(--color-amber-default), 0.3);--search-match-current-bg:rgb(var(--color-amber-default), 0.6);--diff-line-hover-bg:rgb(var(--contrast-800), 0.08);--diff-gutter-bg:rgb(var(--contrast-200));--diff-gutter-text-color:rgb(var(--contrast-700));--diff-text-color:rgb(var(--contrast-1100));--diff-border-color:rgb(var(--contrast-400));--diff-collapsed-bg:rgb(var(--contrast-200));--diff-collapsed-text-color:rgb(var(--contrast-800));--diff-header-bg:rgb(var(--contrast-200));--diff-empty-text-color:rgb(var(--contrast-700));--diff-split-divider-color:rgb(var(--contrast-400));--diff-empty-cell-bg:rgb(var(--contrast-100));--syntax-string-color:rgb(var(--color-green-dark));--syntax-number-color:rgb(var(--color-blue-default));--syntax-boolean-color:rgb(var(--color-amber-darker));--syntax-key-color:rgb(var(--color-sky-dark));--syntax-null-color:rgb(var(--contrast-700));--syntax-punctuation-color:rgb(var(--contrast-700));--search-bar-bg:rgb(var(--contrast-100));--search-bar-border:rgb(var(--contrast-400));--limel-code-diff-line-number-padding:0.25rem;font-family:ui-sans-serif, system-ui, sans-serif;display:flex;flex-direction:column;width:100%;height:100%;color:var(--diff-text-color);border:1px solid 
var(--diff-border-color);border-radius:0.5rem;max-height:var(--code-diff-max-height, none)}.screen-reader-only{position:absolute;width:0;height:0;margin:-1px;padding:0;border:0;overflow:hidden;clip:rect(0, 0, 0, 0);clip-path:inset(50%);white-space:nowrap}.diff-header{flex-shrink:0;display:flex;align-items:center;justify-content:space-between;padding:0.125rem 0.125rem 0.125rem 0.5rem;background:var(--diff-header-bg);border-bottom:1px solid var(--diff-border-color);font-family:ui-sans-serif, system-ui, sans-serif;font-size:0.75rem;border-radius:0.5rem 0.5rem 0 0}.diff-header__labels{display:flex;gap:0.75rem;font-weight:500}.diff-header__old,.diff-header__new{padding:0.125rem 0.25rem;border-radius:0.25rem;box-shadow:var(--shadow-brighten-edges-outside)}.diff-header__old{background-color:var(--diff-removed-bg)}.diff-header__new{background-color:var(--diff-added-bg)}.diff-header__actions{display:flex;align-items:center;gap:0.25rem}.search-toggle--active{--limel-theme-on-surface-color:var(--mdc-theme-primary)}.diff-header__stats{font-family:ui-monospace, "Cascadia Code", "Source Code Pro", Menlo, Consolas, "DejaVu Sans Mono", monospace;display:flex;gap:0.5rem;margin-right:0.5rem}.stat{font-family:ui-monospace, "Cascadia Code", "Source Code Pro", Menlo, Consolas, "DejaVu Sans Mono", monospace;font-size:0.8125rem;font-weight:600}.stat--added{color:var(--diff-stat-added-color)}.stat--removed{color:var(--diff-stat-removed-color)}.search-bar{flex-shrink:0;display:flex;align-items:center;gap:0.25rem;padding:0.25rem 0 0.25rem 0.25rem;background:var(--search-bar-bg);border-bottom:1px solid var(--search-bar-border)}.search-bar limel-action-bar{--action-bar-background-co