/*
 * singularityui-tailer — a robust log tailer.
 * Transpiled (Babel → CommonJS) build of src/reducers/files.js.
 */
'use strict';
// CommonJS/ES-module interop flag emitted by Babel: lets compiled `import`
// statements recognize this file as a transpiled ES module.
Object.defineProperty(exports, "__esModule", {
value: true
});
// Pre-declare the named exports; each is assigned next to its definition below.
exports.removeFileReducer = exports.removeChunkReducer = exports.addChunkReducer = exports.mergeLines = exports.createLines = exports.mergeChunks = exports.isOverlapping = exports.splitChunkIntoLines = exports.createMissingMarker = undefined;
// Shallow-merge the own enumerable properties of every `source` into
// `target` and return `target` (Babel's ponyfill for object spread;
// uses the native Object.assign when available).
var _extends = Object.assign || function (target) {
  for (var i = 1; i < arguments.length; i += 1) {
    var source = arguments[i];
    for (var key in source) {
      if (Object.prototype.hasOwnProperty.call(source, key)) {
        target[key] = source[key];
      }
    }
  }
  return target;
};
var _textEncoding = require('text-encoding');
var _immutable = require('immutable');
var _actions = require('../actions');
// Assign `value` to `obj[key]` and return `obj`. When the key already exists
// (anywhere on the prototype chain), go through Object.defineProperty so the
// property is redefined as enumerable/configurable/writable, matching the
// semantics Babel needs for computed object properties.
function _defineProperty(obj, key, value) {
  if (!(key in obj)) {
    obj[key] = value;
    return obj;
  }
  Object.defineProperty(obj, key, {
    value: value,
    enumerable: true,
    configurable: true,
    writable: true
  });
  return obj;
}
// Copy an array (index-by-index, densifying holes exactly like the Babel
// helper) or materialize any other iterable via Array.from. Used by the
// transpiled spread-call syntax. // polyfill
function _toConsumableArray(arr) {
  if (!Array.isArray(arr)) {
    return Array.from(arr);
  }
  var copy = Array(arr.length);
  for (var i = 0; i < arr.length; i += 1) {
    copy[i] = arr[i];
  }
  return copy;
}
// Shared UTF-8 encoder/decoder. TD is constructed with { fatal: true } so
// decoding a byte range that splits a multibyte character THROWS instead of
// silently emitting U+FFFD; filesReducer catches that and invalidates the log.
var TE = new _textEncoding.TextEncoder();
var TD = new _textEncoding.TextDecoder('utf-8', { fatal: true });
// Key concept: rangeLike
// most of the objects in this file use the concept of a range-like, which
// is a duck-typed object that has a `start` and `end` field.
// the start and end fields are byte offsets into the file that we are dealing
// with. Many functions use them to figure out where to splice and dice lists.
// Build a placeholder "line" standing in for the byte range [start, end)
// that has not been loaded yet. It carries the same rangeLike fields
// (start/end/byteLength) as a real line so the splicing code can treat
// both uniformly.
var createMissingMarker = exports.createMissingMarker = function createMissingMarker(start, end) {
  var marker = {
    isMissingMarker: true,
    byteLength: end - start,
    start: start,
    end: end,
    hasNewline: false
  };
  return marker;
};
// Split one chunk ({ text, start, ... }) into an immutable List of line
// objects. Each line carries its encoded byteLength plus absolute byte
// start/end offsets; every line except the last is assumed to have been
// terminated by a one-byte newline.
var splitChunkIntoLines = exports.splitChunkIntoLines = function splitChunkIntoLines(chunk) {
  var text = chunk.text;
  var start = chunk.start; // { end, byteLength } should also be provided
  var rawLines = new _immutable.List(text.split(/[\n\r]/));
  var encodedLengths = rawLines.map(function (line) {
    return TE.encode(line).byteLength;
  });
  var result = new _immutable.List();
  var offset = start;
  rawLines.forEach(function (line, i) {
    var hasNewline = i !== rawLines.size - 1;
    var byteLength = encodedLengths.get(i);
    // account for the one newline byte that the split consumed
    var lineEnd = offset + byteLength + (hasNewline ? 1 : 0);
    result = result.insert(i, {
      text: line,
      byteLength: byteLength,
      start: offset,
      end: lineEnd,
      hasNewline: hasNewline
    });
    offset = lineEnd;
  });
  // don't allow zero-byte lines: a chunk ending exactly on a newline
  // produces a trailing empty fragment that we drop
  if (result.size) {
    var tail = result.last();
    if (!tail.hasNewline && !tail.byteLength) {
      return result.butLast();
    }
  }
  return result;
};
/*
Justifying the byteLength algorithm used:
hipster text actual byte length: 2399
(generated at: http://hipsum.co/?paras=4&type=hipster-centric)
Speed tests:
benchmark = (f) => {
then = Date.now();
for(let i=0; i<100000; i++) { f() }
console.log(Date.now() - then);
}
const te = new TextEncoder();
Byte count
----------
TextEncoder:
benchmark(() => { te.encode(hipster).byteLength; });
-> 1216
byteCount:
function byteCount(s) {
return encodeURI(s).split(/%..|./).length - 1;
}
benchmark(() => { byteCount(hipster); });
-> 19114
For small strings, byteCount is way faster,
because it doesn't need to make a Uint8Array
For big strings, TextEncoder is way faster,
because it's not a hack.
Split
-----
Uint8Array:
benchmark(() => {
byteArray.reduce((newLines, b, index) => (
(b === 10) ? newLines.concat([index]) : newLines), []
)
});
-> 13624
let newLines;
benchmark(() => {
newLines = []; byteArray.forEach((b, index) => {
(b === 10) && newLines.push(index);
});
});
-> 23690
String split:
benchmark(() => { hipster.split('\n') });
-> 19
Combination
-----------
String split then byte count with byteCount:
benchmark(() => hipster.split("p").map((l) => byteCount(l)))
-> 23771
benchmark(() => hipster.split("p").map((l) => te.encode(l).byteLength))
-> 8267
*/
// get the byte start and end of a list of rangeLikes
// (an empty list is treated as the empty range [0, 0))
var getBookends = function getBookends(list) {
  if (list.size) {
    return {
      start: list.first().start,
      end: list.last().end
    };
  }
  return { start: 0, end: 0 };
};
// Checks if two chunks/ranges overlap. With the optional third argument
// `inclusive` set, ranges that merely touch at a boundary also count.
var isOverlapping = exports.isOverlapping = function isOverlapping(c1, c2) {
  var inclusive = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false;
  return inclusive
    ? c1.start <= c2.end && c2.start <= c1.end
    : c1.start < c2.end && c2.start < c1.end;
};
// rangeLike can be a range object (start, end), a chunk, or a line
// (they all have start and end byte fields).
// Returns the first/last indices in `list` that overlap `rangeLike`
// (-1 / -1 when nothing overlaps).
var findOverlap = function findOverlap(list, rangeLike) {
  var inclusive = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false;
  var overlaps = function overlaps(c) {
    return isOverlapping(rangeLike, c, inclusive);
  };
  return {
    startIndex: list.findIndex(overlaps),
    endIndex: list.findLastIndex(overlaps)
  };
};
// Slice `list` down to the elements covered by an inclusive index range
// (as produced by findOverlap). A startIndex of -1 yields an empty List.
var getIndexRange = function getIndexRange(list, indexRange) {
  if (indexRange.startIndex === -1) {
    return new _immutable.List();
  }
  return list.slice(indexRange.startIndex, indexRange.endIndex + 1);
};
// Convenience: the sub-list of `list` whose elements overlap `rangeLike`.
var getOverlap = function getOverlap(list, rangeLike) {
  var inclusive = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false;
  var indices = findOverlap(list, rangeLike, inclusive);
  return getIndexRange(list, indices);
};
// incoming: single chunk.
// Merge `incoming` into the byte-ordered List `existing`: any chunks it
// overlaps are replaced by one combined chunk; otherwise it is inserted in
// byte order. All splicing happens on encoded bytes because start/end are
// byte offsets, not character indices.
var mergeChunks = exports.mergeChunks = function mergeChunks(existing, incoming) {
var replacementRange = findOverlap(existing, incoming);
var intersectingChunks = getIndexRange(existing, replacementRange);
if (intersectingChunks.size) {
// okay, we know that there are some chunks that overlap with us
// we only need to merge the first and last and only if each goes beyond
// the chunks
var firstBytes = void 0;
var firstIntersectingChunk = intersectingChunks.first();
if (firstIntersectingChunk.start < incoming.start) {
// keep the head of the first overlapped chunk that `incoming` does not
// cover; slice on the encoded bytes, not the (possibly multibyte) string
// this is a loaded piece, handle carefully \u{1F52B}
firstBytes = TE.encode(firstIntersectingChunk.text).subarray(0, incoming.start - firstIntersectingChunk.start);
}
// the last could also be the first, but that's fine
var lastBytes = void 0;
var lastIntersectingChunk = intersectingChunks.last();
if (lastIntersectingChunk.end > incoming.end) {
// keep the tail past `incoming.end`; a single-argument subarray takes
// everything from that byte offset to the end of the chunk
lastBytes = TE.encode(lastIntersectingChunk.text).subarray(lastIntersectingChunk.byteLength - (lastIntersectingChunk.end - incoming.end));
}
var chunksToReplace = replacementRange.endIndex - replacementRange.startIndex + 1;
var newChunk = void 0;
// combine the bytes together if needed
if (firstBytes || lastBytes) {
// we have to convert the incoming chunk to bytes to combine
var incomingBytes = TE.encode(incoming.text);
// allocate one buffer for head + incoming + tail, then decode once.
// TD was built with { fatal: true }, so a boundary that splits a
// multibyte character throws here and filesReducer invalidates the log.
var combinedByteLength = (firstBytes ? firstBytes.byteLength : 0) + incomingBytes.byteLength + (lastBytes ? lastBytes.byteLength : 0);
var combined = new Uint8Array(combinedByteLength);
if (firstBytes) {
combined.set(firstBytes);
combined.set(incomingBytes, firstBytes.byteLength);
} else {
combined.set(incomingBytes);
}
if (lastBytes) {
// the tail always ends flush with the end of the combined buffer
combined.set(lastBytes, combinedByteLength - lastBytes.byteLength);
}
newChunk = {
text: TD.decode(combined),
byteLength: combinedByteLength,
start: firstBytes ? firstIntersectingChunk.start : incoming.start,
end: lastBytes ? lastIntersectingChunk.end : incoming.end
};
} else {
// incoming fully covers every overlapped chunk; use it verbatim
newChunk = incoming;
}
return existing.splice(replacementRange.startIndex, chunksToReplace, newChunk);
}
// no overlap: simply insert the chunk in byte order.
// find the last chunk that ends at or before where this one starts
var indexBefore = existing.findLastIndex(function (c) {
return incoming.start >= c.end;
});
// works even if indexBefore === -1 (insert at the front)
return existing.insert(indexBefore + 1, incoming);
};
// Build a List of line objects from an ordered List of chunks.
// Adjacent chunks that butt up exactly have their boundary partial lines
// merged into one line; a byte gap between chunks becomes a missing marker.
var createLines = exports.createLines = function createLines(chunks) {
// get chunks that overlap a byte range (inclusive)
return chunks.reduce(function (accumulatedLines, c) {
var chunkLines = splitChunkIntoLines(c);
if (accumulatedLines.size && chunkLines.size) {
var existingPart = accumulatedLines.last();
var newPart = chunkLines.first();
// create missing marker if the parts don't line up
if (existingPart.end !== newPart.start) {
accumulatedLines = accumulatedLines.push(createMissingMarker(existingPart.end, newPart.start));
} else if (!existingPart.hasNewline) {
// combine partial lines: the previous chunk ended mid-line, so its
// last line and this chunk's first line are halves of the same line.
// set(-1, …) replaces the last element of the immutable List.
accumulatedLines = accumulatedLines.set(-1, {
text: existingPart.text + newPart.text,
byteLength: existingPart.byteLength + newPart.byteLength,
start: existingPart.start,
end: newPart.end,
hasNewline: newPart.hasNewline
});
// the first new line was folded into the previous one; append the rest
return accumulatedLines.concat(chunkLines.rest());
}
}
return accumulatedLines.concat(chunkLines);
}, new _immutable.List());
};
// Smallest single range covering every rangeLike passed in
// (variadic: getBoundingRange(r1, r2, ...)).
var getBoundingRange = function getBoundingRange() {
  var starts = [];
  var ends = [];
  for (var i = 0; i < arguments.length; i += 1) {
    starts.push(arguments[i].start);
    ends.push(arguments[i].end);
  }
  return {
    start: Math.min.apply(Math, starts),
    end: Math.max.apply(Math, ends)
  };
};
// Regenerate only the lines affected by an incoming chunk/range.
// Expands the range to whole existing lines, rebuilds lines from the
// chunks covering that expanded range, then trims back to the lines that
// actually touch the incoming range.
var createLinesForChunk = function createLinesForChunk(existingLines, chunks, incomingRangeLike) {
  var overlappingLines = getOverlap(existingLines, incomingRangeLike, true);
  if (!overlappingLines.size) {
    // nothing overlaps: the incoming range lies beyond all known lines
    return createLines(getOverlap(chunks, incomingRangeLike));
  }
  var boundingByteRange = getBoundingRange(getBookends(overlappingLines), incomingRangeLike);
  var overlappingChunks = getOverlap(chunks, boundingByteRange, true);
  var untrimmedLines = createLines(overlappingChunks);
  return getOverlap(untrimmedLines, incomingRangeLike, true);
};
// incoming: List of lines that replaces `replacementRange` within `existing`.
// Pads the replacement with missing markers when the incoming lines do not
// cover the whole replaced byte range.
var mergeLines = exports.mergeLines = function mergeLines(existing, incoming, replacementRange) {
  // FIX: an empty `existing` used to crash below (existing.last() is
  // undefined); with nothing to splice around, the incoming lines are the
  // whole result.
  if (!existing.size) {
    return existing.concat(incoming);
  }
  if (replacementRange.startIndex === -1) {
    // the new lines don't overlap at all, so they go at the end.
    var gapStart = existing.last().end;
    var gapEnd = incoming.first().start;
    if (gapStart < gapEnd) {
      // real hole between the last known line and the new ones
      return existing.push(createMissingMarker(gapStart, gapEnd)).concat(incoming);
    }
    // FIX: exactly adjacent ranges used to get a zero-byte missing marker
    // here, violating the module's "don't allow zero-byte lines" rule
    // (see splitChunkIntoLines); just append.
    return existing.concat(incoming);
  }
  var generatedByteRange = getBookends(incoming);
  var replacementByteRange = {
    start: existing.get(replacementRange.startIndex).start,
    end: existing.get(replacementRange.endIndex).end
  };
  // see if we need to add a missing marker to the start
  if (generatedByteRange.start > replacementByteRange.start) {
    incoming = incoming.unshift(createMissingMarker(replacementByteRange.start, generatedByteRange.start));
  }
  // and to the end
  if (generatedByteRange.end < replacementByteRange.end) {
    incoming = incoming.push(createMissingMarker(generatedByteRange.end, replacementByteRange.end));
  }
  return existing.slice(0, replacementRange.startIndex).concat(incoming).concat(existing.slice(replacementRange.endIndex + 1));
};
// Handle ADD_FILE_CHUNK: merge `action.chunk` into the file's chunk list and
// regenerate only the lines the new bytes touch.
var addChunkReducer = exports.addChunkReducer = function addChunkReducer(state, action) {
var id = action.id,
chunk = action.chunk;
// first chunk for a file we haven't seen yet: initialize its entry
if (!state[id]) {
var _chunks = mergeChunks(new _immutable.List(), chunk);
var bookends = getBookends(_chunks);
var lines = createLines(_chunks);
// chunk starts mid-file: mark the unloaded head as missing
if (bookends.start !== 0) {
lines = lines.unshift(createMissingMarker(0, bookends.start));
}
return _extends({}, state, _defineProperty({}, id, {
chunks: _chunks,
lines: lines,
fileSize: bookends.end
}));
}
// has been init but has no new data
if (!chunk.byteLength) {
return state;
}
// if length came back as 0 quick fix
// (file entry exists, e.g. via SET_FILE_SIZE, but holds no chunks yet:
// rebuild from scratch and pad missing markers on both sides)
if (!state[id].chunks.size) {
var _chunks2 = mergeChunks(new _immutable.List(), chunk);
var _bookends = getBookends(_chunks2);
var _lines = createLines(_chunks2);
if (_bookends.start !== 0) {
_lines = _lines.unshift(createMissingMarker(0, _bookends.start));
}
if (_bookends.end < state[id].fileSize) {
_lines = _lines.push(createMissingMarker(_bookends.end, state[id].fileSize));
}
return _extends({}, state, _defineProperty({}, id, {
chunks: _chunks2,
lines: _lines,
fileSize: Math.max(_bookends.end, state[id].fileSize)
}));
}
// has been init and has new data: merge the chunk, rebuild just the lines
// it affects, and splice them over the old ones
var chunks = mergeChunks(state[id].chunks, chunk);
var incomingLines = createLinesForChunk(state[id].lines, chunks, chunk);
var replacementRange = findOverlap(state[id].lines, getBookends(incomingLines));
return _extends({}, state, _defineProperty({}, id, {
chunks: chunks,
lines: mergeLines(state[id].lines, incomingLines, replacementRange),
fileSize: Math.max(state[id].fileSize, chunk.end)
}));
};
// Handle UNLOAD_FILE_CHUNK: drop the chunk at `action.index` and rebuild the
// lines that covered its byte range (which become missing markers again).
var removeChunkReducer = exports.removeChunkReducer = function removeChunkReducer(state, action) {
  var id = action.id;
  var index = action.index;
  var file = state[id];
  if (!file || !file.chunks.has(index)) {
    return state;
  }
  var removedChunk = file.chunks.get(index);
  var newChunks = file.chunks.delete(index);
  // a new set of lines for the range that just got deleted
  var deletedLines = createLinesForChunk(file.lines, newChunks, removedChunk);
  var replacementRange = findOverlap(file.lines, removedChunk, true);
  return _extends({}, state, _defineProperty({}, id, {
    chunks: newChunks,
    lines: mergeLines(file.lines, deletedLines, replacementRange),
    fileSize: file.fileSize
  }));
};
// Invalidate a file's entry by overwriting it with undefined (the key is kept;
// addChunkReducer treats the file as uninitialized afterwards).
var removeFileReducer = exports.removeFileReducer = function removeFileReducer(state, action) {
  if (!state[action.id]) {
    return state;
  }
  return _extends({}, state, _defineProperty({}, action.id, undefined));
};
var initialState = {};
/**
 * Root reducer for tailed files.
 * State shape: { [fileId]: { chunks: List, lines: List, fileSize: number },
 *                tailing: { [fileId]: boolean } }
 */
var filesReducer = function filesReducer() {
  var state = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : initialState;
  var action = arguments[1];
  switch (action.type) {
    case _actions.ADD_FILE_CHUNK:
      try {
        return addChunkReducer(state, action);
      } catch (e) {
        // e.g. the fatal TextDecoder rejected a merge that split a multibyte
        // character: throw the file away and re-add the chunk from scratch
        console.warn( // eslint-disable-line no-console
        'LogTailer caught ' + e.name + '. Invalidating log', e);
        return addChunkReducer(removeFileReducer(state, action), action);
      }
    case _actions.UNLOAD_FILE:
      {
        // scope
        var newState = _extends({}, state);
        delete newState[action.id];
        return newState;
      }
    case _actions.UNLOAD_FILE_CHUNK:
      return removeChunkReducer(state, action);
    case _actions.STOP_TAILING:
      {
        var newStopTailState = _extends({}, state);
        // FIX: _extends is shallow, so `state.tailing` was shared with the
        // previous state object and writing into it mutated old state
        // (breaks Redux time-travel / shallow-compare). Copy the nested map
        // first; _extends({}, undefined) safely yields {}.
        newStopTailState.tailing = _extends({}, newStopTailState.tailing);
        newStopTailState.tailing[action.id] = false;
        return newStopTailState;
      }
    case _actions.START_TAILING:
      {
        var newStartTailState = _extends({}, state);
        // same shallow-copy fix as STOP_TAILING above
        newStartTailState.tailing = _extends({}, newStartTailState.tailing);
        newStartTailState.tailing[action.id] = true;
        return newStartTailState;
      }
    case _actions.SET_FILE_SIZE:
      // unknown file: create an entry that is entirely missing bytes
      if (!state[action.id]) {
        return _extends({}, state, _defineProperty({}, action.id, {
          chunks: new _immutable.List(),
          lines: new _immutable.List().push(createMissingMarker(0, action.fileSize)),
          fileSize: action.fileSize
        }));
      }
      // make sure to add the missing marker if the fileSize is larger than
      // what we knew
      if (state[action.id].lines.size) {
        var lines = state[action.id].lines;
        var lastLine = lines.last();
        var updatedLines = lines;
        if (action.fileSize > lastLine.end) {
          updatedLines = lines.push(createMissingMarker(lastLine.end, action.fileSize));
        }
        return _extends({}, state, _defineProperty({}, action.id, _extends({}, state[action.id], {
          lines: updatedLines,
          fileSize: Math.max(state[action.id].fileSize, action.fileSize)
        })));
      }
      // no lines yet: just record the (never-shrinking) size
      return _extends({}, state, _defineProperty({}, action.id, _extends({}, state[action.id], {
        fileSize: Math.max(state[action.id].fileSize, action.fileSize)
      })));
    default:
      return state;
  }
};
// Default export: the files reducer (also reachable as `exports.default`).
var _default = filesReducer;
exports.default = _default;
;
// react-hot-loader bookkeeping emitted by the Babel HMR transform: registers
// every top-level binding under its source path so hot reloads can patch
// them in place. A no-op in builds where __REACT_HOT_LOADER__ is undefined.
var _temp = function () {
if (typeof __REACT_HOT_LOADER__ === 'undefined') {
return;
}
__REACT_HOT_LOADER__.register(TE, 'TE', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(TD, 'TD', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(createMissingMarker, 'createMissingMarker', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(splitChunkIntoLines, 'splitChunkIntoLines', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(getBookends, 'getBookends', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(isOverlapping, 'isOverlapping', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(findOverlap, 'findOverlap', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(getIndexRange, 'getIndexRange', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(getOverlap, 'getOverlap', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(mergeChunks, 'mergeChunks', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(createLines, 'createLines', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(getBoundingRange, 'getBoundingRange', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(createLinesForChunk, 'createLinesForChunk', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(mergeLines, 'mergeLines', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(addChunkReducer, 'addChunkReducer', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(removeChunkReducer, 'removeChunkReducer', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(removeFileReducer, 'removeFileReducer', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(initialState, 'initialState', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(filesReducer, 'filesReducer', 'src/reducers/files.js');
__REACT_HOT_LOADER__.register(_default, 'default', 'src/reducers/files.js');
}();
;