// Package: @actyx/sdk — Actyx SDK (compiled JavaScript, 222 lines, 9.16 kB)
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.testEventStore = exports.binSearchOffsets = exports.includeEvent = void 0;
/*
* Actyx SDK: Functions for writing distributed apps
* deployed on peer-to-peer networks, without any servers.
*
* Copyright (C) 2021 Actyx AG
*/
const Option_1 = require("fp-ts/lib/Option");
const rxjs_1 = require("../../node_modules/rxjs");
const operators_1 = require("../../node_modules/rxjs/operators");
const types_1 = require("../types");
const util_1 = require("../util");
const lookup = (offsets, source) => (0, Option_1.fromNullable)(offsets[source]);
// Fold one event into a mutable offset-map builder: record the event's offset
// for its stream, but never move an already-recorded offset backwards.
// Returns the same builder object to allow use with `reduce`.
const includeEvent = (offsetsBuilder, ev) => {
    const alreadyCovered = (0, Option_1.exists)((known) => known >= ev.offset)(lookup(offsetsBuilder, ev.stream));
    if (!alreadyCovered) {
        offsetsBuilder[ev.stream] = ev.offset;
    }
    return offsetsBuilder;
};
exports.includeEvent = includeEvent;
// Predicate factory: does an event fall into the half-open offset window
// (from, to]? Streams missing from `from` pass the lower bound; streams
// missing from `to` pass the upper bound only when `onboardNewSources` is set.
const isBetweenPsnLimits = (from, to, onboardNewSources) => (e) => {
    const stream = e.stream;
    // Strictly above the lower bound — events at the bound are already known.
    const aboveLower = (0, Option_1.getOrElse)(() => true)((0, Option_1.map)((bound) => e.offset > bound)(lookup(from, stream)));
    // At or below the upper bound; unknown streams are admitted only when onboarding.
    const withinUpper = (0, Option_1.getOrElse)(() => onboardNewSources)((0, Option_1.map)((bound) => e.offset <= bound)(lookup(to, stream)));
    return aboveLower && withinUpper;
};
/**
* HERE BE DRAGONS: This function is just a draft of an optimisation we may do in a Rust-side impl
* Take an offset map and a sorted array of events -> find an index that fully covers the offsetmap.
* - Offset of event is smaller than in map -> Too low
* - Offset of event is higher -> Too high
* - Event’s source is not in offsets: Too high if offsets default is 'min'
* - Psn eq: Too low, unless the next event is too high
*/
// Classic leftmost binary search over a sorted event array: returns the first
// index whose event is not "too low" w.r.t. `offsets` according to
// `ordOffsetsEvent` (i.e. the index from which events are no longer covered).
const binSearchOffsets = (a, offsets) => {
    const compare = ordOffsetsEvent(offsets, a);
    let lo = 0;
    let hi = a.length;
    while (lo < hi) {
        // Overflow-safe midpoint (equivalent to (lo + hi) >>> 1 for array sizes).
        const mid = lo + ((hi - lo) >>> 1);
        if (compare(mid) < 0) {
            lo = mid + 1;
        }
        else {
            hi = mid;
        }
    }
    return lo;
};
exports.binSearchOffsets = binSearchOffsets;
// For use within `binSearchOffsets` -- very specialized comparison.
const ordOffsetsEvent = (offsets, events) => (i) => {
const ev = events[i];
const source = ev.stream;
const offset = lookup(offsets, source);
return (0, Option_1.fold)(
// Unknown source: Too high.
() => 1, (o) => {
const d = ev.offset - o;
if (d !== 0 || i + 1 === events.length) {
return d;
}
// If d=0, delegate to the next higher index
return ordOffsetsEvent(offsets, events)(i + 1);
})(offset);
};
// Select from an EventKey-sorted array the events inside the (from, to] offset
// window that match the subscription set. If `min` is given it is expected to
// be present among `events`; everything up to and including it is skipped.
const filterSortedEvents = (events, from, to, subs, min) => {
    let sliceStart = 0;
    if (min) {
        sliceStart = (0, util_1.binarySearch)(events, min, types_1.EventKey.ord.compare) + 1;
    }
    const withinWindow = isBetweenPsnLimits(from, to, false);
    const matchesSubscription = (0, types_1.toEventPredicate)(subs);
    return events
        .slice(sliceStart)
        .filter(withinWindow)
        .filter(matchesSubscription);
};
// Curried filter for live (unsorted) event batches: keep events strictly after
// `min` (when given), inside the offset window — onboarding unknown streams —
// and matching the subscription set.
const filterUnsortedEvents = (from, to, subs, min) => (events) => {
    const afterMin = (ev) => !min || types_1.EventKey.ord.compare(ev, min) > 0;
    const withinWindow = isBetweenPsnLimits(from, to, true);
    const matchesSubscription = (0, types_1.toEventPredicate)(subs);
    return events
        .filter(afterMin)
        .filter(withinWindow)
        .filter(matchesSubscription);
};
// In-memory event log for the test store: keeps every persisted event in one
// array sorted by EventKey, merging newly persisted batches into it.
const persistence = () => {
    let persisted = [];
    // Sort the incoming batch and merge it into `persisted` (kept sorted).
    const persist = (evsUnsorted) => {
        const evs = [...evsUnsorted].sort(types_1.EventKey.ord.compare);
        if (persisted.length === 0) {
            // First batch: adopt it wholesale.
            persisted = evs;
            return;
        }
        if (evs.length === 0) {
            return;
        }
        const oldPersisted = [...persisted];
        // Array with lower first element has to go first
        // NOTE(review): `concat` pre-sizes the target array, which mergeSortedInto
        // then presumably fills in merged order — confirm against ../util.
        if (types_1.EventKey.ord.compare(oldPersisted[0], evs[0]) > 0) {
            persisted = oldPersisted.concat(evs);
            (0, util_1.mergeSortedInto)(oldPersisted, evs, persisted, types_1.EventKey.ord.compare);
        }
        else {
            persisted = evs.concat(oldPersisted);
            (0, util_1.mergeSortedInto)(evs, oldPersisted, persisted, types_1.EventKey.ord.compare);
        }
    };
    // Returns the internal sorted array (not a copy) — callers must not mutate it.
    const allPersisted = () => {
        return persisted;
    };
    // Get persisted events as a mutable slice with best-effort pre-filtering
    const getPersistedPreFiltered = (from, _to) => {
        const events = allPersisted();
        if (types_1.OffsetMap.isEmpty(from)) {
            return [...events];
        }
        // Pre-filtering by `from` is intentionally disabled; always copy everything.
        return [...events];
        // Here be dragons...
        // We actually want to use this when picking up from a snapshot, but that only works when
        // we can guarantee the snapshot is still valid. In case we are hydrating from scratch, we cannot guarantee that!
        // So currently it’s only used for the "live" stream to basically detect that no persisted event needs to be delivered
        // (because in tests the live stream will relibably always start from present and the persisted events will exactly cover the present.)
        // const start = binSearchOffsets(events, from)
        // return events.slice(start)
    };
    return {
        persist,
        getPersistedPreFiltered,
        allPersisted,
    };
};
/**
 * Purely in-memory EventStore implementation for tests: events are kept in a
 * local sorted array (see `persistence`), offsets are tracked in `curOffsets`,
 * and live delivery happens through an rxjs Subject.
 *
 * @param nodeId       node identity; all events are emitted on stream 0 of it
 * @param timeInjector optional (tags, payload) => Timestamp hook so tests can
 *                     control event timestamps; defaults to wall-clock now
 */
const testEventStore = (nodeId = types_1.NodeId.of('TEST'), timeInjector) => {
    const { persist, getPersistedPreFiltered, allPersisted } = persistence();
    const time = timeInjector || (() => types_1.Timestamp.now());
    // Replay depth 1: late subscribers immediately receive the current offset map.
    const present = new rxjs_1.ReplaySubject(1);
    const live = new rxjs_1.Subject();
    // Bounded query over already-persisted events, honoring sort order.
    const query = (from, to, subs, sortOrder) => {
        const events = getPersistedPreFiltered(from, to);
        if (typeof subs === 'string') {
            throw new Error('direct AQL not yet supported by testEventStore');
        }
        const filtered = filterSortedEvents(events, from, to, subs);
        // `reverse` mutates, but `filtered` is a fresh array owned by this call.
        const ret = sortOrder === types_1.EventsSortOrder.Descending ? filtered.reverse() : filtered;
        return (0, rxjs_1.from)(ret);
    };
    // Unbounded live tail: each pushed batch is filtered per subscription.
    const liveStream = (from, subs) => {
        if (typeof subs === 'string') {
            throw new Error('direct AQL not yet supported by testEventStore');
        }
        // Upper bound {} means "no upper bound" here; unknown streams are onboarded.
        return live.asObservable().pipe((0, operators_1.mergeMap)((x) => (0, rxjs_1.from)(filterUnsortedEvents(from, {}, subs)(x))),
        // Delivering live events may trigger new events (via onStateChange) and again new events,
        // until we exhaust the call stack. The prod store shouldn’t have that problem due to obvious reasons.
        (0, operators_1.observeOn)(rxjs_1.queueScheduler));
    };
    // Highest offset seen per stream so far.
    let curOffsets = {};
    present.next(curOffsets);
    // Catch-up query up to the current present, then switch to the live stream.
    // `defer` so the present is sampled at subscription time, not at call time.
    const subscribe = (fromPsn, subs) => {
        const k = () => {
            return (0, rxjs_1.concat)(query(fromPsn, curOffsets, subs, types_1.EventsSortOrder.StreamAscending), liveStream(fromPsn, subs));
        };
        return (0, rxjs_1.defer)(k);
    };
    // Monotone counters for locally persisted events.
    let psn = 0;
    let lamport = types_1.Lamport.of(99999);
    const streamId = types_1.NodeId.streamNo(nodeId, 0);
    // Stamp unstored events with identity/ordering metadata and push them.
    const persistEvents = (x) => {
        const newEvents = x.map((unstoredEvent) => {
            lamport = types_1.Lamport.of(lamport + 1);
            return {
                ...unstoredEvent,
                appId: types_1.AppId.of('test'),
                stream: streamId,
                lamport,
                timestamp: time(unstoredEvent.tags, unstoredEvent.payload),
                offset: types_1.Offset.of(psn++),
            };
        });
        directlyPushEvents(newEvents);
        return (0, rxjs_1.of)(newEvents);
    };
    // Inject fully-formed events (e.g. "from other nodes"): update offsets and
    // the lamport clock, persist, then publish present before live events.
    const directlyPushEvents = (newEvents) => {
        let b = { ...curOffsets };
        for (const ev of newEvents) {
            b = (0, exports.includeEvent)(b, ev);
        }
        curOffsets = b;
        if (newEvents.length > 0) {
            // Keep the local lamport clock strictly ahead of everything pushed.
            lamport = types_1.Lamport.of(Math.max(lamport, ...newEvents.map((x) => x.lamport)) + 1);
        }
        // Legacy compatibility fields expected by consumers of stored events.
        const newEventsCompat = newEvents.map((ev) => ({
            ...ev,
            semantics: '_t_',
            name: '_t_',
            appId: types_1.AppId.of('test'),
        }));
        persist(newEventsCompat);
        present.next(curOffsets);
        live.next(newEventsCompat);
    };
    // Snapshot of the current offset map, shaped like the production API response.
    const getPresent = () => (0, rxjs_1.lastValueFrom)(present.asObservable().pipe((0, operators_1.first)(), (0, operators_1.map)((present) => ({ present, toReplicate: {} }))));
    return {
        nodeId,
        offsets: getPresent,
        query,
        queryUnchecked: () => {
            throw new Error('not implemented for test event store');
        },
        subscribe,
        subscribeUnchecked: () => {
            throw new Error('not implemented for test event store');
        },
        subscribeMonotonic: () => {
            throw new Error('not implemented for test event store');
        },
        persistEvents,
        directlyPushEvents,
        storedEvents: allPersisted,
        connectivityStatus: () => rxjs_1.EMPTY,
        close: () => live.complete(),
    };
};
exports.testEventStore = testEventStore;
//# sourceMappingURL=testEventStore.js.map