"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.EventFnsFromEventStoreV2 = exports._ordByKey = exports._ordByTimestamp = void 0;
/*
* Actyx SDK: Functions for writing distributed apps
* deployed on peer-to-peer networks, without any servers.
*
* Copyright (C) 2021 Actyx AG
*/
const Ord_1 = require("fp-ts/lib/Ord");
const string_1 = require("fp-ts/lib/string");
const number_1 = require("fp-ts/lib/number");
const rxjs_1 = require("rxjs");
const operators_1 = require("rxjs/operators");
const event_fns_1 = require("../event-fns");
const types_1 = require("../types");
const util_1 = require("../util");
const subscribe_monotonic_1 = require("./subscribe_monotonic");
const bufferOp_1 = require("../util/bufferOp");
const semver_1 = require("semver");
const subscribe_monotonic_emulated_1 = require("./subscribe_monotonic_emulated");
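// Total orders over wrapped events, built with fp-ts `contramap`:
// `_ordByTimestamp` compares by wall-clock timestamp, with the unique eventId
// as tiebreaker; `_ordByKey` compares by eventId alone.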
exports._ordByTimestamp = (0, Ord_1.contramap)((e) => [
e.meta.timestampMicros,
e.meta.eventId,
])((0, Ord_1.tuple)(number_1.Ord, string_1.Ord));
exports._ordByKey = (0, Ord_1.contramap)((e) => e.meta.eventId)(string_1.Ord);
const EventFnsFromEventStoreV2 = (nodeId, eventStore, snapshotStore, currentActyxVersion) => {
const mkMeta = (0, types_1.toMetadata)(nodeId);
const wrap = (e) => ({
payload: e.payload,
meta: mkMeta(e),
});
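// Wraps a chunk callback with offset bookkeeping for ascending delivery:
// every chunk carries the offsets it covers (exclusive lowerBound, inclusive
// upperBound), and one chunk's upper bound becomes the next chunk's lower
// bound. Wrapping the callback result in Promise.resolve lets clients return
// either a plain value or a Promise.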
const bookKeepingOnChunk = (initialLowerBound, onChunk) => {
let curLowerBound = { ...initialLowerBound };
const onChunk0 = async (events) => {
const upperBound = { ...curLowerBound };
for (const ev of events) {
upperBound[ev.stream] = ev.offset;
}
const chunk = {
events: events.map(wrap),
// Pass copies of the offsets so the client cannot mutate our bookkeeping state
upperBound: { ...upperBound },
lowerBound: { ...curLowerBound },
};
curLowerBound = upperBound;
// Promise.resolve converts to a Promise if it's not yet a Promise.
await Promise.resolve(onChunk(chunk));
};
return onChunk0;
};
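// The descending counterpart: chunks arrive from the upper bound downwards,
// so each chunk's lower bound is derived from the smallest offset seen per
// stream and then becomes the upper bound of the following chunk.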
const reverseBookKeepingOnChunk = (initialUpperBound, onChunk) => {
let curUpperBound = { ...initialUpperBound };
const onChunk0 = async (events) => {
const lowerBound = { ...curUpperBound };
const sourcesInChunk = new Set();
for (const ev of events) {
lowerBound[ev.stream] = ev.offset;
sourcesInChunk.add(ev.stream);
}
for (const src of sourcesInChunk) {
// Lower bounds are *exclusive*, so we must subtract 1 (dropping the entry entirely at offset 0)
const bound = lowerBound[src];
if (bound === 0) {
delete lowerBound[src];
}
else {
lowerBound[src] = bound - 1;
}
}
const chunk = {
events: events.map(wrap),
// Pass copies of the offsets so the client cannot mutate our bookkeeping state
upperBound: { ...curUpperBound },
lowerBound: { ...lowerBound },
};
curUpperBound = lowerBound;
// Promise.resolve converts to a Promise if it's not yet a Promise.
await Promise.resolve(onChunk(chunk));
};
return onChunk0;
};
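// `present` projects just the locally known offsets out of the store's
// offsets response; `offsets` returns the full response.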
const present = () => eventStore.offsets().then((x) => x.present);
const offsets = () => eventStore.offsets();
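// Query a fixed, already-known offset range and collect all matching events
// into an array. Usage sketch (`fns` is the object returned below; `tags` and
// `toOffsets` are hypothetical):
//   const events = await fns.queryKnownRange({ upperBound: toOffsets, query: tags })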
const queryKnownRange = (rangeQuery) => {
const { lowerBound, upperBound, query, order, horizon } = rangeQuery;
return (0, rxjs_1.lastValueFrom)(eventStore
.query(lowerBound || {}, upperBound, query || types_1.allEvents, order || types_1.EventsSortOrder.Ascending, horizon)
.pipe((0, operators_1.map)(wrap), (0, operators_1.toArray)()));
};
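// Like queryKnownRange, but delivers the events in chunks of `chunkSize` and
// never invokes `onChunk` concurrently (see the mergeScan note below).
// Returns a function that cancels the stream.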
const queryKnownRangeChunked = (rangeQuery, chunkSize, onChunk, onComplete) => {
const { lowerBound, upperBound, query, order, horizon } = rangeQuery;
const lb = lowerBound || {};
const cb = order === types_1.EventsSortOrder.Ascending
? bookKeepingOnChunk(lb, onChunk)
: reverseBookKeepingOnChunk(upperBound, onChunk);
let cancelled = false;
const onCompleteOrErr = onComplete ? onComplete : util_1.noop;
const s = eventStore
.query(lb, upperBound, query || types_1.allEvents, order || types_1.EventsSortOrder.Ascending, horizon)
.pipe((0, operators_1.bufferCount)(chunkSize), (0, operators_1.subscribeOn)(rxjs_1.asyncScheduler), // ensure that callbacks are never called during .subscribe below
(0, operators_1.mergeScan)((_a, chunk) => {
return cancelled ? rxjs_1.EMPTY : (0, rxjs_1.from)(cb(chunk));
}, void 0, 1))
// The only way to avoid parallel invocations is to use mergeScan with final arg=1
.subscribe({ complete: onCompleteOrErr, error: onCompleteOrErr });
return () => {
cancelled = true;
s.unsubscribe();
};
};
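// Snapshot the current present and query everything up to it. Usage sketch
// (`fns` is the object returned below; `tags` is hypothetical):
//   const { events, upperBound } = await fns.queryAllKnown({ query: tags })
// The returned upperBound can serve as the lowerBound of a subsequent
// subscription, to continue from where the query left off.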
const queryAllKnown = async (query) => {
const curPresent = await present();
const rangeQuery = {
...query,
upperBound: curPresent,
};
const events = await queryKnownRange(rangeQuery);
return { events, lowerBound: query.lowerBound || {}, upperBound: curPresent };
};
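// Chunked variant: first resolve the present, then run a chunked range query
// up to it. Cancellation has to cover the async gap between the two steps,
// hence the `canceled` flag and the re-bound `cancelUpstream`.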
const queryAllKnownChunked = (query, chunkSize, onChunk, onComplete) => {
let canceled = false;
let cancelUpstream = () => {
onComplete && onComplete();
// This placeholder is replaced once the real query starts below
};
present()
.then((present) => {
if (canceled) {
return;
}
const rangeQuery = {
...query,
upperBound: present,
};
// this is safe because queryKnownRangeChunked doesn’t invoke callbacks synchronously
cancelUpstream = queryKnownRangeChunked(rangeQuery, chunkSize, onChunk, onComplete);
})
.catch((e) => onComplete && onComplete(e));
return () => {
canceled = true;
cancelUpstream();
};
};
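// Endless subscription that delivers single events. mergeScan with
// concurrency 1 guarantees that `onEvent` invocations never overlap, even
// when the callback returns a Promise.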
const subscribe = (openQuery, onEvent, onError) => {
const { lowerBound, query } = openQuery;
const lb = lowerBound || {};
const rxSub = eventStore
.subscribe(lb, query || types_1.allEvents)
.pipe((0, operators_1.subscribeOn)(rxjs_1.asyncScheduler), // ensure that no cb is called synchronously
(0, operators_1.map)(wrap), (0, operators_1.mergeScan)((_a, e) => (0, rxjs_1.from)(Promise.resolve(onEvent(e))), void 0, 1))
.subscribe({ error: onError || util_1.noop });
return () => rxSub.unsubscribe();
};
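// Endless subscription that buffers events into chunks (by time and size,
// defaulting to 5 ms / 1000 events) and sorts each chunk by EventKey before
// handing it to the bookkeeping wrapper.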
const subscribeChunked = (openQuery, cfg, onChunk, onError) => {
const { lowerBound, query } = openQuery;
const lb = lowerBound || {};
const cb = bookKeepingOnChunk(lb, onChunk);
const bufTime = cfg.maxChunkTimeMs || 5;
const bufSize = cfg.maxChunkSize || 1000;
const s = eventStore.subscribe(lb, query || types_1.allEvents);
const buffered = s.pipe((0, bufferOp_1.bufferOp)(bufTime, bufSize), (0, operators_1.map)((buf) => buf.sort(types_1.EventKey.ord.compare)));
// The only way to avoid parallel invocations is to use mergeScan with final arg=1
const rxSub = buffered
.pipe((0, operators_1.mergeScan)((_a, chunk) => (0, rxjs_1.from)(cb(chunk)), void 0, 1))
.subscribe({ error: onError || util_1.noop });
return () => rxSub.unsubscribe();
};
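// Translate raw subscribe_monotonic messages into the public shape, wrapping
// the contained events with metadata; state messages pass through unchanged.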
const convertMsg = (m) => {
switch (m.type) {
case types_1.MsgType.state:
return m;
case types_1.MsgType.events:
return {
type: types_1.MsgType.events,
events: m.events.map(wrap),
caughtUp: m.caughtUp,
};
case types_1.MsgType.timetravel:
return {
type: types_1.MsgType.timetravel,
trigger: m.trigger,
};
default:
throw new Error('Unknown msg type in: ' + JSON.stringify(m));
}
};
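// Actyx 2.12.0 added native support for subscribe_monotonic; older nodes get
// a client-side emulation. The choice is re-evaluated on every call,
// presumably because the connected node's version may change at runtime.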
const subMonoReal = (0, subscribe_monotonic_1.eventsMonotonic)(eventStore);
const subMonoEmulated = (0, subscribe_monotonic_emulated_1.eventsMonotonicEmulated)(eventStore, snapshotStore);
const subMono = () => ((0, semver_1.gte)(currentActyxVersion(), '2.12.0') ? subMonoReal : subMonoEmulated);
const subscribeMonotonic = (query, cb, onCompleteOrError) => {
const x = subMono()(query.sessionId, query.query, query.attemptStartFrom)
.pipe((0, operators_1.map)((x) => convertMsg(x)), (0, operators_1.mergeScan)((_a, m) => (0, rxjs_1.from)(Promise.resolve(cb(m))), void 0, 1))
// The only way to avoid parallel invocations is to use mergeScan with final arg=1
.subscribe({
complete: onCompleteOrError || util_1.noop,
error: onCompleteOrError || util_1.noop,
});
return () => x.unsubscribe();
};
// Find first currently known event according to given sorting
const findFirstKnown = async (query, order) => {
const cur = await present();
const firstEvent = await (0, rxjs_1.lastValueFrom)(eventStore.query({}, cur, query, order).pipe((0, rxjs_1.defaultIfEmpty)(null), (0, rxjs_1.first)()));
return [firstEvent ? wrap(firstEvent) : undefined, cur];
};
// Reduce over all currently known events with arbitrary logic; also returns the present offsets
const reduceUpToPresent = async (query, reduce, initial) => {
const cur = await present();
const reducedValue = await (0, rxjs_1.lastValueFrom)(eventStore
.query({}, cur, query,
// Doesn't matter, we have to go through all known events anyways
types_1.EventsSortOrder.Ascending)
.pipe((0, operators_1.map)((e) => wrap(e)), (0, operators_1.reduce)(reduce, initial)));
return [reducedValue, cur];
};
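// Emit `initial` (if any), then keep a running "best" event: whenever a live
// event wins the `shouldReplace` comparison, fire `onEvent` with the new
// winner.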
const callbackWhenReplaced = (query, startingOffsets, initial, onEvent, shouldReplace, onError) => {
let cur = initial;
if (cur) {
onEvent(cur.payload, cur.meta);
}
const cb = async (boxedChunk) => {
const untypedChunk = boxedChunk.events;
if (untypedChunk.length === 0) {
return;
}
const chunk = untypedChunk;
let replaced = false;
// Chunk is NOT sorted internally in live-mode. Any event may replace cur.
// (Actually, we are now internally sorting. Maybe this can be improved.)
for (const event of chunk) {
if (!cur || shouldReplace(event, cur)) {
cur = event;
replaced = true;
}
}
// Replaced=true implies cur!=null, but the compiler doesn't know.
if (replaced && cur) {
onEvent(cur.payload, cur.meta);
}
};
return subscribeChunked({ query, lowerBound: startingOffsets }, {}, cb, onError);
};
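// Reduce all known events to the best match according to `shouldReplace`,
// then keep it up to date from the live stream. Usage sketch (hypothetical
// `tags`; picks the newest event by timestamp, as observeLatest does below):
//   fns.observeBestMatch(tags, (0, Ord_1.gt)(exports._ordByTimestamp),
//     (payload, meta) => console.log('newest', payload))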
const observeBestMatch = (query, shouldReplace, onReplaced, onError) => {
let cancelled = false;
let cancelSubscription = () => (cancelled = true);
reduceUpToPresent(query, (e0, e1) => (!e0 || shouldReplace(e1, e0) ? e1 : e0), undefined)
.then(([initial, offsets]) => {
if (cancelled) {
return;
}
cancelSubscription = callbackWhenReplaced(query, offsets, initial, onReplaced, shouldReplace, onError);
if (cancelled) {
cancelSubscription();
}
})
.catch((e) => onError && onError(e));
return () => {
cancelled = true;
cancelSubscription();
};
};
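// Observe the earliest event matching the query. For timestamp order this is
// plain observeBestMatch; for Lamport (event-key) order the store can sort
// for us, so the initial candidate is fetched via an ascending query that
// stops at the first hit. observeLatest below mirrors this with descending
// order and the inverted comparison.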
const observeEarliest = (tq, onEvent, onError) => {
const { query, eventOrder } = tq;
if (eventOrder === event_fns_1.EventOrder.Timestamp) {
return observeBestMatch(query, (0, Ord_1.lt)(exports._ordByTimestamp), onEvent);
}
let cancelled = false;
let cancelSubscription = () => (cancelled = true);
// If Lamport order is desired, we can use store support to speed up the query.
findFirstKnown(query, types_1.EventsSortOrder.Ascending)
.then(([earliest, offsets]) => {
if (cancelled) {
return;
}
cancelSubscription = callbackWhenReplaced(query, offsets, earliest, onEvent, (0, Ord_1.lt)(exports._ordByKey), onError);
if (cancelled) {
cancelSubscription();
}
})
.catch((e) => onError && onError(e));
return () => {
cancelled = true;
cancelSubscription();
};
};
const observeLatest = (tq, onEvent, onError) => {
const { query, eventOrder } = tq;
if (eventOrder === event_fns_1.EventOrder.Timestamp) {
return observeBestMatch(query, (0, Ord_1.gt)(exports._ordByTimestamp), onEvent);
}
let cancelled = false;
let cancelSubscription = () => (cancelled = true);
// If Lamport order is desired, we can use store support to speed up the query.
findFirstKnown(query, types_1.EventsSortOrder.Descending)
.then(([latest, offsets]) => {
if (cancelled) {
return;
}
cancelSubscription = callbackWhenReplaced(query, offsets, latest, onEvent, (0, Ord_1.gt)(exports._ordByKey), onError);
if (cancelled) {
cancelSubscription();
}
})
.catch((e) => onError && onError(e));
return () => {
cancelled = true;
cancelSubscription();
};
};
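// Fold all events (past and live) into a single value, ignoring event order:
// the reducer's result must not depend on delivery order. Emits the updated
// value after every non-empty chunk.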
const observeUnorderedReduce = (query, reduce, initialVal, onUpdate, onError) => {
let cancelled = false;
let cancelSubscription = () => (cancelled = true);
const reduceDirect = (r, evt) => reduce(r, evt.payload, evt.meta);
reduceUpToPresent(query, reduceDirect, initialVal)
.then(([initial, offsets]) => {
if (cancelled) {
return;
}
let cur = initial;
onUpdate(cur);
const cb = async (chunk) => {
if (chunk.events.length === 0) {
return;
}
cur = chunk.events.reduce(reduceDirect, cur);
onUpdate(cur);
};
cancelSubscription = subscribeChunked({ query, lowerBound: offsets }, {}, cb, onError);
if (cancelled) {
cancelSubscription();
}
})
.catch((e) => onError && onError(e));
return () => {
cancelled = true;
cancelSubscription();
};
};
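// Persist a batch of tagged events. The ReplaySubject(1) caches the
// persistence result so that clients may still subscribe to the pending
// emission after it has already completed.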
const emit = (taggedEvents) => {
const events = taggedEvents.map(({ tags, event }) => {
return {
tags,
payload: event,
};
});
const allPersisted = new rxjs_1.ReplaySubject(1);
eventStore
.persistEvents(events)
.pipe((0, operators_1.toArray)(), (0, operators_1.map)((x) => x.flat().map(mkMeta)))
.subscribe(allPersisted);
return (0, types_1.pendingEmission)(allPersisted.asObservable());
};
// TS doesn’t understand how we are implementing this overload.
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
const publish = (taggedEvents) => {
if (Array.isArray(taggedEvents)) {
return emit(taggedEvents).toPromise();
}
else {
return emit([taggedEvents])
.toPromise()
.then((x) => x[0]);
}
};
// FIXME properly type EventStore. (This runs without error because in production mode the ws event store does not use io-ts.)
const wrapAql = (e) => {
const actualType = e.type;
if (actualType === 'offsets' || actualType === 'diagnostic') {
return e;
}
const w = wrap(e);
return {
...w,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type: actualType,
};
};
const getQueryAndOrd = (query) => {
if ((0, types_1.isString)(query)) {
return [query, types_1.EventsSortOrder.Ascending];
}
else {
return [query.query, query.order || types_1.EventsSortOrder.Ascending];
}
};
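// AQL entry points, built on the store's *unchecked* query methods (cf. the
// FIXME above wrapAql). Usage sketch, assuming a hypothetical query string:
//   const results = await fns.queryAql('FROM allEvents')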
const queryAql = async (query, lowerBound) => {
const [aql, ord] = getQueryAndOrd(query);
return (0, rxjs_1.lastValueFrom)(eventStore.queryUnchecked(aql, ord, lowerBound).pipe((0, operators_1.map)(wrapAql), (0, operators_1.toArray)()));
};
const subscribeAql = (query, onResponse, onError, lowerBound) => {
const lb = lowerBound || {};
const qr = typeof query === 'string' ? query : query.query;
const rxSub = eventStore
.subscribeUnchecked(qr, lb)
.pipe((0, operators_1.map)(wrapAql), (0, operators_1.mergeScan)((_a, r) => (0, rxjs_1.from)(Promise.resolve(onResponse(r))), void 0, 1))
.subscribe({ error: onError || util_1.noop });
return () => rxSub.unsubscribe();
};
const queryAqlChunked = (query, chunkSize, onChunk, onCompleteOrError) => {
const [aql, ord] = getQueryAndOrd(query);
const buffered = eventStore.queryUnchecked(aql, ord).pipe((0, operators_1.map)(wrapAql), (0, operators_1.bufferCount)(chunkSize));
// The only way to avoid parallel invocations is to use mergeScan with final arg=1
const rxSub = buffered
.pipe((0, operators_1.mergeScan)((_a, chunk) => (0, rxjs_1.from)(Promise.resolve(onChunk(chunk))), undefined, 1))
.subscribe({ error: onCompleteOrError, complete: onCompleteOrError });
return () => rxSub.unsubscribe();
};
return {
present,
offsets,
queryKnownRange,
queryKnownRangeChunked,
queryAllKnown,
queryAllKnownChunked,
queryAql,
queryAqlChunked,
subscribe,
subscribeChunked,
subscribeAql,
subscribeMonotonic,
observeEarliest,
observeLatest,
observeBestMatch,
observeUnorderedReduce,
emit,
publish,
};
};
exports.EventFnsFromEventStoreV2 = EventFnsFromEventStoreV2;
//# sourceMappingURL=event-fns-impl.js.map