// raiden-ts: Raiden Light Client Typescript/Javascript SDK
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.channelMonitoredEpic = exports.channelEventsEpic = exports.initMonitorProviderEpic = exports.initTokensRegistryEpic = void 0;
const abi_1 = require("@ethersproject/abi");
const constants_1 = require("@ethersproject/constants");
const isEmpty_1 = __importDefault(require("lodash/isEmpty"));
const sortBy_1 = __importDefault(require("lodash/sortBy"));
const rxjs_1 = require("rxjs");
const operators_1 = require("rxjs/operators");
const actions_1 = require("../../actions");
const constants_2 = require("../../constants");
const contracts_1 = require("../../contracts");
const error_1 = require("../../utils/error");
const ethers_1 = require("../../utils/ethers");
const rx_1 = require("../../utils/rx");
const types_1 = require("../../utils/types");
const actions_2 = require("../actions");
const utils_1 = require("../utils");
const tokenNetworkInterface = contracts_1.TokenNetwork__factory.createInterface();
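/**
 * Scan the TokenNetworkRegistry for TokenNetworkCreated events since the registry's deployment
 * block. The first two registered token networks are always monitored; any later token network is
 * monitored only if a ChannelOpened log involving our address is found for it (only the first
 * matching log per token network is acted upon).
 */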
function scanRegistryTokenNetworks({ address, provider, registryContract, contractsInfo, }) {
const encodedAddress = abi_1.defaultAbiCoder.encode(['address'], [address]);
return (0, ethers_1.getLogsByChunk$)(provider, Object.assign(registryContract.filters.TokenNetworkCreated(), {
fromBlock: contractsInfo.TokenNetworkRegistry.block_number,
toBlock: provider.blockNumber,
})).pipe((0, operators_1.map)((0, ethers_1.logToContractEvent)(registryContract)), (0, operators_1.filter)(([, tokenNetwork]) => !!tokenNetwork), (0, operators_1.toArray)(), (0, operators_1.mergeMap)((logs) => {
const alwaysMonitored$ = (0, rxjs_1.from)(logs.splice(0, 2).map(([token, tokenNetwork, , event]) => (0, actions_2.tokenMonitored)({
token: token,
tokenNetwork: tokenNetwork,
fromBlock: event.blockNumber,
})));
let monitorsIfHasChannels$ = rxjs_1.EMPTY;
if (logs.length) {
const firstBlock = (0, types_1.last)(logs[0]).blockNumber;
const tokenNetworks = new Map(logs.map(([token, tokenNetwork, , event]) => [tokenNetwork, [token, event]]));
const allTokenNetworkAddrs = Array.from(tokenNetworks.keys());
// simultaneously query all tokenNetworks for channels from us and to us
monitorsIfHasChannels$ = (0, rxjs_1.merge)((0, ethers_1.getLogsByChunk$)(provider, {
address: allTokenNetworkAddrs,
topics: [channelEventsTopics.openTopic, null, encodedAddress],
fromBlock: firstBlock,
toBlock: provider.blockNumber,
}), (0, ethers_1.getLogsByChunk$)(provider, {
address: allTokenNetworkAddrs,
topics: [channelEventsTopics.openTopic, null, null, encodedAddress],
fromBlock: firstBlock,
toBlock: provider.blockNumber,
})).pipe((0, operators_1.distinct)((log) => log.address), // only act on the first log found for each tokenNetwork
(0, operators_1.filter)((log) => tokenNetworks.has(log.address)), // shouldn't fail
(0, operators_1.map)((log) => (0, actions_2.tokenMonitored)({
token: tokenNetworks.get(log.address)[0],
tokenNetwork: log.address,
fromBlock: log.blockNumber,
})));
}
return (0, rxjs_1.merge)(alwaysMonitored$, monitorsIfHasChannels$);
}));
}
/**
* If state.tokens is empty (usually only on first run), scan the registry and token networks for
* registered TokenNetworks of interest (ones which have or had channels with us) and monitor them.
* Otherwise, just emit tokenMonitored actions for all previously monitored TokenNetworks.
*
* @param action$ - Observable of RaidenActions
* @param state$ - Observable of RaidenStates
* @param deps - RaidenEpicDeps members
* @param deps.address - Our address
* @param deps.provider - Eth provider
* @param deps.registryContract - TokenNetworkRegistry contract instance
* @param deps.contractsInfo - Contracts info mapping
* @param deps.init$ - Init$ tasks subject
* @returns Observable of tokenMonitored actions
*/
function initTokensRegistryEpic(action$, state$, deps) {
return action$.pipe((0, operators_1.filter)(actions_2.newBlock.is), (0, operators_1.take)(1), (0, operators_1.withLatestFrom)(state$), (0, operators_1.mergeMap)(([, state]) => {
const initSub = new rxjs_1.AsyncSubject();
deps.init$.next(initSub);
let monitored$;
if ((0, isEmpty_1.default)(state.tokens))
monitored$ = scanRegistryTokenNetworks(deps);
else
monitored$ = (0, rxjs_1.from)(Object.entries(state.tokens).map(([token, tokenNetwork]) => (0, actions_2.tokenMonitored)({ token: token, tokenNetwork })));
return monitored$.pipe((0, operators_1.finalize)(() => initSub.complete()));
}));
}
exports.initTokensRegistryEpic = initTokensRegistryEpic;
/**
* Monitor the provider to ensure the account remains available and the network stays the same
*
* @param action$ - Observable of RaidenActions
* @param state$ - Observable of RaidenStates
* @param deps - RaidenEpicDeps members
* @param deps.address - Our address
* @param deps.network - Current network
* @param deps.provider - Eth provider
* @param deps.main - Main account
* @returns Observable of raidenShutdown actions
*/
function initMonitorProviderEpic(action$, {}, { address, main, network, provider }) {
const mainAddress = main?.address ?? address;
let isProviderAccount;
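// poll the provider every pollingInterval; exhaustMap ensures a new check only starts once the
// previous one has finished, and completeWith(action$) stops polling when the action stream completes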
return (0, rxjs_1.timer)(0, provider.pollingInterval).pipe((0, rx_1.completeWith)(action$), (0, operators_1.exhaustMap)(async () => {
try {
const [accounts, currentNetwork] = await Promise.all([
isProviderAccount === false ? Promise.resolve(null) : provider.listAccounts(),
provider.getNetwork(),
]);
// usually, getNetwork will reject if 'underlying network changed', but let's assert here
// as well against our state's network to be double-sure
(0, error_1.assert)(currentNetwork.chainId === network.chainId, 'network changed');
// at init time, check if our address is in provider's accounts list;
// if not, it means Signer is a local Wallet or another non-provider-side account
if (isProviderAccount === undefined)
isProviderAccount = accounts?.includes(mainAddress);
if (isProviderAccount && accounts && !accounts.includes(mainAddress))
return (0, actions_1.raidenShutdown)({ reason: constants_2.ShutdownReason.ACCOUNT_CHANGED });
}
catch (error) {
if (error_1.ErrorCodec.is(error) && error.message.includes('network changed'))
return (0, actions_1.raidenShutdown)({ reason: constants_2.ShutdownReason.NETWORK_CHANGED });
// ignore network errors, so they're retried by timer
if ((0, error_1.matchError)(error_1.networkErrors, error))
return;
throw error;
}
}), (0, operators_1.filter)(types_1.isntNil));
}
exports.initMonitorProviderEpic = initMonitorProviderEpic;
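// ChannelSettled event signature as declared by the 0.37 contracts; used below to detect and
// re-encode old-format logs (see mapOldToNewLogs)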
const oldSettledFragment = abi_1.EventFragment.fromString('ChannelSettled(uint256 indexed,uint256,bytes32,uint256,bytes32)');
const channelEventsTopics = {
openTopic: tokenNetworkInterface.getEventTopic('ChannelOpened'),
depositTopic: tokenNetworkInterface.getEventTopic('ChannelNewDeposit'),
withdrawTopic: tokenNetworkInterface.getEventTopic('ChannelWithdraw'),
closedTopic: tokenNetworkInterface.getEventTopic('ChannelClosed'),
settledTopic: tokenNetworkInterface.getEventTopic('ChannelSettled'),
oldSettledTopic: tokenNetworkInterface.getEventTopic(oldSettledFragment),
};
/**
* 0.37 contracts had ChannelSettled event parameters as [id,amount1,hash1,amount2,hash2], but 0.39
* (our build base) emits/declares [id,addr1,amount1,hash1,addr2,amount2,hash2], i.e. expects addr1
* and addr2 before the respective amounts. In order for the contract object to be able to parse
* the old events, we need to map them to be compatible with the new ABI. Since we don't use these
* parameters and only care about the channelId, we can put zeroed addresses there.
* FIXME: remove this function once compatibility with the old contracts is no longer needed
*
* @param log - Log of old or new contracts
* @returns log compatible with contracts initialized with new ABI
*/
function mapOldToNewLogs(log) {
if (log.topics[0] === channelEventsTopics.oldSettledTopic) {
const decoded = tokenNetworkInterface.decodeEventLog(oldSettledFragment, log.data, log.topics);
log = {
...log,
// re-encode old log as new, inserting dummy addresses as parameters[1,4]
...tokenNetworkInterface.encodeEventLog(tokenNetworkInterface.getEvent('ChannelSettled'), [
decoded[0],
constants_1.AddressZero,
decoded[1],
decoded[2],
constants_1.AddressZero,
decoded[3],
decoded[4],
]),
};
}
return log;
}
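/**
 * Operator factory: maps decoded TokenNetwork events to the corresponding channel actions
 * (channelOpen.success, channelDeposit.success, channelWithdrawn, channelClose.success,
 * channelSettle.success), filtering out events for channels which aren't with us or are outdated.
 */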
function mapChannelEventsToAction([token, tokenNetwork], { address, latest$ }) {
const { openTopic, depositTopic, withdrawTopic, closedTopic, settledTopic } = channelEventsTopics;
return (input$) => input$.pipe((0, operators_1.withLatestFrom)(latest$), (0, operators_1.map)(([args, { state, config }]) => {
const id = args[0].toNumber();
// if it's undefined, this channel is unknown/not with us, and should be filtered out
const channel = Object.values(state.channels).find((c) => c.tokenNetwork === tokenNetwork && c.id === id);
const event = (0, types_1.last)(args);
const topic = event.topics?.[0];
const txHash = event.transactionHash;
const txBlock = event.blockNumber;
const confirmed = txBlock + config.confirmationBlocks <= state.blockNumber ? true : undefined;
let action;
switch (topic) {
case openTopic: {
const [, p1, p2] = args;
// filter out open events not with us
if ((address === p1 || address === p2) && (!channel || id > channel.id)) {
const partner = (address == p1 ? p2 : p1);
action = actions_2.channelOpen.success({
id,
token: token,
isFirstParticipant: address === p1,
txHash,
txBlock,
confirmed,
}, { tokenNetwork, partner });
}
break;
}
case depositTopic: {
const [, participant, totalDeposit] = args;
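// only emit if totalDeposit increased over the deposit already known for that participant,
// so outdated or duplicate logs are ignored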
if (channel?.id === id &&
totalDeposit.gt(channel[participant === channel.partner.address ? 'partner' : 'own'].deposit))
action = actions_2.channelDeposit.success({
id,
participant: participant,
totalDeposit: totalDeposit,
txHash,
txBlock,
confirmed,
}, { tokenNetwork, partner: channel.partner.address });
break;
}
case withdrawTopic: {
const [, participant, totalWithdraw] = args;
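// likewise, only emit if totalWithdraw increased over the withdraw already known for that participant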
if (channel?.id === id &&
totalWithdraw.gt(channel[participant === channel.partner.address ? 'partner' : 'own'].withdraw))
action = (0, actions_2.channelWithdrawn)({
id,
participant: participant,
totalWithdraw: totalWithdraw,
txHash,
txBlock,
confirmed,
}, { tokenNetwork, partner: channel.partner.address });
break;
}
case closedTopic: {
if (channel?.id === id && !('closeBlock' in channel)) {
const [, participant] = args;
action = actions_2.channelClose.success({ id, participant: participant, txHash, txBlock, confirmed }, { tokenNetwork, partner: channel.partner.address });
}
break;
}
case settledTopic: {
// settle may only happen more than confirmation blocks after opening, so be stricter;
// oldSettledTopic & settledTopic both have id as first arg, so it's compatible
if (channel?.id === id)
action = actions_2.channelSettle.success({ id, txHash, txBlock, confirmed, locks: channel.partner.locks }, { tokenNetwork, partner: channel.partner.address });
break;
}
}
return action; // action isn't any, it gets its type from assignments above
}), (0, operators_1.filter)(types_1.isntNil));
}
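/**
 * Fetch past channel events in the [fromBlock, toBlock] range for a given token network:
 * ChannelOpened events to or from us are fetched first, then all other channel events for those
 * channel ids (plus ids of channels already in state); results are sorted by block number and
 * transaction index before being mapped to channel actions.
 */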
function fetchPastChannelEvents$([fromBlock, toBlock], [token, tokenNetwork], deps) {
const { address, provider, latest$, getTokenNetworkContract } = deps;
const tokenNetworkContract = getTokenNetworkContract(tokenNetwork);
// start by scanning the [fromBlock, toBlock] interval for ChannelOpened events with us as either participant
return (0, rxjs_1.merge)((0, ethers_1.getLogsByChunk$)(provider, Object.assign(tokenNetworkContract.filters.ChannelOpened(null, address, null), {
fromBlock,
toBlock,
})), (0, ethers_1.getLogsByChunk$)(provider, Object.assign(tokenNetworkContract.filters.ChannelOpened(null, null, address), {
fromBlock,
toBlock,
}))).pipe((0, operators_1.map)((0, ethers_1.logToContractEvent)(tokenNetworkContract)), (0, operators_1.toArray)(), (0, operators_1.withLatestFrom)(latest$), (0, operators_1.mergeMap)(([logs, { state }]) => {
// filter out open events for channels which we already know are gone
const openEvents = logs.filter(([_id, p1, p2]) => {
const partner = (address === p1 ? p2 : p1);
const id = _id.toNumber();
const key = (0, utils_1.channelKey)({ tokenNetwork, partner });
const channel = state.channels[key];
// filter out settled or old channels; no new event could come from them
return !((0, utils_1.channelUniqueKey)({ id, tokenNetwork, partner }) in state.oldChannels ||
(channel && id < channel.id));
});
const channelIds = [
...openEvents,
...Object.values(state.channels)
.filter((c) => c.tokenNetwork === tokenNetwork)
.map((c) => [c.id]), // use previous confirmed channels ids
].map(([id]) => abi_1.defaultAbiCoder.encode(['uint256'], [id]));
if (channelIds.length === 0)
return rxjs_1.EMPTY;
// get all events of interest in the block range for all channelIds from open events above
const allButOpenedFilter = {
address: tokenNetwork,
topics: [
// events of interest as topics[0], without open events (already fetched above)
Object.values(channelEventsTopics).filter((topic) => topic !== channelEventsTopics.openTopic),
channelIds, // ORed channelIds set as topics[1]=channelId
],
};
return (0, ethers_1.getLogsByChunk$)(provider, Object.assign(allButOpenedFilter, { fromBlock, toBlock })).pipe((0, operators_1.map)(mapOldToNewLogs), (0, operators_1.map)((0, ethers_1.logToContractEvent)(tokenNetworkContract)), (0, operators_1.toArray)(),
// synchronously sort/interleave open|(deposit|withdraw|close|settle) events, and unwind
(0, operators_1.mergeMap)((logs) => {
const allEvents = [...openEvents, ...logs];
return (0, rxjs_1.from)((0, sortBy_1.default)(allEvents, [
(args) => (0, types_1.last)(args).blockNumber,
(args) => (0, types_1.last)(args).transactionIndex,
]));
}));
}), mapChannelEventsToAction([token, tokenNetwork], deps));
}
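/**
 * Watch the token network for new channel events from fromBlock onwards, honoring the configured
 * confirmationBlocks, and map them to channel actions.
 */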
function fetchNewChannelEvents$(fromBlock, [token, tokenNetwork], deps) {
const { provider, getTokenNetworkContract, config$, latest$ } = deps;
const tokenNetworkContract = getTokenNetworkContract(tokenNetwork);
const blockNumber$ = latest$.pipe((0, rx_1.pluckDistinct)('state', 'blockNumber'));
// this mapping is needed to handle channel events emitted before open is confirmed/stored
const channelFilter = {
address: tokenNetwork,
// set only topics[0], to also receive open events (new ids); filtering happens client-side
topics: [Object.values(channelEventsTopics)],
};
return (0, ethers_1.fromEthersEvent)(provider, channelFilter, {
fromBlock,
blockNumber$,
confirmations: config$.pipe((0, operators_1.pluck)('confirmationBlocks')),
}).pipe((0, operators_1.map)(mapOldToNewLogs), (0, operators_1.map)((0, ethers_1.logToContractEvent)(tokenNetworkContract)), mapChannelEventsToAction([token, tokenNetwork], deps));
}
/**
* Listen to the TokenNetwork contract for channel events
* Currently monitored events:
* - ChannelOpened, fires a channelOpen.success action
* - ChannelNewDeposit, fires a channelDeposit.success action
* - ChannelWithdraw, fires a channelWithdrawn action
* - ChannelClosed, fires a channelClose.success action
* - ChannelSettled, fires a channelSettle.success action
* Also emits tokenMonitored to tell we're monitoring a tokenNetwork, with the [fromBlock, toBlock]
* range of fetched past events
*
* @param action$ - Observable of RaidenActions
* @param state$ - Observable of RaidenStates
* @param deps - RaidenEpicDeps members
* @returns Observable of channelOpen.success, channelDeposit.success, channelWithdrawn,
* channelClose.success and channelSettle.success actions
*/
function channelEventsEpic(action$, {}, deps) {
const resetEventsBlock = deps.provider._lastBlockNumber;
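// newBlock's blockNumber is multicast through a ReplaySubject(1), so each tokenMonitored
// token network (deduplicated by address) picks up the latest block as the toBlock for its
// past-events scan before switching to live monitoring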
return action$.pipe((0, operators_1.filter)(actions_2.newBlock.is), (0, operators_1.pluck)('payload', 'blockNumber'), (0, operators_1.connect)((blockNumber$) => action$.pipe((0, operators_1.filter)(actions_2.tokenMonitored.is), (0, operators_1.distinct)((action) => action.payload.tokenNetwork), (0, operators_1.withLatestFrom)(deps.config$), (0, operators_1.mergeMap)(([action, { confirmationBlocks }]) => {
const { token, tokenNetwork } = action.payload;
// fromBlock is the block of the latest on-chain event seen for this contract (from the
// tokenMonitored payload), falling back to the provider's reset block minus confirmationBlocks
const fromBlock = action.payload.fromBlock ?? resetEventsBlock - confirmationBlocks;
// notifies when past events fetching completes
const pastDone$ = new rxjs_1.AsyncSubject();
deps.init$.next(pastDone$);
// blockNumber$ holds latest blockNumber, or waits for it to be fetched
return blockNumber$.pipe((0, operators_1.first)(), (0, operators_1.mergeMap)((toBlock) =>
// this merge + finalize + delayWhen on an AsyncSubject outputs like concat, but ensures
// both subscriptions are made simultaneously, so no new events are missed while past events
// are still being fetched, and new events are only emitted after all past events
(0, rxjs_1.merge)(fetchPastChannelEvents$([fromBlock, toBlock], [token, tokenNetwork], deps).pipe((0, operators_1.finalize)(() => {
pastDone$.next(true);
pastDone$.complete();
})), fetchNewChannelEvents$(toBlock + 1, [token, tokenNetwork], deps).pipe((0, operators_1.delayWhen)(() => pastDone$)))));
})), { connector: () => new rxjs_1.ReplaySubject(1) }), (0, rx_1.completeWith)(action$));
}
exports.channelEventsEpic = channelEventsEpic;
/**
* Emit channelMonitored action for channels on state
*
* @param state$ - Observable of RaidenStates
* @returns Observable of channelMonitored actions
*/
function channelMonitoredEpic({}, state$) {
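// groupChannel groups channel states into one observable per channel; taking only its first
// emission yields exactly one channelMonitored action per channel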
return state$.pipe((0, utils_1.groupChannel)(), (0, operators_1.mergeMap)((grouped$) => grouped$.pipe((0, operators_1.first)(), (0, operators_1.map)((channel) => (0, actions_2.channelMonitored)({ id: channel.id }, { tokenNetwork: channel.tokenNetwork, partner: channel.partner.address })))));
}
exports.channelMonitoredEpic = channelMonitoredEpic;
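// Usage sketch (illustrative only, not part of this module): each exported epic follows a
// redux-observable-style signature (action$, state$, deps) => Observable<action>, so a
// hypothetical root epic could merge their outputs; `rootEpic` below is an assumed name for
// this example, not an SDK export:
// const rootEpic = (action$, state$, deps) =>
//     (0, rxjs_1.merge)(initTokensRegistryEpic(action$, state$, deps),
//         initMonitorProviderEpic(action$, state$, deps),
//         channelEventsEpic(action$, state$, deps),
//         channelMonitoredEpic(action$, state$, deps));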
//# sourceMappingURL=monitor.js.map