raiden-ts

Raiden Light Client Typescript/Javascript SDK

"use strict"; var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); exports.checkContractHasMethod$ = exports.contractHasMethod = exports.getNetworkName = exports.logToContractEvent = exports.fromEthersEvent = exports.getLogsByChunk$ = void 0; /* eslint-disable @typescript-eslint/no-explicit-any */ const bytes_1 = require("@ethersproject/bytes"); const providers_1 = require("@ethersproject/providers"); const memoize_1 = __importDefault(require("lodash/memoize")); const rxjs_1 = require("rxjs"); const operators_1 = require("rxjs/operators"); const constants_1 = require("../constants"); const error_1 = require("./error"); const rx_1 = require("./rx"); const types_1 = require("./types"); /** * Like JsonRpcProvider.getLogs, but split block scan range in chunks, adapting to smaller chunks * in case provider times out with such big ranges, and also supporting arrays of addresses in * filter.address field, to scan multiple similar contracts on a single request. * * @param provider - Provider to getLogs from * @param filter - getLogs filter * @param filter.address - Contract address or array of addresses * @param filter.topics - Array of topics * @param filter.fromBlock - Scan block start * @param filter.toBlock - Scan block end * @param chunk - Initial chunk size * @param minChunk - Minimum chunk size in case of getLogs errors * @returns Observable of fetched logs */ function getLogsByChunk$(provider, filter, chunk = 1e5, minChunk = 1e3) { const { fromBlock, toBlock } = filter; // this defer ensures consistent behavior upon re-subscription return (0, rxjs_1.defer)(() => { let start = fromBlock; let curChunk = Math.min(chunk, toBlock - fromBlock + 1); let retry = 5; // every time repeatWhen re-subscribes to this defer, yield (current/retried/next) range/chunk return (0, rxjs_1.defer)(async () => provider.send('eth_getLogs', [ { ...filter, fromBlock: (0, bytes_1.hexValue)(start), toBlock: (0, bytes_1.hexValue)(Math.min(start + curChunk - 1, toBlock)), }, ])).pipe( // mimics the post-request handling on BaseProvider.getLogs (0, operators_1.map)((logs) => { logs.forEach((log) => { if (log.removed == null) log.removed = false; }); return providers_1.Formatter.arrayOf(provider.formatter.filterLog.bind(provider.formatter))(logs); }), (0, operators_1.tap)({ complete: () => (start += curChunk), error: () => { // on error, halven curChunk (retried), with minChunk as lower bound; curChunk = Math.round(curChunk / 2); if (curChunk < minChunk) { curChunk = minChunk; retry--; } }, }), (0, operators_1.mergeMap)((logs) => (0, rxjs_1.from)(logs)), // unwind // if it still fail [retry] times on lower bound, give up; // otherwise wait pollingInterval before retrying (0, operators_1.catchError)((err) => { if (retry < 0) throw err; return (0, rxjs_1.timer)(provider.pollingInterval).pipe((0, operators_1.ignoreElements)()); }), // repeat from inner defer while there's still ranges to scan (0, operators_1.repeatWhen)((complete$) => complete$.pipe((0, operators_1.takeWhile)(() => start <= toBlock)))); }); } exports.getLogsByChunk$ = getLogsByChunk$; /** * Like rxjs' fromEvent, but event can be an EventFilter * * @param target - Object to hook event listener, maybe a Provider or Contract * @param event - EventFilter or string representing the event to listen to * @param opts - Options object * @param opts.fromBlock - Block since when to fetch events from * @param opts.confirmations 
/**
 * Like rxjs' fromEvent, but the event can be an EventFilter
 *
 * @param target - Object to hook the event listener on, maybe a Provider or Contract
 * @param event - EventFilter or string representing the event to listen to
 * @param opts - Options object
 * @param opts.fromBlock - Block since when to fetch events from
 * @param opts.confirmations - After how many blocks a tx is considered confirmed; if observable,
 * it should have a value at subscription time, like a ReplaySubject(1)
 * @param opts.blockNumber$ - New blockNumber observable
 * @param opts.onPastCompleted - Callback called when the first/past blocks scan completes
 * @returns Observable of target.on(event) events
 */
function fromEthersEvent(target, event, { fromBlock, confirmations, blockNumber$, onPastCompleted, } = {}) {
    if (typeof event === 'string' || Array.isArray(event))
        return (0, rxjs_1.fromEventPattern)((handler) => target.on(event, handler), (handler) => target.removeListener(event, handler));
    const confirmations$ = !confirmations
        ? (0, rxjs_1.of)(constants_1.DEFAULT_CONFIRMATIONS)
        : typeof confirmations === 'number'
            ? (0, rxjs_1.of)(confirmations)
            : confirmations;
    // sorted 'fromBlock' queue, at most of [confirmations * 2] size
    const blockQueue = [0];
    let start = Date.now();
    return confirmations$.pipe((0, operators_1.distinctUntilChanged)(), (0, rx_1.withMergeFrom)((confirmationBlocks) => {
        if (!fromBlock) {
            let resetBlock = target._lastBlockNumber;
            const innerBlockNumber = target.blockNumber;
            resetBlock = resetBlock && resetBlock > confirmationBlocks
                ? resetBlock
                : innerBlockNumber && innerBlockNumber > confirmationBlocks
                    ? innerBlockNumber
                    : confirmationBlocks + 1;
            // start 'blockQueue' at subscription-time's resetEventsBlock
            fromBlock = resetBlock - confirmationBlocks;
        }
        blockQueue.splice(0, blockQueue.length, fromBlock);
        return blockNumber$ ?? fromEthersEvent(target, 'block');
    }, operators_1.switchMap), (0, operators_1.debounceTime)(Math.ceil(target.pollingInterval / 10)), // debounce bursts of blocks
    // exhaustMap will skip new events if it's still busy with a previous getLogs call,
    // but the next [fromBlock] in the queue always includes the range for any skipped block
    (0, operators_1.exhaustMap)(([confirmationBlocks, blockNumber]) => getLogsByChunk$(target, {
        ...event,
        fromBlock: blockQueue[0],
        toBlock: blockNumber,
    }).pipe((0, operators_1.tap)({
        next: (log) => {
            // don't clear blockQueue for non-confirmed logs
            if (!log.blockNumber || log.blockNumber + confirmationBlocks > blockNumber)
                return;
            const nextBlock = log.blockNumber + 1;
            // index of the first block which should stay on the queue
            let clearHead = blockQueue.findIndex((b) => b > nextBlock);
            if (!clearHead)
                return;
            else if (clearHead < 0)
                clearHead = blockQueue.length; // clear whole queue
            blockQueue.splice(0, clearHead, nextBlock);
            // invariant: blockQueue length never increases here
        },
        complete: () => {
            // if the queue is full, pop_front the 'fromBlock' which was just queried;
            // half for confirmed, half for unconfirmed logs
            while (blockQueue.length && blockQueue.length >= confirmationBlocks * 2)
                blockQueue.shift();
            if (onPastCompleted && start) {
                // this is called only once, as soon as the first/past blocks scan completes
                onPastCompleted(Date.now() - start);
                start = 0;
            }
            // push_back the next block iff getLogs didn't throw, so the queue is never empty
            blockQueue.push(blockNumber + 1);
        },
    }))));
}
exports.fromEthersEvent = fromEthersEvent;
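// Usage sketch: with a plain event name, fromEthersEvent behaves like rxjs' fromEvent; with an
// EventFilter, it runs the chunked, confirmation-aware log scanning above. The provider URL,
// address, confirmations and fromBlock below are placeholders.
/*
const { JsonRpcProvider } = require('@ethersproject/providers');
const { fromEthersEvent } = require('./ethers'); // i.e. this module

const provider = new JsonRpcProvider('https://rpc.example.org'); // hypothetical endpoint
// 1) plain event name: emits every new block number
fromEthersEvent(provider, 'block').subscribe((blockNumber) => console.log('new block', blockNumber));
// 2) EventFilter: emits past and new logs matching the filter, respecting confirmations
fromEthersEvent(provider, { address: '0x0000000000000000000000000000000000000001' }, {
    confirmations: 5, // may also be an Observable with an initial value, e.g. a ReplaySubject(1)
    fromBlock: 14000000,
    onPastCompleted: (elapsedMs) => console.log('past logs scanned in', elapsedMs, 'ms'),
}).subscribe((log) => console.log('log at block', log.blockNumber));
*/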
/**
 * Function to map an ethers Provider log to a contract event tuple.
 * It requires logs coming from the getLogsByChunk$ or fromEthersEvent overloads, which tag at
 * type-check time to which set of events the logs belong, and uses that information to narrow
 * the types of the tuple events emitted.
 *
 * @param contract - Contract to parse logs for
 * @returns Function to map logs to event tuples for contract
 */
function logToContractEvent(contract) {
    return function mapper(log) {
        // parse log into an [...args, event: Event] array,
        // the same as contract.on events/callbacks
        const parsed = contract.interface.parseLog(log);
        // not all parameters are quite needed right now, but let's comply with the interface
        const event = {
            ...log,
            ...parsed,
            removeListener: () => {
                /* getLogs doesn't install a filter */
            },
            getBlock: () => contract.provider.getBlock(log.blockHash),
            getTransaction: () => contract.provider.getTransaction(log.transactionHash),
            getTransactionReceipt: () => contract.provider.getTransactionReceipt(log.transactionHash),
        };
        return [...parsed.args, event];
    };
}
exports.logToContractEvent = logToContractEvent;
/**
 * Return a network name, if known, or the stringified chainId otherwise
 *
 * @param network - Network to get the name from
 * @returns name or chainId as string
 */
function getNetworkName(network) {
    let name = network.name;
    switch (network.name) {
        case 'homestead':
            name = 'mainnet';
            break;
        case 'arbitrum-rinkeby':
            name = 'rinkeby-arbitrum';
            break;
        case 'arbitrum':
            name = 'arbitrum-one';
            break;
        case 'unknown':
            name = network.chainId.toString();
            break;
    }
    return name;
}
exports.getNetworkName = getNetworkName;
// memoized getter of a contract's code as hex string
const getContractCode = (0, memoize_1.default)(async function _getContractCode(address, provider) {
    return provider.getCode(address);
});
/**
 * Verify that a contract has a given method
 *
 * @param sighash - Method to search for, as signature hash
 * @param contract - Contract-like interface
 * @param contract.address - Contract's address
 * @param contract.provider - Contract's provider
 * @returns truthy if the contract has a method with the given signature
 */
async function contractHasMethod(sighash, { address, provider }) {
    const code = await getContractCode(address, provider);
    const push4opcode = '63'; // 0x63 is the PUSH4 opcode, which prefixes the sighash in method contracts
    return code.includes(push4opcode + sighash.substring(2));
}
exports.contractHasMethod = contractHasMethod;
/**
 * Fetches the contract's code and checks if it has the given method (by name)
 *
 * @param contract - Contract instance to check
 * @param method - Method name
 * @returns Observable of true, emitting a single value if successful, or erroring
 */
function checkContractHasMethod$(contract, method) {
    return (0, rxjs_1.defer)(async () => {
        const sighash = contract.interface.getSighash(method);
        // decode shouldn't fail if building with ^0.39 contracts, but the runtime may be running
        // with 0.37 contracts, and the only way to know is by checking the contract's code (memoized)
        (0, error_1.assert)(await contractHasMethod((0, types_1.decode)((0, types_1.HexString)(4), sighash, 'signature hash not found'), contract), ['contract does not have method', { contract: contract.address, method }]);
        return true;
    });
}
exports.checkContractHasMethod$ = checkContractHasMethod$;
//# sourceMappingURL=ethers.js.map
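// Usage sketch: decoding raw logs into [...args, event] tuples with logToContractEvent, and
// guarding on checkContractHasMethod$. `tokenNetwork` stands for any ethers Contract instance
// and 'openChannel' for one of its method names; both are assumptions for the example.
/*
const { map } = require('rxjs/operators');
const { fromEthersEvent, logToContractEvent, checkContractHasMethod$ } = require('./ethers');

// decode every log emitted by the contract into a [...args, event] tuple
fromEthersEvent(tokenNetwork.provider, { address: tokenNetwork.address })
    .pipe(map(logToContractEvent(tokenNetwork)))
    .subscribe((argsAndEvent) => {
        const event = argsAndEvent[argsAndEvent.length - 1]; // last element is the Event object
        console.log(event.name, 'at block', event.blockNumber, argsAndEvent.slice(0, -1));
    });

// emits true only if the deployed bytecode contains the method's signature hash
checkContractHasMethod$(tokenNetwork, 'openChannel').subscribe({
    next: () => console.log('contract supports openChannel'),
    error: (err) => console.error(err), // errors with 'contract does not have method' otherwise
});
*/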