@waku/core
TypeScript implementation of the Waku v2 protocol
import { e as equals$2, c as coerce, b as base32, a as base58btc, d as base36, v as version_0, f as allocUnsafe, g as alloc, h as encodingLength$1, i as encode$3, j as decode$4, L as Logger, F as FilterSubscribeRequest, k as FilterSubscribeResponse$1, M as MessagePush, P as ProtocolError, l as PushRpc$1, m as PushResponse, S as StoreQueryRequest$1, n as StoreQueryResponse$1, C as CONNECTION_LOCKED_TAG, T as Tags, o as createEncoder, p as pubsubTopicToSingleShardInfo, u as utf8ToBytes, q as contentTopicToShardIndex, t as toString, r as fromString, s as hexToBytes, w as isBytes, x as abytes, y as bytesToHex, z as concatBytes, A as anumber, B as randomBytes, D as sha512, E as enumeration, G as message, H as encodeMessage, I as decodeMessage, J as Hash, K as ahash, N as toBytes, O as clean, Q as aexists, R as sha256$1, U as bases, V as base64url, W as encodeUint8Array, X as bytesToUtf8, Y as WakuMetadataRequest, Z as WakuMetadataResponse, _ as concat$1, $ as sha256$2, a0 as bytesToHex$1, a1 as numberToBytes } from './version_0-BHaZD8Qu.js';
export { a2 as createDecoder } from './version_0-BHaZD8Qu.js';
/* eslint-disable */
var encode_1 = encode$2;
var MSB = 0x80, MSBALL = -128, INT = Math.pow(2, 31);
/**
* @param {number} num
* @param {number[]} out
* @param {number} offset
*/
function encode$2(num, out, offset) {
out = out || [];
offset = offset || 0;
var oldOffset = offset;
while (num >= INT) {
out[offset++] = (num & 0xFF) | MSB;
num /= 128;
}
while (num & MSBALL) {
out[offset++] = (num & 0xFF) | MSB;
num >>>= 7;
}
out[offset] = num | 0;
// @ts-ignore
encode$2.bytes = offset - oldOffset + 1;
return out;
}
var decode$3 = read;
var MSB$1 = 0x80, REST$1 = 0x7F;
/**
* @param {string | any[]} buf
* @param {number} offset
*/
function read(buf, offset) {
var res = 0, offset = offset || 0, shift = 0, counter = offset, b, l = buf.length;
do {
if (counter >= l) {
// @ts-ignore
read.bytes = 0;
throw new RangeError('Could not decode varint');
}
b = buf[counter++];
res += shift < 28
? (b & REST$1) << shift
: (b & REST$1) * Math.pow(2, shift);
shift += 7;
} while (b >= MSB$1);
// @ts-ignore
read.bytes = counter - offset;
return res;
}
var N1 = Math.pow(2, 7);
var N2 = Math.pow(2, 14);
var N3 = Math.pow(2, 21);
var N4 = Math.pow(2, 28);
var N5 = Math.pow(2, 35);
var N6 = Math.pow(2, 42);
var N7 = Math.pow(2, 49);
var N8 = Math.pow(2, 56);
var N9 = Math.pow(2, 63);
var length = function (/** @type {number} */ value) {
return (value < N1 ? 1
: value < N2 ? 2
: value < N3 ? 3
: value < N4 ? 4
: value < N5 ? 5
: value < N6 ? 6
: value < N7 ? 7
: value < N8 ? 8
: value < N9 ? 9
: 10);
};
var varint = {
encode: encode_1,
decode: decode$3,
encodingLength: length
};
var _brrp_varint = varint;
function decode$2(data, offset = 0) {
const code = _brrp_varint.decode(data, offset);
return [code, _brrp_varint.decode.bytes];
}
function encodeTo(int, target, offset = 0) {
_brrp_varint.encode(int, target, offset);
return target;
}
function encodingLength(int) {
return _brrp_varint.encodingLength(int);
}
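/*
 * Illustrative round trip with the local `varint` helpers above (module-internal,
 * not part of the public API): unsigned LEB128, seven value bits per byte, with
 * the MSB set on every byte except the last.
 *
 * ```js
 * varint.encode(300)            // [0xac, 0x02]
 * varint.decode([0xac, 0x02])   // 300
 * varint.encodingLength(300)    // 2
 * ```
 */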
/**
* Creates a multihash digest.
*/
function create(code, digest) {
const size = digest.byteLength;
const sizeOffset = encodingLength(code);
const digestOffset = sizeOffset + encodingLength(size);
const bytes = new Uint8Array(digestOffset + size);
encodeTo(code, bytes, 0);
encodeTo(size, bytes, sizeOffset);
bytes.set(digest, digestOffset);
return new Digest(code, size, digest, bytes);
}
/**
* Turns bytes representation of multihash digest into an instance.
*/
function decode$1(multihash) {
const bytes = coerce(multihash);
const [code, sizeOffset] = decode$2(bytes);
const [size, digestOffset] = decode$2(bytes.subarray(sizeOffset));
const digest = bytes.subarray(sizeOffset + digestOffset);
if (digest.byteLength !== size) {
throw new Error('Incorrect length');
}
return new Digest(code, size, digest, bytes);
}
function equals$1(a, b) {
if (a === b) {
return true;
}
else {
const data = b;
return (a.code === data.code &&
a.size === data.size &&
data.bytes instanceof Uint8Array &&
equals$2(a.bytes, data.bytes));
}
}
/**
* Represents a multihash digest which carries information about the
* hashing algorithm and an actual hash digest.
*/
class Digest {
code;
size;
digest;
bytes;
/**
* Creates a multihash digest.
*/
constructor(code, size, digest, bytes) {
this.code = code;
this.size = size;
this.digest = digest;
this.bytes = bytes;
}
}
const code = 0x0;
const name = 'identity';
const encode$1 = coerce;
function digest(input) {
return create(code, encode$1(input));
}
const identity = { code, name, encode: encode$1, digest };
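/*
 * Minimal sketch using the `identity` hasher defined above: the "digest" is the
 * input itself, prefixed with the multihash code (0x00) and length varints.
 *
 * ```js
 * const mh = identity.digest(Uint8Array.from([1, 2, 3]))
 * mh.code    // 0x0
 * mh.size    // 3
 * mh.bytes   // Uint8Array([0x00, 0x03, 1, 2, 3])
 * ```
 */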
function from({ name, code, encode }) {
return new Hasher(name, code, encode);
}
/**
* Hasher represents a hashing algorithm implementation that produces a
* `MultihashDigest`.
*/
class Hasher {
name;
code;
encode;
constructor(name, code, encode) {
this.name = name;
this.code = code;
this.encode = encode;
}
digest(input) {
if (input instanceof Uint8Array) {
const result = this.encode(input);
return result instanceof Uint8Array
? create(this.code, result)
/* c8 ignore next 1 */
: result.then(digest => create(this.code, digest));
}
else {
throw Error('Unknown type, must be binary type');
/* c8 ignore next 1 */
}
}
}
/* global crypto */
function sha(name) {
return async (data) => new Uint8Array(await crypto.subtle.digest(name, data));
}
const sha256 = from({
name: 'sha2-256',
code: 0x12,
encode: sha('SHA-256')
});
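/*
 * Sketch of hashing with the `sha256` hasher above; it relies on WebCrypto
 * (`crypto.subtle`), so it is async and must be awaited.
 *
 * ```js
 * const mh = await sha256.digest(new TextEncoder().encode('hello'))
 * mh.code     // 0x12 (sha2-256)
 * mh.size     // 32
 * mh.digest   // 32-byte SHA-256 of 'hello'
 * ```
 */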
function format(link, base) {
const { bytes, version } = link;
switch (version) {
case 0:
return toStringV0(bytes, baseCache(link), base ?? base58btc.encoder);
default:
return toStringV1(bytes, baseCache(link), (base ?? base32.encoder));
}
}
const cache = new WeakMap();
function baseCache(cid) {
const baseCache = cache.get(cid);
if (baseCache == null) {
const baseCache = new Map();
cache.set(cid, baseCache);
return baseCache;
}
return baseCache;
}
class CID {
code;
version;
multihash;
bytes;
'/';
/**
* @param version - Version of the CID
* @param code - Code of the codec content is encoded in, see https://github.com/multiformats/multicodec/blob/master/table.csv
* @param multihash - (Multi)hash of the content.
*/
constructor(version, code, multihash, bytes) {
this.code = code;
this.version = version;
this.multihash = multihash;
this.bytes = bytes;
// flag to serializers that this is a CID and
// should be treated specially
this['/'] = bytes;
}
/**
* Signalling `cid.asCID === cid` has been replaced with `cid['/'] === cid.bytes`;
* please either use `CID.asCID(cid)` or switch to the new signalling mechanism.
*
* @deprecated
*/
get asCID() {
return this;
}
// ArrayBufferView
get byteOffset() {
return this.bytes.byteOffset;
}
// ArrayBufferView
get byteLength() {
return this.bytes.byteLength;
}
toV0() {
switch (this.version) {
case 0: {
return this;
}
case 1: {
const { code, multihash } = this;
if (code !== DAG_PB_CODE) {
throw new Error('Cannot convert a non dag-pb CID to CIDv0');
}
// sha2-256
if (multihash.code !== SHA_256_CODE) {
throw new Error('Cannot convert non sha2-256 multihash CID to CIDv0');
}
return (CID.createV0(multihash));
}
default: {
throw Error(`Can not convert CID version ${this.version} to version 0. This is a bug please report`);
}
}
}
toV1() {
switch (this.version) {
case 0: {
const { code, digest } = this.multihash;
const multihash = create(code, digest);
return (CID.createV1(this.code, multihash));
}
case 1: {
return this;
}
default: {
throw Error(`Can not convert CID version ${this.version} to version 1. This is a bug please report`);
}
}
}
equals(other) {
return CID.equals(this, other);
}
static equals(self, other) {
const unknown = other;
return (unknown != null &&
self.code === unknown.code &&
self.version === unknown.version &&
equals$1(self.multihash, unknown.multihash));
}
toString(base) {
return format(this, base);
}
toJSON() {
return { '/': format(this) };
}
link() {
return this;
}
[Symbol.toStringTag] = 'CID';
// Legacy
[Symbol.for('nodejs.util.inspect.custom')]() {
return `CID(${this.toString()})`;
}
/**
* Takes any input `value` and returns a `CID` instance if it was a `CID`,
* otherwise returns `null`. If `value` is an instance of `CID` it is
* returned as-is. If `value` is not an instance of this `CID` class but is
* a compatible CID, a new instance of this `CID` class is returned.
* Otherwise `null` is returned.
*
* This allows two different incompatible versions of CID library to
* co-exist and interop as long as binary interface is compatible.
*/
static asCID(input) {
if (input == null) {
return null;
}
const value = input;
if (value instanceof CID) {
// If value is instance of CID then we're all set.
return value;
}
else if ((value['/'] != null && value['/'] === value.bytes) || value.asCID === value) {
// If value isn't instance of this CID class but `this.asCID === this` or
// `value['/'] === value.bytes` is true it is CID instance coming from a
// different implementation (diff version or duplicate). In that case we
// rebase it to this `CID` implementation so caller is guaranteed to get
// instance with expected API.
const { version, code, multihash, bytes } = value;
return new CID(version, code, multihash, bytes ?? encodeCID(version, code, multihash.bytes));
}
else if (value[cidSymbol] === true) {
// If value is a CID from older implementation that used to be tagged via
// symbol we still rebase it to the this `CID` implementation by
// delegating that to a constructor.
const { version, multihash, code } = value;
const digest = decode$1(multihash);
return CID.create(version, code, digest);
}
else {
// Otherwise value is not a CID (or an incompatible version of it) in
// which case we return `null`.
return null;
}
}
/**
* @param version - Version of the CID
* @param code - Code of the codec content is encoded in, see https://github.com/multiformats/multicodec/blob/master/table.csv
* @param digest - (Multi)hash of the content.
*/
static create(version, code, digest) {
if (typeof code !== 'number') {
throw new Error('String codecs are no longer supported');
}
if (!(digest.bytes instanceof Uint8Array)) {
throw new Error('Invalid digest');
}
switch (version) {
case 0: {
if (code !== DAG_PB_CODE) {
throw new Error(`Version 0 CID must use dag-pb (code: ${DAG_PB_CODE}) block encoding`);
}
else {
return new CID(version, code, digest, digest.bytes);
}
}
case 1: {
const bytes = encodeCID(version, code, digest.bytes);
return new CID(version, code, digest, bytes);
}
default: {
throw new Error('Invalid version');
}
}
}
/**
* Simplified version of `create` for CIDv0.
*/
static createV0(digest) {
return CID.create(0, DAG_PB_CODE, digest);
}
/**
* Simplified version of `create` for CIDv1.
*
* @param code - Content encoding format code.
* @param digest - Multihash of the content.
*/
static createV1(code, digest) {
return CID.create(1, code, digest);
}
/**
* Decodes a CID from its binary representation. The byte array must contain
* only the CID with no additional bytes.
*
* An error will be thrown if the bytes provided do not contain a valid
* binary representation of a CID.
*/
static decode(bytes) {
const [cid, remainder] = CID.decodeFirst(bytes);
if (remainder.length !== 0) {
throw new Error('Incorrect length');
}
return cid;
}
/**
* Decodes a CID from its binary representation at the beginning of a byte
* array.
*
* Returns an array with the first element containing the CID and the second
* element containing the remainder of the original byte array. The remainder
* will be a zero-length byte array if the provided bytes only contained a
* binary CID representation.
*/
static decodeFirst(bytes) {
const specs = CID.inspectBytes(bytes);
const prefixSize = specs.size - specs.multihashSize;
const multihashBytes = coerce(bytes.subarray(prefixSize, prefixSize + specs.multihashSize));
if (multihashBytes.byteLength !== specs.multihashSize) {
throw new Error('Incorrect length');
}
const digestBytes = multihashBytes.subarray(specs.multihashSize - specs.digestSize);
const digest = new Digest(specs.multihashCode, specs.digestSize, digestBytes, multihashBytes);
const cid = specs.version === 0
? CID.createV0(digest)
: CID.createV1(specs.codec, digest);
return [cid, bytes.subarray(specs.size)];
}
/**
* Inspect the initial bytes of a CID to determine its properties.
*
* Involves decoding up to 4 varints. Typically this will require only 4 to 6
* bytes but for larger multicodec code values and larger multihash digest
* lengths these varints can be quite large. It is recommended that at least
* 10 bytes be made available in the `initialBytes` argument for a complete
* inspection.
*/
static inspectBytes(initialBytes) {
let offset = 0;
const next = () => {
const [i, length] = decode$2(initialBytes.subarray(offset));
offset += length;
return i;
};
let version = next();
let codec = DAG_PB_CODE;
if (version === 18) {
// CIDv0
version = 0;
offset = 0;
}
else {
codec = next();
}
if (version !== 0 && version !== 1) {
throw new RangeError(`Invalid CID version ${version}`);
}
const prefixSize = offset;
const multihashCode = next(); // multihash code
const digestSize = next(); // multihash length
const size = offset + digestSize;
const multihashSize = size - prefixSize;
return { version, codec, multihashCode, digestSize, multihashSize, size };
}
/**
* Takes a CID in its string representation and creates an instance. If a
* `base` decoder is not provided, a default from the configuration is used.
* An error is thrown if the encoding of the CID is not compatible with the
* supplied (or default) decoder.
*/
static parse(source, base) {
const [prefix, bytes] = parseCIDtoBytes(source, base);
const cid = CID.decode(bytes);
if (cid.version === 0 && source[0] !== 'Q') {
throw Error('Version 0 CID string must not include multibase prefix');
}
// Cache string representation to avoid computing it on `this.toString()`
baseCache(cid).set(prefix, source);
return cid;
}
}
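/*
 * Illustrative round trip through the CID class above, using the `identity`
 * hasher defined earlier and 0x55 (the multicodec code for raw) as the codec.
 *
 * ```js
 * const mh = identity.digest(new TextEncoder().encode('hi'))
 * const cid = CID.createV1(0x55, mh)
 * cid.version                  // 1
 * const str = cid.toString()   // base32 string starting with 'b'
 * CID.parse(str).equals(cid)   // true
 * ```
 */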
function parseCIDtoBytes(source, base) {
switch (source[0]) {
// CIDv0 is parsed differently
case 'Q': {
const decoder = base ?? base58btc;
return [
base58btc.prefix,
decoder.decode(`${base58btc.prefix}${source}`)
];
}
case base58btc.prefix: {
const decoder = base ?? base58btc;
return [base58btc.prefix, decoder.decode(source)];
}
case base32.prefix: {
const decoder = base ?? base32;
return [base32.prefix, decoder.decode(source)];
}
case base36.prefix: {
const decoder = base ?? base36;
return [base36.prefix, decoder.decode(source)];
}
default: {
if (base == null) {
throw Error('To parse non base32, base36 or base58btc encoded CID multibase decoder must be provided');
}
return [source[0], base.decode(source)];
}
}
}
function toStringV0(bytes, cache, base) {
const { prefix } = base;
if (prefix !== base58btc.prefix) {
throw Error(`Cannot string encode V0 in ${base.name} encoding`);
}
const cid = cache.get(prefix);
if (cid == null) {
const cid = base.encode(bytes).slice(1);
cache.set(prefix, cid);
return cid;
}
else {
return cid;
}
}
function toStringV1(bytes, cache, base) {
const { prefix } = base;
const cid = cache.get(prefix);
if (cid == null) {
const cid = base.encode(bytes);
cache.set(prefix, cid);
return cid;
}
else {
return cid;
}
}
const DAG_PB_CODE = 0x70;
const SHA_256_CODE = 0x12;
function encodeCID(version, code, multihash) {
const codeOffset = encodingLength(version);
const hashOffset = codeOffset + encodingLength(code);
const bytes = new Uint8Array(hashOffset + multihash.byteLength);
encodeTo(version, bytes, 0);
encodeTo(code, bytes, codeOffset);
bytes.set(multihash, hashOffset);
return bytes;
}
const cidSymbol = Symbol.for('@ipld/js-cid/CID');
function isDefined(value) {
return Boolean(value);
}
const MB = 1024 ** 2;
const SIZE_CAP_IN_MB = 1;
/**
* Return whether the size of the message is under the upper limit for the network.
* This performs a protobuf encoding! If you have access to the fully encoded message,
* use {@link isWireSizeUnderCap} instead.
* @param message
* @param encoder
*/
async function isMessageSizeUnderCap(encoder, message) {
const buf = await encoder.toWire(message);
if (!buf)
return false;
return isWireSizeUnderCap(buf);
}
const isWireSizeUnderCap = (buf) => buf.length / MB <= SIZE_CAP_IN_MB;
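/*
 * Example of the 1 MB wire-size cap enforced above:
 *
 * ```js
 * isWireSizeUnderCap(new Uint8Array(512 * 1024))       // true  (0.5 MB)
 * isWireSizeUnderCap(new Uint8Array(2 * 1024 * 1024))  // false (2 MB)
 * ```
 */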
const decodeRelayShard = (bytes) => {
// explicitly converting to Uint8Array to avoid Buffer
// https://github.com/libp2p/js-libp2p/issues/2146
bytes = new Uint8Array(bytes);
if (bytes.length < 3)
throw new Error("Insufficient data");
const view = new DataView(bytes.buffer);
const clusterId = view.getUint16(0);
const shards = [];
if (bytes.length === 130) {
// rsv format (Bit Vector)
for (let i = 0; i < 1024; i++) {
const byteIndex = Math.floor(i / 8) + 2; // Adjusted for the 2-byte cluster field
const bitIndex = 7 - (i % 8);
if (view.getUint8(byteIndex) & (1 << bitIndex)) {
shards.push(i);
}
}
}
else {
// rs format (Index List)
const numIndices = view.getUint8(2);
for (let i = 0, offset = 3; i < numIndices; i++, offset += 2) {
if (offset + 1 >= bytes.length)
throw new Error("Unexpected end of data");
shards.push(view.getUint16(offset));
}
}
return { clusterId, shards };
};
const encodeRelayShard = (shardInfo) => {
const { clusterId, shards } = shardInfo;
const totalLength = shards.length >= 64 ? 130 : 3 + 2 * shards.length;
const buffer = new ArrayBuffer(totalLength);
const view = new DataView(buffer);
view.setUint16(0, clusterId);
if (shards.length >= 64) {
// rsv format (Bit Vector)
for (const index of shards) {
const byteIndex = Math.floor(index / 8) + 2; // Adjusted for the 2-byte cluster field
const bitIndex = 7 - (index % 8);
view.setUint8(byteIndex, view.getUint8(byteIndex) | (1 << bitIndex));
}
}
else {
// rs format (Index List)
view.setUint8(2, shards.length);
for (let i = 0, offset = 3; i < shards.length; i++, offset += 2) {
view.setUint16(offset, shards[i]);
}
}
return new Uint8Array(buffer);
};
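/*
 * Sketch of a relay-shard round trip with the helpers above. Fewer than 64
 * shards use the "rs" index-list layout (cluster id, count, 16-bit indices);
 * 64 or more switch to the 130-byte "rsv" bit vector.
 *
 * ```js
 * const rs = encodeRelayShard({ clusterId: 1, shards: [0, 2, 4] })
 * // Uint8Array([0, 1, 3, 0, 0, 0, 2, 0, 4])
 * decodeRelayShard(rs)   // { clusterId: 1, shards: [0, 2, 4] }
 * ```
 */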
var index$3 = /*#__PURE__*/Object.freeze({
__proto__: null,
version_0: version_0
});
/**
* @packageDocumentation
*
* For when you need a one-liner to collect iterable values.
*
* @example
*
* ```javascript
* import all from 'it-all'
*
* // This can also be an iterator, etc
* const values = function * () {
* yield * [0, 1, 2, 3, 4]
* }
*
* const arr = all(values())
*
* console.info(arr) // 0, 1, 2, 3, 4
* ```
*
* Async sources must be awaited:
*
* ```javascript
* const values = async function * () {
* yield * [0, 1, 2, 3, 4]
* }
*
* const arr = await all(values())
*
* console.info(arr) // 0, 1, 2, 3, 4
* ```
*/
function isAsyncIterable$3(thing) {
return thing[Symbol.asyncIterator] != null;
}
function all(source) {
if (isAsyncIterable$3(source)) {
return (async () => {
const arr = [];
for await (const entry of source) {
arr.push(entry);
}
return arr;
})();
}
const arr = [];
for (const entry of source) {
arr.push(entry);
}
return arr;
}
/**
* To guarantee Uint8Array semantics, convert nodejs Buffers
* into vanilla Uint8Arrays. In this build the function is a
* pass-through and returns its input unchanged.
*/
function asUint8Array(buf) {
return buf;
}
/**
* Returns a new Uint8Array created by concatenating the passed Uint8Arrays
*/
function concat(arrays, length) {
if (length == null) {
length = arrays.reduce((acc, curr) => acc + curr.length, 0);
}
const output = allocUnsafe(length);
let offset = 0;
for (const arr of arrays) {
output.set(arr, offset);
offset += arr.length;
}
return asUint8Array(output);
}
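/*
 * Example: concatenating two Uint8Arrays with the `concat` helper above.
 * Passing the total length skips the reduce over the inputs.
 *
 * ```js
 * concat([Uint8Array.of(1, 2), Uint8Array.of(3, 4, 5)])     // Uint8Array([1, 2, 3, 4, 5])
 * concat([Uint8Array.of(1, 2), Uint8Array.of(3, 4, 5)], 5)  // same result
 * ```
 */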
/**
* Returns true if the two passed Uint8Arrays have the same content
*/
function equals(a, b) {
if (a === b) {
return true;
}
if (a.byteLength !== b.byteLength) {
return false;
}
for (let i = 0; i < a.byteLength; i++) {
if (a[i] !== b[i]) {
return false;
}
}
return true;
}
/**
* @packageDocumentation
*
* A class that lets you do operations over a list of Uint8Arrays without
* copying them.
*
* ```js
* import { Uint8ArrayList } from 'uint8arraylist'
*
* const list = new Uint8ArrayList()
* list.append(Uint8Array.from([0, 1, 2]))
* list.append(Uint8Array.from([3, 4, 5]))
*
* list.subarray()
* // -> Uint8Array([0, 1, 2, 3, 4, 5])
*
* list.consume(3)
* list.subarray()
* // -> Uint8Array([3, 4, 5])
*
* // you can also iterate over the list
* for (const buf of list) {
* // ..do something with `buf`
* }
*
* list.subarray(0, 1)
* // -> Uint8Array([0])
* ```
*
* ## Converting Uint8ArrayLists to Uint8Arrays
*
* There are two ways to turn a `Uint8ArrayList` into a `Uint8Array` - `.slice` and `.subarray` - and one way to turn a `Uint8ArrayList` into a `Uint8ArrayList` with different contents - `.sublist`.
*
* ### slice
*
* Slice follows the same semantics as [Uint8Array.slice](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray/slice) in that it creates a new `Uint8Array` and copies bytes into it using an optional offset & length.
*
* ```js
* const list = new Uint8ArrayList()
* list.append(Uint8Array.from([0, 1, 2]))
* list.append(Uint8Array.from([3, 4, 5]))
*
* list.slice(0, 1)
* // -> Uint8Array([0])
* ```
*
* ### subarray
*
* Subarray attempts to follow the same semantics as [Uint8Array.subarray](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray/subarray) with one important difference - this is a no-copy operation, unless the requested bytes span two internal buffers in which case it is a copy operation.
*
* ```js
* const list = new Uint8ArrayList()
* list.append(Uint8Array.from([0, 1, 2]))
* list.append(Uint8Array.from([3, 4, 5]))
*
* list.subarray(0, 1)
* // -> Uint8Array([0]) - no-copy
*
* list.subarray(2, 5)
* // -> Uint8Array([2, 3, 4]) - copy
* ```
*
* ### sublist
*
* Sublist creates and returns a new `Uint8ArrayList` that shares the underlying buffers with the original, so it is always a no-copy operation.
*
* ```js
* const list = new Uint8ArrayList()
* list.append(Uint8Array.from([0, 1, 2]))
* list.append(Uint8Array.from([3, 4, 5]))
*
* list.sublist(0, 1)
* // -> Uint8ArrayList([0]) - no-copy
*
* list.sublist(2, 5)
* // -> Uint8ArrayList([2], [3, 4]) - no-copy
* ```
*
* ## Inspiration
*
* Borrows liberally from [bl](https://www.npmjs.com/package/bl) but only uses native JS types.
*/
const symbol$1 = Symbol.for('@achingbrain/uint8arraylist');
function findBufAndOffset(bufs, index) {
if (index == null || index < 0) {
throw new RangeError('index is out of bounds');
}
let offset = 0;
for (const buf of bufs) {
const bufEnd = offset + buf.byteLength;
if (index < bufEnd) {
return {
buf,
index: index - offset
};
}
offset = bufEnd;
}
throw new RangeError('index is out of bounds');
}
/**
* Check if an object is a Uint8ArrayList instance
*
* @example
*
* ```js
* import { isUint8ArrayList, Uint8ArrayList } from 'uint8arraylist'
*
* isUint8ArrayList(true) // false
* isUint8ArrayList([]) // false
* isUint8ArrayList(new Uint8ArrayList()) // true
* ```
*/
function isUint8ArrayList(value) {
return Boolean(value?.[symbol$1]);
}
class Uint8ArrayList {
bufs;
length;
[symbol$1] = true;
constructor(...data) {
this.bufs = [];
this.length = 0;
if (data.length > 0) {
this.appendAll(data);
}
}
*[Symbol.iterator]() {
yield* this.bufs;
}
get byteLength() {
return this.length;
}
/**
* Add one or more `bufs` to the end of this Uint8ArrayList
*/
append(...bufs) {
this.appendAll(bufs);
}
/**
* Add all `bufs` to the end of this Uint8ArrayList
*/
appendAll(bufs) {
let length = 0;
for (const buf of bufs) {
if (buf instanceof Uint8Array) {
length += buf.byteLength;
this.bufs.push(buf);
}
else if (isUint8ArrayList(buf)) {
length += buf.byteLength;
this.bufs.push(...buf.bufs);
}
else {
throw new Error('Could not append value, must be an Uint8Array or a Uint8ArrayList');
}
}
this.length += length;
}
/**
* Add one or more `bufs` to the start of this Uint8ArrayList
*/
prepend(...bufs) {
this.prependAll(bufs);
}
/**
* Add all `bufs` to the start of this Uint8ArrayList
*/
prependAll(bufs) {
let length = 0;
for (const buf of bufs.reverse()) {
if (buf instanceof Uint8Array) {
length += buf.byteLength;
this.bufs.unshift(buf);
}
else if (isUint8ArrayList(buf)) {
length += buf.byteLength;
this.bufs.unshift(...buf.bufs);
}
else {
throw new Error('Could not prepend value, must be an Uint8Array or a Uint8ArrayList');
}
}
this.length += length;
}
/**
* Read the value at `index`
*/
get(index) {
const res = findBufAndOffset(this.bufs, index);
return res.buf[res.index];
}
/**
* Set the value at `index` to `value`
*/
set(index, value) {
const res = findBufAndOffset(this.bufs, index);
res.buf[res.index] = value;
}
/**
* Copy bytes from `buf` to the index specified by `offset`
*/
write(buf, offset = 0) {
if (buf instanceof Uint8Array) {
for (let i = 0; i < buf.length; i++) {
this.set(offset + i, buf[i]);
}
}
else if (isUint8ArrayList(buf)) {
for (let i = 0; i < buf.length; i++) {
this.set(offset + i, buf.get(i));
}
}
else {
throw new Error('Could not write value, must be an Uint8Array or a Uint8ArrayList');
}
}
/**
* Remove bytes from the front of the pool
*/
consume(bytes) {
// first, normalize the argument, in accordance with how Buffer does it
bytes = Math.trunc(bytes);
// do nothing if not a positive number
if (Number.isNaN(bytes) || bytes <= 0) {
return;
}
// if consuming all bytes, skip iterating
if (bytes === this.byteLength) {
this.bufs = [];
this.length = 0;
return;
}
while (this.bufs.length > 0) {
if (bytes >= this.bufs[0].byteLength) {
bytes -= this.bufs[0].byteLength;
this.length -= this.bufs[0].byteLength;
this.bufs.shift();
}
else {
this.bufs[0] = this.bufs[0].subarray(bytes);
this.length -= bytes;
break;
}
}
}
/**
* Extracts a section of an array and returns a new array.
*
* This is a copy operation as it is with Uint8Arrays and Arrays
* - note this is different to the behaviour of Node Buffers.
*/
slice(beginInclusive, endExclusive) {
const { bufs, length } = this._subList(beginInclusive, endExclusive);
return concat(bufs, length);
}
/**
* Returns a Uint8Array from the given start and end element index.
*
* In the best case where the data extracted comes from a single Uint8Array
* internally this is a no-copy operation otherwise it is a copy operation.
*/
subarray(beginInclusive, endExclusive) {
const { bufs, length } = this._subList(beginInclusive, endExclusive);
if (bufs.length === 1) {
return bufs[0];
}
return concat(bufs, length);
}
/**
* Returns a Uint8ArrayList from the given start and end element index.
*
* This is a no-copy operation.
*/
sublist(beginInclusive, endExclusive) {
const { bufs, length } = this._subList(beginInclusive, endExclusive);
const list = new Uint8ArrayList();
list.length = length;
// don't loop, just set the bufs
list.bufs = [...bufs];
return list;
}
_subList(beginInclusive, endExclusive) {
beginInclusive = beginInclusive ?? 0;
endExclusive = endExclusive ?? this.length;
if (beginInclusive < 0) {
beginInclusive = this.length + beginInclusive;
}
if (endExclusive < 0) {
endExclusive = this.length + endExclusive;
}
if (beginInclusive < 0 || endExclusive > this.length) {
throw new RangeError('index is out of bounds');
}
if (beginInclusive === endExclusive) {
return { bufs: [], length: 0 };
}
if (beginInclusive === 0 && endExclusive === this.length) {
return { bufs: this.bufs, length: this.length };
}
const bufs = [];
let offset = 0;
for (let i = 0; i < this.bufs.length; i++) {
const buf = this.bufs[i];
const bufStart = offset;
const bufEnd = bufStart + buf.byteLength;
// for next loop
offset = bufEnd;
if (beginInclusive >= bufEnd) {
// start after this buf
continue;
}
const sliceStartInBuf = beginInclusive >= bufStart && beginInclusive < bufEnd;
const sliceEndsInBuf = endExclusive > bufStart && endExclusive <= bufEnd;
if (sliceStartInBuf && sliceEndsInBuf) {
// slice is wholly contained within this buffer
if (beginInclusive === bufStart && endExclusive === bufEnd) {
// requested whole buffer
bufs.push(buf);
break;
}
// requested part of buffer
const start = beginInclusive - bufStart;
bufs.push(buf.subarray(start, start + (endExclusive - beginInclusive)));
break;
}
if (sliceStartInBuf) {
// slice starts in this buffer
if (beginInclusive === 0) {
// requested whole buffer
bufs.push(buf);
continue;
}
// requested part of buffer
bufs.push(buf.subarray(beginInclusive - bufStart));
continue;
}
if (sliceEndsInBuf) {
if (endExclusive === bufEnd) {
// requested whole buffer
bufs.push(buf);
break;
}
// requested part of buffer
bufs.push(buf.subarray(0, endExclusive - bufStart));
break;
}
// slice started before this buffer and ends after it
bufs.push(buf);
}
return { bufs, length: endExclusive - beginInclusive };
}
indexOf(search, offset = 0) {
if (!isUint8ArrayList(search) && !(search instanceof Uint8Array)) {
throw new TypeError('The "value" argument must be a Uint8ArrayList or Uint8Array');
}
const needle = search instanceof Uint8Array ? search : search.subarray();
offset = Number(offset ?? 0);
if (isNaN(offset)) {
offset = 0;
}
if (offset < 0) {
offset = this.length + offset;
}
if (offset < 0) {
offset = 0;
}
if (search.length === 0) {
return offset > this.length ? this.length : offset;
}
// https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string-search_algorithm
const M = needle.byteLength;
if (M === 0) {
throw new TypeError('search must be at least 1 byte long');
}
// radix
const radix = 256;
const rightmostPositions = new Int32Array(radix);
// position of the rightmost occurrence of the byte c in the pattern
for (let c = 0; c < radix; c++) {
// -1 for bytes not in pattern
rightmostPositions[c] = -1;
}
for (let j = 0; j < M; j++) {
// rightmost position for bytes in pattern
rightmostPositions[needle[j]] = j;
}
// Return offset of first match, -1 if no match
const right = rightmostPositions;
const lastIndex = this.byteLength - needle.byteLength;
const lastPatIndex = needle.byteLength - 1;
let skip;
for (let i = offset; i <= lastIndex; i += skip) {
skip = 0;
for (let j = lastPatIndex; j >= 0; j--) {
const char = this.get(i + j);
if (needle[j] !== char) {
skip = Math.max(1, j - right[char]);
break;
}
}
if (skip === 0) {
return i;
}
}
return -1;
}
getInt8(byteOffset) {
const buf = this.subarray(byteOffset, byteOffset + 1);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
return view.getInt8(0);
}
setInt8(byteOffset, value) {
const buf = allocUnsafe(1);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
view.setInt8(0, value);
this.write(buf, byteOffset);
}
getInt16(byteOffset, littleEndian) {
const buf = this.subarray(byteOffset, byteOffset + 2);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
return view.getInt16(0, littleEndian);
}
setInt16(byteOffset, value, littleEndian) {
const buf = alloc(2);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
view.setInt16(0, value, littleEndian);
this.write(buf, byteOffset);
}
getInt32(byteOffset, littleEndian) {
const buf = this.subarray(byteOffset, byteOffset + 4);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
return view.getInt32(0, littleEndian);
}
setInt32(byteOffset, value, littleEndian) {
const buf = alloc(4);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
view.setInt32(0, value, littleEndian);
this.write(buf, byteOffset);
}
getBigInt64(byteOffset, littleEndian) {
const buf = this.subarray(byteOffset, byteOffset + 8);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
return view.getBigInt64(0, littleEndian);
}
setBigInt64(byteOffset, value, littleEndian) {
const buf = alloc(8);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
view.setBigInt64(0, value, littleEndian);
this.write(buf, byteOffset);
}
getUint8(byteOffset) {
const buf = this.subarray(byteOffset, byteOffset + 1);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
return view.getUint8(0);
}
setUint8(byteOffset, value) {
const buf = allocUnsafe(1);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
view.setUint8(0, value);
this.write(buf, byteOffset);
}
getUint16(byteOffset, littleEndian) {
const buf = this.subarray(byteOffset, byteOffset + 2);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
return view.getUint16(0, littleEndian);
}
setUint16(byteOffset, value, littleEndian) {
const buf = alloc(2);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
view.setUint16(0, value, littleEndian);
this.write(buf, byteOffset);
}
getUint32(byteOffset, littleEndian) {
const buf = this.subarray(byteOffset, byteOffset + 4);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
return view.getUint32(0, littleEndian);
}
setUint32(byteOffset, value, littleEndian) {
const buf = alloc(4);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
view.setUint32(0, value, littleEndian);
this.write(buf, byteOffset);
}
getBigUint64(byteOffset, littleEndian) {
const buf = this.subarray(byteOffset, byteOffset + 8);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
return view.getBigUint64(0, littleEndian);
}
setBigUint64(byteOffset, value, littleEndian) {
const buf = alloc(8);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
view.setBigUint64(0, value, littleEndian);
this.write(buf, byteOffset);
}
getFloat32(byteOffset, littleEndian) {
const buf = this.subarray(byteOffset, byteOffset + 4);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
return view.getFloat32(0, littleEndian);
}
setFloat32(byteOffset, value, littleEndian) {
const buf = alloc(4);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
view.setFloat32(0, value, littleEndian);
this.write(buf, byteOffset);
}
getFloat64(byteOffset, littleEndian) {
const buf = this.subarray(byteOffset, byteOffset + 8);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
return view.getFloat64(0, littleEndian);
}
setFloat64(byteOffset, value, littleEndian) {
const buf = alloc(8);
const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
view.setFloat64(0, value, littleEndian);
this.write(buf, byteOffset);
}
equals(other) {
if (other == null) {
return false;
}
if (!(other instanceof Uint8ArrayList)) {
return false;
}
if (other.bufs.length !== this.bufs.length) {
return false;
}
for (let i = 0; i < this.bufs.length; i++) {
if (!equals(this.bufs[i], other.bufs[i])) {
return false;
}
}
return true;
}
/**
* Create a Uint8ArrayList from a pre-existing list of Uint8Arrays. Use this
* method if you know the total size of all the Uint8Arrays ahead of time.
*/
static fromUint8Arrays(bufs, length) {
const list = new Uint8ArrayList();
list.bufs = bufs;
if (length == null) {
length = bufs.reduce((acc, curr) => acc + curr.byteLength, 0);
}
list.length = length;
return list;
}
}
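/*
 * Illustrative use of the DataView-style accessors and the Boyer-Moore
 * `indexOf` implemented above; reads that span two internal buffers fall back
 * to a copy.
 *
 * ```js
 * const list = new Uint8ArrayList(Uint8Array.from([0, 1, 2]), Uint8Array.from([3, 4, 5]))
 * list.getUint16(2)                      // 0x0203 (big-endian by default, spans both buffers)
 * list.indexOf(Uint8Array.from([3, 4]))  // 3
 * list.setUint16(0, 0xffee)
 * list.get(0)                            // 0xff
 * ```
 */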
/*
function indexOf (needle: Uint8Array, haystack: Uint8Array, offset = 0) {
for (let i = offset; i < haystack.byteLength; i++) {
for (let j = 0; j < needle.length; j++) {
if (haystack[i + j] !== needle[j]) {
break
}
if (j === needle.byteLength -1) {
return i
}
}
if (haystack.byteLength - i < needle.byteLength) {
break
}
}
return -1
}
*/
function isAsyncIterable$2(thing) {
return thing[Symbol.asyncIterator] != null;
}
const defaultEncoder = (length) => {
const lengthLength = encodingLength$1(length);
const lengthBuf = allocUnsafe(lengthLength);
encode$3(length, lengthBuf);
defaultEncoder.bytes = lengthLength;
return lengthBuf;
};
defaultEncoder.bytes = 0;
function encode(source, options) {
options = options ?? {};
const encodeLength = options.lengthEncoder ?? defaultEncoder;
function* maybeYield(chunk) {
// length + data
const length = encodeLength(chunk.byteLength);
// yield only Uint8Arrays
if (length instanceof Uint8Array) {
yield length;
}
else {
yield* length;
}
// yield only Uint8Arrays
if (chunk instanceof Uint8Array) {
yield chunk;
}
else {
yield* chunk;
}
}
if (isAsyncIterable$2(source)) {
return (async function* () {
for await (const chunk of source) {
yield* maybeYield(chunk);
}
})();
}
return (function* () {
for (const chunk of source) {
yield* maybeYield(chunk);
}
})();
}
encode.single = (chunk, options) => {
options = options ?? {};
const encodeLength = options.lengthEncoder ?? defaultEncoder;
return new Uint8ArrayList(encodeLength(chunk.byteLength), chunk);
};
/**
* The reported length of the next data message was not a positive integer
*/
class InvalidMessageLengthError extends Error {
name = 'InvalidMessageLengthError';
code = 'ERR_INVALID_MSG_LENGTH';
}
/**
* The reported length of the next data message was larger than the configured
* max allowable value
*/
class InvalidDataLengthError extends Error {
name = 'InvalidDataLengthError';
code = 'ERR_MSG_DATA_TOO_LONG';
}
/**
* The varint used to specify the length of the next data message contained more
* bytes than the configured max allowable value
*/
class InvalidDataLengthLengthError extends Error {
name = 'InvalidDataLengthLengthError';
code = 'ERR_MSG_LENGTH_TOO_LONG';
}
/**
* The incoming stream ended before the expected number of bytes were read
*/
class UnexpectedEOFError extends Error {
name = 'UnexpectedEOFError';
code = 'ERR_UNEXPECTED_EOF';
}
/* eslint max-depth: ["error", 6] */
// Maximum length of the length section of the message
const MAX_LENGTH_LENGTH = 8; // Varint.encode(Number.MAX_SAFE_INTEGER).length
// Maximum length of the data section of the message
const MAX_DATA_LENGTH = 1024 * 1024 * 4;
var ReadMode;
(function (ReadMode) {
ReadMode[ReadMode["LENGTH"] = 0] = "LENGTH";
ReadMode[ReadMode["DATA"] = 1] = "DATA";
})(ReadMode || (ReadMode = {}));
const defaultDecoder = (buf) => {
const length = decode$4(buf);
defaultDecoder.bytes = encodingLength$1(length);
return length;
};
defaultDecoder.bytes = 0;
function decode(source, options) {
const buffer = new Uint8ArrayList();
let mode = ReadMode.LENGTH;
let dataLength = -1;
const lengthDecoder = options?.lengthDecoder ?? defaultDecoder;
const maxLengthLength = options?.maxLengthLength ?? MAX_LENGTH_LENGTH;
const maxDataLength = options?.maxDataLength ?? MAX_DATA_LENGTH;
function* maybeYield() {
while (buffer.byteLength > 0) {
if (mode === ReadMode.LENGTH) {
// read length, ignore errors for short reads
try {
dataLength = lengthDecoder(buffer);
if (dataLength < 0) {
throw new InvalidMessageLengthError('Invalid message length');
}
if (dataLength > maxDataLength) {
throw new InvalidDataLengthError('Message length too long');
}
const dataLengthLength = lengthDecoder.bytes;
buffer.consume(dataLengthLength);
if (options?.onLength != null) {
options.onLength(dataLength);
}
mode = ReadMode.DATA;
}
catch (err) {
if (err instanceof RangeError) {
if (buffer.byteLength > maxLengthLength) {
throw new InvalidDataLengthLengthError('Message length length too long');
}
break;
}
throw err;
}
}
if (mode === ReadMode.DATA) {
if (buffer.byteLength < dataLength) {
// not enough data, wait for more
break;
}
const data = buffer.sublist(0, dataLength);
buffer.consume(dataLength);
if (options?.onData != null) {
options.onData(data);
}
yield data;
mode = ReadMode.LENGTH;
}
}
}
if (isAsyncIterable$2(source)) {
return (async function* () {
for await (const buf of source) {
buffer.append(buf);
yield* maybeYield();
}
if (buffer.byteLength > 0) {
throw new UnexpectedEOFError('Unexpected end of input');
}
})();
}
return (function* () {
for (const buf of source) {
buffer.append(buf);
yield* maybeYield();
}
if (buffer.byteLength > 0) {
throw new UnexpectedEOFError('Unexpected end of input');
}
})();
}
decode.fromReader = (reader, options) => {
let byteLength = 1; // Read single byte chunks until the length is known
const varByteSource = (async function* () {
while (true) {
try {
const { done, value } = await reader.next(byteLength);
if (done === true) {
return;
}
if (value != null) {
yield value;
}
}
catch (err) {
if (err.code === 'ERR_UNDER_READ') {
return { done: true, value: null };
}
throw err;
}
finally {
// Reset the byteLength so we continue to check for varints
byteLength = 1;
}
}
}());
/**
* Once the length has been parsed, read chunk for that length
*/
const onLength = (l) => { byteLength = l; };
return decode(varByteSource, {
...(options ?? {}),
onLength
});
};
function pDefer() {
const deferred = {};
deferred.promise = new Promise((resolve, reject) => {
deferred.resolve = resolve;
deferred.reject = reject;
});
return deferred;
}
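/*
 * Example of the `pDefer` helper above: resolve a promise from outside its
 * executor.
 *
 * ```js
 * const deferred = pDefer()
 * setTimeout(() => { deferred.resolve('ready') }, 10)
 * await deferred.promise   // 'ready' (inside an async context)
 * ```
 */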
// ported from https://www.npmjs.com/