// @libp2p/gossipsub
// A typescript implementation of gossipsub
import { TypedEventEmitter, serviceCapabilities, serviceDependencies } from '@libp2p/interface'
import { peerIdFromMultihash, peerIdFromString } from '@libp2p/peer-id'
import { encode } from 'it-length-prefixed'
import { pipe } from 'it-pipe'
import { pushable } from 'it-pushable'
import * as Digest from 'multiformats/hashes/digest'
import * as constants from './constants.js'
import {
ACCEPT_FROM_WHITELIST_DURATION_MS,
ACCEPT_FROM_WHITELIST_MAX_MESSAGES,
ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE,
BACKOFF_SLACK
} from './constants.js'
import { StrictNoSign, StrictSign, TopicValidatorResult } from './index.js'
import { defaultDecodeRpcLimits } from './message/decodeRpc.js'
import { RPC } from './message/rpc.js'
import { MessageCache } from './message-cache.js'
import {
ChurnReason,
getMetrics,
IHaveIgnoreReason,
InclusionReason,
ScorePenalty
} from './metrics.js'
import {
PeerScore,
createPeerScoreParams,
createPeerScoreThresholds
} from './score/index.js'
import { computeAllPeersScoreWeights } from './score/scoreMetrics.js'
import { InboundStream, OutboundStream } from './stream.js'
import { IWantTracer } from './tracer.js'
import {
ValidateError,
MessageStatus,
RejectReason,
rejectReasonFromAcceptance
} from './types.js'
import { buildRawMessage, validateToRawMessage } from './utils/buildRawMessage.js'
import { createGossipRpc, ensureControl } from './utils/create-gossip-rpc.js'
import { shuffle, messageIdToString } from './utils/index.js'
import { msgIdFnStrictNoSign, msgIdFnStrictSign } from './utils/msgIdFn.js'
import { multiaddrToIPStr } from './utils/multiaddr.js'
import { getPublishConfigFromPeerId } from './utils/publishConfig.js'
import { removeFirstNItemsFromSet, removeItemsFromSet } from './utils/set.js'
import { SimpleTimeCache } from './utils/time-cache.js'
import type { GossipSubComponents, GossipSubEvents, GossipsubMessage, GossipsubOpts, MeshPeer, Message, PublishResult, SubscriptionChangeData, TopicValidatorFn } from './index.js'
import type { DecodeRPCLimits } from './message/decodeRpc.js'
import type { MessageCacheRecord } from './message-cache.js'
import type { Metrics, ToSendGroupCount } from './metrics.js'
import type { PeerScoreParams, PeerScoreThresholds, PeerScoreStatsDump } from './score/index.js'
import type { MsgIdFn, PublishConfig, TopicStr, MsgIdStr, PeerIdStr, RejectReasonObj, FastMsgIdFn, DataTransform, MsgIdToStrFn, MessageId, PublishOpts } from './types.js'
import type {
Connection, Stream, PeerId, Peer,
Logger,
Topology,
TypedEventTarget,
MessageStreamDirection
} from '@libp2p/interface'
import type { Multiaddr } from '@multiformats/multiaddr'
import type { Uint8ArrayList } from 'uint8arraylist'
enum GossipStatusCode {
started,
stopped
}
type GossipStatus =
| {
code: GossipStatusCode.started
registrarTopologyIds: string[]
heartbeatTimeout: ReturnType<typeof setTimeout>
hearbeatStartMs: number
}
| {
code: GossipStatusCode.stopped
}
interface GossipOptions extends GossipsubOpts {
scoreParams: PeerScoreParams
scoreThresholds: PeerScoreThresholds
}
interface AcceptFromWhitelistEntry {
/** number of messages accepted since recomputing the peer's score */
messagesAccepted: number
/** have to recompute score after this time */
acceptUntil: number
}
type ReceivedMessageResult =
| { code: MessageStatus.duplicate, msgIdStr: MsgIdStr }
| ({ code: MessageStatus.invalid, msgIdStr?: MsgIdStr } & RejectReasonObj)
| { code: MessageStatus.valid, messageId: MessageId, msg: Message }
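// Illustrative sketch (not used by this class): ReceivedMessageResult is a discriminated
// union on `code`, so callers can narrow it exhaustively, mirroring the switch in
// handleReceivedMessage() below. `onResult` is a hypothetical helper name.
//
//   function onResult (result: ReceivedMessageResult): void {
//     switch (result.code) {
//       case MessageStatus.duplicate: /* result.msgIdStr is available */ break
//       case MessageStatus.invalid: /* result.reason (and maybe result.msgIdStr) are available */ break
//       case MessageStatus.valid: /* result.messageId and result.msg are available */ break
//     }
//   }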
export class GossipSub extends TypedEventEmitter<GossipSubEvents> implements TypedEventTarget<GossipSubEvents> {
/**
* The signature policy to follow by default
*/
public readonly globalSignaturePolicy: typeof StrictSign | typeof StrictNoSign
public protocols: string[] = [constants.GossipsubIDv12, constants.GossipsubIDv11, constants.GossipsubIDv10]
private publishConfig: PublishConfig | undefined
private readonly dataTransform: DataTransform | undefined
// State
public readonly peers = new Map<PeerIdStr, PeerId>()
public readonly streamsInbound = new Map<PeerIdStr, InboundStream>()
public readonly streamsOutbound = new Map<PeerIdStr, OutboundStream>()
/** Ensures outbound streams are created sequentially */
private outboundInflightQueue = pushable<{ peerId: PeerId, connection: Connection }>({ objectMode: true })
/** Direct peers */
public readonly direct = new Set<PeerIdStr>()
/** Floodsub peers */
private readonly floodsubPeers = new Set<PeerIdStr>()
/** Cache of seen messages */
private readonly seenCache: SimpleTimeCache<void>
/**
* Map of peer id to AcceptFromWhitelistEntry
*/
private readonly acceptFromWhitelist = new Map<PeerIdStr, AcceptFromWhitelistEntry>()
/**
* Map of topics to the set of peers subscribed to each topic
*/
private readonly topics = new Map<TopicStr, Set<PeerIdStr>>()
/**
* List of our subscriptions
*/
private readonly subscriptions = new Set<TopicStr>()
/**
* Map of topic meshes
* topic => peer id set
*/
public readonly mesh = new Map<TopicStr, Set<PeerIdStr>>()
/**
* Map of topics to sets of peers. These are the peers to which we publish on topics we are not subscribed to (fanout)
* topic => peer id set
*/
public readonly fanout = new Map<TopicStr, Set<PeerIdStr>>()
/**
* Map of last publish time for fanout topics
* topic => last publish time
*/
private readonly fanoutLastpub = new Map<TopicStr, number>()
/**
* Map of pending messages to gossip
* peer id => control messages
*/
public readonly gossip = new Map<PeerIdStr, RPC.ControlIHave[]>()
/**
* Map of control messages
* peer id => control message
*/
public readonly control = new Map<PeerIdStr, RPC.ControlMessage>()
/**
* Number of IHAVEs received from peer in the last heartbeat
*/
private readonly peerhave = new Map<PeerIdStr, number>()
/** Number of messages we have asked from peer in the last heartbeat */
private readonly iasked = new Map<PeerIdStr, number>()
/** Prune backoff map */
private readonly backoff = new Map<TopicStr, Map<PeerIdStr, number>>()
/**
* Connection direction cache, marks peers with outbound connections
* peer id => direction
*/
private readonly outbound = new Map<PeerIdStr, boolean>()
private readonly msgIdFn: MsgIdFn
/**
* A fast message id function used for internal message de-duplication
*/
private readonly fastMsgIdFn: FastMsgIdFn | undefined
private readonly msgIdToStrFn: MsgIdToStrFn
/** Maps fast message-id to canonical message-id */
private readonly fastMsgIdCache: SimpleTimeCache<MsgIdStr> | undefined
/**
* Short term cache for published message ids. This is used for penalizing peers sending
* our own messages back if the messages are anonymous or use a random author.
*/
private readonly publishedMessageIds: SimpleTimeCache<void>
/**
* A message cache that contains the messages for last few heartbeat ticks
*/
private readonly mcache: MessageCache
/** Peer score tracking */
public readonly score: PeerScore
/**
* Custom validator function per topic.
* Must return or resolve quickly (< 100ms) to prevent causing penalties for late messages.
* If you need to apply validation that may take longer, use the `asyncValidation` option and report the
* validation result through `Gossipsub.reportValidationResult`
*/
public readonly topicValidators = new Map<TopicStr, TopicValidatorFn>()
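// Illustrative usage sketch (assumes a constructed instance named `gossipsub` and a
// hypothetical topic string `topic`): a validator receives the propagation source and the
// message and must settle quickly with a TopicValidatorResult. With the `asyncValidation`
// option enabled, forwarding is deferred until the application reports the outcome via the
// reporting method referenced in the doc comment above.
//
//   gossipsub.topicValidators.set(topic, async (propagationSource, msg) => {
//     return msg.data.length > 0 ? TopicValidatorResult.Accept : TopicValidatorResult.Reject
//   })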
/**
* Kept protected so that a child class may redirect logging to its own logger.
*/
protected readonly log: Logger
/**
* Number of heartbeats since the beginning of time
* This allows us to amortize some resource cleanup -- eg: backoff cleanup
*/
private heartbeatTicks = 0
/**
* Tracks IHAVE/IWANT promises broken by peers
*/
readonly gossipTracer: IWantTracer
/**
* Tracks IDONTWANT messages received from peers in the current heartbeat
*/
private readonly idontwantCounts = new Map<PeerIdStr, number>()
/**
* Tracks IDONTWANT messages received from peers and the heartbeat they were received in
*
* idontwants are stored for `mcacheLength` heartbeats before being pruned,
* so this map is bounded by peerCount * idontwantMaxMessages * mcacheLength
*/
private readonly idontwants = new Map<PeerIdStr, Map<MsgIdStr, number>>()
private readonly components: GossipSubComponents
private directPeerInitial: ReturnType<typeof setTimeout> | null = null
public static multicodec: string = constants.GossipsubIDv12
// Options
readonly opts: Required<GossipOptions>
private readonly decodeRpcLimits: DecodeRPCLimits
private readonly metrics: Metrics | null
private status: GossipStatus = { code: GossipStatusCode.stopped }
private readonly maxInboundStreams?: number
private readonly maxOutboundStreams?: number
private readonly runOnLimitedConnection?: boolean
private readonly allowedTopics: Set<TopicStr> | null
private heartbeatTimer: {
_intervalId: ReturnType<typeof setInterval> | undefined
runPeriodically(fn: () => void, period: number): void
cancel(): void
} | null = null
constructor (components: GossipSubComponents, options: Partial<GossipsubOpts> = {}) {
super()
const opts = {
fallbackToFloodsub: true,
floodPublish: true,
batchPublish: false,
tagMeshPeers: true,
doPX: false,
directPeers: [],
D: constants.GossipsubD,
Dlo: constants.GossipsubDlo,
Dhi: constants.GossipsubDhi,
Dscore: constants.GossipsubDscore,
Dout: constants.GossipsubDout,
Dlazy: constants.GossipsubDlazy,
heartbeatInterval: constants.GossipsubHeartbeatInterval,
fanoutTTL: constants.GossipsubFanoutTTL,
mcacheLength: constants.GossipsubHistoryLength,
mcacheGossip: constants.GossipsubHistoryGossip,
seenTTL: constants.GossipsubSeenTTL,
gossipsubIWantFollowupMs: constants.GossipsubIWantFollowupTime,
prunePeers: constants.GossipsubPrunePeers,
pruneBackoff: constants.GossipsubPruneBackoff,
unsubcribeBackoff: constants.GossipsubUnsubscribeBackoff,
graftFloodThreshold: constants.GossipsubGraftFloodThreshold,
opportunisticGraftPeers: constants.GossipsubOpportunisticGraftPeers,
opportunisticGraftTicks: constants.GossipsubOpportunisticGraftTicks,
directConnectTicks: constants.GossipsubDirectConnectTicks,
gossipFactor: constants.GossipsubGossipFactor,
idontwantMinDataSize: constants.GossipsubIdontwantMinDataSize,
idontwantMaxMessages: constants.GossipsubIdontwantMaxMessages,
...options,
scoreParams: createPeerScoreParams(options.scoreParams),
scoreThresholds: createPeerScoreThresholds(options.scoreThresholds)
}
this.components = components
this.decodeRpcLimits = opts.decodeRpcLimits ?? defaultDecodeRpcLimits
this.globalSignaturePolicy = opts.globalSignaturePolicy ?? StrictSign
// We also want to be notified of peers connected using floodsub
if (opts.fallbackToFloodsub) {
this.protocols.push(constants.FloodsubID)
}
// From pubsub
this.log = components.logger.forComponent(opts.debugName ?? 'libp2p:gossipsub')
// Gossipsub
this.opts = opts as Required<GossipOptions>
this.direct = new Set(opts.directPeers.map((p) => p.id.toString()))
this.seenCache = new SimpleTimeCache<void>({ validityMs: opts.seenTTL })
this.publishedMessageIds = new SimpleTimeCache<void>({ validityMs: opts.seenTTL })
if (options.msgIdFn != null) {
// Use custom function
this.msgIdFn = options.msgIdFn
} else {
switch (this.globalSignaturePolicy) {
case StrictSign:
this.msgIdFn = msgIdFnStrictSign
break
case StrictNoSign:
this.msgIdFn = msgIdFnStrictNoSign
break
default:
throw new Error(`Invalid globalSignaturePolicy: ${this.globalSignaturePolicy}`)
}
}
if (options.fastMsgIdFn != null) {
this.fastMsgIdFn = options.fastMsgIdFn
this.fastMsgIdCache = new SimpleTimeCache<MsgIdStr>({ validityMs: opts.seenTTL })
}
// By default, gossipsub only provides a browser-friendly function to convert a Uint8Array message id to a string.
this.msgIdToStrFn = options.msgIdToStrFn ?? messageIdToString
this.mcache = options.messageCache ?? new MessageCache(opts.mcacheGossip, opts.mcacheLength, this.msgIdToStrFn)
if (options.dataTransform != null) {
this.dataTransform = options.dataTransform
}
if (options.metricsRegister != null) {
if (options.metricsTopicStrToLabel == null) {
throw Error('Must set metricsTopicStrToLabel with metrics')
}
// in theory, each topic has its own meshMessageDeliveriesWindow param
// however in lodestar, we configure it mostly the same so just pick the max of positive ones
// (some topics have meshMessageDeliveriesWindow as 0)
const maxMeshMessageDeliveriesWindowMs = Math.max(
...Object.values(opts.scoreParams.topics).map((topicParam) => topicParam.meshMessageDeliveriesWindow),
constants.DEFAULT_METRIC_MESH_MESSAGE_DELIVERIES_WINDOWS
)
const metrics = getMetrics(options.metricsRegister, options.metricsTopicStrToLabel, {
gossipPromiseExpireSec: this.opts.gossipsubIWantFollowupMs / 1000,
behaviourPenaltyThreshold: opts.scoreParams.behaviourPenaltyThreshold,
maxMeshMessageDeliveriesWindowSec: maxMeshMessageDeliveriesWindowMs / 1000
})
metrics.mcacheSize.addCollect(() => { this.onScrapeMetrics(metrics) })
for (const protocol of this.protocols) {
metrics.protocolsEnabled.set({ protocol }, 1)
}
this.metrics = metrics
} else {
this.metrics = null
}
this.gossipTracer = new IWantTracer(this.opts.gossipsubIWantFollowupMs, this.msgIdToStrFn, this.metrics)
/**
* libp2p
*/
this.score = new PeerScore(this.opts.scoreParams, this.metrics, this.components.logger, {
scoreCacheValidityMs: opts.heartbeatInterval
})
this.maxInboundStreams = options.maxInboundStreams
this.maxOutboundStreams = options.maxOutboundStreams
this.runOnLimitedConnection = options.runOnLimitedConnection
this.allowedTopics = (opts.allowedTopics != null) ? new Set(opts.allowedTopics) : null
}
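// Illustrative construction sketch (a minimal example, not the only way to wire this up):
// most applications obtain `components` from libp2p rather than building them by hand, and
// the option values below are arbitrary; all option names are referenced elsewhere in this file.
//
//   const gossipsub = new GossipSub(components, {
//     globalSignaturePolicy: StrictSign,
//     emitSelf: false,
//     asyncValidation: false,
//     allowedTopics: ['example-topic'], // hypothetical topic name
//     D: 8,
//     Dlo: 6,
//     Dhi: 12
//   })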
readonly [Symbol.toStringTag] = '@chainsafe/libp2p-gossipsub'
readonly [serviceCapabilities]: string[] = [
'@libp2p/pubsub'
]
readonly [serviceDependencies]: string[] = [
'@libp2p/identify'
]
getPeers (): PeerId[] {
return [...this.peers.values()]
}
isStarted (): boolean {
return this.status.code === GossipStatusCode.started
}
// LIFECYCLE METHODS
/**
* Mounts the gossipsub protocol onto the libp2p node and sends
* our subscriptions to every connected peer
*/
async start (): Promise<void> {
// From pubsub
if (this.isStarted()) {
return
}
this.log('starting')
this.publishConfig = getPublishConfigFromPeerId(this.globalSignaturePolicy, this.components.peerId, this.components.privateKey)
// Create the outbound inflight queue
// This ensures that outbound stream creation happens sequentially
this.outboundInflightQueue = pushable({ objectMode: true })
pipe(this.outboundInflightQueue, async (source) => {
for await (const { peerId, connection } of source) {
await this.createOutboundStream(peerId, connection)
}
}).catch((e) => { this.log.error('outbound inflight queue error', e) })
// set direct peer addresses in the address book
await Promise.all(
this.opts.directPeers.map(async (p) => {
await this.components.peerStore.merge(p.id, {
multiaddrs: p.addrs
})
})
)
const registrar = this.components.registrar
// Incoming streams
// Called after a peer dials us
await Promise.all(
this.protocols.map(async (protocol) =>
registrar.handle(protocol, this.onIncomingStream.bind(this), {
maxInboundStreams: this.maxInboundStreams,
maxOutboundStreams: this.maxOutboundStreams,
runOnLimitedConnection: this.runOnLimitedConnection
})
)
)
// # How does Gossipsub interact with libp2p? Rough guide from Mar 2022
//
// ## Setup:
// Gossipsub requests libp2p to callback, TBD
//
// `this.libp2p.handle()` registers a handler for `/meshsub/1.1.0` and other Gossipsub protocols
// The handler callback is registered in libp2p Upgrader.protocols map.
//
// Upgrader receives an inbound connection from some transport and (`Upgrader.upgradeInbound`):
// - Adds encryption (NOISE in our case)
// - Multiplexes the stream
// - Creates a muxer and registers it so that each new stream calls the Upgrader.protocols handler
//
// ## Topology
// - new instance of Topology (unlinked to libp2p) with handlers
// - registrar.register(protocol, topology) registers the protocol with the topology
// Topology callbacks called on connection manager changes
const topology: Topology = {
onConnect: this.onPeerConnected.bind(this),
onDisconnect: this.onPeerDisconnected.bind(this),
notifyOnLimitedConnection: this.runOnLimitedConnection
}
const registrarTopologyIds = await Promise.all(
this.protocols.map(async (protocol) => registrar.register(protocol, topology))
)
// Schedule to start heartbeat after `GossipsubHeartbeatInitialDelay`
const heartbeatTimeout = setTimeout(this.runHeartbeat, constants.GossipsubHeartbeatInitialDelay)
// Then, run heartbeat every `heartbeatInterval` offset by `GossipsubHeartbeatInitialDelay`
this.status = {
code: GossipStatusCode.started,
registrarTopologyIds,
heartbeatTimeout,
hearbeatStartMs: Date.now() + constants.GossipsubHeartbeatInitialDelay
}
this.score.start()
// connect to direct peers
this.directPeerInitial = setTimeout(() => {
Promise.resolve()
.then(async () => {
await Promise.all(Array.from(this.direct).map(async (id) => this.connect(id)))
})
.catch((err) => {
this.log(err)
})
}, constants.GossipsubDirectConnectInitialDelay)
if (this.opts.tagMeshPeers) {
this.addEventListener('gossipsub:graft', this.tagMeshPeer)
this.addEventListener('gossipsub:prune', this.untagMeshPeer)
}
this.log('started')
}
/**
* Unmounts the gossipsub protocol and shuts down every connection
*/
async stop (): Promise<void> {
this.log('stopping')
// From pubsub
if (this.status.code !== GossipStatusCode.started) {
return
}
const { registrarTopologyIds } = this.status
this.status = { code: GossipStatusCode.stopped }
if (this.opts.tagMeshPeers) {
this.removeEventListener('gossipsub:graft', this.tagMeshPeer)
this.removeEventListener('gossipsub:prune', this.untagMeshPeer)
}
// unregister protocol and handlers
const registrar = this.components.registrar
await Promise.all(this.protocols.map(async (protocol) => registrar.unhandle(protocol)))
registrarTopologyIds.forEach((id) => { registrar.unregister(id) })
this.outboundInflightQueue.end()
const closePromises = []
for (const outboundStream of this.streamsOutbound.values()) {
closePromises.push(outboundStream.close())
}
this.streamsOutbound.clear()
for (const inboundStream of this.streamsInbound.values()) {
closePromises.push(inboundStream.close())
}
this.streamsInbound.clear()
await Promise.all(closePromises)
this.peers.clear()
this.subscriptions.clear()
// Gossipsub
if (this.heartbeatTimer != null) {
this.heartbeatTimer.cancel()
this.heartbeatTimer = null
}
this.score.stop()
this.mesh.clear()
this.fanout.clear()
this.fanoutLastpub.clear()
this.gossip.clear()
this.control.clear()
this.peerhave.clear()
this.iasked.clear()
this.backoff.clear()
this.outbound.clear()
this.gossipTracer.clear()
this.seenCache.clear()
if (this.fastMsgIdCache != null) { this.fastMsgIdCache.clear() }
if (this.directPeerInitial != null) { clearTimeout(this.directPeerInitial) }
this.idontwantCounts.clear()
this.idontwants.clear()
this.log('stopped')
}
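// Illustrative lifecycle sketch (assumes an instance named `gossipsub`): start() mounts the
// protocol handlers and schedules the heartbeat, stop() tears them down again; both methods
// are no-ops when the instance is already in the requested state.
//
//   await gossipsub.start()
//   // ... subscribe / publish while running ...
//   await gossipsub.stop()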
/** FOR DEBUG ONLY - Dump peer stats for all peers. Data is cloned, safe to mutate */
dumpPeerScoreStats (): PeerScoreStatsDump {
return this.score.dumpPeerScoreStats()
}
/**
* On an inbound stream opened
*/
private onIncomingStream (stream: Stream, connection: Connection): void {
if (!this.isStarted()) {
return
}
const peerId = connection.remotePeer
// add peer to router
this.addPeer(peerId, connection.direction, connection.remoteAddr)
// create inbound stream
this.createInboundStream(peerId, stream)
// attempt to create outbound stream
this.outboundInflightQueue.push({ peerId, connection })
}
/**
* Registrar notifies an established connection with pubsub protocol
*/
private onPeerConnected (peerId: PeerId, connection: Connection): void {
this.metrics?.newConnectionCount.inc({ status: connection.status })
// libp2p may emit a closed connection and never issue peer:disconnect event
// see https://github.com/ChainSafe/js-libp2p-gossipsub/issues/398
if (!this.isStarted() || connection.status !== 'open') {
return
}
this.addPeer(peerId, connection.direction, connection.remoteAddr)
this.outboundInflightQueue.push({ peerId, connection })
}
/**
* Registrar notifies a closing connection with pubsub protocol
*/
private onPeerDisconnected (peerId: PeerId): void {
this.log('connection ended %p', peerId)
this.removePeer(peerId)
}
private async createOutboundStream (peerId: PeerId, connection: Connection): Promise<void> {
if (!this.isStarted()) {
return
}
const id = peerId.toString()
if (!this.peers.has(id)) {
return
}
// TODO make this behavior more robust
// This behavior is different than for inbound streams
// If an outbound stream already exists, don't create a new stream
if (this.streamsOutbound.has(id)) {
return
}
try {
const stream = new OutboundStream(
await connection.newStream(this.protocols, {
runOnLimitedConnection: this.runOnLimitedConnection
}),
(e) => { this.log.error('outbound pipe error', e) },
{ maxBufferSize: this.opts.maxOutboundBufferSize }
)
this.log('create outbound stream %p', peerId)
this.streamsOutbound.set(id, stream)
const protocol = stream.protocol
if (protocol === constants.FloodsubID) {
this.floodsubPeers.add(id)
}
this.metrics?.peersPerProtocol.inc({ protocol }, 1)
// Immediately send own subscriptions via the newly attached stream
if (this.subscriptions.size > 0) {
this.log('send subscriptions to', id)
this.sendSubscriptions(id, Array.from(this.subscriptions), true)
}
} catch (e) {
this.log.error('createOutboundStream error', e)
}
}
private createInboundStream (peerId: PeerId, stream: Stream): void {
if (!this.isStarted()) {
return
}
const id = peerId.toString()
if (!this.peers.has(id)) {
return
}
// TODO make this behavior more robust
// This behavior is different than for outbound streams
// If a peer initiates a new inbound connection
// we assume that one is the new canonical inbound stream
const priorInboundStream = this.streamsInbound.get(id)
if (priorInboundStream !== undefined) {
this.log('replacing existing inbound stream %s', id)
priorInboundStream.close().catch((err) => { this.log.error(err) })
}
this.log('create inbound stream %s', id)
const inboundStream = new InboundStream(stream, { maxDataLength: this.opts.maxInboundDataLength })
this.streamsInbound.set(id, inboundStream)
this.pipePeerReadStream(peerId, inboundStream.source).catch((err) => { this.log(err) })
}
/**
* Add a peer to the router
*/
private addPeer (peerId: PeerId, direction: MessageStreamDirection, addr: Multiaddr): void {
const id = peerId.toString()
if (!this.peers.has(id)) {
this.peers.set(id, peerId)
// Add to peer scoring
this.score.addPeer(id)
const currentIP = multiaddrToIPStr(addr)
if (currentIP !== null) {
this.score.addIP(id, currentIP)
} else {
this.log('Added peer has no IP in current address %s %s', id, addr.toString())
}
// track the connection direction. Don't allow to unset outbound
if (!this.outbound.has(id)) {
this.outbound.set(id, direction === 'outbound')
}
}
}
/**
* Removes a peer from the router
*/
private removePeer (peerId: PeerId): void {
const id = peerId.toString()
if (!this.peers.has(id)) {
return
}
// delete peer
this.log('delete peer %p', peerId)
this.peers.delete(id)
const outboundStream = this.streamsOutbound.get(id)
const inboundStream = this.streamsInbound.get(id)
if (outboundStream != null) {
this.metrics?.peersPerProtocol.inc({ protocol: outboundStream.protocol }, -1)
}
// close streams
outboundStream?.close().catch((err) => { this.log.error(err) })
inboundStream?.close().catch((err) => { this.log.error(err) })
// remove streams
this.streamsOutbound.delete(id)
this.streamsInbound.delete(id)
// remove peer from topics map
for (const peers of this.topics.values()) {
peers.delete(id)
}
// Remove this peer from the mesh
for (const [topicStr, peers] of this.mesh) {
if (peers.delete(id)) {
this.metrics?.onRemoveFromMesh(topicStr, ChurnReason.Dc, 1)
}
}
// Remove this peer from the fanout
for (const peers of this.fanout.values()) {
peers.delete(id)
}
// Remove from floodsubPeers
this.floodsubPeers.delete(id)
// Remove from gossip mapping
this.gossip.delete(id)
// Remove from control mapping
this.control.delete(id)
// Remove from connection direction tracking
this.outbound.delete(id)
// Remove from idontwant tracking
this.idontwantCounts.delete(id)
this.idontwants.delete(id)
// Remove from peer scoring
this.score.removePeer(id)
this.acceptFromWhitelist.delete(id)
}
// API METHODS
get started (): boolean {
return this.status.code === GossipStatusCode.started
}
/**
* Get the peer ids in a topic mesh
*/
getMeshPeers (topic: TopicStr): PeerIdStr[] {
const peersInTopic = this.mesh.get(topic)
return (peersInTopic != null) ? Array.from(peersInTopic) : []
}
/**
* Get a list of the peer-ids that are subscribed to one topic.
*/
getSubscribers (topic: TopicStr): PeerId[] {
const peersInTopic = this.topics.get(topic)
return ((peersInTopic != null) ? Array.from(peersInTopic) : []).map((str) => this.peers.get(str) ?? peerIdFromString(str))
}
/**
* Get the list of topics which the peer is subscribed to.
*/
getTopics (): TopicStr[] {
return Array.from(this.subscriptions)
}
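// Illustrative inspection sketch (assumes a started instance named `gossipsub` and a
// hypothetical topic string `topic`): these accessors are read-only views of router state.
//
//   const meshPeers = gossipsub.getMeshPeers(topic) // PeerIdStr[] currently in the topic mesh
//   const subscribers = gossipsub.getSubscribers(topic) // PeerId[] known to subscribe to the topic
//   const ourTopics = gossipsub.getTopics() // topics this node is subscribed to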
// TODO: Reviewing Pubsub API
// MESSAGE METHODS
/**
* Responsible for processing each RPC message received from other peers.
*/
private async pipePeerReadStream (peerId: PeerId, stream: AsyncIterable<Uint8ArrayList>): Promise<void> {
try {
await pipe(stream, async (source) => {
for await (const data of source) {
try {
// TODO: Check max gossip message size, before decodeRpc()
const rpcBytes = data.subarray()
// Note: This function may throw, it must be wrapped in a try {} catch {} to prevent closing the stream.
// TODO: What should we do if the entire RPC is invalid?
const rpc = RPC.decode(rpcBytes, {
limits: {
subscriptions: this.decodeRpcLimits.maxSubscriptions,
messages: this.decodeRpcLimits.maxMessages,
control$: {
ihave: this.decodeRpcLimits.maxIhaveMessageIDs,
iwant: this.decodeRpcLimits.maxIwantMessageIDs,
graft: this.decodeRpcLimits.maxControlMessages,
prune: this.decodeRpcLimits.maxControlMessages,
prune$: {
peers: this.decodeRpcLimits.maxPeerInfos
},
idontwant: this.decodeRpcLimits.maxControlMessages,
idontwant$: {
messageIDs: this.decodeRpcLimits.maxIdontwantMessageIDs
}
}
}
})
this.metrics?.onRpcRecv(rpc, rpcBytes.length)
// Since handleReceivedRpc may be overridden entirely in unsafe ways,
// the simplest/safest option here is to wrap in a function and capture all errors
// to prevent a top-level unhandled exception
// This processing of rpc messages should happen without awaiting full validation/execution of prior messages
if (this.opts.awaitRpcHandler) {
try {
await this.handleReceivedRpc(peerId, rpc)
} catch (err) {
this.metrics?.onRpcRecvError()
this.log(err)
}
} else {
this.handleReceivedRpc(peerId, rpc).catch((err) => {
this.metrics?.onRpcRecvError()
this.log(err)
})
}
} catch (e) {
this.metrics?.onRpcDataError()
this.log(e as Error)
}
}
})
} catch (err) {
this.metrics?.onPeerReadStreamError()
this.handlePeerReadStreamError(err as Error, peerId)
}
}
/**
* Handles errors thrown by the read stream pipe. Kept as a separate method less for
* functional reasons and more so tests can spy on the error handling
*/
private handlePeerReadStreamError (err: Error, peerId: PeerId): void {
this.log.error(err)
this.onPeerDisconnected(peerId)
}
/**
* Handles an rpc request from a peer
*/
public async handleReceivedRpc (from: PeerId, rpc: RPC): Promise<void> {
// Check if peer is graylisted in which case we ignore the event
if (!this.acceptFrom(from.toString())) {
this.log('received message from unacceptable peer %p', from)
this.metrics?.rpcRecvNotAccepted.inc()
return
}
const subscriptions = (rpc.subscriptions != null) ? rpc.subscriptions.length : 0
const messages = (rpc.messages != null) ? rpc.messages.length : 0
let ihave = 0
let iwant = 0
let graft = 0
let prune = 0
if (rpc.control != null) {
if (rpc.control.ihave != null) { ihave = rpc.control.ihave.length }
if (rpc.control.iwant != null) { iwant = rpc.control.iwant.length }
if (rpc.control.graft != null) { graft = rpc.control.graft.length }
if (rpc.control.prune != null) { prune = rpc.control.prune.length }
}
this.log(
`rpc.from ${from.toString()} subscriptions ${subscriptions} messages ${messages} ihave ${ihave} iwant ${iwant} graft ${graft} prune ${prune}`
)
// Handle received subscriptions
if ((rpc.subscriptions != null) && rpc.subscriptions.length > 0) {
// update peer subscriptions
const subscriptions: Array<{ topic: TopicStr, subscribe: boolean }> = []
rpc.subscriptions.forEach((subOpt) => {
const topic = subOpt.topic
const subscribe = subOpt.subscribe === true
if (topic != null) {
if ((this.allowedTopics != null) && !this.allowedTopics.has(topic)) {
// Not allowed: subscription data-structures are not bounded by topic count
// TODO: Should apply behaviour penalties?
return
}
this.handleReceivedSubscription(from, topic, subscribe)
subscriptions.push({ topic, subscribe })
}
})
this.safeDispatchEvent<SubscriptionChangeData>('subscription-change', {
detail: { peerId: from, subscriptions }
})
}
// Handle messages
// TODO: (up to limit)
for (const message of rpc.messages) {
if ((this.allowedTopics != null) && !this.allowedTopics.has(message.topic)) {
// Not allowed: message cache data-structures are not bounded by topic count
// TODO: Should apply behaviour penalties?
continue
}
const handleReceivedMessagePromise = this.handleReceivedMessage(from, message)
// Should never throw, but handle just in case
.catch((err) => {
this.metrics?.onMsgRecvError(message.topic)
this.log(err)
})
if (this.opts.awaitRpcMessageHandler) {
await handleReceivedMessagePromise
}
}
// Handle control messages
if (rpc.control != null) {
await this.handleControlMessage(from.toString(), rpc.control)
}
}
/**
* Handles a subscription change from a peer
*/
private handleReceivedSubscription (from: PeerId, topic: TopicStr, subscribe: boolean): void {
this.log('subscription update from %p topic %s', from, topic)
let topicSet = this.topics.get(topic)
if (topicSet == null) {
topicSet = new Set()
this.topics.set(topic, topicSet)
}
if (subscribe) {
// subscribe peer to new topic
topicSet.add(from.toString())
} else {
// unsubscribe from existing topic
topicSet.delete(from.toString())
}
// TODO: rust-libp2p has A LOT more logic here
}
/**
* Handles a newly received message from an RPC.
* May forward to all peers in the mesh.
*/
private async handleReceivedMessage (from: PeerId, rpcMsg: RPC.Message): Promise<void> {
this.metrics?.onMsgRecvPreValidation(rpcMsg.topic)
const validationResult = await this.validateReceivedMessage(from, rpcMsg)
this.metrics?.onPrevalidationResult(rpcMsg.topic, validationResult.code)
const validationCode = validationResult.code
switch (validationCode) {
case MessageStatus.duplicate:
// Report the duplicate
this.score.duplicateMessage(from.toString(), validationResult.msgIdStr, rpcMsg.topic)
// due to fastMsgIdFn collisions, 2 different messages may end up with the same fastMsgId,
// so we also need to mark the duplicate message as delivered, otherwise the promise is not resolved
// and the peer gets penalized. See https://github.com/ChainSafe/js-libp2p-gossipsub/pull/385
this.gossipTracer.deliverMessage(validationResult.msgIdStr, true)
this.mcache.observeDuplicate(validationResult.msgIdStr, from.toString())
return
case MessageStatus.invalid:
// invalid messages received
// metrics.register_invalid_message(&raw_message.topic)
// Tell peer_score about reject
// Reject the original source, and any duplicates we've seen from other peers.
if (validationResult.msgIdStr != null) {
const msgIdStr = validationResult.msgIdStr
this.score.rejectMessage(from.toString(), msgIdStr, rpcMsg.topic, validationResult.reason)
this.gossipTracer.rejectMessage(msgIdStr, validationResult.reason)
} else {
this.score.rejectInvalidMessage(from.toString(), rpcMsg.topic)
}
this.metrics?.onMsgRecvInvalid(rpcMsg.topic, validationResult)
return
case MessageStatus.valid:
// Tells score that message arrived (but is maybe not fully validated yet).
// Consider the message as delivered for gossip promises.
this.score.validateMessage(validationResult.messageId.msgIdStr)
this.gossipTracer.deliverMessage(validationResult.messageId.msgIdStr)
// Add the message to our memcache
// if no validation is required, mark the message as validated
this.mcache.put(validationResult.messageId, rpcMsg, !this.opts.asyncValidation)
// Dispatch the message to the user if we are subscribed to the topic
if (this.subscriptions.has(rpcMsg.topic)) {
const isFromSelf = this.components.peerId.equals(from)
if (!isFromSelf || this.opts.emitSelf) {
super.dispatchEvent(
new CustomEvent<GossipsubMessage>('gossipsub:message', {
detail: {
propagationSource: from,
msgId: validationResult.messageId.msgIdStr,
msg: validationResult.msg
}
})
)
// TODO: Add option to switch between emit per topic or all messages in one
super.dispatchEvent(new CustomEvent<Message>('message', { detail: validationResult.msg }))
}
}
// Forward the message to mesh peers, if no validation is required
// If asyncValidation is ON, expect the app layer to call reportMessageValidationResult(), then forward
if (!this.opts.asyncValidation) {
// TODO: in rust-libp2p
// .forward_msg(&msg_id, raw_message, Some(propagation_source))
this.forwardMessage(validationResult.messageId.msgIdStr, rpcMsg, from.toString())
}
break
default:
throw new Error(`Invalid validation result: ${validationCode}`)
}
}
/**
* Validates a newly received message from an RPC and computes its message id.
* Returns whether the message is valid, invalid or a duplicate.
*/
private async validateReceivedMessage (
propagationSource: PeerId,
rpcMsg: RPC.Message
): Promise<ReceivedMessageResult> {
// Fast message ID stuff
const fastMsgIdStr = this.fastMsgIdFn?.(rpcMsg)
const msgIdCached = fastMsgIdStr !== undefined ? this.fastMsgIdCache?.get(fastMsgIdStr) : undefined
if (msgIdCached != null) {
// This message has been seen previously. Ignore it
return { code: MessageStatus.duplicate, msgIdStr: msgIdCached }
}
// Perform basic validation on message and convert to RawGossipsubMessage for fastMsgIdFn()
const validationResult = await validateToRawMessage(this.globalSignaturePolicy, rpcMsg)
if (!validationResult.valid) {
return { code: MessageStatus.invalid, reason: RejectReason.Error, error: validationResult.error }
}
const msg = validationResult.message
// Try and perform the data transform to the message. If it fails, consider it invalid.
try {
if (this.dataTransform != null) {
msg.data = this.dataTransform.inboundTransform(rpcMsg.topic, msg.data)
}
} catch (e) {
this.log('Invalid message, transform failed', e)
return { code: MessageStatus.invalid, reason: RejectReason.Error, error: ValidateError.TransformFailed }
}
// TODO: Check if message is from a blacklisted source or propagation origin
// - Reject any message from a blacklisted peer
// - Also reject any message that originated from a blacklisted peer
// - reject messages claiming to be from ourselves but not locally published
// Calculate the message id on the transformed data.
const msgId = await this.msgIdFn(msg)
const msgIdStr = this.msgIdToStrFn(msgId)
const messageId = { msgId, msgIdStr }
// Add the message to the duplicate caches
if (fastMsgIdStr !== undefined && (this.fastMsgIdCache != null)) {
const collision = this.fastMsgIdCache.put(fastMsgIdStr, msgIdStr)
if (collision) {
this.metrics?.fastMsgIdCacheCollision.inc()
}
}
if (this.seenCache.has(msgIdStr)) {
return { code: MessageStatus.duplicate, msgIdStr }
} else {
this.seenCache.put(msgIdStr)
}
// possibly send IDONTWANTs to mesh peers
if ((rpcMsg.data?.length ?? 0) >= this.opts.idontwantMinDataSize) {
this.sendIDontWants(msgId, rpcMsg.topic, propagationSource.toString())
}
// (Optional) Provide custom validation here with dynamic validators per topic
// NOTE: This custom topicValidator() must resolve fast (< 100ms) to allow scores
// to not penalize peers for long validation times.
const topicValidator = this.topicValidators.get(rpcMsg.topic)
if (topicValidator != null) {
let acceptance: TopicValidatorResult
// Use try {} catch {} in case topicValidator() throws synchronously or rejects
try {
acceptance = await topicValidator(propagationSource, msg)
} catch (e) {
const errCode = (e as { code: string }).code
if (errCode === constants.ERR_TOPIC_VALIDATOR_REJECT) {
  acceptance = TopicValidatorResult.Reject
} else {
  // ERR_TOPIC_VALIDATOR_IGNORE and any other error are treated as Ignore
  acceptance = TopicValidatorResult.Ignore
}
}
if (acceptance !== TopicValidatorResult.Accept) {
return { code: MessageStatus.invalid, reason: rejectReasonFromAcceptance(acceptance), msgIdStr }
}
}
return { code: MessageStatus.valid, messageId, msg }
}
/**
* Return score of a peer.
*/
getScore (peerId: PeerIdStr): number {
return this.score.score(peerId)
}
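// Illustrative sketch (assumes an instance named `gossipsub` and a PeerIdStr `peerIdStr`):
// comparing a peer's score against the configured graylist threshold, the same comparison
// acceptFrom() performs below; dumpPeerScoreStats() returns a cloned snapshot for debugging.
//
//   const score = gossipsub.getScore(peerIdStr)
//   const isGraylisted = score < gossipsub.opts.scoreThresholds.graylistThreshold
//   const stats = gossipsub.dumpPeerScoreStats()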
/**
* Send an rpc object to a peer with subscriptions
*/
private sendSubscriptions (toPeer: PeerIdStr, topics: string[], subscribe: boolean): void {
this.sendRpc(toPeer, {
subscriptions: topics.map((topic) => ({ topic, subscribe })),
messages: []
})
}
/**
* Handles an rpc control message from a peer
*/
private async handleControlMessage (id: PeerIdStr, controlMsg: RPC.ControlMessage): Promise<void> {
if (controlMsg === undefined) {
return
}
const iwant = (controlMsg.ihave?.length > 0) ? this.handleIHave(id, controlMsg.ihave) : []
const ihave = (controlMsg.iwant?.length > 0) ? this.handleIWant(id, controlMsg.iwant) : []
const prune = (controlMsg.graft?.length > 0) ? await this.handleGraft(id, controlMsg.graft) : []
;(controlMsg.prune?.length > 0) && (await this.handlePrune(id, controlMsg.prune))
;(controlMsg.idontwant?.length > 0) && this.handleIdontwant(id, controlMsg.idontwant)
if ((iwant.length === 0) && (ihave.length === 0) && (prune.length === 0)) {
return
}
const sent = this.sendRpc(id, createGossipRpc(ihave, { iwant, prune }))
const iwantMessageIds = iwant[0]?.messageIDs
if (iwantMessageIds != null) {
if (sent) {
this.gossipTracer.addPromise(id, iwantMessageIds)
} else {
this.metrics?.iwantPromiseUntracked.inc(1)
}
}
}
/**
* Whether to accept a message from a peer
*/
public acceptFrom (id: PeerIdStr): boolean {
if (this.direct.has(id)) {
return true
}
const now = Date.now()
const entry = this.acceptFromWhitelist.get(id)
if ((entry != null) && entry.messagesAccepted < ACCEPT_FROM_WHITELIST_MAX_MESSAGES && entry.acceptUntil >= now) {
entry.messagesAccepted += 1
return true
}
const score = this.score.score(id)
if (score >= ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE) {
// peer is unlikely to be able to drop its score to `graylistThreshold`
// after 128 messages or 1s
this.acceptFromWhitelist.set(id, {
messagesAccepted: 0,
acceptUntil: now + ACCEPT_FROM_WHITELIST_DURATION_MS
})
} else {
this.acceptFromWhitelist.delete(id)
}
return score >= this.opts.scoreThresholds.graylistThreshold
}
/**
* Handles IHAVE messages
*/
private handleIHave (id: PeerIdStr, ihave: RPC.ControlIHave[]): RPC.ControlIWant[] {
if (ihave.length === 0) {
return []
}
// we ignore IHAVE gossip from any peer whose score is below the gossip threshold
const score = this.score.score(id)
if (score < this.opts.scoreThresholds.gossipThreshold) {
this.log('IHAVE: ignoring peer %s with score below threshold [ score = %d ]', id, score)
this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.LowScore })
return []
}
// IHAVE flood protection
const peerhave = (this.peerhave.get(id) ?? 0) + 1
this.peerhave.set(id, peerhave)
if (peerhave > constants.GossipsubMaxIHaveMessages) {
this.log(
'IHAVE: peer %s has advertised too many times (%d) within this heartbeat interval; ignoring',
id,
peerhave
)
this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.MaxIhave })
return []
}
const iasked = this.iasked.get(id) ?? 0
if (iasked >= constants.GossipsubMaxIHaveLength) {
this.log('IHAVE: peer %s has already advertised too many messages (%d); ignoring', id, iasked)
this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.MaxIasked })
return []
}
// string msgId => msgId
const iwant = new Map<MsgIdStr, Uint8Array>()
ihave.forEach(({ topicID, messageIDs }) => {
if (topicID == null || (messageIDs == null) || !this.mesh.has(topicID)) {
return
}
let idonthave = 0
messageIDs.forEach((msgId) => {
const msgIdStr = this.msgIdToStrFn(msgId)
if (!this.seenCache.has(msgIdStr)) {
iwant.set(msgIdStr, msgId)
idonthave++
}
})
this.metrics?.onIhaveRcv(topicID, messageIDs.length, idonthave)
})
if (iwant.size === 0) {
return []
}
let iask = iwant.size
if (iask + iasked > constants.GossipsubMaxIHaveLength) {
iask = constants.GossipsubMaxIHaveLength - iasked
}
this.log('IHAVE: Asking for %d out of %d messages from %s', iask, iwant.size, id)
let iwantList = Array.from(iwant.values())
// ask in random order
shuffle(iwantList)
// truncate to the messages we are actually asking for and update the iasked counter
iwantList = iwantList.slice(0, iask)
this.iasked.set(id, iasked + iask)
// do not add gossipTracer promise here until a successful sendRpc()
return [
{
messageIDs: iwantList
}
]
}
/**
* Handles IWANT messages
* Returns messages to send back to peer
*/
private handleIWant (id: PeerIdStr, iwant: RPC.ControlIWant[]): RPC.Message[] {
if (iwant.length === 0) {
return []
}
// we don't respond to IWANT requests from any peer whose score is below the gossip threshold
const score = this.score.score(id)
if (score < this.opts.scoreThresholds.gossipThreshold) {
this.log('IWANT: ignoring peer %s with score below threshold [score = %d]', id, score)
return []
}
const ihave = new Map<MsgIdStr, RPC.Message>()
const iwantByTopic = new Map<TopicStr, number>()
let iwantDonthave = 0
iwant.forEach(({ messageIDs }) => {
messageIDs?.forEach((msgId) => {
const msgIdStr = this.msgIdToStrFn(msgId)
const entry = this.mcache.getWithIWantCount(msgIdStr, id)
if (entry == null) {
iwantDonthave++
return
}
iwantByTopic.set(entry.msg.topic, 1 + (iwantByTopic.get(entry.msg.topic) ?? 0))
if (entry.count > constants.GossipsubGossipRetransmission) {
this.log('IWANT: Peer %s has asked for message %s too many times: ignoring request', id, msgId)
return
}
ihave.set(msgIdStr, entry.msg)
})
})
this.metrics?.onIwantRcv(iwantByTopic, iwantDonthave)
if (ihave.size === 0) {
this.log('IWANT: Could not provide any wanted messages to %s', id)
return []
}
this.log('IWANT: Sending %d messages to %s', ihave.size, id)
return Array.from(ihave.values())
}
/**
* Handles Graft messages
*/
private async handleGraft (id: PeerIdStr, graft: RPC.ControlGraft[]): Promise<RPC.ControlPrune[]> {
const prune: TopicStr[] = []
const score = this.score.score(id)
const now = Date.now()
let doPX = this.opts.doPX
graft.forEach(({ topicID }) => {
if (topicID == null) {
return
}
const peersInMesh = this.mesh.get(topicID)
if (peersInMesh == null) {
// don't do PX when there is an unknown topic to avoid leaking our peers
doPX = false
// spam hardening: ignore GRAFTs for unknown topics
return
}
// check if peer is already in the mesh; if so do nothing
if (peersInMesh.has(id)) {
return
}
const backoffExpiry = this.backoff.get(topicID)?.get(id)
// This if/else chain contains the various cases of valid (and semi-valid) GRAFTs
// Most of these cases result in a PRUNE immediately being sent in response
// we don't GRAFT to/from direct peers; complain loudly if this happens
if (this.direct.has(id)) {
this.log('GRAFT: ignoring request from direct peer %s', id)
// this is possibly a bug from a non-reciprocal configuration; send a PRUNE
prune.push(topicID)
// but don't px
doPX = false
// make sure we are not backing off that peer
} else if (typeof backoffExpiry === 'number' && now < backoffExpiry) {
this.log('GRAFT: ignoring backed off peer %s', id)
// add behavioral penalty
this.score.addPenalty(id, 1, ScorePenalty.GraftBackoff)
// no PX
doPX = false
// check the flood cutoff -- is the GRAFT coming too fast?
const floodCutoff = backoffExpiry + this.opts.graftFloodThreshold - this.opts.pruneBackoff
if (now < floodCutoff) {
// extra penalty
this.score.addPenalty(id, 1, ScorePenalty.GraftBackoff)
}
// refresh the backoff
this.addBackoff(id, topicID)
prune.push(topicID)
// check the score
} else if (score < 0) {
// we don't GRAFT peers with negative score
this.log('GRAFT: ignoring peer %s with negative score: score=%d, topic=%s', id, score, topicID)