@platformatic/kafka

Modern and performant client for Apache Kafka

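What follows is the consumer implementation shipped in the package. As a rough orientation before the source, here is a minimal usage sketch of the Consumer class it defines; it is not part of the file. The broker address, client id, topic and group names are placeholders, the bootstrapBrokers and clientId options and the top-level re-export are assumptions about the wider package rather than something visible in this file, and the message fields logged below are assumptions as well:

// Minimal usage sketch (assumptions noted above), not part of the published file.
import { Consumer } from '@platformatic/kafka'

const consumer = new Consumer({
  groupId: 'example-group', // read by the constructor below
  clientId: 'example-client', // assumed base option
  bootstrapBrokers: ['localhost:9092'] // assumed base option
})

// Without a callback, consume() returns a promise that resolves to a
// readable stream of messages (a MessagesStream, created below).
const stream = await consumer.consume({ topics: ['example-topic'] })

stream.on('data', message => {
  // Field names here are assumptions about the message shape.
  console.log(message.topic, message.partition, message.offset)
})

// close() leaves the consumer group and closes the fetch connection pool.
await consumer.close()
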
import { createPromisifiedCallback, createTimeoutCallback, kCallbackPromise, runConcurrentCallbacks } from "../../apis/callbacks.js";
import { FetchIsolationLevels, FindCoordinatorKeyTypes } from "../../apis/enumerations.js";
import { consumerCommitsChannel, consumerConsumesChannel, consumerFetchesChannel, consumerGroupChannel, consumerHeartbeatChannel, consumerLagChannel, consumerOffsetsChannel, createDiagnosticContext } from "../../diagnostic.js";
import { protocolErrors, UserError } from "../../errors.js";
import { INT32_SIZE } from "../../protocol/definitions.js";
import { Reader } from "../../protocol/reader.js";
import { Writer } from "../../protocol/writer.js";
import { kAutocommit, kRefreshOffsetsAndFetch } from "../../symbols.js";
import { Base, kAfterCreate, kCheckNotClosed, kClosed, kCreateConnectionPool, kFetchConnections, kFormatValidationErrors, kGetApi, kGetBootstrapConnection, kGetConnection, kMetadata, kOptions, kPerformDeduplicated, kPerformWithRetry, kPrometheus, kValidateOptions } from "../base/base.js";
import { defaultBaseOptions } from "../base/options.js";
import { ensureMetric } from "../metrics.js";
import { MessagesStream } from "./messages-stream.js";
import { commitOptionsValidator, consumeOptionsValidator, consumerOptionsValidator, defaultConsumerOptions, fetchOptionsValidator, getLagOptionsValidator, groupIdAndOptionsValidator, groupOptionsValidator, listCommitsOptionsValidator, listOffsetsOptionsValidator } from "./options.js";
import { roundRobinAssigner } from "./partitions-assigners.js";
import { TopicsMap } from "./topics-map.js";
export class Consumer extends Base {
    groupId;
    generationId;
    memberId;
    topics;
    assignments;
    #assignments;
    #members;
    #membershipActive;
    #isLeader;
    #protocol;
    #coordinatorId;
    #heartbeatInterval;
    #lastHeartbeatIntervalMs;
    #lastHeartbeat;
    #useConsumerGroupProtocol;
    #memberEpoch;
    #groupRemoteAssignor;
    #streams;
    #lagMonitoring;
    /*
      The following requests are blocking in Kafka:

      FetchRequest (especially with maxWaitMs)
      JoinGroupRequest
      SyncGroupRequest
      OffsetCommitRequest
      ProduceRequest
      ListOffsetsRequest
      ListGroupsRequest
      DescribeGroupsRequest

      In order to avoid consumer group problems, we isolate FetchRequest, and only that, on a separate connection.
    */
    [kFetchConnections];
    // Metrics
    #metricActiveStreams;
    #metricLags;
    constructor(options) {
        super(options);
        this[kOptions] = Object.assign({}, defaultBaseOptions, defaultConsumerOptions, options);
        this[kValidateOptions](options, consumerOptionsValidator, '/options');
        this.groupId = options.groupId;
        this.generationId = 0;
        this.memberId = null;
        this.topics = new TopicsMap();
        this.assignments = null;
        this.#assignments = [];
        this.#members = new Map();
        this.#membershipActive = false;
        this.#isLeader = false;
        this.#protocol = null;
        this.#coordinatorId = null;
        this.#heartbeatInterval = null;
        this.#lastHeartbeatIntervalMs = 0;
        this.#lastHeartbeat = null;
        this.#streams = new Set();
        this.#lagMonitoring = null;
        this.#memberEpoch = 0;
        this.#useConsumerGroupProtocol = this[kOptions].groupProtocol === 'consumer';
        this.#groupRemoteAssignor = this[kOptions].groupRemoteAssignor ??
null; this.#validateGroupOptions(this[kOptions], groupIdAndOptionsValidator); // Initialize connection pool this[kFetchConnections] = this[kCreateConnectionPool](); if (this[kPrometheus]) { ensureMetric(this[kPrometheus], 'Gauge', 'kafka_consumers', 'Number of active Kafka consumers').inc(); this.#metricActiveStreams = ensureMetric(this[kPrometheus], 'Gauge', 'kafka_consumers_streams', 'Number of active Kafka consumers streams'); this.topics.setMetric(ensureMetric(this[kPrometheus], 'Gauge', 'kafka_consumers_topics', 'Number of topics being consumed')); this.#metricLags = ensureMetric(this[kPrometheus], 'Histogram', 'kafka_consumers_lags', 'Lag of active Kafka consumers'); } this[kAfterCreate]('consumer'); } get streamsCount() { return this.#streams.size; } get lastHeartbeat() { return this.#lastHeartbeat; } close(force, callback) { if (typeof force === 'function') { callback = force; force = false; } if (!callback) { callback = createPromisifiedCallback(); } if (this[kClosed]) { callback(null); return callback[kCallbackPromise]; } this[kClosed] = true; clearTimeout(this.#lagMonitoring); let closer; if (this.#useConsumerGroupProtocol) { closer = this.#leaveGroupConsumerProtocol.bind(this); } else if (this.#membershipActive) { closer = this.#leaveGroupClassicProtocol.bind(this); } else { closer = function noopCloser(_, callback) { callback(null); }; } closer(force, error => { if (error) { this[kClosed] = false; callback(error); return; } this[kFetchConnections].close(error => { if (error) { this[kClosed] = false; callback(error); return; } super.close(error => { if (error) { this[kClosed] = false; callback(error); return; } this.topics.clear(); if (this[kPrometheus]) { ensureMetric(this[kPrometheus], 'Gauge', 'kafka_consumers', 'Number of active Kafka consumers').dec(); } callback(null); }); }); }); return callback[kCallbackPromise]; } isActive() { const baseReady = super.isActive(); if (!baseReady) { return false; } if (this.#useConsumerGroupProtocol) { return !!this.memberId && this.#memberEpoch >= 0; } // We consider the group ready if we have a groupId, a memberId and heartbeat interval return this.#membershipActive && Boolean(this.groupId) && Boolean(this.memberId) && this.#heartbeatInterval !== null; } consume(options, callback) { if (!callback) { callback = createPromisifiedCallback(); } if (this[kCheckNotClosed](callback)) { return callback[kCallbackPromise]; } const validationError = this[kValidateOptions](options, consumeOptionsValidator, '/options', false); if (validationError) { callback(validationError, undefined); return callback[kCallbackPromise]; } options.autocommit ??= this[kOptions].autocommit ?? 
true; options.maxBytes ??= this[kOptions].maxBytes; options.deserializers = Object.assign({}, options.deserializers, this[kOptions].deserializers); options.highWaterMark ??= this[kOptions].highWaterMark; this.#consume(options, callback); return callback[kCallbackPromise]; } fetch(options, callback) { if (!callback) { callback = createPromisifiedCallback(); } if (this[kCheckNotClosed](callback)) { return callback[kCallbackPromise]; } const validationError = this[kValidateOptions](options, fetchOptionsValidator, '/options', false); if (validationError) { callback(validationError, undefined); return callback[kCallbackPromise]; } consumerFetchesChannel.traceCallback(this.#fetch, 1, createDiagnosticContext({ client: this, operation: 'fetch', options }), this, options, callback); return callback[kCallbackPromise]; } commit(options, callback) { if (!callback) { callback = createPromisifiedCallback(); } if (this[kCheckNotClosed](callback)) { return callback[kCallbackPromise]; } const validationError = this[kValidateOptions](options, commitOptionsValidator, '/options', false); if (validationError) { callback(validationError); return callback[kCallbackPromise]; } consumerCommitsChannel.traceCallback(this.#commit, 1, createDiagnosticContext({ client: this, operation: 'commit', options }), this, options, callback); return callback[kCallbackPromise]; } listOffsets(options, callback) { if (!callback) { callback = createPromisifiedCallback(); } if (this[kCheckNotClosed](callback)) { return callback[kCallbackPromise]; } const validationError = this[kValidateOptions](options, listOffsetsOptionsValidator, '/options', false); if (validationError) { callback(validationError, undefined); return callback[kCallbackPromise]; } consumerOffsetsChannel.traceCallback(this.#listOffsets, 2, createDiagnosticContext({ client: this, operation: 'listOffsets', options }), this, false, options, callback); return callback[kCallbackPromise]; } listOffsetsWithTimestamps(options, callback) { if (!callback) { callback = createPromisifiedCallback(); } if (this[kCheckNotClosed](callback)) { return callback[kCallbackPromise]; } const validationError = this[kValidateOptions](options, listOffsetsOptionsValidator, '/options', false); if (validationError) { callback(validationError, undefined); return callback[kCallbackPromise]; } consumerOffsetsChannel.traceCallback(this.#listOffsets, 2, createDiagnosticContext({ client: this, operation: 'listOffsets', options }), this, true, options, callback); return callback[kCallbackPromise]; } listCommittedOffsets(options, callback) { if (!callback) { callback = createPromisifiedCallback(); } if (this[kCheckNotClosed](callback)) { return callback[kCallbackPromise]; } const validationError = this[kValidateOptions](options, listCommitsOptionsValidator, '/options', false); if (validationError) { callback(validationError, undefined); return callback[kCallbackPromise]; } consumerOffsetsChannel.traceCallback(this.#listCommittedOffsets, 1, createDiagnosticContext({ client: this, operation: 'listCommittedOffsets', options }), this, options, callback); return callback[kCallbackPromise]; } getLag(options, callback) { if (!callback) { callback = createPromisifiedCallback(); } if (this[kCheckNotClosed](callback)) { return callback[kCallbackPromise]; } const validationError = this[kValidateOptions](options, getLagOptionsValidator, '/options', false); if (validationError) { callback(validationError, undefined); return callback[kCallbackPromise]; } this.listOffsets(options, (error, offsets) => { if (error) { 
this.emit('consumer:lag:error', error); callback(error, undefined); return; } // Now gather the last committed offsets from each stream const committeds = new Map(); for (const stream of this.#streams) { for (const [topic, offset] of stream.offsetsCommitted) { committeds.set(topic, offset); } } // Build the lag map back. A -1n denotes that the consumer is not assigned to a certain partition const lag = new Map(); for (const [topic, partitions] of offsets) { const toInclude = new Set(options.partitions?.[topic] ?? []); const hasPartitionsFilter = toInclude.size > 0; const partitionLags = []; for (let i = 0; i < partitions.length; i++) { if (hasPartitionsFilter && !toInclude.has(i)) { partitionLags.push(-2n); continue; } const latest = partitions[i]; const committed = committeds.get(`${topic}:${i}`); // If the consumer is not assigned to this partition, we return -1n. // Otherwise we compute the lag as latest - committed - 1. The -1 is because latest is the offset of the next message to be produced. partitionLags.push(typeof committed === 'undefined' ? -1n : latest - committed - 1n); } lag.set(topic, partitionLags); } // Publish to the diagnostic channel consumerLagChannel.publish({ client: this, lag }); // Publish to the metric if available if (this.#metricLags) { for (const partitions of lag.values()) { for (const l of partitions) { if (l >= 0n) { this.#metricLags.observe(Number(l)); } } } } this.emit('consumer:lag', lag); callback(null, lag); }); return callback[kCallbackPromise]; } startLagMonitoring(options, interval) { const validationError = this[kValidateOptions](options, getLagOptionsValidator, '/options', false); if (validationError) { throw validationError; } this.#lagMonitoring = setTimeout(() => { this.getLag(options, () => this.#lagMonitoring.refresh()); }, interval); } stopLagMonitoring() { clearTimeout(this.#lagMonitoring); } findGroupCoordinator(callback) { if (!callback) { callback = createPromisifiedCallback(); } if (this[kCheckNotClosed](callback)) { return callback[kCallbackPromise]; } if (this.#coordinatorId) { callback(null, this.#coordinatorId); return callback[kCallbackPromise]; } this.#findGroupCoordinator(callback); return callback[kCallbackPromise]; } joinGroup(options, callback) { if (!callback) { callback = createPromisifiedCallback(); } if (this[kCheckNotClosed](callback)) { return callback[kCallbackPromise]; } if (this.#useConsumerGroupProtocol) { callback(null, ''); return callback[kCallbackPromise]; } const validationError = this[kValidateOptions](options, groupOptionsValidator, '/options', false); if (validationError) { callback(validationError, undefined); return callback[kCallbackPromise]; } options.sessionTimeout ??= this[kOptions].sessionTimeout; options.rebalanceTimeout ??= this[kOptions].rebalanceTimeout; options.heartbeatInterval ??= this[kOptions].heartbeatInterval; options.protocols ??= this[kOptions].protocols; this.#validateGroupOptions(options); this.#membershipActive = true; this.#joinGroup(options, callback); return callback[kCallbackPromise]; } leaveGroup(force, callback) { if (typeof force === 'function') { callback = force; force = false; } if (!callback) { callback = createPromisifiedCallback(); } if (this[kCheckNotClosed](callback)) { return callback[kCallbackPromise]; } if (this.#useConsumerGroupProtocol) { callback(null); return callback[kCallbackPromise]; } this.#membershipActive = false; this.#leaveGroupClassicProtocol(force, error => { if (error) { this.#membershipActive = true; callback(error); return; } this.#lastHeartbeat = null; 
callback(null); }); return callback[kCallbackPromise]; } #consume(options, callback) { consumerConsumesChannel.traceCallback(this.#performConsume, 2, createDiagnosticContext({ client: this, operation: 'consume', options }), this, options, true, callback); } #fetch(options, callback) { this[kPerformWithRetry]('fetch', retryCallback => { this[kMetadata]({ topics: this.topics.current }, (error, metadata) => { if (error) { retryCallback(error, undefined); return; } const broker = metadata.brokers.get(options.node); if (!broker) { retryCallback(new UserError(`Cannot find broker with node id ${options.node}`), undefined); return; } this[kFetchConnections].get(broker, (error, connection) => { if (error) { retryCallback(error, undefined); return; } this[kGetApi]('Fetch', (error, api) => { if (error) { retryCallback(error, undefined); return; } api(connection, options.maxWaitTime ?? this[kOptions].maxWaitTime, options.minBytes ?? this[kOptions].minBytes, options.maxBytes ?? this[kOptions].maxBytes, FetchIsolationLevels[options.isolationLevel ?? this[kOptions].isolationLevel], 0, 0, options.topics, [], '', retryCallback); }); }); }); }, callback, 0); } #commit(options, callback) { this.#performGroupOperation('commit', (connection, groupCallback) => { const topics = new Map(); for (const { topic, partition, offset, leaderEpoch } of options.offsets) { let topicOffsets = topics.get(topic); if (!topicOffsets) { topicOffsets = { name: topic, partitions: [] }; topics.set(topic, topicOffsets); } topicOffsets.partitions.push({ partitionIndex: partition, committedOffset: offset, committedLeaderEpoch: leaderEpoch, committedMetadata: null }); } this[kGetApi]('OffsetCommit', (error, api) => { if (error) { groupCallback(error, undefined); return; } api(connection, this.groupId, this.#useConsumerGroupProtocol ? this.#memberEpoch : this.generationId, this.memberId, null, Array.from(topics.values()), groupCallback); }); }, error => { callback(error); }); } #listOffsets(withTimestamps, options, callback) { let topics = options.topics; if (!topics || topics.length === 0) { topics = this.topics.current; } this[kMetadata]({ topics }, (error, metadata) => { if (error) { callback(error, undefined); return; } const requests = new Map(); for (const name of topics) { const topic = metadata.topics.get(name); const toInclude = new Set(options.partitions?.[name] ?? []); const hasPartitionsFilter = toInclude.size > 0; for (let i = 0; i < topic.partitionsCount; i++) { if (hasPartitionsFilter && !toInclude.delete(i)) { continue; } const partition = topic.partitions[i]; const { leader, leaderEpoch } = partition; let leaderRequests = requests.get(leader); if (!leaderRequests) { leaderRequests = new Map(); requests.set(leader, leaderRequests); } let topicRequests = leaderRequests.get(name); if (!topicRequests) { topicRequests = { name, partitions: [] }; leaderRequests.set(name, topicRequests); } topicRequests.partitions.push({ partitionIndex: i, currentLeaderEpoch: leaderEpoch, timestamp: options.timestamp ?? 
-1n }); } if (toInclude.size > 0) { callback(new UserError(`Specified partition(s) not found in topic ${name}`), undefined); return; } } runConcurrentCallbacks('Listing offsets failed.', requests, ([leader, requests], concurrentCallback) => { this[kPerformWithRetry]('listOffsets', retryCallback => { this[kGetConnection](metadata.brokers.get(leader), (error, connection) => { if (error) { retryCallback(error, undefined); return; } this[kGetApi]('ListOffsets', (error, api) => { if (error) { retryCallback(error, undefined); return; } api(connection, -1, FetchIsolationLevels[options.isolationLevel ?? this[kOptions].isolationLevel], Array.from(requests.values()), retryCallback); }); }); }, concurrentCallback, 0); }, (error, responses) => { if (error) { callback(this.#handleMetadataError(error), undefined); return; } let offsets = new Map(); if (withTimestamps) { offsets = new Map(); for (const response of responses) { for (const { name: topic, partitions } of response.topics) { let topicOffsets = offsets.get(topic); if (!topicOffsets) { topicOffsets = new Map(); offsets.set(topic, topicOffsets); } for (const { partitionIndex: index, offset, timestamp } of partitions) { topicOffsets.set(index, { offset, timestamp }); } } } } else { offsets = new Map(); for (const response of responses) { for (const { name: topic, partitions } of response.topics) { let topicOffsets = offsets.get(topic); if (!topicOffsets) { topicOffsets = Array(metadata.topics.get(topic).partitionsCount); offsets.set(topic, topicOffsets); } for (const { partitionIndex: index, offset } of partitions) { topicOffsets[index] = offset; } } } } callback(null, offsets); }); }); } #listCommittedOffsets(options, callback) { const topics = []; for (const { topic: name, partitions } of options.topics) { topics.push({ name, partitionIndexes: partitions }); } this.#performGroupOperation('listCommits', (connection, groupCallback) => { this[kGetApi]('OffsetFetch', (error, api) => { if (error) { groupCallback(error, undefined); return; } api(connection, [ { groupId: this.groupId, memberId: this.memberId, memberEpoch: this.#useConsumerGroupProtocol ? 
this.#memberEpoch : -1, topics } ], false, groupCallback); }); }, (error, response) => { if (error) { callback(this.#handleMetadataError(error), undefined); return; } const committed = new Map(); for (const responseGroup of response.groups) { for (const responseTopic of responseGroup.topics) { const topic = responseTopic.name; const partitions = Array(responseTopic.partitions.length); for (const { partitionIndex: index, committedOffset } of responseTopic.partitions) { partitions[index] = committedOffset; } committed.set(topic, partitions); } } callback(null, committed); }); } #findGroupCoordinator(callback) { if (this.#coordinatorId) { callback(null, this.#coordinatorId); return; } consumerGroupChannel.traceCallback(this.#performFindGroupCoordinator, 0, createDiagnosticContext({ client: this, operation: 'findGroupCoordinator' }), this, callback); } #joinGroup(options, callback) { consumerGroupChannel.traceCallback(this.#performJoinGroup, 1, createDiagnosticContext({ client: this, operation: 'joinGroup', options }), this, options, callback); } #leaveGroupClassicProtocol(force, callback) { consumerGroupChannel.traceCallback(this.#performLeaveGroup, 1, createDiagnosticContext({ client: this, operation: 'leaveGroup', force }), this, force, callback); } #syncGroup(partitionsAssigner, callback) { consumerGroupChannel.traceCallback(this.#performSyncGroup, 2, createDiagnosticContext({ client: this, operation: 'syncGroup' }), this, partitionsAssigner, null, callback); } #heartbeat(options) { const eventPayload = { groupId: this.groupId, memberId: this.memberId, generationId: this.generationId }; consumerHeartbeatChannel.traceCallback((this.#performDeduplicateGroupOperaton), 2, createDiagnosticContext({ client: this, operation: 'heartbeat' }), this, 'heartbeat', (connection, groupCallback) => { // We have left the group in the meanwhile, abort if (!this.#membershipActive) { this.emitWithDebug('consumer:heartbeat', 'cancel', eventPayload); return; } this.emitWithDebug('consumer:heartbeat', 'start', eventPayload); this[kGetApi]('Heartbeat', (error, api) => { if (error) { groupCallback(error, undefined); return; } api(connection, this.groupId, this.generationId, this.memberId, null, groupCallback); }); }, error => { // The heartbeat has been aborted elsewhere, ignore the response if (this.#heartbeatInterval === null || !this.#membershipActive) { this.emitWithDebug('consumer:heartbeat', 'cancel', eventPayload); return; } if (error) { this.#cancelHeartbeat(); if (this.#getRejoinError(error)) { this[kPerformWithRetry]('rejoinGroup', retryCallback => { this.#joinGroup(options, retryCallback); }, error => { if (error) { this.emitWithDebug(null, 'error', error); } this.emitWithDebug('consumer', 'rejoin'); }, 0); return; } this.emitWithDebug('consumer:heartbeat', 'error', { ...eventPayload, error }); // Note that here we purposely do not return, since it was not a group related problem we schedule another heartbeat } else { this.#lastHeartbeat = new Date(); this.emitWithDebug('consumer:heartbeat', 'end', eventPayload); } this.#heartbeatInterval?.refresh(); }); } #cancelHeartbeat() { clearTimeout(this.#heartbeatInterval); this.#heartbeatInterval = null; } #consumerGroupHeartbeat(options, callback) { options.rebalanceTimeout ??= this[kOptions].rebalanceTimeout; consumerHeartbeatChannel.traceCallback(this.#performConsumerGroupHeartbeat, 1, createDiagnosticContext({ client: this, operation: 'consumerGroupHeartbeat' }), this, options, callback); } #performConsumerGroupHeartbeat(options, callback) { 
this.#performGroupOperation('consumerGroupHeartbeat', (connection, groupCallback) => { this.emitWithDebug('consumer:heartbeat', 'start'); this[kGetApi]('ConsumerGroupHeartbeat', (error, api) => { if (error) { groupCallback(error, undefined); return; } const timeoutCallback = createTimeoutCallback(groupCallback, this[kOptions].timeout, 'Heartbeat timeout.'); api(connection, this.groupId, this.memberId || '', this.#memberEpoch, null, // instanceId null, // rackId options.rebalanceTimeout, this.topics.current, this.#groupRemoteAssignor, this.#assignments, timeoutCallback); }); }, (error, response) => { if (this[kClosed]) { this.emitWithDebug('consumer:heartbeat', 'end'); callback(null); return; } if (error) { this.#cancelHeartbeat(); this.emitWithDebug('consumer:heartbeat', 'error', { error }); const fenced = error.response?.errorCode === protocolErrors.FENCED_MEMBER_EPOCH.code; if (fenced) { this.#assignments = []; this.assignments = []; this.#memberEpoch = 0; this.#consumerGroupHeartbeat(options, () => { }); callback(error); return; } this.#heartbeatInterval = setTimeout(() => { this.#consumerGroupHeartbeat(options, () => { }); }, this.#lastHeartbeatIntervalMs || 1000); callback(error); return; } this.#lastHeartbeat = new Date(); this.#memberEpoch = response.memberEpoch; if (response.memberId) { const changed = this.memberId !== response.memberId; this.memberId = response.memberId; if (changed) { this.memberId = response.memberId; this.#consumerGroupHeartbeat(options, () => { }); this.emitWithDebug('consumer:heartbeat', 'end'); callback(null); return; } } if (response.heartbeatIntervalMs > 0) { this.#cancelHeartbeat(); this.#lastHeartbeatIntervalMs = response.heartbeatIntervalMs; this.#heartbeatInterval = setTimeout(() => { this.#consumerGroupHeartbeat(options, () => { }); }, response.heartbeatIntervalMs); } const newAssignments = response.assignment?.topicPartitions; if (newAssignments) { this.#revokePartitions(newAssignments); this.#assignPartitions(newAssignments); } this.emitWithDebug('consumer:heartbeat', 'end'); callback(null); }); } #diffAssignments(A, B) { const result = []; for (const a of A) { const b = B.find(tp => tp.topicId === a.topicId); if (!b) { result.push(a); } else { const diff = a.partitions.filter(partition => !b.partitions.includes(partition)); if (diff.length > 0) { result.push({ topicId: a.topicId, partitions: diff }); } } } return result; } #revokePartitions(newAssignment) { const toRevoke = this.#diffAssignments(this.#assignments, newAssignment); if (toRevoke.length === 0) { return; } for (const stream of this.#streams) { stream.pause(); stream[kAutocommit](); } this.#updateAssignments(newAssignment, error => { for (const stream of this.#streams) { stream.resume(); } /* c8 ignore next 3 - Hard to test */ if (error) { return; } this.#cancelHeartbeat(); this.#consumerGroupHeartbeat(this[kOptions], () => { }); }); } #assignPartitions(newAssignment) { const toAssign = this.#diffAssignments(newAssignment, this.#assignments); if (toAssign.length === 0) { return; } this.#updateAssignments(newAssignment, error => { if (error) { return; } this.#cancelHeartbeat(); this.#consumerGroupHeartbeat(this[kOptions], () => { }); for (const stream of this.#streams) { // 3. 
Refresh partition offsets stream[kRefreshOffsetsAndFetch](); } }); } #updateAssignments(newAssignments, callback) { this[kMetadata]({ topics: this.topics.current }, (error, metadata) => { if (error) { callback(error); return; } const topicIdToTopic = new Map(); for (const [topic, topicMetadata] of metadata.topics) { topicIdToTopic.set(topicMetadata.id, topic); } const assignments = newAssignments.map(tp => ({ topic: topicIdToTopic.get(tp.topicId), partitions: tp.partitions })); this.#assignments = newAssignments; this.assignments = assignments; callback(null); }); } #joinGroupConsumerProtocol(options, callback) { this.#memberEpoch = 0; this.#assignments = []; this.#membershipActive = true; this.#consumerGroupHeartbeat(options, err => { if (this.memberId) { this.emitWithDebug('consumer', 'group:join', { groupId: this.groupId, memberId: this.memberId }); } callback(err); }); } #leaveGroupConsumerProtocol(_, callback) { // Leave by sending a heartbeat with memberEpoch = -1 this.#cancelHeartbeat(); this.#performDeduplicateGroupOperaton('leaveGroupConsumerProtocol', (connection, groupCallback) => { this[kGetApi]('ConsumerGroupHeartbeat', (error, api) => { if (error) { groupCallback(error, undefined); return; } api(connection, this.groupId, this.memberId, -1, // memberEpoch = -1 signals leave null, // instanceId null, // rackId 0, // rebalanceTimeout [], // subscribedTopicNames this.#groupRemoteAssignor, [], // topicPartitions groupCallback); }); }, _error => { this.emitWithDebug('consumer', 'group:leave', { groupId: this.groupId, memberId: this.memberId }); this.memberId = null; this.#memberEpoch = -1; this.#assignments = []; this.assignments = []; callback(null); }); } #performConsume(options, trackTopics, callback) { // Subscribe all topics let joinNeeded = this.memberId === null; if (trackTopics) { for (const topic of options.topics) { if (this.topics.track(topic)) { joinNeeded = true; } } } // If we need to (re)join the group, do that first and then try again if (joinNeeded) { if (this.#useConsumerGroupProtocol) { this.#joinGroupConsumerProtocol(options, error => { if (error) { callback(error, undefined); return; } this.#performConsume(options, false, callback); }); return; } // Classic consumer protocol join this.joinGroup(options, error => { if (error) { callback(error, undefined); return; } this.#performConsume(options, false, callback); }); return; } // Create the stream and start consuming const stream = new MessagesStream(this, options); this.#streams.add(stream); this.#metricActiveStreams?.inc(); stream.once('close', () => { this.#metricActiveStreams?.dec(); this.topics.untrackAll(...options.topics); this.#streams.delete(stream); }); callback(null, stream); } #performFindGroupCoordinator(callback) { this[kPerformDeduplicated]('findGroupCoordinator', deduplicateCallback => { this[kPerformWithRetry]('findGroupCoordinator', retryCallback => { this[kGetBootstrapConnection]((error, connection) => { if (error) { retryCallback(error, undefined); return; } this[kGetApi]('FindCoordinator', (error, api) => { if (error) { retryCallback(error, undefined); return; } api(connection, FindCoordinatorKeyTypes.GROUP, [this.groupId], retryCallback); }); }); }, (error, response) => { if (error) { deduplicateCallback(error, undefined); return; } const groupInfo = response.coordinators.find(coordinator => coordinator.key === this.groupId); this.#coordinatorId = groupInfo.nodeId; deduplicateCallback(null, this.#coordinatorId); }, 0); }, callback); } #performJoinGroup(options, callback) { if 
(!this.#membershipActive) { callback(null, undefined); return; } this.#cancelHeartbeat(); const protocols = []; for (const protocol of options.protocols) { protocols.push({ name: protocol.name, metadata: this.#encodeProtocolSubscriptionMetadata(protocol, this.topics.current) }); } this.#performDeduplicateGroupOperaton('joinGroup', (connection, groupCallback) => { this[kGetApi]('JoinGroup', (error, api) => { if (error) { groupCallback(error, undefined); return; } api(connection, this.groupId, options.sessionTimeout, options.rebalanceTimeout, this.memberId ?? '', null, 'consumer', protocols, '', groupCallback); }); }, (error, response) => { if (!this.#membershipActive) { callback(null, undefined); return; } if (error) { if (this.#getRejoinError(error)) { this.#performJoinGroup(options, callback); return; } callback(error, undefined); return; } // This is for Azure Event Hubs compatibility, which does not respond with an error on the first join this.memberId = response.memberId; this.generationId = response.generationId; this.#isLeader = response.leader === this.memberId; this.#protocol = response.protocolName; this.#members = new Map(); for (const member of response.members) { this.#members.set(member.memberId, this.#decodeProtocolSubscriptionMetadata(member.memberId, member.metadata)); } // Send a syncGroup request this.#syncGroup(options.partitionAssigner, (error, response) => { if (!this.#membershipActive) { callback(null, undefined); return; } if (error) { if (this.#getRejoinError(error)) { this.#performJoinGroup(options, callback); return; } callback(error, undefined); return; } this.assignments = response; this.#cancelHeartbeat(); this.#heartbeatInterval = setTimeout(() => { this.#heartbeat(options); }, options.heartbeatInterval); this.emitWithDebug('consumer', 'group:join', { groupId: this.groupId, memberId: this.memberId, generationId: this.generationId, isLeader: this.#isLeader, assignments: this.assignments }); callback(null, this.memberId); }); }); } #performLeaveGroup(force, callback) { if (!this.memberId) { callback(null); return; } // Remove streams that might have been exited in the meanwhile for (const stream of this.#streams) { if (stream.closed || stream.destroyed) { this.#streams.delete(stream); } } if (this.#streams.size) { if (!force) { callback(new UserError('Cannot leave group while consuming messages.')); return; } runConcurrentCallbacks('Closing streams failed.', this.#streams, (stream, concurrentCallback) => { stream.close(concurrentCallback); }, error => { if (error) { callback(error); return; } // All streams are closed, try the operation again without force this.#performLeaveGroup(false, callback); }); return; } this.#cancelHeartbeat(); this.#performDeduplicateGroupOperaton('leaveGroup', (connection, groupCallback) => { this[kGetApi]('LeaveGroup', (error, api) => { if (error) { groupCallback(error, undefined); return; } api(connection, this.groupId, [{ memberId: this.memberId }], groupCallback); }); }, error => { if (error) { const unknownMemberError = error.findBy?.('unknownMemberId', true); // This is to avoid throwing an error if a group join was cancelled. 
if (!unknownMemberError) { callback(error); return; } } this.emitWithDebug('consumer', 'group:leave', { groupId: this.groupId, memberId: this.memberId, generationId: this.generationId }); this.memberId = null; this.generationId = 0; this.assignments = null; callback(null); }); } #performSyncGroup(partitionsAssigner, assignments, callback) { if (!this.#membershipActive) { callback(null, []); return; } if (!Array.isArray(assignments)) { if (this.#isLeader) { // Get all the metadata for the topics the consumer are listening to, then compute the assignments const topicsSubscriptions = new Map(); for (const subscription of this.#members.values()) { for (const topic of subscription.topics) { let topicSubscriptions = topicsSubscriptions.get(topic); if (!topicSubscriptions) { topicSubscriptions = []; topicsSubscriptions.set(topic, topicSubscriptions); } topicSubscriptions.push(subscription); } } this[kMetadata]({ topics: Array.from(topicsSubscriptions.keys()) }, (error, metadata) => { if (error) { callback(this.#handleMetadataError(error), undefined); return; } this.#performSyncGroup(partitionsAssigner, this.#createAssignments(partitionsAssigner, metadata), callback); }); return; } else { // Non leader simply do not send any assignments and wait assignments = []; } } this.#performDeduplicateGroupOperaton('syncGroup', (connection, groupCallback) => { this[kGetApi]('SyncGroup', (error, api) => { if (error) { groupCallback(error, undefined); return; } api(connection, this.groupId, this.generationId, this.memberId, null, 'consumer', this.#protocol, assignments, groupCallback); }); }, (error, response) => { if (!this.#membershipActive) { callback(null, undefined); return; } if (error) { callback(error, undefined); return; } callback(error, this.#decodeProtocolAssignment(response.assignment)); }); } #performDeduplicateGroupOperaton(operationId, operation, callback) { return this[kPerformDeduplicated](operationId, deduplicateCallback => { this.#performGroupOperation(operationId, operation, deduplicateCallback); }, callback); } #performGroupOperation(operationId, operation, callback) { this.#findGroupCoordinator((error, coordinatorId) => { if (error) { callback(error, undefined); return; } this[kMetadata]({ topics: this.topics.current }, (error, metadata) => { if (error) { callback(this.#handleMetadataError(error), undefined); return; } this[kPerformWithRetry](operationId, retryCallback => { this[kGetConnection](metadata.brokers.get(coordinatorId), (error, connection) => { if (error) { retryCallback(error, undefined); return; } operation(connection, retryCallback); }); }, callback); }); }); } #validateGroupOptions(options, validator) { validator ??= groupOptionsValidator; const valid = validator(options); if (!valid) { throw new UserError(this[kFormatValidationErrors](validator, '/options')); } } /* The following two methods follow: https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolSubscription.json */ #encodeProtocolSubscriptionMetadata(metadata, topics) { return Writer.create() .appendInt16(metadata.version) .appendArray(topics, (w, t) => w.appendString(t, false), false, false) .appendBytes(typeof metadata.metadata === 'string' ? 
Buffer.from(metadata.metadata) : metadata.metadata, false) .buffer; } #decodeProtocolSubscriptionMetadata(memberId, buffer) { const reader = Reader.from(buffer); return { memberId, version: reader.readInt16(), topics: reader.readArray(r => r.readString(false), false, false), metadata: reader.readBytes(false) }; } /* The following two methods follow: https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolAssignment.json */ #encodeProtocolAssignment(assignments) { return Writer.create() .appendInt16(0) // Version information .appendArray(assignments, (w, { topic, partitions }) => { w.appendString(topic, false).appendArray(partitions, (w, a) => w.appendInt32(a), false, false); }, false, f