@message-queue-toolkit/kafka
Kafka adapter for message-queue-toolkit
KafkaMessageBatchStream.js (JavaScript)
import { Duplex } from 'node:stream';
/**
 * Collects incoming messages into per-topic-partition batches and flushes a batch when it reaches the configured batchSize or when the flush timeout elapses.
*/
// biome-ignore lint/suspicious/noUnsafeDeclarationMerging: merging interface with class to add strong typing for 'data' event
export class KafkaMessageBatchStream extends Duplex {
batchSize; // maximum number of messages per flushed batch
timeout; // flush timeout in milliseconds
currentBatchPerTopicPartition; // pending messages, keyed by `topic:partition`
batchTimeoutPerTopicPartition; // pending flush timers, keyed by `topic:partition`
constructor(options) {
super({ objectMode: true });
this.batchSize = options.batchSize;
this.timeout = options.timeoutMilliseconds;
this.currentBatchPerTopicPartition = {};
this.batchTimeoutPerTopicPartition = {};
}
_read() {
// No-op, as we push data when we have a full batch or timeout
}
_write(message, _encoding, callback) {
const key = this.getTopicPartitionKey(message.topic, message.partition);
if (!this.currentBatchPerTopicPartition[key]) {
this.currentBatchPerTopicPartition[key] = [message];
}
else {
// biome-ignore lint/style/noNonNullAssertion: non-existing entry is handled above
this.currentBatchPerTopicPartition[key].push(message);
}
// biome-ignore lint/style/noNonNullAssertion: we ensure above that the array is defined
if (this.currentBatchPerTopicPartition[key].length >= this.batchSize) {
this.flushCurrentBatchMessages(message.topic, message.partition);
return callback(null);
}
if (!this.batchTimeoutPerTopicPartition[key]) {
this.batchTimeoutPerTopicPartition[key] = setTimeout(() => {
this.flushCurrentBatchMessages(message.topic, message.partition);
}, this.timeout);
}
callback(null);
}
// Write side is closed, flush the remaining messages
_final(callback) {
this.flushAllBatches();
this.push(null); // End readable side
callback();
}
flushAllBatches() {
for (const key of Object.keys(this.currentBatchPerTopicPartition)) {
const { topic, partition } = this.splitTopicPartitionKey(key);
this.flushCurrentBatchMessages(topic, partition);
}
}
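// Pushes the pending batch for a single topic-partition (if any) and clears its flush timer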
flushCurrentBatchMessages(topic, partition) {
const key = this.getTopicPartitionKey(topic, partition);
if (this.batchTimeoutPerTopicPartition[key]) {
clearTimeout(this.batchTimeoutPerTopicPartition[key]);
this.batchTimeoutPerTopicPartition[key] = undefined;
}
if (!this.currentBatchPerTopicPartition[key]?.length) {
return;
}
this.push({ topic, partition, messages: this.currentBatchPerTopicPartition[key] });
this.currentBatchPerTopicPartition[key] = [];
}
getTopicPartitionKey(topic, partition) {
return `${topic}:${partition}`;
}
splitTopicPartitionKey(key) {
const [topic, partition] = key.split(':');
if (!topic || !partition) {
throw new Error('Invalid topic-partition key format');
}
return { topic, partition: Number.parseInt(partition, 10) };
}
}
//# sourceMappingURL=KafkaMessageBatchStream.js.map
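Below is a minimal usage sketch, not taken from the package's documentation: it assumes the class is imported directly from this compiled file and is fed hand-constructed message objects. The stream only inspects topic and partition on each written message; everything else is passed through untouched in the emitted { topic, partition, messages } batches.

import { KafkaMessageBatchStream } from './KafkaMessageBatchStream.js';

// Batch up to 3 messages per topic-partition, or flush after 500 ms, whichever comes first.
const batchStream = new KafkaMessageBatchStream({
  batchSize: 3,
  timeoutMilliseconds: 500,
});

// Each chunk emitted on the readable side is { topic, partition, messages } for one topic-partition.
batchStream.on('data', (batch) => {
  console.log(`Flushed ${batch.messages.length} message(s) for ${batch.topic}:${batch.partition}`);
});

// Hypothetical message objects; a real consumer would write records coming from a Kafka client.
for (let offset = 0; offset < 7; offset++) {
  batchStream.write({ topic: 'orders', partition: 0, offset, value: `payload-${offset}` });
}

// Ending the writable side flushes any partially filled batch and ends the readable side.
batchStream.end();

With batchSize set to 3 and seven writes to the same topic-partition, this logs two full batches of three messages, followed by a final batch of one when end() triggers _final and the remaining messages are flushed.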