kfk
The high-level Node.js Kafka client based on node-rdkafka.
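A minimal usage sketch for the at-least-once consumer defined in the source below. The broker address, group id, and topic are placeholders, and it assumes the package entry point re-exports these consumer classes:

'use strict';
const { KafkaALOConsumer } = require('kfk'); // assumes kfk re-exports the consumer classes

async function main() {
  const consumer = new KafkaALOConsumer(
    {
      'group.id': 'example-group',              // placeholder group id
      'metadata.broker.list': 'localhost:9092', // placeholder broker list
    },
    {},
    { debug: false },
  );

  await consumer.connect();
  await consumer.subscribe(['example-topic']);  // placeholder topic

  // Offsets are stored only after every callback in the batch has succeeded.
  await consumer.consume(
    message => {
      console.log(message.value.toString());
    },
    { size: 100, concurrency: 10 },
  );

  await consumer.die();
}

main().catch(console.error);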
consumer.js (JavaScript)
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const Kafka = require("node-rdkafka");
const _ = require("lodash");
const bluebird = require("bluebird");
const winston = require("winston");
const errors_1 = require("./errors");
const DEFAULT_CONSUME_SIZE = 100;
const DEFAULT_CONCURRENT = 100;
const DEFAULT_AUTO_COMMIT_INTERVAL = 1000; // ms
const ErrorCode = Kafka.CODES.ERRORS;
class KafkaBasicConsumer {
constructor(conf, topicConf = {}, options = {}) {
this.dying = false;
this.dead = false;
this.topics = [];
conf['auto.commit.interval.ms'] =
conf['auto.commit.interval.ms'] || DEFAULT_AUTO_COMMIT_INTERVAL;
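// Default rebalance callback: assign newly granted partitions and unassign revoked ones.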
if (!conf['rebalance_cb']) {
conf['rebalance_cb'] = (err, assignment) => {
if (err.code === ErrorCode.ERR__ASSIGN_PARTITIONS) {
this.consumer.assign(assignment);
let rebalanceLog = 'consumer rebalance : ';
for (const assign of assignment) {
rebalanceLog += `{topic ${assign.topic}, partition: ${assign.partition}} `;
}
this.logger.info(rebalanceLog);
}
else if (err.code === ErrorCode.ERR__REVOKE_PARTITIONS) {
this.consumer.unassign();
}
else {
this.logger.error(err);
}
};
}
this.consumer = new Kafka.KafkaConsumer(conf, topicConf);
this.debug = options.debug === undefined ? false : options.debug;
this.logger = winston.createLogger({
level: this.debug ? 'debug' : 'info',
format: winston.format.simple(),
transports: [new winston.transports.Console()],
});
this.logger.debug(`debug mode : ${this.debug}`);
}
disconnect() {
return new Promise((resolve, reject) => {
return this.consumer.disconnect((err, data) => {
if (err) {
return reject(err);
}
this.logger.info('consumer disconnect');
resolve(data);
});
});
}
async connect(metadataOptions = {}) {
return new Promise((resolve, reject) => {
this.consumer.connect(metadataOptions, (err, data) => {
if (err) {
return reject(err);
}
resolve(data);
});
});
}
async die() {
this.dying = true;
// empty topics and unsubscribe them
this.unsubscribe();
// disconnect from brokers
await this.disconnect();
this.dead = true;
this.logger.info('consumer died');
}
async subscribe(topics) {
this.topics = _.uniq(_.concat(topics, this.topics));
// subscribe() on the underlying consumer is synchronous
this.consumer.subscribe(this.topics);
}
unsubscribe() {
this.topics.length = 0;
this.consumer.unsubscribe();
}
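// Store the given offsets in the client's local offset store; the auto-commit timer later commits them to Kafka.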
offsetsStore(topicPartitions) {
if (topicPartitions.length) {
return this.consumer.offsetsStore(topicPartitions);
}
}
fetch(size) {
// This will keep going until it gets ERR__PARTITION_EOF or ERR__TIMED_OUT
return new Promise((resolve, reject) => {
return this.consumer.consume(size, (err, messages) => {
if (err) {
return reject(err);
}
resolve(messages);
});
});
}
}
exports.KafkaBasicConsumer = KafkaBasicConsumer;
// `At Least Once` Consumer
class KafkaALOConsumer extends KafkaBasicConsumer {
constructor(conf, topicConf = {}, options = {}) {
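// At-least-once semantics: offsets are auto-committed, but only stored after the message callbacks have succeeded (see consume()).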
conf['enable.auto.commit'] = true;
conf['enable.auto.offset.store'] = false;
super(conf, topicConf, options);
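// Holds the messages of a batch whose callback threw, so they are re-delivered on the next consume() call.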
this.legacyMessages = null;
}
async consume(cb, options = {}) {
// default option value
options.size = options.size || DEFAULT_CONSUME_SIZE;
options.concurrency = options.concurrency || DEFAULT_CONCURRENT;
const topicPartitionMap = {};
if (this.dying || this.dead) {
throw new errors_1.ConnectionDeadError('Connection has been dead or is dying');
}
const messages = this.legacyMessages || (await this.fetch(options.size));
if (this.debug) {
this.logger.debug(`fetched ${messages.length} messages`);
}
// record the latest offset seen for each topic/partition (scan from the newest message backwards)
for (let i = messages.length - 1; i >= 0; i -= 1) {
const message = messages[i];
const key = `${message.topic}:${message.partition}`;
if (topicPartitionMap[key] === undefined) {
topicPartitionMap[key] = {
topic: message.topic,
partition: message.partition,
offset: message.offset,
};
}
}
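// Without a callback, store the latest offsets immediately and hand the raw messages back to the caller.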
if (!cb) {
this.offsetsStore(_.values(topicPartitionMap));
this.legacyMessages = null;
return messages;
}
try {
const results = await bluebird.map(messages, async (message) => {
const ret = await bluebird.resolve(cb(message));
return ret;
}, { concurrency: options.concurrency });
this.offsetsStore(_.values(topicPartitionMap));
this.legacyMessages = null;
return results;
}
catch (e) {
this.legacyMessages = messages;
throw e;
}
}
}
exports.KafkaALOConsumer = KafkaALOConsumer;
// `At Most Once` Consumer
class KafkaAMOConsumer extends KafkaBasicConsumer {
constructor(conf, topicConf = {}, options = {}) {
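// At-most-once semantics: offsets are stored and auto-committed as soon as messages are fetched, regardless of whether the callback succeeds.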
conf['enable.auto.commit'] = true;
conf['enable.auto.offset.store'] = true;
super(conf, topicConf, options);
}
async consume(cb, options = {}) {
// default option value
options.size = options.size || DEFAULT_CONSUME_SIZE;
options.concurrency = options.concurrency || DEFAULT_CONCURRENT;
return new Promise((resolve, reject) => {
// This will keep going until it gets ERR__PARTITION_EOF or ERR__TIMED_OUT
return this.consumer.consume(options.size, async (err, messages) => {
if (this.dying || this.dead) {
return reject(new errors_1.ConnectionDeadError('Connection has been dead or is dying'));
}
if (err) {
return reject(err);
}
try {
const results = await bluebird.map(messages, async (message) => {
return await Promise.resolve(cb(message));
}, { concurrency: options.concurrency });
resolve(results);
}
catch (err) {
reject(err);
}
});
});
}
}
exports.KafkaAMOConsumer = KafkaAMOConsumer;
//# sourceMappingURL=consumer.js.map