UNPKG

kfk

Version:

The high-level Node.js Kafka client, based on node-rdkafka.

93 lines 3.08 kB
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const Kafka = require("node-rdkafka");
const winston = require("winston");
const errors_1 = require("./errors");
const ErrorCode = Kafka.CODES.ERRORS;
const FLUSH_TIMEOUT = 10000; // ms

/**
 * Promise-based wrapper around a node-rdkafka Producer.
 *
 * Lifecycle flags:
 *  - dying: die() has been requested; no new messages should be accepted.
 *  - dead: disconnect has completed.
 *  - flushing: a flush() is currently in flight (used to coalesce concurrent flushes).
 */
class KafkaBasicProducer {
    /**
     * @param {object} conf      node-rdkafka global producer configuration
     * @param {object} topicConf node-rdkafka topic configuration
     * @param {object} options   { debug?: boolean } — debug=true raises log level to 'debug'
     */
    constructor(conf, topicConf = {}, options = {}) {
        this.dying = false;
        this.dead = false;
        this.flushing = false;
        this.client = new Kafka.Producer(conf, topicConf);
        this.debug = options.debug === undefined ? false : options.debug;
        this.logger = winston.createLogger({
            level: this.debug ? 'debug' : 'info',
            format: winston.format.simple(),
            transports: [new winston.transports.Console()],
        });
    }
    /**
     * Disconnect the underlying client.
     * @returns {Promise<*>} resolves with the disconnect callback data.
     */
    disconnect() {
        return new Promise((resolve, reject) => {
            this.client.disconnect((err, data) => {
                if (err) {
                    // BUG FIX: previously fell through after reject(err) and still
                    // logged 'producer disconnect' and called resolve(data).
                    reject(err);
                    return;
                }
                this.logger.info('producer disconnect');
                resolve(data);
            });
        });
    }
    /**
     * Flush all locally queued messages.
     * If a flush is already in progress this resolves immediately (undefined)
     * without waiting — preserved from the original behavior.
     * @param {number} timeout flush timeout in ms
     */
    async flush(timeout = FLUSH_TIMEOUT) {
        if (this.flushing) {
            return;
        }
        this.flushing = true;
        return new Promise((resolve, reject) => {
            this.client.flush(timeout, (err) => {
                this.flushing = false;
                if (err) {
                    reject(err);
                    return; // BUG FIX: do not also call resolve() after rejecting
                }
                resolve();
            });
        });
    }
    /**
     * Connect to the brokers.
     * @param {object} metadataOptions passed through to node-rdkafka connect()
     * @returns {Promise<*>} resolves with broker metadata.
     */
    connect(metadataOptions = {}) {
        return new Promise((resolve, reject) => {
            this.client.connect(metadataOptions, (err, data) => {
                if (err) {
                    reject(err);
                    return; // BUG FIX: guard so resolve(data) is not reached on error
                }
                resolve(data);
            });
        });
    }
    /** Graceful shutdown: mark dying, disconnect, then mark dead. */
    async die() {
        this.dying = true;
        await this.disconnect();
        this.dead = true;
        this.logger.info('producer graceful died');
    }
}
exports.KafkaBasicProducer = KafkaBasicProducer;

class KafkaProducer extends KafkaBasicProducer {
    /** Flush outstanding messages before death; resolves true on success. */
    async gracefulDead() {
        await this.flush(FLUSH_TIMEOUT);
        return true;
    }
    /**
     * Produce a single message.
     * @param {string} topic
     * @param {number} partition
     * @param {string|Buffer} message  converted with Buffer.from()
     * @param {*} key
     * @param {number} [timestamp]     defaults to Date.now() when falsy (preserved behavior)
     * @param {*} [opaque]
     * @returns {Promise<void>}
     * @throws {errors_1.ConnectionDeadError} (as rejection) when dying/dead
     */
    async produce(topic, partition, message, key, timestamp, opaque) {
        return new Promise((resolve, reject) => {
            if (this.dying || this.dead) {
                reject(new errors_1.ConnectionDeadError('Connection has been dead or is dying'));
                return; // BUG FIX: previously still attempted to produce after rejecting
            }
            try {
                // node-rdkafka produce() is synchronous; local errors throw.
                this.client.produce(topic, partition, Buffer.from(message), key, timestamp || Date.now(), opaque);
                resolve();
            }
            catch (err) {
                if (err.code === ErrorCode.ERR__QUEUE_FULL) {
                    // Local queue full: flush queued messages, then settle.
                    // BUG FIX: propagate flush failures instead of leaving an
                    // unhandled promise rejection (.then() had no error handler).
                    this.flush(FLUSH_TIMEOUT).then(resolve, reject);
                    return;
                }
                reject(err);
            }
        });
    }
}
exports.KafkaProducer = KafkaProducer;
//# sourceMappingURL=producer.js.map