@pulzar/core
Next-generation Node.js framework for ultra-fast web applications with zero-reflection DI, GraphQL, WebSockets, events, and edge runtime support
import { EventError } from "../types";
import { logger } from "../../utils/logger";
export class KafkaEventAdapter {
name = "kafka";
version = "1.0.0";
capabilities = {
persistence: true,
clustering: true,
partitioning: true,
consumerGroups: true,
deadLetterQueue: false, // Kafka doesn't have built-in DLQ
exactly_once: true,
at_least_once: true,
ordering: true,
wildcards: false,
replay: true,
backpressure: true,
};
kafka;
producer;
consumer;
admin;
config;
connected = false;
subscriptions = new Map();
stats = {
published: 0,
delivered: 0,
acknowledged: 0,
failed: 0,
retries: 0,
dlqSize: 0,
activeSubscriptions: 0,
throughputPerSecond: 0,
averageLatency: 0,
errorRate: 0,
backpressureEvents: 0,
lastActivity: new Date(),
};
constructor(config) {
if (!config.brokers || config.brokers.length === 0) {
throw new EventError("Kafka brokers are required", "INVALID_CONFIG");
}
this.config = {
clientId: config.clientId || "pulzar-event-bus",
brokers: config.brokers,
ssl: config.ssl || false,
...(config.sasl && { sasl: config.sasl }),
connectionTimeout: config.connectionTimeout || 1000,
requestTimeout: config.requestTimeout || 30000,
retry: {
retries: 8,
initialRetryTime: 300,
maxRetryTime: 30000,
...config.retry,
},
producer: {
maxInFlightRequests: 1,
idempotent: true,
transactionTimeout: 30000,
allowAutoTopicCreation: true,
...config.producer,
},
consumer: {
groupId: config.consumer?.groupId || "pulzar-consumers",
sessionTimeout: 30000,
rebalanceTimeout: 60000,
heartbeatInterval: 3000,
maxBytesPerPartition: 1048576, // 1MB
maxBytes: 52428800, // 50MB
allowAutoTopicCreation: true,
maxWaitTimeInMs: 5000,
fromBeginning: false,
...config.consumer,
},
admin: {
timeout: 30000,
...config.admin,
},
};
}
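// Illustrative construction (broker addresses and ids are placeholders): only
// `brokers` is required; every other field falls back to the defaults merged
// in the constructor above.
//
//   const adapter = new KafkaEventAdapter({
//     brokers: ["localhost:9092"],
//     clientId: "orders-service",
//     consumer: { groupId: "orders-consumers" },
//   });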
/**
* Connect to Kafka cluster
*/
async connect() {
if (this.connected) {
return;
}
try {
// Try to dynamically import KafkaJS
const kafkaModule = await this.importKafka();
if (!kafkaModule) {
throw new EventError("KafkaJS package not installed. Run: npm install kafkajs", "KAFKA_NOT_INSTALLED");
}
// Create Kafka client
this.kafka = new kafkaModule.Kafka({
clientId: this.config.clientId,
brokers: this.config.brokers,
ssl: this.config.ssl,
...(this.config.sasl && { sasl: this.config.sasl }),
connectionTimeout: this.config.connectionTimeout,
requestTimeout: this.config.requestTimeout,
retry: this.config.retry,
});
// Create producer
this.producer = this.kafka.producer({
maxInFlightRequests: this.config.producer.maxInFlightRequests,
idempotent: this.config.producer.idempotent,
transactionTimeout: this.config.producer.transactionTimeout,
allowAutoTopicCreation: this.config.producer.allowAutoTopicCreation,
});
// Create consumer
this.consumer = this.kafka.consumer({
groupId: this.config.consumer.groupId,
sessionTimeout: this.config.consumer.sessionTimeout,
rebalanceTimeout: this.config.consumer.rebalanceTimeout,
heartbeatInterval: this.config.consumer.heartbeatInterval,
maxBytesPerPartition: this.config.consumer.maxBytesPerPartition,
maxBytes: this.config.consumer.maxBytes,
allowAutoTopicCreation: this.config.consumer.allowAutoTopicCreation,
maxWaitTimeInMs: this.config.consumer.maxWaitTimeInMs,
});
// Create admin client
this.admin = this.kafka.admin({
timeout: this.config.admin.timeout,
});
// Connect all clients
await Promise.all([
this.producer.connect(),
this.consumer.connect(),
this.admin.connect(),
]);
// Setup event handlers
this.setupEventHandlers();
this.connected = true;
logger.info("Kafka adapter connected", {
brokers: this.config.brokers,
clientId: this.config.clientId,
groupId: this.config.consumer.groupId,
});
}
catch (error) {
logger.error("Failed to connect to Kafka", { error });
throw new EventError(`Kafka connection failed: ${error.message}`, "CONNECTION_FAILED", undefined, error);
}
}
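// Note: kafkajs is resolved lazily via importKafka() below, so it behaves like
// an optional peer dependency — install it (npm install kafkajs) before calling
// connect(). Illustrative order of operations:
//
//   await adapter.connect();
//   // ... publish / subscribe ...
//   await adapter.disconnect();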
/**
* Disconnect from Kafka
*/
async disconnect() {
if (!this.connected) {
return;
}
try {
// Stop consumer first
if (this.consumer) {
await this.consumer.stop();
await this.consumer.disconnect();
}
// Disconnect producer
if (this.producer) {
await this.producer.disconnect();
}
// Disconnect admin
if (this.admin) {
await this.admin.disconnect();
}
this.connected = false;
this.subscriptions.clear();
this.resetStats();
logger.info("Kafka adapter disconnected");
}
catch (error) {
logger.error("Error disconnecting from Kafka", { error });
throw error;
}
}
/**
* Check if connected
*/
isConnected() {
return this.connected;
}
/**
* Publish event to Kafka topic
*/
async publish(subject, event) {
if (!this.connected) {
throw new EventError("Kafka not connected", "NOT_CONNECTED");
}
const startTime = Date.now();
try {
// Ensure topic exists
await this.ensureTopic(subject);
const message = {
key: event.metadata.userId || event.id,
value: this.serialize(event),
headers: this.createHeaders(event),
timestamp: new Date(event.metadata.timestamp).getTime().toString(),
};
const result = await this.producer.send({
topic: subject,
messages: [message],
});
const recordMetadata = result[0];
this.updateStats("published", Date.now() - startTime);
return {
messageId: event.id,
partition: recordMetadata.partition?.toString(),
offset: recordMetadata.offset?.toString(),
timestamp: new Date().toISOString(),
};
}
catch (error) {
this.updateStats("failed");
logger.error("Failed to publish to Kafka", { subject, error });
throw new EventError(`Kafka publish failed: ${error.message}`, "PUBLISH_FAILED", event, error);
}
}
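// Illustrative publish call. The field names below are the ones this adapter
// actually reads (id, headers, metadata.timestamp / userId); the `data` payload
// is a hypothetical example — the whole event object is serialized as-is and
// its exact envelope type lives in "../types".
//
//   await adapter.publish("orders.created", {
//     id: "evt-123",
//     headers: { "content-type": "application/json" },
//     metadata: { timestamp: new Date().toISOString(), userId: "user-42" },
//     data: { orderId: "ord-1", total: 99.5 },
//   });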
/**
* Subscribe to Kafka topic
*/
async subscribe(subject, handler, options = {}) {
if (!this.connected) {
throw new EventError("Kafka not connected", "NOT_CONNECTED");
}
try {
const subscriptionId = this.generateId();
// Create consumer for this subscription if using different group
let consumer = this.consumer;
if (options.consumerGroup &&
options.consumerGroup !== this.config.consumer.groupId) {
consumer = this.kafka.consumer({
...this.config.consumer,
groupId: options.consumerGroup,
});
await consumer.connect();
}
// Subscribe to topic
await consumer.subscribe({
topic: subject,
fromBeginning: options.deliverPolicy === "all" || this.config.consumer.fromBeginning,
});
// Setup message handler
const messageHandler = {
eachMessage: async ({ topic, partition, message }) => {
await this.handleMessage(message, handler, options, topic, partition);
},
};
// Start consuming
await consumer.run(messageHandler);
const handle = {
id: subscriptionId,
subject,
active: true,
consumerGroup: options.consumerGroup || this.config.consumer.groupId,
createdAt: new Date(),
unsubscribe: async () => {
await this.unsubscribe(handle);
},
};
this.subscriptions.set(subscriptionId, {
consumer,
handle,
isCustomConsumer: consumer !== this.consumer,
});
this.stats.activeSubscriptions = this.subscriptions.size;
logger.debug("Kafka subscription created", {
subject,
subscriptionId,
consumerGroup: options.consumerGroup || this.config.consumer.groupId,
});
return handle;
}
catch (error) {
logger.error("Failed to subscribe to Kafka", { subject, error });
throw new EventError(`Kafka subscribe failed: ${error.message}`, "SUBSCRIBE_FAILED", undefined, error);
}
}
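// Illustrative subscription (sketch): a dedicated consumer group and an async
// filter, both optional. Handler and filter signatures follow handleMessage()
// below.
//
//   const handle = await adapter.subscribe(
//     "orders.created",
//     async (event) => { console.log("received", event.id); },
//     {
//       consumerGroup: "billing-consumers",
//       filter: async (event) => event.metadata.userId !== undefined,
//     }
//   );
//   // later: await handle.unsubscribe();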
/**
* Unsubscribe from Kafka topic
*/
async unsubscribe(handle) {
const subscription = this.subscriptions.get(handle.id);
if (!subscription) {
return;
}
try {
// Stop the consumer if it's a custom one
if (subscription.isCustomConsumer) {
await subscription.consumer.stop();
await subscription.consumer.disconnect();
}
else {
// For shared consumer, we can't easily unsubscribe from specific topics
// This is a limitation of KafkaJS - need to stop and restart consumer
logger.warn("Cannot unsubscribe from specific topic on shared consumer", {
subscriptionId: handle.id,
subject: handle.subject,
});
}
this.subscriptions.delete(handle.id);
this.stats.activeSubscriptions = this.subscriptions.size;
logger.debug("Kafka subscription removed", { subscriptionId: handle.id });
}
catch (error) {
logger.error("Failed to unsubscribe from Kafka", {
subscriptionId: handle.id,
error,
});
throw error;
}
}
/**
* Acknowledge message (commit offset)
*/
async ack(event) {
// Kafka automatically commits offsets based on consumer configuration
// Manual commit would require storing message metadata
this.stats.acknowledged++;
}
/**
* Negative acknowledge (seek back to retry)
*/
async nack(event, requeue = false) {
// Kafka doesn't have explicit NACK
// Would need to seek back to previous offset to reprocess
logger.warn("Kafka NACK not implemented - message will not be reprocessed", {
eventId: event.id,
requeue,
});
}
/**
* Flush producer
*/
async flush() {
// KafkaJS producers do not expose a flush() method (send() resolves once the
// brokers acknowledge the batch), so only call flush when the underlying
// client actually provides one.
if (this.producer && typeof this.producer.flush === "function") {
await this.producer.flush();
}
}
/**
* Get adapter statistics
*/
async getStats() {
// Get additional Kafka metrics if available
try {
if (this.admin) {
// Could fetch cluster info, topic metadata, consumer lag, etc.
// const metadata = await this.admin.fetchTopicMetadata();
}
}
catch (error) {
logger.debug("Failed to fetch Kafka metrics", { error });
}
return { ...this.stats };
}
/**
* Health check
*/
async healthCheck() {
const checks = [
{
name: "connection",
status: this.isConnected() ? "pass" : "fail",
message: this.isConnected()
? "Connected to Kafka"
: "Not connected to Kafka",
},
];
// Check broker connectivity
if (this.admin) {
try {
const startTime = Date.now();
await this.admin.listTopics();
const duration = Date.now() - startTime;
checks.push({
name: "broker_connectivity",
status: duration < 5000 ? "pass" : "warn",
message: `Broker response time: ${duration}ms`,
duration,
});
}
catch (error) {
checks.push({
name: "broker_connectivity",
status: "fail",
message: `Broker error: ${error.message}`,
});
}
}
// Check producer health
if (this.producer) {
// KafkaJS exposes no direct producer health probe; the best we can report is
// that the producer client was created and connected by connect().
checks.push({
name: "producer",
status: "pass",
message: "Producer client is initialized",
});
}
const hasFailures = checks.some((c) => c.status === "fail");
const hasWarnings = checks.some((c) => c.status === "warn");
return {
status: hasFailures ? "unhealthy" : hasWarnings ? "degraded" : "healthy",
checks,
timestamp: new Date(),
};
}
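// Illustrative use of the health report (sketch): map the aggregate status to
// an HTTP status code in a readiness endpoint.
//
//   const report = await adapter.healthCheck();
//   const httpStatus = report.status === "healthy" ? 200 : 503;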
/**
* Handle incoming Kafka message
*/
async handleMessage(message, handler, options, topic, partition) {
const startTime = Date.now();
try {
const event = this.deserialize(message.value, message, topic, partition);
// Apply filter if provided
if (options.filter) {
const matches = await options.filter(event);
if (!matches) {
return; // Skip filtered messages
}
}
await handler(event);
this.updateStats("delivered", Date.now() - startTime);
}
catch (error) {
this.updateStats("failed");
logger.error("Failed to handle Kafka message", {
topic,
partition,
offset: message.offset,
error,
});
throw error;
}
}
/**
* Ensure topic exists
*/
async ensureTopic(topic) {
if (!this.admin) {
return;
}
try {
const metadata = await this.admin.fetchTopicMetadata({ topics: [topic] });
const topicExists = metadata.topics.some((t) => t.name === topic);
if (!topicExists) {
const topicConfig = {
topic,
numPartitions: 3,
replicationFactor: 1,
configEntries: [
{ name: "cleanup.policy", value: "delete" },
{ name: "retention.ms", value: "604800000" }, // 7 days
],
};
await this.admin.createTopics({
topics: [topicConfig],
});
logger.info("Created Kafka topic", { topic });
}
}
catch (error) {
logger.debug("Failed to create topic (may already exist)", {
topic,
error,
});
}
}
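// Note: the auto-created topic defaults above (3 partitions, replication
// factor 1, 7-day retention) suit local development; production clusters
// typically pre-provision topics with appropriate replication and disable
// auto-creation.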
/**
* Create Kafka headers
*/
createHeaders(event) {
const headers = {};
// Copy event headers
for (const [key, value] of Object.entries(event.headers)) {
headers[key] = Buffer.from(String(value));
}
// Add metadata as headers
if (event.metadata.correlationId) {
headers["X-Correlation-ID"] = Buffer.from(event.metadata.correlationId);
}
if (event.metadata.traceId) {
headers["X-Trace-ID"] = Buffer.from(event.metadata.traceId);
}
if (event.metadata.userId) {
headers["X-User-ID"] = Buffer.from(event.metadata.userId);
}
return headers;
}
/**
* Serialize event for Kafka
*/
serialize(event) {
const payload = JSON.stringify(event);
return Buffer.from(payload, "utf8");
}
/**
* Deserialize Kafka message
*/
deserialize(data, message, topic, partition) {
const payload = data.toString("utf8");
const event = JSON.parse(payload);
// Add internal fields
event._adapter = this.name;
event._partition = partition.toString();
event._offset = message.offset;
// Extract headers
if (message.headers) {
for (const [key, value] of Object.entries(message.headers)) {
if (Buffer.isBuffer(value)) {
event.headers[key] = value.toString("utf8");
}
}
}
return event;
}
/**
* Setup event handlers
*/
setupEventHandlers() {
// Producer events
if (this.producer) {
this.producer.on("producer.connect", () => {
logger.debug("Kafka producer connected");
});
this.producer.on("producer.disconnect", () => {
logger.warn("Kafka producer disconnected");
});
this.producer.on("producer.network.request_timeout", (payload) => {
logger.warn("Kafka producer request timeout", payload);
});
}
// Consumer events
if (this.consumer) {
this.consumer.on("consumer.connect", () => {
logger.debug("Kafka consumer connected");
});
this.consumer.on("consumer.disconnect", () => {
logger.warn("Kafka consumer disconnected");
});
this.consumer.on("consumer.group_join", (payload) => {
logger.info("Kafka consumer joined group", payload);
});
this.consumer.on("consumer.rebalancing", (payload) => {
logger.info("Kafka consumer rebalancing", payload);
});
this.consumer.on("consumer.crash", (payload) => {
logger.error("Kafka consumer crashed", payload);
});
}
}
/**
* Try to import KafkaJS package
*/
async importKafka() {
try {
return await import("kafkajs");
}
catch (error) {
logger.warn("KafkaJS package not available", { error });
return null;
}
}
/**
* Update statistics
*/
updateStats(type, latency) {
this.stats[type]++;
this.stats.lastActivity = new Date();
if (latency !== undefined) {
// Update average latency (running average of the previous value and the new sample)
this.stats.averageLatency = (this.stats.averageLatency + latency) / 2;
}
// Calculate error rate
const total = this.stats.published + this.stats.delivered;
this.stats.errorRate = total > 0 ? this.stats.failed / total : 0;
}
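// Worked example of the error-rate calculation above: with published = 90,
// delivered = 8 and failed = 2, total = 98 and errorRate = 2 / 98 ≈ 0.02.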
/**
* Reset statistics
*/
resetStats() {
this.stats = {
published: 0,
delivered: 0,
acknowledged: 0,
failed: 0,
retries: 0,
dlqSize: 0,
activeSubscriptions: 0,
throughputPerSecond: 0,
averageLatency: 0,
errorRate: 0,
backpressureEvents: 0,
lastActivity: new Date(),
};
}
/**
* Generate unique ID
*/
generateId() {
return `kafka-${Date.now()}-${Math.random().toString(36).slice(2)}`;
}
}
export default KafkaEventAdapter;
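// End-to-end sketch (illustrative names and import path; nothing here is
// executed by this module):
//
//   import KafkaEventAdapter from "./adapters/kafka";
//
//   const adapter = new KafkaEventAdapter({ brokers: ["localhost:9092"] });
//   await adapter.connect();
//   await adapter.subscribe("user.signed-up", async (event) => {
//     // handle the event, then record the acknowledgement in the stats
//     await adapter.ack(event);
//   });
//   await adapter.publish("user.signed-up", {
//     id: "evt-1",
//     headers: {},
//     metadata: { timestamp: new Date().toISOString() },
//   });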
//# sourceMappingURL=kafka.js.map