UNPKG

@socket.io/postgres-adapter

Version:

The Socket.IO Postgres adapter, which allows broadcasting events between several Socket.IO servers

212 lines (211 loc) 7.6 kB
// Compiled CommonJS build of @socket.io/postgres-adapter.
// Broadcasts Socket.IO packets between servers through Postgres LISTEN/NOTIFY,
// spilling oversized or binary payloads into an attachments table.
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.PostgresAdapter = exports.createAdapter = void 0;
const msgpack_1 = require("@msgpack/msgpack");
const socket_io_adapter_1 = require("socket.io-adapter");
const debug = require("debug")("socket.io-postgres-adapter");
// Recursively detects binary content (ArrayBuffer / TypedArray / Buffer) anywhere
// inside `obj`. `toJSON` is an internal recursion flag: after expanding an object's
// toJSON() once, it prevents expanding it again (guards against infinite recursion
// when toJSON() returns an object that itself has toJSON).
const hasBinary = (obj, toJSON) => {
    if (!obj || typeof obj !== "object") {
        return false;
    }
    // Buffer is a Uint8Array, so ArrayBuffer.isView covers Node Buffers too.
    if (obj instanceof ArrayBuffer || ArrayBuffer.isView(obj)) {
        return true;
    }
    if (Array.isArray(obj)) {
        for (let i = 0, l = obj.length; i < l; i++) {
            if (hasBinary(obj[i])) {
                return true;
            }
        }
        return false;
    }
    for (const key in obj) {
        if (Object.prototype.hasOwnProperty.call(obj, key) && hasBinary(obj[key])) {
            return true;
        }
    }
    // Objects with a custom JSON representation are checked through that
    // representation, since that is what would actually be serialized.
    if (obj.toJSON && typeof obj.toJSON === "function" && !toJSON) {
        return hasBinary(obj.toJSON(), true);
    }
    return false;
};
// By default, errors are only surfaced through the debug log, never thrown.
const defaultErrorHandler = (err) => debug(err);
/**
 * Returns a function that will create a PostgresAdapter instance.
 *
 * All adapters created by the returned factory share a single dedicated
 * pooled client used for LISTEN, plus one periodic cleanup timer for the
 * attachments table.
 *
 * @param pool - a pg.Pool instance
 * @param opts - additional options
 *
 * @public
 */
function createAdapter(pool, opts = {}) {
    const errorHandler = opts.errorHandler || defaultErrorHandler;
    const tableName = opts.tableName || "socket_io_attachments";
    const cleanupInterval = opts.cleanupInterval || 30000;
    // One adapter per namespace; keyed by its NOTIFY channel name.
    const channelToAdapters = new Map();
    let isConnectionInProgress = false;
    // Dedicated client checked out of the pool solely to LISTEN for notifications.
    let client;
    let cleanupTimer;
    const scheduleReconnection = () => {
        // Randomized delay (1s-3s) to avoid reconnection stampedes across servers.
        const reconnectionDelay = Math.floor(2000 * (0.5 + Math.random()));
        setTimeout(initClient, reconnectionDelay);
    };
    // Checks out a client, re-subscribes every known channel, and wires
    // notification/error/end handlers. Any failure schedules a retry.
    const initClient = async () => {
        try {
            debug("fetching client from the pool");
            client = await pool.connect();
            isConnectionInProgress = false;
            for (const [channel] of channelToAdapters) {
                debug("client listening to %s", channel);
                await client.query(`LISTEN "${channel}"`);
            }
            client.on("notification", async (msg) => {
                var _a;
                try {
                    // Transpiled optional chaining: dispatch to the adapter for this
                    // channel, if one is still registered.
                    await ((_a = channelToAdapters.get(msg.channel)) === null || _a === void 0 ? void 0 : _a.onEvent(msg.payload));
                }
                catch (err) {
                    errorHandler(err);
                }
            });
            client.on("error", () => {
                // "end" fires afterwards and handles reconnection, so only log here.
                debug("client error");
            });
            client.on("end", () => {
                debug("client was closed, scheduling reconnection...");
                scheduleReconnection();
            });
        }
        catch (err) {
            errorHandler(err);
            debug("error while initializing client, scheduling reconnection...");
            scheduleReconnection();
        }
    };
    // Periodically deletes attachments older than cleanupInterval; reschedules
    // itself after each run (setTimeout chain rather than setInterval).
    const scheduleCleanup = () => {
        cleanupTimer = setTimeout(async () => {
            try {
                await pool.query(`DELETE FROM ${tableName} WHERE created_at < now() - interval '${cleanupInterval} milliseconds'`);
            }
            catch (err) {
                errorHandler(err);
            }
            scheduleCleanup();
        }, cleanupInterval);
    };
    return function (nsp) {
        let adapter = new PostgresAdapter(nsp, pool, opts);
        channelToAdapters.set(adapter.channel, adapter);
        if (isConnectionInProgress) {
            // nothing to do
        }
        else if (client) {
            // Shared client already connected: just LISTEN on the new channel.
            debug("client listening to %s", adapter.channel);
            client.query(`LISTEN "${adapter.channel}"`).catch(errorHandler);
        }
        else {
            // First adapter: lazily start the shared client and the cleanup loop.
            isConnectionInProgress = true;
            initClient();
            scheduleCleanup();
        }
        // Wrap close() so the last adapter to close tears down the shared
        // client and the cleanup timer.
        const defaultClose = adapter.close;
        adapter.close = () => {
            channelToAdapters.delete(adapter.channel);
            if (channelToAdapters.size === 0) {
                if (client) {
                    // Drop the "end" listener first so releasing does not trigger
                    // the reconnection path.
                    client.removeAllListeners("end");
                    client.release();
                    client = null;
                }
                if (cleanupTimer) {
                    clearTimeout(cleanupTimer);
                }
            }
            defaultClose.call(adapter);
        };
        return adapter;
    };
}
exports.createAdapter = createAdapter;
class PostgresAdapter extends socket_io_adapter_1.ClusterAdapterWithHeartbeat {
    /**
     * Adapter constructor.
     *
     * @param nsp - the namespace
     * @param pool - a pg.Pool instance
     * @param opts - additional options
     *
     * @public
     */
    constructor(nsp, pool, opts = {}) {
        super(nsp, opts);
        this.pool = pool;
        const channelPrefix = opts.channelPrefix || "socket.io";
        // One NOTIFY channel per namespace, e.g. "socket.io#/".
        this.channel = `${channelPrefix}#${nsp.name}`;
        this.tableName = opts.tableName || "socket_io_attachments";
        // Max JSON payload size sent inline via pg_notify; presumably chosen to
        // stay under Postgres's ~8000-byte NOTIFY payload limit — confirm.
        this.payloadThreshold = opts.payloadThreshold || 8000;
        this.errorHandler = opts.errorHandler || defaultErrorHandler;
    }
    // Handles a raw NOTIFY payload for this adapter's channel. Messages from
    // this very server (same uid) are ignored; messages carrying an
    // attachmentId are resolved by fetching the msgpack-encoded body from the
    // attachments table before dispatching.
    async onEvent(event) {
        let document = JSON.parse(event);
        if (document.uid === this.uid) {
            return debug("ignore message from self");
        }
        if (document.attachmentId) {
            const result = await this.pool.query(`SELECT payload FROM ${this.tableName} WHERE id = $1`, [document.attachmentId]);
            document = (0, msgpack_1.decode)(result.rows[0].payload);
        }
        this.onMessage(document);
    }
    // ClusterAdapterWithHeartbeat hook: publish a message to the other servers.
    doPublish(message) {
        return this._publish(message).then(() => {
            // connection state recovery is not currently supported
            return "";
        });
    }
    // ClusterAdapterWithHeartbeat hook: publish a response to a specific request.
    doPublishResponse(requesterUid, response) {
        return this._publish(response);
    }
    // Sends `document` to the channel. Falls back to the attachment path when
    // the message may contain binary (for the listed user-payload message
    // types) or when the JSON form exceeds payloadThreshold. Errors are routed
    // to errorHandler rather than thrown.
    async _publish(document) {
        document.uid = this.uid;
        try {
            if ([
                socket_io_adapter_1.MessageType.BROADCAST,
                socket_io_adapter_1.MessageType.BROADCAST_ACK,
                socket_io_adapter_1.MessageType.SERVER_SIDE_EMIT,
                socket_io_adapter_1.MessageType.SERVER_SIDE_EMIT_RESPONSE,
            ].includes(document.type) && hasBinary(document)) {
                return await this.publishWithAttachment(document);
            }
            const payload = JSON.stringify(document);
            if (Buffer.byteLength(payload) > this.payloadThreshold) {
                return await this.publishWithAttachment(document);
            }
            debug("sending event of type %s to channel %s", document.type, this.channel);
            await this.pool.query(`SELECT pg_notify($1, $2)`, [
                this.channel,
                payload,
            ]);
        }
        catch (err) {
            this.errorHandler(err);
        }
    }
    // Stores the msgpack-encoded document in the attachments table, then
    // notifies the channel with a small JSON header referencing the row id.
    async publishWithAttachment(document) {
        const payload = (0, msgpack_1.encode)(document);
        debug("sending event of type %s with attachment to channel %s", document.type, this.channel);
        const result = await this.pool.query(`INSERT INTO ${this.tableName} (payload) VALUES ($1) RETURNING id;`, [payload]);
        const attachmentId = result.rows[0].id;
        const headerPayload = JSON.stringify({
            uid: document.uid,
            type: document.type,
            attachmentId,
        });
        await this.pool.query(`SELECT pg_notify($1, $2)`, [
            this.channel,
            headerPayload,
        ]);
    }
}
exports.PostgresAdapter = PostgresAdapter;