UNPKG

@loglayer/transport-log-file-rotation

Version:

Log file rotation transport for the LogLayer logging library.

368 lines (365 loc) 13.1 kB
//#region rolldown:runtime
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Copies own properties of `from` onto `to` as live getters (CJS -> ESM interop shim).
var __copyProps = (to, from, except, desc) => {
  if ((from && typeof from === "object") || typeof from === "function") {
    for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
      key = keys[i];
      if (!__hasOwnProp.call(to, key) && key !== except) {
        __defProp(to, key, {
          get: ((k) => from[k]).bind(null, key),
          enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable,
        });
      }
    }
  }
  return to;
};
// Wraps a CJS module so it can be consumed as if it were an ES module
// (adds a `default` export pointing at the module when needed).
var __toESM = (mod, isNodeMode, target) => (
  (target = mod != null ? __create(__getProtoOf(mod)) : {}),
  __copyProps(
    isNodeMode || !mod || !mod.__esModule
      ? __defProp(target, "default", { value: mod, enumerable: true })
      : target,
    mod
  )
);
//#endregion

let node_fs = require("node:fs");
let node_fs_promises = require("node:fs/promises");
let node_stream_promises = require("node:stream/promises");
let node_zlib = require("node:zlib");
let _loglayer_transport = require("@loglayer/transport");
let file_stream_rotator = require("file-stream-rotator");
file_stream_rotator = __toESM(file_stream_rotator);

//#region src/LogFileRotationTransport.ts
/**
 * A transport that writes logs to rotating files with support for time-based
 * and size-based rotation.
 *
 * Features:
 * - Automatic log file rotation based on time (hourly, daily) or size
 * - Date patterns in filenames using numerical values (YYYY, MM, DD, etc.)
 * - Size-based rotation with KB, MB, and GB units
 * - Gzip compression of rotated log files
 * - Maximum file count or age-based retention with automatic cleanup
 * - Batch processing of logs for improved performance
 * - Safe handling of process termination signals
 *
 * Each instance must have a unique filename to prevent race conditions.
 * If multiple loggers need to write to the same file, share one transport
 * instance between them.
 */
var LogFileRotationTransport = class LogFileRotationTransport extends _loglayer_transport.LoggerlessTransport {
  /** Registry of active filenames to prevent multiple transports writing to the same file. */
  static activeFilenames = /* @__PURE__ */ new Set();
  /** The current write stream for the log file. */
  stream;
  /** Custom field names for log entries. */
  fieldNames;
  /** Delimiter appended after each serialized log entry. */
  delimiter;
  /** Function that produces the timestamp value for each log entry. */
  timestampFn;
  /** Custom mapping from LogLayer levels to output values. */
  levelMap;
  /** Whether rotated files should be gzip-compressed. */
  compressOnRotate;
  /** True while a rotated file is being compressed (dispose waits on this). */
  isCompressing;
  /** The base filename pattern for log files. */
  filename;
  /** Static data merged into every log entry (object or factory function). */
  staticData;
  /** Whether batch processing is enabled. */
  batchEnabled;
  /** Maximum number of queued entries before a flush is forced. */
  batchSize;
  /** Maximum time in milliseconds before queued entries are flushed. */
  batchTimeout;
  /** Queue of serialized log entries awaiting a write. */
  batchQueue;
  /** Timer handle for the pending batch flush, if any. */
  batchTimer;
  /** Set once disposal begins; suppresses further scheduling/flushing hooks. */
  isDisposing;
  /** Event callbacks for the underlying file stream events. */
  callbacks;
  /** Rotation frequency (daily, hourly, etc.). */
  frequency;
  /** Whether file-stream-rotator verbose mode is enabled. */
  verbose;
  /** Date format used in filename patterns. */
  dateFormat;
  /** Size threshold that triggers rotation. */
  size;
  /** Maximum number of log files to keep. */
  maxLogs;
  /** Path to the rotation audit file. */
  auditFile;
  /** File extension for log files. */
  extension;
  /** Whether to maintain a symlink pointing at the current log. */
  createSymlink;
  /** Name of the symlink file. */
  symlinkName;
  /** Whether filenames use UTC time. */
  utc;
  /** Hash algorithm used for the audit file. */
  auditHashType;
  /** Extra options passed through to the file stream. */
  fileOptions;
  /** File mode applied when creating log files. */
  fileMode;

  /**
   * Builds the FileStreamRotator options object consistently for every place
   * that (re)creates the stream.
   * @returns FileStreamRotatorOptions object
   * @private
   */
  getRotatorOptions() {
    return {
      filename: this.filename,
      frequency: this.frequency,
      verbose: this.verbose ?? false,
      date_format: this.dateFormat,
      size: this.size,
      max_logs: this.maxLogs?.toString(),
      audit_file: this.auditFile || void 0,
      end_stream: true,
      extension: this.extension,
      create_symlink: this.createSymlink,
      symlink_name: this.symlinkName,
      utc: this.utc,
      audit_hash_type: this.auditHashType,
      file_options: {
        flags: "a",
        encoding: "utf8",
        // 0o640 === 416: owner read/write, group read.
        mode: this.fileMode ?? 416,
        ...this.fileOptions,
      },
    };
  }

  /**
   * Creates a new LogFileRotationTransport instance.
   * @param params - Configuration options for the transport
   * @throws {Error} If the filename is already in use by another transport instance
   */
  constructor(params) {
    super(params);
    if (LogFileRotationTransport.activeFilenames.has(params.filename)) {
      throw new Error(`LogFileRotationTransport: Filename "${params.filename}" is already in use by another instance. To use the same file for multiple loggers, share the same transport instance between them.`);
    }
    this.filename = params.filename;
    LogFileRotationTransport.activeFilenames.add(this.filename);
    this.fieldNames = {
      level: params.fieldNames?.level ?? "level",
      message: params.fieldNames?.message ?? "message",
      timestamp: params.fieldNames?.timestamp ?? "timestamp",
    };
    this.delimiter = params.delimiter ?? "\n";
    this.timestampFn = params.timestampFn ?? (() => new Date().toISOString());
    this.levelMap = params.levelMap ?? {};
    this.compressOnRotate = params.compressOnRotate ?? false;
    this.isCompressing = false;
    this.batchEnabled = !!params.batch;
    this.batchSize = params.batch?.size ?? 1000;
    this.batchTimeout = params.batch?.timeout ?? 5000;
    this.batchQueue = [];
    this.batchTimer = null;
    this.isDisposing = false;
    this.callbacks = params.callbacks;
    this.frequency = params.frequency;
    this.verbose = params.verbose;
    this.dateFormat = params.dateFormat;
    this.size = params.size;
    this.maxLogs = params.maxLogs;
    this.auditFile = params.auditFile;
    this.extension = params.extension;
    this.createSymlink = params.createSymlink;
    this.symlinkName = params.symlinkName;
    this.utc = params.utc;
    this.auditHashType = params.auditHashType;
    this.fileOptions = params.fileOptions;
    this.fileMode = params.fileMode;
    this.staticData = params.staticData;

    if (this.batchEnabled) {
      // Flush any queued entries when the event loop is about to drain.
      process.on("beforeExit", () => {
        if (!this.isDisposing) this.flush();
      });
      // On termination signals, write queued logs synchronously before
      // exiting with the conventional code (128 + signal number).
      const handleSignal = (signal) => {
        if (this.isDisposing) return;
        this.flushSync();
        LogFileRotationTransport.activeFilenames.delete(this.filename);
        process.exit(signal === "SIGINT" ? 130 : 143);
      };
      process.on("SIGINT", () => handleSignal("SIGINT"));
      process.on("SIGTERM", () => handleSignal("SIGTERM"));
    }

    // Without batching, the stream is opened eagerly; with batching it is
    // created lazily on the first flush.
    if (!this.batchEnabled) this.initStream(this.getRotatorOptions());
  }

  /**
   * Initializes the write stream and wires up event listeners.
   * Called immediately when batching is disabled, or lazily on the first
   * flush when batching is enabled.
   * @param options - Options for the file stream rotator
   * @private
   */
  initStream(options) {
    this.stream = file_stream_rotator.default.getStream(options);
    if (!this.callbacks) return;
    const { onRotate, onNew, onOpen, onClose, onError, onFinish, onLogRemoved } = this.callbacks;
    if (this.compressOnRotate) {
      // Compress the rotated-out file, remove the original, then notify.
      this.stream.on("rotate", async (oldFile, newFile) => {
        try {
          this.isCompressing = true;
          const compressedPath = await this.compressFile(oldFile);
          await node_fs_promises.unlink(oldFile);
          onRotate?.(compressedPath, newFile);
        } catch (error) {
          this.callbacks?.onError?.(error);
        } finally {
          this.isCompressing = false;
        }
      });
    } else if (onRotate) {
      this.stream.on("rotate", onRotate);
    }
    if (onNew) this.stream.on("new", onNew);
    if (onOpen) this.stream.on("open", onOpen);
    if (onClose) this.stream.on("close", onClose);
    if (onError) this.stream.on("error", onError);
    if (onFinish) this.stream.on("finish", onFinish);
    if (onLogRemoved) this.stream.on("logRemoved", onLogRemoved);
  }

  /**
   * Generates a unique path for a compressed log file.
   * If a file with the plain .gz name already exists, a timestamp and
   * counter are appended until an unused name is found.
   * @param filePath - The original log file path
   * @returns The unique path for the compressed file
   * @private
   */
  async getUniqueCompressedFilePath(filePath) {
    let candidate = `${filePath}.gz`;
    let counter = 0;
    try {
      for (;;) {
        try {
          // access() resolving means the candidate exists — pick another.
          await node_fs_promises.access(candidate);
          counter++;
          candidate = `${filePath}.${Date.now()}.${counter}.gz`;
        } catch {
          break;
        }
      }
    } catch (_error) {
      candidate = `${filePath}.${Date.now()}.gz`;
    }
    return candidate;
  }

  /**
   * Compresses a log file using gzip.
   * @param filePath - Path to the file to compress
   * @returns Path to the compressed file
   * @private
   */
  async compressFile(filePath) {
    const gzPath = await this.getUniqueCompressedFilePath(filePath);
    const gzip = node_zlib.createGzip();
    await node_stream_promises.pipeline(
      node_fs.createReadStream(filePath),
      gzip,
      node_fs.createWriteStream(gzPath)
    );
    return gzPath;
  }

  /**
   * Flushes queued log entries to disk asynchronously.
   * Used during normal batch-processing operation.
   * @private
   */
  flush() {
    if (!this.batchEnabled || this.batchQueue.length === 0) return;
    if (this.batchTimer) {
      clearTimeout(this.batchTimer);
      this.batchTimer = null;
    }
    if (!this.stream) this.initStream(this.getRotatorOptions());
    const batchContent = this.batchQueue.join("");
    this.stream.write(batchContent);
    this.batchQueue = [];
  }

  /**
   * Synchronously flushes queued logs to disk.
   * Used during process termination (SIGINT/SIGTERM) so that queued entries
   * hit the disk even during abrupt shutdown; bypasses the stream and
   * appends directly to the rotator's current file.
   * @private
   */
  flushSync() {
    if (!this.batchEnabled || this.batchQueue.length === 0) return;
    if (this.batchTimer) {
      clearTimeout(this.batchTimer);
      this.batchTimer = null;
    }
    if (!this.stream) this.initStream(this.getRotatorOptions());
    const batchContent = this.batchQueue.join("");
    const rotator = this.stream;
    if (rotator.currentFile) {
      node_fs.writeFileSync(rotator.currentFile, batchContent, { flag: "a" });
    }
    this.batchQueue = [];
  }

  /**
   * Schedules a batch flush after the configured timeout.
   * The timer is unref'd so it never keeps the process alive.
   * @private
   */
  scheduleBatchFlush() {
    if (this.batchTimer || this.isDisposing) return;
    this.batchTimer = setTimeout(() => {
      this.flush();
    }, this.batchTimeout);
    if (this.batchTimer.unref) this.batchTimer.unref();
  }

  /**
   * Processes and writes a log entry.
   * With batching enabled the entry is queued and written according to the
   * batch settings; otherwise it is written immediately.
   * @param params - The log entry parameters
   * @returns The original messages array
   */
  shipToLogger({ logLevel, messages, data, hasData }) {
    const logEntry = {
      [this.fieldNames.level]: this.levelMap[logLevel] ?? logLevel,
      [this.fieldNames.message]: messages.join(" ") || "",
      [this.fieldNames.timestamp]: this.timestampFn(),
      ...(this.staticData
        ? typeof this.staticData === "function"
          ? this.staticData()
          : this.staticData
        : {}),
      ...(hasData ? data : {}),
    };
    const logString = `${JSON.stringify(logEntry)}${this.delimiter}`;
    if (this.batchEnabled) {
      this.batchQueue.push(logString);
      if (this.batchQueue.length >= this.batchSize) {
        this.flush();
      } else {
        this.scheduleBatchFlush();
      }
    } else {
      this.stream.write(logString);
    }
    return messages;
  }

  /**
   * Disposes of the transport, cleaning up resources and flushing any
   * remaining logs. Steps:
   * 1. Prevents new batch flushes from being scheduled
   * 2. Cancels any pending batch flush
   * 3. Flushes any remaining logs
   * 4. Waits for any in-progress compression to complete
   * 5. Closes the write stream
   * 6. Removes the filename from the registry
   */
  [Symbol.dispose]() {
    if (!this.stream && !this.batchEnabled) return;
    this.isDisposing = true;
    if (this.batchTimer) {
      clearTimeout(this.batchTimer);
      this.batchTimer = null;
    }
    if (this.batchEnabled) this.flush();
    // Poll until compression finishes, then close and deregister.
    const checkAndEnd = () => {
      if (this.isCompressing) {
        setTimeout(checkAndEnd, 100);
        return;
      }
      if (this.stream) this.stream.end();
      LogFileRotationTransport.activeFilenames.delete(this.filename);
    };
    checkAndEnd();
  }
};
//#endregion

exports.LogFileRotationTransport = LogFileRotationTransport;
//# sourceMappingURL=index.cjs.map