UNPKG mirror of @loglayer/transport-log-file-rotation — a log file rotation transport for the LogLayer logging library. (Bundled dist file: 374 lines, 12.2 kB; version not captured in this snapshot.)
// src/LogFileRotationTransport.ts
import { createReadStream, createWriteStream, writeFileSync } from "fs";
import { access, unlink } from "fs/promises";
import { pipeline } from "stream/promises";
import { createGzip } from "zlib";
import { LoggerlessTransport } from "@loglayer/transport";
import FileStreamRotator from "file-stream-rotator";

/**
 * A LogLayer transport that writes newline-delimited JSON log entries to a
 * rotating log file (rotation handled by file-stream-rotator), with optional
 * gzip compression of rotated files and optional in-memory batching.
 */
var LogFileRotationTransport = class _LogFileRotationTransport extends LoggerlessTransport {
  /** Registry of active filenames to prevent multiple transports writing to the same file */
  static activeFilenames = /* @__PURE__ */ new Set();
  /** The current write stream for the log file */
  stream;
  /** Custom field names for log entries */
  fieldNames;
  /** Delimiter between log entries */
  delimiter;
  /** Function to generate timestamps for log entries */
  timestampFn;
  /** Custom mapping for log levels */
  levelMap;
  /** Whether to compress rotated files */
  compressOnRotate;
  /** Whether a file is currently being compressed */
  isCompressing;
  /** The base filename pattern for log files */
  filename;
  /** Static data to be included in every log entry */
  staticData;
  /** Whether batch processing is enabled */
  batchEnabled;
  /** Maximum number of log entries to queue before writing */
  batchSize;
  /** Maximum time in milliseconds to wait before writing queued logs */
  batchTimeout;
  /** Queue of log entries waiting to be written */
  batchQueue;
  /** Timer for batch flush timeout */
  batchTimer;
  /** Whether the transport is being disposed */
  isDisposing;
  /** Event callbacks for various file stream events */
  callbacks;
  /** Frequency of rotation (daily, hourly, etc.) */
  frequency;
  /** Whether to enable verbose mode */
  verbose;
  /** Date format for filename patterns */
  dateFormat;
  /** Size threshold for rotation */
  size;
  /** Maximum number of log files to keep */
  maxLogs;
  /** Path to the audit file */
  auditFile;
  /** File extension for log files */
  extension;
  /** Whether to create a symlink to current log */
  createSymlink;
  /** Name of the symlink file */
  symlinkName;
  /** Whether to use UTC time in filenames */
  utc;
  /** Hash algorithm for audit file */
  auditHashType;
  /** Options for file streams */
  fileOptions;
  /** File mode to be used when creating log files */
  fileMode;
  /**
   * Generates the options for FileStreamRotator consistently across the transport
   * @returns FileStreamRotatorOptions object
   * @private
   */
  getRotatorOptions() {
    return {
      filename: this.filename,
      frequency: this.frequency,
      verbose: this.verbose ?? false,
      date_format: this.dateFormat,
      size: this.size,
      max_logs: this.maxLogs?.toString(),
      audit_file: this.auditFile || void 0,
      end_stream: true,
      extension: this.extension,
      create_symlink: this.createSymlink,
      symlink_name: this.symlinkName,
      utc: this.utc,
      audit_hash_type: this.auditHashType,
      file_options: {
        flags: "a",
        encoding: "utf8",
        // 416 decimal === 0o640 (owner rw, group r)
        mode: this.fileMode ?? 416,
        ...this.fileOptions
      }
    };
  }
  /**
   * Creates a new LogFileRotationTransport instance.
   * @param params - Configuration options for the transport
   * @throws {Error} If the filename is already in use by another transport instance
   */
  constructor(params) {
    super(params);
    if (_LogFileRotationTransport.activeFilenames.has(params.filename)) {
      throw new Error(
        `LogFileRotationTransport: Filename "${params.filename}" is already in use by another instance. To use the same file for multiple loggers, share the same transport instance between them.`
      );
    }
    this.filename = params.filename;
    _LogFileRotationTransport.activeFilenames.add(this.filename);
    this.fieldNames = {
      level: params.fieldNames?.level ?? "level",
      message: params.fieldNames?.message ?? "message",
      timestamp: params.fieldNames?.timestamp ?? "timestamp"
    };
    this.delimiter = params.delimiter ?? "\n";
    this.timestampFn = params.timestampFn ?? (() => (/* @__PURE__ */ new Date()).toISOString());
    this.levelMap = params.levelMap ?? {};
    this.compressOnRotate = params.compressOnRotate ?? false;
    this.isCompressing = false;
    this.batchEnabled = !!params.batch;
    this.batchSize = params.batch?.size ?? 1e3;
    this.batchTimeout = params.batch?.timeout ?? 5e3;
    this.batchQueue = [];
    this.batchTimer = null;
    this.isDisposing = false;
    this.callbacks = params.callbacks;
    this.frequency = params.frequency;
    this.verbose = params.verbose;
    this.dateFormat = params.dateFormat;
    this.size = params.size;
    this.maxLogs = params.maxLogs;
    this.auditFile = params.auditFile;
    this.extension = params.extension;
    this.createSymlink = params.createSymlink;
    this.symlinkName = params.symlinkName;
    this.utc = params.utc;
    this.auditHashType = params.auditHashType;
    this.fileOptions = params.fileOptions;
    this.fileMode = params.fileMode;
    this.staticData = params.staticData;
    if (this.batchEnabled) {
      // Flush any queued entries when the event loop is about to drain.
      process.on("beforeExit", () => {
        if (!this.isDisposing) {
          this.flush();
        }
      });
      // On SIGINT/SIGTERM, write queued entries synchronously before exiting
      // with the conventional 128+signal exit codes (130 / 143).
      const handleSignal = (signal) => {
        if (!this.isDisposing) {
          this.flushSync();
          _LogFileRotationTransport.activeFilenames.delete(this.filename);
          process.exit(signal === "SIGINT" ? 130 : 143);
        }
      };
      process.on("SIGINT", () => handleSignal("SIGINT"));
      process.on("SIGTERM", () => handleSignal("SIGTERM"));
    }
    // Without batching, open the stream eagerly; with batching it is opened
    // lazily on the first flush.
    if (!this.batchEnabled) {
      this.initStream(this.getRotatorOptions());
    }
  }
  /**
   * Initializes the write stream and sets up event listeners.
   * This is called either immediately if batching is disabled,
   * or lazily when the first batch needs to be written if batching is enabled.
   * @param options - Options for the file stream rotator
   * @private
   */
  initStream(options) {
    this.stream = FileStreamRotator.getStream(options);
    // FIX: destructure from `this.callbacks ?? {}` and register the
    // compression handler unconditionally. Previously all of this was guarded
    // by `if (this.callbacks)`, so `compressOnRotate: true` without any
    // callbacks silently skipped gzipping (and deleting) rotated files.
    const { onRotate, onNew, onOpen, onClose, onError, onFinish, onLogRemoved } = this.callbacks ?? {};
    if (this.compressOnRotate) {
      this.stream.on("rotate", async (oldFile, newFile) => {
        try {
          this.isCompressing = true;
          const compressedPath = await this.compressFile(oldFile);
          // The uncompressed original is removed once the .gz copy exists.
          await unlink(oldFile);
          onRotate?.(compressedPath, newFile);
        } catch (error) {
          this.callbacks?.onError?.(error);
        } finally {
          this.isCompressing = false;
        }
      });
    } else if (onRotate) {
      this.stream.on("rotate", onRotate);
    }
    if (onNew) {
      this.stream.on("new", onNew);
    }
    if (onOpen) {
      this.stream.on("open", onOpen);
    }
    if (onClose) {
      this.stream.on("close", onClose);
    }
    if (onError) {
      this.stream.on("error", onError);
    }
    if (onFinish) {
      this.stream.on("finish", onFinish);
    }
    if (onLogRemoved) {
      this.stream.on("logRemoved", onLogRemoved);
    }
  }
  /**
   * Generates a unique path for a compressed log file.
   * If a file with .gz extension already exists, appends timestamp and counter.
   * @param filePath - The original log file path
   * @returns The unique path for the compressed file
   * @private
   */
  async getUniqueCompressedFilePath(filePath) {
    let finalPath = `${filePath}.gz`;
    let counter = 0;
    try {
      while (true) {
        try {
          // access() resolves if the candidate already exists -> try another name
          await access(finalPath);
          counter++;
          finalPath = `${filePath}.${Date.now()}.${counter}.gz`;
        } catch {
          // access() rejected: the name is free
          break;
        }
      }
    } catch (_error) {
      // Defensive fallback; the loop above is not expected to throw.
      finalPath = `${filePath}.${Date.now()}.gz`;
    }
    return finalPath;
  }
  /**
   * Compresses a log file using gzip.
   * @param filePath - Path to the file to compress
   * @returns Path to the compressed file
   * @private
   */
  async compressFile(filePath) {
    const gzPath = await this.getUniqueCompressedFilePath(filePath);
    const gzip = createGzip();
    const source = createReadStream(filePath);
    const destination = createWriteStream(gzPath);
    await pipeline(source, gzip, destination);
    return gzPath;
  }
  /**
   * Flushes queued log entries to disk asynchronously.
   * This is used for normal batch processing operations.
   * @private
   */
  flush() {
    if (!this.batchEnabled || this.batchQueue.length === 0) {
      return;
    }
    if (this.batchTimer) {
      clearTimeout(this.batchTimer);
      this.batchTimer = null;
    }
    if (!this.stream) {
      // Lazy stream creation: first flush in batch mode opens the file.
      this.initStream(this.getRotatorOptions());
    }
    const batchContent = this.batchQueue.join("");
    this.stream.write(batchContent);
    this.batchQueue = [];
  }
  /**
   * Synchronously flush logs to disk.
   * This is used during process termination (SIGINT/SIGTERM) to ensure logs are written
   * before the process exits. This method uses synchronous file I/O to guarantee that
   * logs are written even during abrupt process termination.
   * @private
   */
  flushSync() {
    if (!this.batchEnabled || this.batchQueue.length === 0) {
      return;
    }
    if (this.batchTimer) {
      clearTimeout(this.batchTimer);
      this.batchTimer = null;
    }
    if (!this.stream) {
      this.initStream(this.getRotatorOptions());
    }
    const batchContent = this.batchQueue.join("");
    const rotator = this.stream;
    // Bypass the async stream and append directly to the current file so the
    // data hits disk before process.exit().
    if (rotator.currentFile) {
      writeFileSync(rotator.currentFile, batchContent, { flag: "a" });
    }
    this.batchQueue = [];
  }
  /**
   * Schedules a batch flush operation.
   * This creates a timer that will flush the batch after the configured timeout.
   * The timer is unref'd to prevent keeping the process alive.
   * @private
   */
  scheduleBatchFlush() {
    if (!this.batchTimer && !this.isDisposing) {
      this.batchTimer = setTimeout(() => {
        this.flush();
      }, this.batchTimeout);
      if (this.batchTimer.unref) {
        this.batchTimer.unref();
      }
    }
  }
  /**
   * Processes and writes a log entry.
   * If batching is enabled, the entry is queued and written based on batch settings.
   * If batching is disabled, the entry is written immediately.
   * @param params - The log entry parameters
   * @returns The original messages array
   */
  shipToLogger({ logLevel, messages, data, hasData }) {
    const logEntry = {
      [this.fieldNames.level]: this.levelMap[logLevel] ?? logLevel,
      [this.fieldNames.message]: messages.join(" ") || "",
      [this.fieldNames.timestamp]: this.timestampFn(),
      // staticData may be a plain object or a factory function
      ...this.staticData ? typeof this.staticData === "function" ? this.staticData() : this.staticData : {},
      ...hasData ? data : {}
    };
    const logString = `${JSON.stringify(logEntry)}${this.delimiter}`;
    if (this.batchEnabled) {
      this.batchQueue.push(logString);
      if (this.batchQueue.length >= this.batchSize) {
        this.flush();
      } else {
        this.scheduleBatchFlush();
      }
    } else {
      this.stream.write(logString);
    }
    return messages;
  }
  /**
   * Disposes of the transport, cleaning up resources and flushing any remaining logs.
   * This method:
   * 1. Prevents new batch flushes from being scheduled
   * 2. Cancels any pending batch flush
   * 3. Flushes any remaining logs
   * 4. Waits for any in-progress compression to complete
   * 5. Closes the write stream
   * 6. Removes the filename from the registry
   */
  [Symbol.dispose]() {
    if (this.stream || this.batchEnabled) {
      this.isDisposing = true;
      if (this.batchTimer) {
        clearTimeout(this.batchTimer);
        this.batchTimer = null;
      }
      if (this.batchEnabled) {
        this.flush();
      }
      // Poll until any in-flight gzip finishes, then end the stream and
      // release the filename for reuse.
      const checkAndEnd = () => {
        if (!this.isCompressing) {
          if (this.stream) {
            this.stream.end();
          }
          _LogFileRotationTransport.activeFilenames.delete(this.filename);
        } else {
          setTimeout(checkAndEnd, 100);
        }
      };
      checkAndEnd();
    }
  }
};
export { LogFileRotationTransport };
//# sourceMappingURL=index.js.map