@loglayer/transport-log-file-rotation
Version:
Log file rotation transport for the LogLayer logging library.
374 lines (343 loc) • 13.8 kB
JavaScript
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });

/** Wraps a CommonJS module so it can be consumed like an ES-module default export. */
function _interopRequireDefault(obj) {
  return obj && obj.__esModule ? obj : { default: obj };
}

/** Runtime helper for the `??` operator: returns lhs unless it is null/undefined. */
function _nullishCoalesce(lhs, rhsFn) {
  return lhs != null ? lhs : rhsFn();
}

/**
 * Runtime helper for optional chaining (`a?.b`, `a?.b()`).
 * Walks the flat [value, op, accessor, op, accessor, ...] list produced by the
 * transpiler, short-circuiting to undefined as soon as an optional step sees
 * null/undefined. For (optional) calls, `lastAccessLHS` preserves the correct
 * `this` binding of the method being invoked.
 */
function _optionalChain(ops) {
  let lastAccessLHS = undefined;
  let value = ops[0];
  let i = 1;
  while (i < ops.length) {
    const op = ops[i];
    const fn = ops[i + 1];
    i += 2;
    if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) {
      return undefined;
    }
    if (op === 'access' || op === 'optionalAccess') {
      lastAccessLHS = value;
      value = fn(value);
    } else if (op === 'call' || op === 'optionalCall') {
      value = fn((...args) => value.call(lastAccessLHS, ...args));
      lastAccessLHS = undefined;
    }
  }
  return value;
}

var _class; // src/LogFileRotationTransport.ts
var _fs = require('fs');
var _promises = require('fs/promises');
var _promises3 = require('stream/promises');
var _zlib = require('zlib');
var _transport = require('@loglayer/transport');
var _filestreamrotator = require('file-stream-rotator'); var _filestreamrotator2 = _interopRequireDefault(_filestreamrotator);
// NOTE: Transpiled (Sucrase) output of src/LogFileRotationTransport.ts.
// _nullishCoalesce(x, f) encodes the `??` operator and _optionalChain([...])
// encodes `?.` / `?.()` from the original TypeScript source.
var LogFileRotationTransport = (_class = class _LogFileRotationTransport extends _transport.LoggerlessTransport {
/** Registry of active filenames to prevent multiple transports writing to the same file */
static __initStatic() {this.activeFilenames = /* @__PURE__ */ new Set()}
/** The current write stream for the log file */
/** Custom field names for log entries */
/** Delimiter between log entries */
/** Function to generate timestamps for log entries */
/** Custom mapping for log levels */
/** Whether to compress rotated files */
/** Whether a file is currently being compressed */
/** The base filename pattern for log files */
/** Static data to be included in every log entry */
/** Whether batch processing is enabled */
/** Maximum number of log entries to queue before writing */
/** Maximum time in milliseconds to wait before writing queued logs */
/** Queue of log entries waiting to be written */
/** Timer for batch flush timeout */
/** Whether the transport is being disposed */
/** Event callbacks for various file stream events */
/** Frequency of rotation (daily, hourly, etc.) */
/** Whether to enable verbose mode */
/** Date format for filename patterns */
/** Size threshold for rotation */
/** Maximum number of log files to keep */
/** Path to the audit file */
/** File extension for log files */
/** Whether to create a symlink to current log */
/** Name of the symlink file */
/** Whether to use UTC time in filenames */
/** Hash algorithm for audit file */
/** Options for file streams */
/** File mode to be used when creating log files */
/**
* Generates the options for FileStreamRotator consistently across the transport
* @returns FileStreamRotatorOptions object
* @private
*/
getRotatorOptions() {
return {
filename: this.filename,
frequency: this.frequency,
verbose: _nullishCoalesce(this.verbose, () => ( false)),
date_format: this.dateFormat,
size: this.size,
// maxLogs may be numeric; file-stream-rotator expects a string here.
max_logs: _optionalChain([this, 'access', _ => _.maxLogs, 'optionalAccess', _2 => _2.toString, 'call', _3 => _3()]),
// `||` (not `??`) is deliberate: an empty-string auditFile also becomes undefined.
audit_file: this.auditFile || void 0,
end_stream: true,
extension: this.extension,
create_symlink: this.createSymlink,
symlink_name: this.symlinkName,
utc: this.utc,
audit_hash_type: this.auditHashType,
file_options: {
flags: "a",
encoding: "utf8",
// 416 decimal === 0o640 (owner rw, group r). Spread of fileOptions below may override.
mode: _nullishCoalesce(this.fileMode, () => ( 416)),
...this.fileOptions
}
};
}
/**
* Creates a new LogFileRotationTransport instance.
* @param params - Configuration options for the transport
* @throws {Error} If the filename is already in use by another transport instance
*/
constructor(params) {
super(params);
if (_LogFileRotationTransport.activeFilenames.has(params.filename)) {
throw new Error(
`LogFileRotationTransport: Filename "${params.filename}" is already in use by another instance. To use the same file for multiple loggers, share the same transport instance between them.`
);
}
this.filename = params.filename;
_LogFileRotationTransport.activeFilenames.add(this.filename);
this.fieldNames = {
level: _nullishCoalesce(_optionalChain([params, 'access', _4 => _4.fieldNames, 'optionalAccess', _5 => _5.level]), () => ( "level")),
message: _nullishCoalesce(_optionalChain([params, 'access', _6 => _6.fieldNames, 'optionalAccess', _7 => _7.message]), () => ( "message")),
timestamp: _nullishCoalesce(_optionalChain([params, 'access', _8 => _8.fieldNames, 'optionalAccess', _9 => _9.timestamp]), () => ( "timestamp"))
};
this.delimiter = _nullishCoalesce(params.delimiter, () => ( "\n"));
// Default timestamp is ISO-8601 in UTC.
this.timestampFn = _nullishCoalesce(params.timestampFn, () => ( (() => (/* @__PURE__ */ new Date()).toISOString())));
this.levelMap = _nullishCoalesce(params.levelMap, () => ( {}));
this.compressOnRotate = _nullishCoalesce(params.compressOnRotate, () => ( false));
this.isCompressing = false;
this.batchEnabled = !!params.batch;
// Batch defaults: flush after 1000 entries or 5000 ms, whichever comes first.
this.batchSize = _nullishCoalesce(_optionalChain([params, 'access', _10 => _10.batch, 'optionalAccess', _11 => _11.size]), () => ( 1e3));
this.batchTimeout = _nullishCoalesce(_optionalChain([params, 'access', _12 => _12.batch, 'optionalAccess', _13 => _13.timeout]), () => ( 5e3));
this.batchQueue = [];
this.batchTimer = null;
this.isDisposing = false;
this.callbacks = params.callbacks;
this.frequency = params.frequency;
this.verbose = params.verbose;
this.dateFormat = params.dateFormat;
this.size = params.size;
this.maxLogs = params.maxLogs;
this.auditFile = params.auditFile;
this.extension = params.extension;
this.createSymlink = params.createSymlink;
this.symlinkName = params.symlinkName;
this.utc = params.utc;
this.auditHashType = params.auditHashType;
this.fileOptions = params.fileOptions;
this.fileMode = params.fileMode;
this.staticData = params.staticData;
if (this.batchEnabled) {
// Best-effort async flush when the event loop drains normally.
process.on("beforeExit", () => {
if (!this.isDisposing) {
this.flush();
}
});
// On SIGINT/SIGTERM flush synchronously, then exit with the conventional
// 128 + signal-number codes (SIGINT=2 -> 130, SIGTERM=15 -> 143).
// NOTE(review): process.exit here preempts any other registered signal handlers.
const handleSignal = (signal) => {
if (!this.isDisposing) {
this.flushSync();
_LogFileRotationTransport.activeFilenames.delete(this.filename);
process.exit(signal === "SIGINT" ? 130 : 143);
}
};
process.on("SIGINT", () => handleSignal("SIGINT"));
process.on("SIGTERM", () => handleSignal("SIGTERM"));
}
if (!this.batchEnabled) {
this.initStream(this.getRotatorOptions());
}
}
/**
* Initializes the write stream and sets up event listeners.
* This is called either immediately if batching is disabled,
* or lazily when the first batch needs to be written if batching is enabled.
* @param options - Options for the file stream rotator
* @private
*/
initStream(options) {
this.stream = _filestreamrotator2.default.getStream(options);
if (this.callbacks) {
const { onRotate, onNew, onOpen, onClose, onError, onFinish, onLogRemoved } = this.callbacks;
if (this.compressOnRotate) {
// When compression is enabled, wrap the rotate event: gzip the rotated
// file, delete the uncompressed original, then report the .gz path.
this.stream.on("rotate", async (oldFile, newFile) => {
try {
this.isCompressing = true;
const compressedPath = await this.compressFile(oldFile);
await _promises.unlink.call(void 0, oldFile);
_optionalChain([onRotate, 'optionalCall', _14 => _14(compressedPath, newFile)]);
} catch (error) {
_optionalChain([this, 'access', _15 => _15.callbacks, 'optionalAccess', _16 => _16.onError, 'optionalCall', _17 => _17(error)]);
} finally {
this.isCompressing = false;
}
});
} else if (onRotate) {
this.stream.on("rotate", onRotate);
}
if (onNew) {
this.stream.on("new", onNew);
}
if (onOpen) {
this.stream.on("open", onOpen);
}
if (onClose) {
this.stream.on("close", onClose);
}
if (onError) {
this.stream.on("error", onError);
}
if (onFinish) {
this.stream.on("finish", onFinish);
}
if (onLogRemoved) {
this.stream.on("logRemoved", onLogRemoved);
}
}
}
/**
* Generates a unique path for a compressed log file.
* If a file with .gz extension already exists, appends timestamp and counter.
* @param filePath - The original log file path
* @returns The unique path for the compressed file
* @private
*/
async getUniqueCompressedFilePath(filePath) {
let finalPath = `${filePath}.gz`;
let counter = 0;
try {
// fs.promises.access resolves if the path exists; keep probing with a
// timestamp+counter suffix until we find a name that is free (access throws).
while (true) {
try {
await _promises.access.call(void 0, finalPath);
counter++;
finalPath = `${filePath}.${Date.now()}.${counter}.gz`;
} catch (e) {
break;
}
}
} catch (_error) {
finalPath = `${filePath}.${Date.now()}.gz`;
}
return finalPath;
}
/**
* Compresses a log file using gzip.
* @param filePath - Path to the file to compress
* @returns Path to the compressed file
* @private
*/
async compressFile(filePath) {
const gzPath = await this.getUniqueCompressedFilePath(filePath);
const gzip = _zlib.createGzip.call(void 0, );
const source = _fs.createReadStream.call(void 0, filePath);
const destination = _fs.createWriteStream.call(void 0, gzPath);
// stream/promises pipeline propagates errors from any stage and cleans up.
await _promises3.pipeline.call(void 0, source, gzip, destination);
return gzPath;
}
/**
* Flushes queued log entries to disk asynchronously.
* This is used for normal batch processing operations.
* @private
*/
flush() {
if (!this.batchEnabled || this.batchQueue.length === 0) {
return;
}
if (this.batchTimer) {
clearTimeout(this.batchTimer);
this.batchTimer = null;
}
// Stream is created lazily for batched transports; first flush opens it.
if (!this.stream) {
this.initStream(this.getRotatorOptions());
}
const batchContent = this.batchQueue.join("");
this.stream.write(batchContent);
this.batchQueue = [];
}
/**
* Synchronously flush logs to disk.
* This is used during process termination (SIGINT/SIGTERM) to ensure logs are written
* before the process exits. This method uses synchronous file I/O to guarantee that
* logs are written even during abrupt process termination.
* @private
*/
flushSync() {
if (!this.batchEnabled || this.batchQueue.length === 0) {
return;
}
if (this.batchTimer) {
clearTimeout(this.batchTimer);
this.batchTimer = null;
}
if (!this.stream) {
this.initStream(this.getRotatorOptions());
}
const batchContent = this.batchQueue.join("");
// Bypass the async stream and append with sync I/O so the data survives
// process.exit(). NOTE(review): this relies on file-stream-rotator exposing a
// `currentFile` property; if it is unset the queued entries are silently
// dropped below — confirm against file-stream-rotator's API.
const rotator = this.stream;
if (rotator.currentFile) {
_fs.writeFileSync.call(void 0, rotator.currentFile, batchContent, { flag: "a" });
}
this.batchQueue = [];
}
/**
* Schedules a batch flush operation.
* This creates a timer that will flush the batch after the configured timeout.
* The timer is unref'd to prevent keeping the process alive.
* @private
*/
scheduleBatchFlush() {
if (!this.batchTimer && !this.isDisposing) {
this.batchTimer = setTimeout(() => {
this.flush();
}, this.batchTimeout);
// unref (when available) so a pending flush timer cannot keep the process alive.
if (this.batchTimer.unref) {
this.batchTimer.unref();
}
}
}
/**
* Processes and writes a log entry.
* If batching is enabled, the entry is queued and written based on batch settings.
* If batching is disabled, the entry is written immediately.
* @param params - The log entry parameters
* @returns The original messages array
*/
shipToLogger({ logLevel, messages, data, hasData }) {
// Later spreads win: staticData and per-call data can override the
// level/message/timestamp fields if they share key names.
const logEntry = {
[this.fieldNames.level]: _nullishCoalesce(this.levelMap[logLevel], () => ( logLevel)),
[this.fieldNames.message]: messages.join(" ") || "",
[this.fieldNames.timestamp]: this.timestampFn(),
...this.staticData ? typeof this.staticData === "function" ? this.staticData() : this.staticData : {},
...hasData ? data : {}
};
const logString = `${JSON.stringify(logEntry)}${this.delimiter}`;
if (this.batchEnabled) {
this.batchQueue.push(logString);
if (this.batchQueue.length >= this.batchSize) {
this.flush();
} else {
this.scheduleBatchFlush();
}
} else {
this.stream.write(logString);
}
return messages;
}
/**
* Disposes of the transport, cleaning up resources and flushing any remaining logs.
* This method:
* 1. Prevents new batch flushes from being scheduled
* 2. Cancels any pending batch flush
* 3. Flushes any remaining logs
* 4. Waits for any in-progress compression to complete
* 5. Closes the write stream
* 6. Removes the filename from the registry
*/
[Symbol.dispose]() {
if (this.stream || this.batchEnabled) {
this.isDisposing = true;
if (this.batchTimer) {
clearTimeout(this.batchTimer);
this.batchTimer = null;
}
if (this.batchEnabled) {
this.flush();
}
// Poll every 100ms until any in-flight gzip compression finishes, then end
// the stream and release the filename. NOTE(review): if compression never
// completes, this keeps rescheduling — there is no timeout bound here.
const checkAndEnd = () => {
if (!this.isCompressing) {
if (this.stream) {
this.stream.end();
}
_LogFileRotationTransport.activeFilenames.delete(this.filename);
} else {
setTimeout(checkAndEnd, 100);
}
};
checkAndEnd();
}
}
}, _class.__initStatic(), _class);
// CommonJS binding for the TS source's named export `LogFileRotationTransport`.
exports.LogFileRotationTransport = LogFileRotationTransport;
//# sourceMappingURL=index.cjs.map