@oletizi/sampler-backup
Version:
Backup utilities for Akai hardware samplers via PiSCSI
981 lines (980 loc) • 29.2 kB
JavaScript
import { R as RemoteSource, L as LocalSource } from "../interactive-prompt-BEay0YJ3.js";
import { A, b, a, D, I, U, c, d } from "../interactive-prompt-BEay0YJ3.js";
import { readdir, stat, mkdir, unlink, utimes } from "fs/promises";
import { platform } from "os";
import { basename, join, extname, dirname } from "pathe";
import { createReadStream, createWriteStream } from "fs";
import { spawn } from "child_process";
import { openSync, readSync, closeSync, existsSync, readdirSync, statSync } from "node:fs";
class BackupSourceFactory {
/**
* Create a BackupSource from explicit configuration
*
* @param config - Backup source configuration (RemoteSourceConfig or LocalSourceConfig)
* @returns BackupSource instance (RemoteSource or LocalSource)
*
* @example
* Create a remote source
* ```typescript
* const source = BackupSourceFactory.create({
* type: 'remote',
* host: 'pi@pi-scsi2.local',
* sourcePath: '/home/pi/images/',
* backupSubdir: 'pi-scsi2'
* });
* ```
*
* @example
* Create a local source
* ```typescript
* const source = BackupSourceFactory.create({
* type: 'local',
* sourcePath: '/Volumes/SDCARD',
* backupSubdir: 'sdcard',
* snapshotRoot: '~/.audiotools/backup'
* });
* ```
*/
static create(config) {
if (config.type === "remote") {
return new RemoteSource(config);
} else {
return new LocalSource(config);
}
}
/**
* Create a BackupSource from a path string with automatic type detection
*
* This is the recommended method for CLI integration and simple use cases.
* The factory automatically determines whether the path is a remote SSH
* source or local filesystem path.
*
* @param path - Source path (local or remote SSH syntax)
* @param options - Optional configuration overrides
* @returns BackupSource instance (RemoteSource or LocalSource)
*
* @throws Error if path is empty, malformed, or missing required options
*
* @remarks
* Detection logic:
* - Contains `:` and not a Windows drive-letter path → Remote SSH source (e.g., `host:/path`)
* - Otherwise → Local filesystem source (e.g., `/Volumes/SDCARD`)
*
* Required options:
* - `device` (or the deprecated `backupSubdir`) for every source
* - `sampler` additionally for local sources
*
* @example
* Local filesystem path
* ```typescript
* const source = BackupSourceFactory.fromPath('/Volumes/SDCARD', {
*   sampler: 's5000',
*   device: 'sdcard'
* });
* await source.backup('daily');
* ```
*
* @example
* Remote SSH path
* ```typescript
* const source = BackupSourceFactory.fromPath('pi@host:/images/', {
*   device: 'images'
* });
* await source.backup('daily');
* ```
*
* @example
* Custom device name
* ```typescript
* const source = BackupSourceFactory.fromPath('/Volumes/GOTEK', {
*   sampler: 's3000xl',
*   device: 'gotek-s3000xl'
* });
* await source.backup('daily');
* ```
*/
static fromPath(path, options = {}) {
if (!path || path.trim().length === 0) {
throw new Error("Source path cannot be empty");
}
if (this.isRemotePath(path)) {
return this.createRemoteFromPath(path, options);
} else {
return this.createLocalFromPath(path, options);
}
}
/**
* Check if path is a remote SSH path
* Remote paths follow the format: [user@]host:path
*/
static isRemotePath(path) {
const hasColon = path.includes(":");
const isWindowsPath = /^[A-Za-z]:/.test(path);
return hasColon && !isWindowsPath;
}
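/*
 * Illustrative classifications (a sketch; paths are hypothetical):
 *   "pi@pi-scsi2.local:/home/pi/images/" → remote (has a colon, not a drive letter)
 *   "C:\\images"                         → local (Windows drive-letter path)
 *   "/Volumes/SDCARD"                    → local (no colon)
 */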
/**
* Create RemoteSource from SSH path string
*/
static createRemoteFromPath(path, options) {
const colonIndex = path.indexOf(":");
if (colonIndex === -1) {
throw new Error(`Invalid remote path format: ${path}`);
}
const hostPart = path.substring(0, colonIndex);
const sourcePath = path.substring(colonIndex + 1);
if (!hostPart || !sourcePath) {
throw new Error(`Invalid remote path format: ${path}. Expected format: host:/path or user@host:/path`);
}
const device = options.device ?? options.backupSubdir;
if (!device) {
throw new Error("Device name is required (use --device flag)");
}
const config = {
type: "remote",
host: hostPart,
// Keep user@ prefix if present
sourcePath,
device,
sampler: options.sampler,
// Optional override
backupSubdir: device
// DEPRECATED: for backward compatibility
};
return new RemoteSource(config);
}
/**
* Create LocalSource from filesystem path string
*/
static createLocalFromPath(path, options) {
if (!options.sampler) {
throw new Error("Sampler name is required for local sources (use --sampler flag)");
}
const device = options.device ?? options.backupSubdir;
if (!device) {
throw new Error("Device name is required (use --device flag)");
}
const config = {
type: "local",
sourcePath: path,
sampler: options.sampler,
device,
backupSubdir: device,
// DEPRECATED: for backward compatibility
snapshotRoot: options.snapshotRoot
};
return new LocalSource(config);
}
/**
* Generate backup subdirectory name from hostname
* Examples: "pi-scsi2.local" → "pi-scsi2"
*/
static generateSubdirFromHost(host) {
const cleaned = host.replace(/\.local$/, "");
const withoutUser = cleaned.includes("@") ? cleaned.split("@")[1] : cleaned;
return withoutUser.replace(/[.\/]/g, "-");
}
/**
* Generate backup subdirectory name from filesystem path
* Examples:
* "/Volumes/SDCARD" → "sdcard"
* "/media/user/USB" → "usb"
* "local-media" → "local-media"
*/
static generateSubdirFromPath(path) {
const parts = path.split("/").filter((p) => p.length > 0);
const lastPart = parts[parts.length - 1] || "local-media";
return lastPart.toLowerCase().replace(/\s+/g, "-");
}
}
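/**
 * Sketch: the config shape that `fromPath` assembles for a remote source can
 * also be passed to `create` directly (field values here are illustrative):
 * ```typescript
 * const source = BackupSourceFactory.create({
 *   type: 'remote',
 *   host: 'pi@pi-scsi2.local',
 *   sourcePath: '/home/pi/images/',
 *   device: 'scsi0',
 *   sampler: 's3000xl',     // optional override
 *   backupSubdir: 'scsi0'   // deprecated alias for device
 * });
 * ```
 */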
let DefaultFileSystemOperations$1 = class DefaultFileSystemOperations {
async readdir(path) {
return readdir(path);
}
async stat(path) {
return stat(path);
}
platform() {
return platform();
}
};
class MediaDetector {
static DISK_IMAGE_EXTENSIONS = [".hds", ".img", ".iso"];
static HIDDEN_FILE_PREFIX = ".";
static MACOS_SYSTEM_VOLUMES = ["Macintosh HD", "Data", "Preboot", "Recovery", "VM"];
constructor(fsOps) {
this.fs = fsOps ?? new DefaultFileSystemOperations$1();
}
/**
* Detect all available storage media on the system
* @returns Array of MediaInfo for each detected media
*/
async detectMedia() {
const platformType = this.fs.platform();
const mountPoints = await this.getMountPoints(platformType);
const mediaInfos = [];
for (const mountPoint of mountPoints) {
try {
const diskImages = await this.findDiskImages(mountPoint);
const volumeName = basename(mountPoint);
mediaInfos.push({
mountPoint,
volumeName,
diskImages
});
} catch (error) {
continue;
}
}
return mediaInfos;
}
/**
* Recursively find disk images in a directory
* @param path - Directory path to search
* @returns Array of DiskImageInfo for discovered disk images
*/
async findDiskImages(path) {
const diskImages = [];
try {
await this.fs.stat(path);
} catch (error) {
throw new Error(`Failed to scan directory ${path}: ${error.message}`);
}
await this.scanDirectory(path, diskImages);
return diskImages;
}
/**
* Get platform-specific mount points to scan
*/
async getMountPoints(platformType) {
if (platformType === "darwin") {
return this.getMacOSMountPoints();
} else if (platformType === "linux") {
return this.getLinuxMountPoints();
}
throw new Error(`Unsupported platform: ${platformType}`);
}
/**
* Get macOS mount points from /Volumes/
* Excludes system volumes like "Macintosh HD"
*/
async getMacOSMountPoints() {
const volumesPath = "/Volumes";
const mountPoints = [];
try {
const entries = await this.fs.readdir(volumesPath);
for (const entry of entries) {
if (entry.startsWith(MediaDetector.HIDDEN_FILE_PREFIX)) {
continue;
}
if (MediaDetector.MACOS_SYSTEM_VOLUMES.includes(entry)) {
continue;
}
const fullPath = join(volumesPath, entry);
try {
const stats = await this.fs.stat(fullPath);
if (stats.isDirectory()) {
mountPoints.push(fullPath);
}
} catch {
continue;
}
}
} catch (error) {
throw new Error(`Failed to read macOS volumes directory: ${error.message}`);
}
return mountPoints;
}
/**
* Get Linux mount points from /media/$USER/ and /mnt/
*/
async getLinuxMountPoints() {
const mountPoints = [];
const username = process.env.USER || process.env.USERNAME || "";
const mediaPaths = [
`/media/${username}`,
"/mnt"
];
for (const basePath of mediaPaths) {
try {
const entries = await this.fs.readdir(basePath);
for (const entry of entries) {
if (entry.startsWith(MediaDetector.HIDDEN_FILE_PREFIX)) {
continue;
}
const fullPath = join(basePath, entry);
try {
const stats = await this.fs.stat(fullPath);
if (stats.isDirectory()) {
mountPoints.push(fullPath);
}
} catch {
continue;
}
}
} catch {
continue;
}
}
return mountPoints;
}
/**
* Recursively scan a directory for disk images
*/
async scanDirectory(dirPath, results) {
try {
const entries = await this.fs.readdir(dirPath);
for (const entry of entries) {
if (entry.startsWith(MediaDetector.HIDDEN_FILE_PREFIX)) {
continue;
}
const fullPath = join(dirPath, entry);
try {
const stats = await this.fs.stat(fullPath);
if (stats.isDirectory()) {
await this.scanDirectory(fullPath, results);
} else if (stats.isFile()) {
if (this.isDiskImage(entry)) {
const name = basename(entry, extname(entry));
results.push({
path: fullPath,
name,
size: stats.size,
mtime: stats.mtime
});
}
}
} catch {
continue;
}
}
} catch (error) {
return;
}
}
/**
* Check if a filename has a disk image extension
*/
isDiskImage(filename) {
const ext = extname(filename).toLowerCase();
return MediaDetector.DISK_IMAGE_EXTENSIONS.includes(ext);
}
}
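/**
 * Usage sketch (assumes a removable volume is mounted; the output shape
 * follows the MediaInfo objects built above):
 * ```typescript
 * const detector = new MediaDetector();
 * const media = await detector.detectMedia();
 * for (const info of media) {
 *   console.log(`${info.volumeName} (${info.mountPoint}): ${info.diskImages.length} disk image(s)`);
 * }
 * ```
 */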
class DefaultFileSystemOperations2 {
async stat(path) {
return stat(path);
}
async mkdir(path, options) {
await mkdir(path, options);
}
async unlink(path) {
await unlink(path);
}
async utimes(path, atime, mtime) {
await utimes(path, atime, mtime);
}
createReadStream(path) {
return createReadStream(path);
}
createWriteStream(path) {
return createWriteStream(path);
}
async readdir(path) {
return readdir(path);
}
}
class LocalBackupAdapter {
constructor(fsOps) {
this.fs = fsOps ?? new DefaultFileSystemOperations2();
}
/**
* Perform backup from source to destination
*/
async backup(options) {
const { sourcePath, destPath, incremental = true, onProgress } = options;
const result = {
success: true,
filesProcessed: 0,
filesCopied: 0,
filesSkipped: 0,
bytesProcessed: 0,
errors: []
};
try {
const sourceStat = await this.fs.stat(sourcePath);
if (!sourceStat.isDirectory()) {
throw new Error(`Source path is not a directory: ${sourcePath}`);
}
} catch (error) {
result.success = false;
result.errors.push(`Failed to access source directory ${sourcePath}: ${error.message}`);
return result;
}
const filesToBackup = [];
await this.discoverFiles(sourcePath, destPath, filesToBackup);
if (filesToBackup.length === 0) {
return result;
}
const totalBytes = filesToBackup.reduce((sum, file) => sum + file.size, 0);
let bytesProcessed = 0;
for (const fileInfo of filesToBackup) {
try {
const shouldCopy = !incremental || await this.shouldCopyFile(fileInfo.sourcePath, fileInfo.destPath);
if (shouldCopy) {
await this.ensureDirectory(dirname(fileInfo.destPath));
await this.copyFileWithProgress(
fileInfo.sourcePath,
fileInfo.destPath,
fileInfo.mtime,
(bytes) => {
if (onProgress) {
onProgress({
currentFile: fileInfo.sourcePath,
bytesProcessed: bytesProcessed + bytes,
totalBytes,
filesProcessed: result.filesProcessed,
totalFiles: filesToBackup.length
});
}
}
);
result.filesCopied++;
bytesProcessed += fileInfo.size;
} else {
result.filesSkipped++;
bytesProcessed += fileInfo.size;
}
result.filesProcessed++;
if (onProgress) {
onProgress({
currentFile: fileInfo.sourcePath,
bytesProcessed,
totalBytes,
filesProcessed: result.filesProcessed,
totalFiles: filesToBackup.length
});
}
} catch (error) {
if (error.code === "EACCES") {
result.errors.push(`Permission denied for ${fileInfo.sourcePath}, skipping`);
continue;
} else if (error.code === "ENOSPC") {
result.success = false;
result.errors.push(`Disk full while copying ${fileInfo.sourcePath}`);
return result;
} else {
result.success = false;
result.errors.push(`Failed to copy ${fileInfo.sourcePath}: ${error.message}`);
return result;
}
}
}
result.bytesProcessed = bytesProcessed;
return result;
}
/**
* Recursively discover files to backup
*/
async discoverFiles(sourcePath, destPath, results) {
try {
const entries = await this.fs.readdir(sourcePath);
for (const entry of entries) {
if (entry.startsWith(".")) {
continue;
}
const sourceFile = join(sourcePath, entry);
const destFile = join(destPath, entry);
try {
const stats = await this.fs.stat(sourceFile);
if (stats.isDirectory()) {
await this.discoverFiles(sourceFile, destFile, results);
} else if (stats.isFile()) {
results.push({
sourcePath: sourceFile,
destPath: destFile,
size: stats.size,
mtime: stats.mtime
});
}
} catch {
continue;
}
}
} catch {
return;
}
}
/**
* Determine if a file should be copied based on incremental logic
* Returns true if:
* - Destination doesn't exist
* - Source is newer than destination (mtime)
* - Source size differs from destination
*/
async shouldCopyFile(sourcePath, destPath) {
try {
const [sourceStat, destStat] = await Promise.all([
this.fs.stat(sourcePath),
this.fs.stat(destPath)
]);
return sourceStat.mtime > destStat.mtime || sourceStat.size !== destStat.size;
} catch (error) {
if (error.code === "ENOENT") {
return true;
}
throw error;
}
}
/**
* Copy file with progress tracking and timestamp preservation
*/
async copyFileWithProgress(sourcePath, destPath, mtime, onProgress) {
let bytesWritten = 0;
let cleanupNeeded = false;
return new Promise((resolve, reject) => {
const readStream = this.fs.createReadStream(sourcePath);
const writeStream = this.fs.createWriteStream(destPath);
cleanupNeeded = true;
readStream.on("data", (chunk) => {
bytesWritten += chunk.length;
if (onProgress) {
onProgress(bytesWritten);
}
});
readStream.on("error", async (error) => {
if (cleanupNeeded) {
try {
await this.fs.unlink(destPath);
} catch {
}
}
reject(error);
});
writeStream.on("error", async (error) => {
if (cleanupNeeded) {
try {
await this.fs.unlink(destPath);
} catch {
}
}
reject(error);
});
writeStream.on("finish", async () => {
cleanupNeeded = false;
try {
await this.fs.utimes(destPath, mtime, mtime);
resolve();
} catch (error) {
reject(error);
}
});
readStream.pipe(writeStream);
});
}
/**
* Ensure directory exists, creating it recursively if needed
*/
async ensureDirectory(dirPath) {
try {
await this.fs.stat(dirPath);
} catch (error) {
if (error.code === "ENOENT") {
await this.fs.mkdir(dirPath, { recursive: true });
} else {
throw error;
}
}
}
}
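/**
 * Usage sketch (paths are illustrative; `onProgress` receives the progress
 * objects emitted in `backup` above):
 * ```typescript
 * const adapter = new LocalBackupAdapter();
 * const result = await adapter.backup({
 *   sourcePath: '/Volumes/SDCARD',
 *   destPath: '/Users/me/.audiotools/backup/sdcard',
 *   incremental: true,
 *   onProgress: (p) => console.log(`${p.filesProcessed}/${p.totalFiles} files`)
 * });
 * if (!result.success) console.error(result.errors.join('\n'));
 * ```
 */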
async function executeBorgCommand(command, args, onProgress) {
return new Promise((resolve, reject) => {
const borg = spawn("borg", [command, ...args]);
let stdout = "";
let stderr = "";
borg.stdout.on("data", (data) => {
const line = data.toString();
stdout += line;
if (onProgress) {
onProgress(line);
}
});
borg.stderr.on("data", (data) => {
stderr += data.toString();
});
borg.on("close", (exitCode) => {
if (exitCode === 0) {
resolve({ stdout, stderr, exitCode });
} else {
reject(new Error(
`borg ${command} failed with exit code ${exitCode}: ${stderr}`
));
}
});
borg.on("error", (error) => {
reject(new Error(`Failed to spawn borg: ${error.message}`));
});
});
}
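/**
 * Sketch of how the adapter below drives this wrapper (repository path is
 * illustrative; arguments mirror the `listArchives` call):
 * ```typescript
 * const { stdout } = await executeBorgCommand('list', ['--json', '/path/to/repo']);
 * const archives = JSON.parse(stdout).archives;
 * ```
 */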
function parseProgress(line) {
try {
const data = JSON.parse(line);
if (data.type === "archive_progress") {
// archive_progress lines report running counts only, so the "total"
// fields mirror the processed values.
return {
operation: "Creating archive",
bytesProcessed: data.original_size || 0,
totalBytes: data.original_size || 0,
filesProcessed: data.nfiles || 0,
totalFiles: data.nfiles || 0,
compressionRatio: data.compressed_size && data.original_size ? data.compressed_size / data.original_size : undefined,
dedupRatio: data.deduplicated_size && data.original_size ? data.deduplicated_size / data.original_size : undefined
};
}
} catch {
}
return null;
}
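/**
 * Input/output sketch: borg emits one JSON object per progress line, e.g.
 * (abbreviated):
 * ```typescript
 * const progress = parseProgress(
 *   '{"type":"archive_progress","original_size":1048576,"compressed_size":524288,"nfiles":3}'
 * );
 * // → { operation: 'Creating archive', bytesProcessed: 1048576, ..., compressionRatio: 0.5 }
 * ```
 */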
async function ensureBorgInstalled() {
try {
await executeBorgCommand("--version", []);
} catch (error) {
if (error.message.includes("spawn borg")) {
throw new Error(
"BorgBackup is not installed. Install it with:\n macOS: brew install borgbackup\n Linux: sudo apt install borgbackup\nSee: https://borgbackup.readthedocs.io/en/stable/installation.html"
);
}
throw error;
}
}
async function getBorgVersion() {
const { stdout } = await executeBorgCommand("--version", []);
const match = stdout.match(/borg (\d+\.\d+\.\d+)/);
return match ? match[1] : "unknown";
}
async function checkBorgVersion(minVersion = "1.2.0") {
const version = await getBorgVersion();
const [major, minor] = version.split(".").map(Number);
const [minMajor, minMinor] = minVersion.split(".").map(Number);
if (major < minMajor || (major === minMajor && minor < minMinor)) {
throw new Error(
`Borg version ${version} is not supported. Please upgrade to ${minVersion} or higher: brew upgrade borgbackup`
);
}
}
function expandPath(path) {
if (path.startsWith("~/")) {
return path.replace("~", process.env.HOME || "~");
}
return path;
}
class BorgBackupAdapter {
constructor(config) {
this.config = {
...config,
repoPath: expandPath(config.repoPath),
compression: config.compression || "zstd",
encryption: config.encryption || "none"
};
}
/**
* Initialize a new Borg repository
*/
async initRepository(config) {
await ensureBorgInstalled();
await checkBorgVersion("1.2.0");
try {
await this.getRepositoryInfo();
console.log("Repository already exists, skipping initialization");
return;
} catch {
}
const repoPath = expandPath(config.repoPath);
const args = [
"--encryption",
config.encryption || "none",
"--make-parent-dirs",
repoPath
];
try {
await executeBorgCommand("init", args);
console.log(`✓ Initialized Borg repository: ${repoPath}`);
} catch (error) {
throw new Error(`Failed to initialize repository: ${error.message}`);
}
}
/**
* Create a new backup archive
*/
async createArchive(sources, archiveName, onProgress) {
await ensureBorgInstalled();
const args = [
"--stats",
"--json",
"--progress",
"--compression",
this.config.compression,
`${this.config.repoPath}::${archiveName}`,
...sources
];
let statsOutput = "";
try {
const { stdout } = await executeBorgCommand(
"create",
args,
(line) => {
const progress = parseProgress(line);
if (progress && onProgress) {
onProgress(progress);
}
if (line.trim().startsWith("{")) {
statsOutput += line;
}
}
);
const stats = JSON.parse(statsOutput || stdout);
return {
name: archiveName,
timestamp: new Date(),
stats: {
originalSize: stats.archive?.stats?.original_size || 0,
compressedSize: stats.archive?.stats?.compressed_size || 0,
dedupedSize: stats.archive?.stats?.deduplicated_size || 0,
nfiles: stats.archive?.stats?.nfiles || 0
}
};
} catch (error) {
if (error.message.includes("Failed to create/acquire the lock")) {
throw new Error(
"Repository is locked by another process. Wait for other backup to complete or run: borg break-lock " + this.config.repoPath
);
}
if (error.message.includes("Connection refused") || error.message.includes("Connection reset")) {
throw new Error(
"Cannot connect to remote host. Check SSH connection and try again."
);
}
if (error.message.includes("No space left on device")) {
throw new Error(
"Not enough disk space for backup. Free up space or change repository location."
);
}
throw new Error(`Failed to create archive: ${error.message}`);
}
}
/**
* List all archives in repository
*/
async listArchives() {
await ensureBorgInstalled();
const args = [
"--json",
this.config.repoPath
];
try {
const { stdout } = await executeBorgCommand("list", args);
const data = JSON.parse(stdout);
return data.archives.map((archive) => ({
name: archive.name,
timestamp: new Date(archive.time),
stats: {
originalSize: 0,
// Not included in list output
compressedSize: 0,
dedupedSize: 0,
nfiles: archive.nfiles || 0
}
}));
} catch (error) {
throw new Error(`Failed to list archives: ${error.message}`);
}
}
/**
* Restore specific archive to destination
*/
async restoreArchive(archiveName, destination, onProgress) {
await ensureBorgInstalled();
await mkdir(destination, { recursive: true });
const args = [
"--progress",
`${this.config.repoPath}::${archiveName}`
];
try {
await executeBorgCommand(
"extract",
args,
(line) => {
const progress = parseProgress(line);
if (progress && onProgress) {
onProgress(progress);
}
}
);
} catch (error) {
throw new Error(`Failed to restore archive: ${error.message}`);
}
}
/**
* Prune old archives based on retention policy
*/
async pruneArchives(policy) {
await ensureBorgInstalled();
const args = [
"--stats",
"--list",
`--keep-daily=${policy.daily}`,
`--keep-weekly=${policy.weekly}`,
`--keep-monthly=${policy.monthly}`,
this.config.repoPath
];
try {
await executeBorgCommand("prune", args);
} catch (error) {
throw new Error(`Failed to prune archives: ${error.message}`);
}
}
/**
* Get repository information and statistics
*/
async getRepositoryInfo() {
await ensureBorgInstalled();
const args = [
"--json",
this.config.repoPath
];
try {
const { stdout } = await executeBorgCommand("info", args);
const data = JSON.parse(stdout);
const repo = data.repository;
const cache = data.cache;
return {
path: this.config.repoPath,
id: repo.id,
lastModified: new Date(repo.last_modified),
archiveCount: cache?.stats?.total_chunks || 0,
// NOTE: total_chunks counts chunks, not archives; used here as a rough proxy
originalSize: cache?.stats?.total_size || 0,
compressedSize: cache?.stats?.total_csize || 0,
dedupedSize: cache?.stats?.unique_csize || 0,
encryption: data.encryption?.mode || "none"
};
} catch (error) {
throw new Error(`Failed to get repository info: ${error.message}`);
}
}
/**
* Check repository consistency
*/
async checkRepository() {
await ensureBorgInstalled();
const args = [
this.config.repoPath
];
try {
await executeBorgCommand("check", args);
return true;
} catch (error) {
console.error(`Repository check failed: ${error.message}`);
return false;
}
}
/**
* Check if archive already exists for today
*/
async hasArchiveForToday(interval, source) {
const archives = await this.listArchives();
const today = new Date().toISOString().split("T")[0];
const prefix = `${interval}-${today}`;
return archives.some(
(archive) => archive.name.startsWith(prefix) && archive.name.includes(source)
);
}
}
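/**
 * End-to-end usage sketch (repository path, archive name, and retention
 * numbers are illustrative):
 * ```typescript
 * const config = { repoPath: '~/.audiotools/borg-repo', compression: 'zstd', encryption: 'none' };
 * const adapter = new BorgBackupAdapter(config);
 * await adapter.initRepository(config);
 * const archive = await adapter.createArchive(
 *   ['/Volumes/SDCARD'],
 *   `daily-${new Date().toISOString().split('T')[0]}`,
 *   (p) => console.log(p.operation)
 * );
 * await adapter.pruneArchives({ daily: 7, weekly: 4, monthly: 6 });
 * ```
 */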
function detectSamplerType(diskImage) {
try {
const fd = openSync(diskImage, "r");
const buffer = Buffer.alloc(4096);
readSync(fd, buffer, 0, 4096, 0);
closeSync(fd);
// 0xAA55 (43605) at byte offset 510 is the DOS/MBR boot signature;
// DOS-formatted images are not treated as native Akai disks.
const bootSig = buffer.readUInt16LE(510);
if (bootSig === 43605) {
return "unknown";
}
// Native S1000/S3000-family images begin with an ASCII "AKAI" signature.
const akaiSig = buffer.toString("ascii", 0, 4);
if (akaiSig === "AKAI") {
return "s3k";
}
// S5000/S6000-family images carry a "VOL1" or "PART" signature in one of
// the first eight 512-byte sectors.
for (let offset = 0; offset < 4096; offset += 512) {
const volSig = buffer.toString("ascii", offset, offset + 4);
if (volSig === "VOL1" || volSig === "PART") {
return "s5k";
}
}
return "unknown";
} catch {
return "unknown";
}
}
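/**
 * Usage sketch (path is illustrative):
 * ```typescript
 * const kind = detectSamplerType('/Volumes/SDCARD/images/HD1.hds');
 * // → 's3k' | 's5k' | 'unknown'
 * ```
 */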
function findDiskImagesRecursive(dir, maxDepth = 3) {
if (maxDepth <= 0 || !existsSync(dir)) {
return [];
}
const results = [];
try {
const entries = readdirSync(dir, { withFileTypes: true });
for (const entry of entries) {
const fullPath = join(dir, entry.name);
if (entry.isFile()) {
const lowerName = entry.name.toLowerCase();
if (lowerName.endsWith(".hds") || lowerName.endsWith(".img") || lowerName.endsWith(".iso")) {
results.push(fullPath);
}
} else if (entry.isDirectory() && !entry.name.startsWith(".")) {
results.push(...findDiskImagesRecursive(fullPath, maxDepth - 1));
}
}
} catch {
}
return results;
}
function discoverDiskImages(sourceDir) {
const diskPaths = findDiskImagesRecursive(sourceDir);
const disks = [];
for (const diskPath of diskPaths) {
try {
const diskStat = statSync(diskPath);
const samplerType = detectSamplerType(diskPath);
disks.push({
path: diskPath,
name: basename(diskPath, extname(diskPath)),
samplerType,
mtime: diskStat.mtime
});
} catch (error) {
console.warn(`Skipping unreadable disk: ${diskPath}`);
}
}
return disks.sort((a2, b2) => a2.path.localeCompare(b2.path));
}
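/**
 * Usage sketch combining discovery with the type detector above (path is
 * illustrative):
 * ```typescript
 * const disks = discoverDiskImages('/Volumes/SDCARD');
 * for (const disk of disks) {
 *   console.log(`${disk.name} [${disk.samplerType}] ${disk.path}`);
 * }
 * ```
 */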
export {
A as AutoDetectBackup,
BackupSourceFactory,
BorgBackupAdapter,
b as DEVICE_TYPES,
a as DeviceMatcher,
D as DeviceResolver,
I as InteractivePrompt,
LocalBackupAdapter,
LocalSource,
MediaDetector,
RemoteSource,
U as UserCancelledError,
c as createAutoDetectBackup,
d as createInteractivePrompt,
detectSamplerType,
discoverDiskImages,
findDiskImagesRecursive
};
//# sourceMappingURL=index.js.map