pushduck

The fastest way to add file uploads to any web application. Enterprise security, edge-ready. Works with 16+ frameworks and 5+ storage providers. No heavy AWS SDK required.

import { AwsClient } from "aws4fetch";

//#region src/core/providers/providers.d.ts

/**
 * @fileoverview Cloud Storage Providers System
 *
 * This module provides a comprehensive system for configuring different cloud storage
 * providers with environment-based configuration, type-safe initialization, and
 * automatic endpoint resolution.
 *
 * The provider system supports multiple tiers of cloud storage services:
 * - **Tier 1**: Fully supported with comprehensive testing (AWS S3, Cloudflare R2, DigitalOcean Spaces, MinIO)
 * - **Tier 2**: Enterprise/Hyperscale providers (Azure Blob, IBM Cloud, Oracle OCI)
 * - **Tier 3**: Cost-optimized providers (Wasabi, Backblaze B2, Storj DCS)
 * - **Tier 4**: Performance/Specialized providers (Telnyx, Tigris, Cloudian)
 *
 * Features:
 * - Environment variable auto-detection with fallbacks
 * - Type-safe configuration with TypeScript inference
 * - Automatic endpoint generation for known providers
 * - Validation and error reporting
 * - Custom domain and ACL support
 *
 * @example Basic AWS S3 Configuration
 * ```typescript
 * import { createProvider } from 'pushduck/server';
 *
 * const s3Config = createProvider('aws', {
 *   bucket: 'my-uploads',
 *   region: 'us-east-1',
 *   // Credentials auto-loaded from AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
 * });
 * ```
 *
 * @example Cloudflare R2 Configuration
 * ```typescript
 * const r2Config = createProvider('cloudflareR2', {
 *   bucket: 'my-r2-bucket',
 *   accountId: 'your-account-id',
 *   // Credentials from CLOUDFLARE_R2_ACCESS_KEY_ID, CLOUDFLARE_R2_SECRET_ACCESS_KEY
 * });
 * ```
 *
 * @example MinIO Self-hosted Configuration
 * ```typescript
 * const minioConfig = createProvider('minio', {
 *   endpoint: 'http://localhost:9000',
 *   bucket: 'uploads',
 *   accessKeyId: 'minioadmin',
 *   secretAccessKey: 'minioadmin',
 *   useSSL: false,
 * });
 * ```
 *
 * @example Environment Variable Setup
 * ```bash
 * # AWS S3
 * export AWS_ACCESS_KEY_ID="your-access-key"
 * export AWS_SECRET_ACCESS_KEY="your-secret-key"
 * export AWS_REGION="us-east-1"
 *
 * # Cloudflare R2
 * export CLOUDFLARE_R2_ACCESS_KEY_ID="your-r2-access-key"
 * export CLOUDFLARE_R2_SECRET_ACCESS_KEY="your-r2-secret-key"
 * export CLOUDFLARE_ACCOUNT_ID="your-account-id"
 *
 * # DigitalOcean Spaces
 * export DO_SPACES_ACCESS_KEY_ID="your-spaces-key"
 * export DO_SPACES_SECRET_ACCESS_KEY="your-spaces-secret"
 * export DO_SPACES_REGION="nyc3"
 * ```
 */

/**
 * Base configuration interface for all cloud storage providers.
 * Contains common properties shared across all provider implementations.
 *
 * @interface BaseProviderConfig
 */
interface BaseProviderConfig {
  /** Provider identifier string */
  provider: string;
  /** Geographic region for the storage service */
  region?: string;
  /** Name of the storage bucket/container */
  bucket: string;
  /** Access Control List permissions (e.g., 'public-read', 'private') */
  acl?: string;
  /** Custom domain for file URLs (e.g., 'cdn.example.com') */
  customDomain?: string;
  /** Force path-style URLs instead of virtual-hosted style */
  forcePathStyle?: boolean;
}
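/*
 * Illustrative sketch (not part of the shipped API): how the base fields above
 * typically combine into a public file URL. The helper is hypothetical; pushduck
 * resolves endpoints per provider via getProviderEndpoint(), declared below.
 *
 * ```typescript
 * function publicUrl(config: BaseProviderConfig, key: string): string {
 *   if (config.customDomain) {
 *     return `https://${config.customDomain}/${key}`;
 *   }
 *   // AWS-style endpoint used purely for illustration
 *   const endpoint = `s3.${config.region ?? 'us-east-1'}.amazonaws.com`;
 *   return config.forcePathStyle
 *     ? `https://${endpoint}/${config.bucket}/${key}`   // path-style
 *     : `https://${config.bucket}.${endpoint}/${key}`;  // virtual-hosted style
 * }
 * ```
 */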
/**
 * Configuration for Amazon Web Services S3.
 * The most widely used object storage service with global availability.
 *
 * @interface AWSProviderConfig
 * @extends BaseProviderConfig
 *
 * @example Basic Configuration
 * ```typescript
 * const awsConfig: AWSProviderConfig = {
 *   provider: 'aws',
 *   bucket: 'my-app-uploads',
 *   region: 'us-east-1',
 *   accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
 *   secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
 * };
 * ```
 *
 * @example With Custom Domain
 * ```typescript
 * const awsWithCDN: AWSProviderConfig = {
 *   provider: 'aws',
 *   bucket: 'my-uploads',
 *   region: 'us-east-1',
 *   accessKeyId: 'AKIA...',
 *   secretAccessKey: 'secret...',
 *   customDomain: 'cdn.myapp.com',
 *   acl: 'public-read',
 * };
 * ```
 */
interface AWSProviderConfig extends BaseProviderConfig {
  provider: "aws";
  /** AWS Access Key ID */
  accessKeyId: string;
  /** AWS Secret Access Key */
  secretAccessKey: string;
  /** AWS region (required) */
  region: string;
  /** AWS Session Token for temporary credentials */
  sessionToken?: string;
}

/**
 * Configuration for Cloudflare R2 object storage.
 * S3-compatible storage with zero egress fees and global distribution.
 *
 * @interface CloudflareR2Config
 * @extends BaseProviderConfig
 *
 * @example Basic Configuration
 * ```typescript
 * const r2Config: CloudflareR2Config = {
 *   provider: 'cloudflare-r2',
 *   bucket: 'my-r2-bucket',
 *   accountId: 'your-cloudflare-account-id',
 *   accessKeyId: 'your-r2-access-key',
 *   secretAccessKey: 'your-r2-secret-key',
 * };
 * ```
 *
 * @example With Custom Domain
 * ```typescript
 * const r2WithDomain: CloudflareR2Config = {
 *   provider: 'cloudflare-r2',
 *   bucket: 'assets',
 *   accountId: 'abc123',
 *   accessKeyId: 'key123',
 *   secretAccessKey: 'secret123',
 *   customDomain: 'assets.myapp.com',
 * };
 * ```
 */
interface CloudflareR2Config extends BaseProviderConfig {
  provider: "cloudflare-r2";
  /** Cloudflare Account ID */
  accountId: string;
  /** R2 Access Key ID */
  accessKeyId: string;
  /** R2 Secret Access Key */
  secretAccessKey: string;
  /** Region (typically 'auto' for R2) */
  region?: "auto";
  /** Custom endpoint (auto-generated from accountId if not provided) */
  endpoint?: string;
}

/**
 * Configuration for DigitalOcean Spaces object storage.
 * S3-compatible storage service integrated with DigitalOcean's ecosystem.
 *
 * @interface DigitalOceanSpacesConfig
 * @extends BaseProviderConfig
 *
 * @example Basic Configuration
 * ```typescript
 * const spacesConfig: DigitalOceanSpacesConfig = {
 *   provider: 'digitalocean-spaces',
 *   bucket: 'my-space',
 *   region: 'nyc3',
 *   accessKeyId: 'your-spaces-key',
 *   secretAccessKey: 'your-spaces-secret',
 * };
 * ```
 *
 * @example Available Regions
 * ```typescript
 * const regions = ['nyc3', 'ams3', 'sgp1', 'sfo3', 'fra1'];
 * const spacesConfig: DigitalOceanSpacesConfig = {
 *   provider: 'digitalocean-spaces',
 *   bucket: 'global-assets',
 *   region: 'fra1', // Frankfurt
 *   accessKeyId: process.env.DO_SPACES_ACCESS_KEY_ID!,
 *   secretAccessKey: process.env.DO_SPACES_SECRET_ACCESS_KEY!,
 * };
 * ```
 */
interface DigitalOceanSpacesConfig extends BaseProviderConfig {
  provider: "digitalocean-spaces";
  /** Spaces Access Key ID */
  accessKeyId: string;
  /** Spaces Secret Access Key */
  secretAccessKey: string;
  /** DigitalOcean region */
  region: string;
  /** Custom endpoint (auto-generated from region if not provided) */
  endpoint?: string;
}
/**
 * Configuration for MinIO object storage.
 * Self-hosted S3-compatible storage for on-premises or private cloud deployments.
 *
 * @interface MinIOConfig
 * @extends BaseProviderConfig
 *
 * @example Local Development
 * ```typescript
 * const minioConfig: MinIOConfig = {
 *   provider: 'minio',
 *   endpoint: 'http://localhost:9000',
 *   bucket: 'uploads',
 *   accessKeyId: 'minioadmin',
 *   secretAccessKey: 'minioadmin',
 *   useSSL: false,
 * };
 * ```
 *
 * @example Production Setup
 * ```typescript
 * const minioProduction: MinIOConfig = {
 *   provider: 'minio',
 *   endpoint: 'https://minio.mycompany.com',
 *   bucket: 'production-uploads',
 *   accessKeyId: process.env.MINIO_ACCESS_KEY!,
 *   secretAccessKey: process.env.MINIO_SECRET_KEY!,
 *   useSSL: true,
 *   port: 9000,
 * };
 * ```
 */
interface MinIOConfig extends BaseProviderConfig {
  provider: "minio";
  /** MinIO server endpoint URL */
  endpoint: string;
  /** MinIO access key */
  accessKeyId: string;
  /** MinIO secret key */
  secretAccessKey: string;
  /** Whether to use SSL/TLS */
  useSSL?: boolean;
  /** Custom port (default: 9000) */
  port?: number;
}

interface AzureBlobConfig extends BaseProviderConfig {
  provider: "azure-blob";
  accountName: string;
  accessKeyId: string;
  secretAccessKey: string;
  endpoint?: string;
}

interface IBMCloudConfig extends BaseProviderConfig {
  provider: "ibm-cloud";
  accessKeyId: string;
  secretAccessKey: string;
  endpoint: string;
  serviceInstanceId?: string;
}

interface OracleOCIConfig extends BaseProviderConfig {
  provider: "oracle-oci";
  accessKeyId: string;
  secretAccessKey: string;
  endpoint: string;
  namespace?: string;
}

interface WasabiConfig extends BaseProviderConfig {
  provider: "wasabi";
  accessKeyId: string;
  secretAccessKey: string;
  endpoint?: string;
}

interface BackblazeB2Config extends BaseProviderConfig {
  provider: "backblaze-b2";
  accessKeyId: string;
  secretAccessKey: string;
  endpoint: string;
}

interface StorjDCSConfig extends BaseProviderConfig {
  provider: "storj-dcs";
  accessKeyId: string;
  secretAccessKey: string;
  endpoint?: string;
}

interface TelnyxStorageConfig extends BaseProviderConfig {
  provider: "telnyx-storage";
  accessKeyId: string;
  secretAccessKey: string;
  endpoint: string;
}

interface TigrisDataConfig extends BaseProviderConfig {
  provider: "tigris-data";
  accessKeyId: string;
  secretAccessKey: string;
  endpoint: string;
  region?: "auto";
}

interface CloudianHyperStoreConfig extends BaseProviderConfig {
  provider: "cloudian-hyperstore";
  accessKeyId: string;
  secretAccessKey: string;
  endpoint: string;
}

interface GoogleCloudStorageConfig extends BaseProviderConfig {
  provider: "gcs";
  projectId: string;
  keyFilename?: string;
  credentials?: object;
}

interface S3CompatibleConfig extends BaseProviderConfig {
  provider: "s3-compatible";
  accessKeyId: string;
  secretAccessKey: string;
  endpoint: string;
}

type ProviderConfig =
  | AWSProviderConfig
  | CloudflareR2Config
  | DigitalOceanSpacesConfig
  | MinIOConfig
  | AzureBlobConfig
  | IBMCloudConfig
  | OracleOCIConfig
  | WasabiConfig
  | BackblazeB2Config
  | StorjDCSConfig
  | TelnyxStorageConfig
  | TigrisDataConfig
  | CloudianHyperStoreConfig
  | GoogleCloudStorageConfig
  | S3CompatibleConfig;
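/*
 * The Tier 2-4 interfaces above ship without inline examples; a minimal sketch
 * (all values are placeholders, not defaults). Note also that ProviderConfig is
 * a discriminated union on the `provider` field, so TypeScript narrows it:
 *
 * ```typescript
 * const wasabi: WasabiConfig = {
 *   provider: 'wasabi',
 *   bucket: 'my-wasabi-bucket',
 *   region: 'us-east-1',
 *   accessKeyId: process.env.WASABI_ACCESS_KEY_ID!,       // placeholder env var
 *   secretAccessKey: process.env.WASABI_SECRET_ACCESS_KEY!,
 * };
 *
 * function describe(config: ProviderConfig): string {
 *   switch (config.provider) {
 *     case 'cloudflare-r2':
 *       return `R2 account ${config.accountId}`;   // narrowed to CloudflareR2Config
 *     case 'gcs':
 *       return `GCS project ${config.projectId}`;  // narrowed to GoogleCloudStorageConfig
 *     default:
 *       return `${config.provider} bucket ${config.bucket}`;
 *   }
 * }
 * ```
 */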
["S3_FORCE_PATH_STYLE"]; }; readonly defaults: { readonly region: "us-east-1"; readonly acl: "private"; }; }; readonly cloudflareR2: { readonly provider: "cloudflare-r2"; readonly configKeys: { readonly accountId: readonly ["CLOUDFLARE_ACCOUNT_ID", "R2_ACCOUNT_ID"]; readonly bucket: readonly ["CLOUDFLARE_R2_BUCKET", "R2_BUCKET"]; readonly accessKeyId: readonly ["CLOUDFLARE_R2_ACCESS_KEY_ID", "R2_ACCESS_KEY_ID"]; readonly secretAccessKey: readonly ["CLOUDFLARE_R2_SECRET_ACCESS_KEY", "R2_SECRET_ACCESS_KEY"]; readonly endpoint: readonly ["CLOUDFLARE_R2_ENDPOINT", "R2_ENDPOINT"]; readonly customDomain: readonly ["R2_CUSTOM_DOMAIN"]; readonly acl: readonly []; }; readonly defaults: { readonly region: "auto"; readonly acl: "private"; }; readonly customLogic: (config: any, computed: any) => { endpoint: any; }; }; readonly digitalOceanSpaces: { readonly provider: "digitalocean-spaces"; readonly configKeys: { readonly region: readonly ["DO_SPACES_REGION", "DIGITALOCEAN_SPACES_REGION"]; readonly bucket: readonly ["DO_SPACES_BUCKET", "DIGITALOCEAN_SPACES_BUCKET"]; readonly accessKeyId: readonly ["DO_SPACES_ACCESS_KEY_ID", "DIGITALOCEAN_SPACES_ACCESS_KEY_ID"]; readonly secretAccessKey: readonly ["DO_SPACES_SECRET_ACCESS_KEY", "DIGITALOCEAN_SPACES_SECRET_ACCESS_KEY"]; readonly endpoint: readonly ["DO_SPACES_ENDPOINT", "DIGITALOCEAN_SPACES_ENDPOINT"]; readonly customDomain: readonly ["DO_SPACES_CUSTOM_DOMAIN"]; readonly acl: readonly []; }; readonly defaults: { readonly region: "nyc3"; readonly acl: "private"; }; readonly customLogic: (config: any, computed: any) => { endpoint: any; }; }; readonly minio: { readonly provider: "minio"; readonly configKeys: { readonly endpoint: readonly ["MINIO_ENDPOINT"]; readonly bucket: readonly ["MINIO_BUCKET"]; readonly accessKeyId: readonly ["MINIO_ACCESS_KEY_ID", "MINIO_ACCESS_KEY"]; readonly secretAccessKey: readonly ["MINIO_SECRET_ACCESS_KEY", "MINIO_SECRET_KEY"]; readonly region: readonly ["MINIO_REGION"]; readonly customDomain: readonly ["MINIO_CUSTOM_DOMAIN"]; readonly acl: readonly []; }; readonly defaults: { readonly endpoint: "localhost:9000"; readonly region: "us-east-1"; readonly acl: "private"; }; readonly customLogic: (config: any, computed: any) => { useSSL: any; port: number | undefined; }; }; readonly gcs: { readonly provider: "gcs"; readonly configKeys: { readonly projectId: readonly ["GOOGLE_CLOUD_PROJECT_ID", "GCS_PROJECT_ID"]; readonly bucket: readonly ["GCS_BUCKET", "GOOGLE_CLOUD_STORAGE_BUCKET"]; readonly keyFilename: readonly ["GOOGLE_APPLICATION_CREDENTIALS", "GCS_KEY_FILE"]; readonly region: readonly ["GCS_REGION"]; readonly customDomain: readonly ["GCS_CUSTOM_DOMAIN"]; readonly acl: readonly []; }; readonly defaults: { readonly region: "us-central1"; readonly acl: "private"; }; readonly customLogic: (config: any) => { credentials: any; }; }; readonly s3Compatible: { readonly provider: "s3-compatible"; readonly configKeys: { readonly endpoint: readonly ["S3_ENDPOINT", "S3_COMPATIBLE_ENDPOINT"]; readonly bucket: readonly ["S3_BUCKET", "S3_BUCKET_NAME"]; readonly accessKeyId: readonly ["S3_ACCESS_KEY_ID", "ACCESS_KEY_ID"]; readonly secretAccessKey: readonly ["S3_SECRET_ACCESS_KEY", "SECRET_ACCESS_KEY"]; readonly region: readonly ["S3_REGION", "REGION"]; readonly customDomain: readonly ["S3_CUSTOM_DOMAIN"]; readonly acl: readonly ["S3_ACL"]; }; readonly defaults: { readonly region: "us-east-1"; readonly acl: "private"; readonly forcePathStyle: true; }; }; }; type ProviderSpecsType = typeof PROVIDER_SPECS; type ProviderType = keyof 
/**
 * Maps each provider type to its corresponding configuration interface.
 * This enables type-safe provider configuration in createUploadConfig().provider().
 */
type ProviderConfigMap = {
  aws: Partial<Omit<AWSProviderConfig, "provider">>;
  cloudflareR2: Partial<Omit<CloudflareR2Config, "provider">>;
  digitalOceanSpaces: Partial<Omit<DigitalOceanSpacesConfig, "provider">>;
  minio: Partial<Omit<MinIOConfig, "provider">>;
  gcs: Partial<Omit<GoogleCloudStorageConfig, "provider">>;
  s3Compatible: Partial<Omit<S3CompatibleConfig, "provider">>;
};

/**
 * Type-safe provider configuration function.
 * Usage: createProvider("aws", { bucket: "my-bucket", region: "us-west-2" })
 */
/**
 * Creates a provider configuration with automatic environment variable detection.
 * This is the main factory function for creating type-safe provider configurations
 * with automatic credential loading from environment variables.
 *
 * @template T - The provider type
 * @param type - The provider type identifier
 * @param config - Partial configuration object (missing values loaded from env)
 * @returns Complete provider configuration
 *
 * @example AWS S3 Provider
 * ```typescript
 * // Environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION
 * const s3Config = createProvider('aws', {
 *   bucket: 'my-uploads',
 *   // region, accessKeyId, secretAccessKey auto-loaded from env
 * });
 * ```
 *
 * @example Cloudflare R2 Provider
 * ```typescript
 * // Environment variables: CLOUDFLARE_R2_ACCESS_KEY_ID, CLOUDFLARE_R2_SECRET_ACCESS_KEY, CLOUDFLARE_ACCOUNT_ID
 * const r2Config = createProvider('cloudflareR2', {
 *   bucket: 'my-r2-bucket',
 *   // accountId, accessKeyId, secretAccessKey auto-loaded from env
 * });
 * ```
 *
 * @example MinIO Provider
 * ```typescript
 * const minioConfig = createProvider('minio', {
 *   endpoint: 'http://localhost:9000',
 *   bucket: 'uploads',
 *   accessKeyId: 'minioadmin',
 *   secretAccessKey: 'minioadmin',
 *   useSSL: false,
 * });
 * ```
 *
 * @example DigitalOcean Spaces
 * ```typescript
 * // Environment variables: DO_SPACES_ACCESS_KEY_ID, DO_SPACES_SECRET_ACCESS_KEY, DO_SPACES_REGION
 * const spacesConfig = createProvider('digitalOceanSpaces', {
 *   bucket: 'my-space',
 *   // region, accessKeyId, secretAccessKey auto-loaded from env
 * });
 * ```
 *
 * @throws {Error} When required configuration is missing and not available in environment
 */
declare function createProvider<T extends ProviderType>(type: T, config?: ProviderConfigMap[T]): ProviderConfig;
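/*
 * Why ProviderConfigMap matters: createProvider()'s second argument is keyed by
 * the first, so provider-specific fields are checked at compile time. A sketch
 * (the @ts-expect-error line describes expected compiler behavior, not a runtime
 * check):
 *
 * ```typescript
 * createProvider('minio', { endpoint: 'http://localhost:9000', bucket: 'uploads' }); // OK
 *
 * // @ts-expect-error 'accountId' belongs to cloudflareR2, not aws
 * createProvider('aws', { bucket: 'my-uploads', accountId: 'abc123' });
 * ```
 */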
/**
 * Validates a provider configuration and returns detailed error information.
 * This function checks for required fields, validates endpoints, and ensures
 * the configuration is complete and correct.
 *
 * @param config - The provider configuration to validate
 * @returns Validation result with success status and error details
 *
 * @example Validating AWS Configuration
 * ```typescript
 * const awsConfig = createProvider('aws', {
 *   bucket: 'my-uploads',
 *   region: 'us-east-1',
 *   accessKeyId: 'AKIA...',
 *   secretAccessKey: 'secret...',
 * });
 *
 * const validation = validateProviderConfig(awsConfig);
 * if (!validation.valid) {
 *   console.error('Configuration errors:', validation.errors);
 *   // ["Missing required field: accessKeyId", "Invalid region format"]
 * }
 * ```
 *
 * @example Handling Validation Errors
 * ```typescript
 * const config = createProvider('cloudflareR2', {
 *   bucket: 'test-bucket',
 *   // Missing accountId, accessKeyId, secretAccessKey
 * });
 *
 * const { valid, errors } = validateProviderConfig(config);
 * if (!valid) {
 *   throw new Error(`Provider configuration invalid: ${errors.join(', ')}`);
 * }
 * ```
 *
 * @example Validation in Setup
 * ```typescript
 * function setupStorage(providerConfig: ProviderConfig) {
 *   const validation = validateProviderConfig(providerConfig);
 *
 *   if (!validation.valid) {
 *     console.error('❌ Storage configuration errors:');
 *     validation.errors.forEach(error => console.error(` - ${error}`));
 *     process.exit(1);
 *   }
 *
 *   console.log('✅ Storage configuration valid');
 *   return createStorageClient(providerConfig);
 * }
 * ```
 */
declare function validateProviderConfig(config: ProviderConfig): {
  valid: boolean;
  errors: string[];
};

/**
 * Generates the appropriate endpoint URL for a given provider configuration.
 * This function handles automatic endpoint generation for known providers and
 * validates custom endpoints for self-hosted or specialized providers.
 *
 * @param config - The provider configuration
 * @returns The complete endpoint URL for the provider
 *
 * @example AWS S3 Endpoint
 * ```typescript
 * const awsConfig = createProvider('aws', {
 *   bucket: 'my-uploads',
 *   region: 'us-west-2',
 * });
 *
 * const endpoint = getProviderEndpoint(awsConfig);
 * // Returns: "https://s3.us-west-2.amazonaws.com"
 * ```
 *
 * @example Cloudflare R2 Endpoint
 * ```typescript
 * const r2Config = createProvider('cloudflareR2', {
 *   bucket: 'my-bucket',
 *   accountId: 'abc123def456',
 * });
 *
 * const endpoint = getProviderEndpoint(r2Config);
 * // Returns: "https://abc123def456.r2.cloudflarestorage.com"
 * ```
 *
 * @example DigitalOcean Spaces Endpoint
 * ```typescript
 * const spacesConfig = createProvider('digitalOceanSpaces', {
 *   bucket: 'my-space',
 *   region: 'nyc3',
 * });
 *
 * const endpoint = getProviderEndpoint(spacesConfig);
 * // Returns: "https://nyc3.digitaloceanspaces.com"
 * ```
 *
 * @example MinIO Custom Endpoint
 * ```typescript
 * const minioConfig = createProvider('minio', {
 *   endpoint: 'https://minio.mycompany.com:9000',
 *   bucket: 'uploads',
 * });
 *
 * const endpoint = getProviderEndpoint(minioConfig);
 * // Returns: "https://minio.mycompany.com:9000"
 * ```
 *
 * @throws {Error} When endpoint cannot be determined or is invalid
 */
declare function getProviderEndpoint(config: ProviderConfig): string;

//#endregion
//#region src/core/schema.d.ts

/**
 * File validation constraints for S3 schemas.
 * These constraints are applied during validation to ensure uploaded files
 * meet the specified requirements.
 *
 * @interface S3FileConstraints
 *
 * @example
 * ```typescript
 * const constraints: S3FileConstraints = {
 *   maxSize: '10MB', // or 10485760 (bytes)
 *   minSize: '1KB',  // or 1024 (bytes)
 *   allowedTypes: ['image/jpeg', 'image/png', 'application/pdf'],
 *   allowedExtensions: ['.jpg', '.jpeg', '.png', '.pdf'],
 *   required: true,
 * };
 * ```
 */
interface S3FileConstraints {
  /** Maximum file size (string like '10MB' or number in bytes) */
  maxSize?: string | number;
  /** Minimum file size (string like '1KB' or number in bytes) */
  minSize?: string | number;
  /** Allowed MIME types (e.g., ['image/jpeg', 'application/pdf']) */
  allowedTypes?: string[];
  /** Allowed file extensions (e.g., ['.jpg', '.pdf']) */
  allowedExtensions?: string[];
  /** Whether the file is required (default: true) */
  required?: boolean;
}

/**
 * Array validation constraints for file arrays.
 * Used when validating multiple files uploaded together.
 *
 * @interface S3ArrayConstraints
 *
 * @example
 * ```typescript
 * const arrayConstraints: S3ArrayConstraints = {
 *   min: 1,    // At least 1 file required
 *   max: 10,   // Maximum 10 files allowed
 *   length: 5, // Exactly 5 files required
 * };
 * ```
 */
interface S3ArrayConstraints {
  /** Minimum number of files in the array */
  min?: number;
  /** Maximum number of files in the array */
  max?: number;
  /** Exact number of files required (overrides min/max) */
  length?: number;
}

/**
 * Context object provided to validation functions.
 * Contains information about the file being validated and the validation environment.
 *
 * @interface S3ValidationContext
 *
 * @example
 * ```typescript
 * const validator = async (ctx: S3ValidationContext) => {
 *   console.log(`Validating ${ctx.file.name} in field ${ctx.fieldName}`);
 *
 *   if (ctx.allFiles) {
 *     const totalSize = Object.values(ctx.allFiles)
 *       .flat()
 *       .reduce((sum, file) => sum + file.size, 0);
 *
 *     return totalSize < 50 * 1024 * 1024; // Total < 50MB
 *   }
 *
 *   return true;
 * };
 * ```
 */
interface S3ValidationContext {
  /** The file being validated */
  file: File;
  /** Name of the field/property being validated */
  fieldName: string;
  /** All files in the upload (for cross-file validation) */
  allFiles?: Record<string, File | File[]>;
}

/**
 * Result object returned from validation operations.
 * Indicates whether validation passed and provides error details if it failed.
 *
 * @interface S3ValidationResult
 *
 * @example Success Result
 * ```typescript
 * const successResult: S3ValidationResult = {
 *   success: true,
 *   data: processedFile,
 * };
 * ```
 *
 * @example Error Result
 * ```typescript
 * const errorResult: S3ValidationResult = {
 *   success: false,
 *   error: {
 *     code: 'FILE_TOO_LARGE',
 *     message: 'File size exceeds 10MB limit',
 *     path: ['avatar'],
 *   },
 * };
 * ```
 */
interface S3ValidationResult {
  /** Whether validation succeeded */
  success: boolean;
  /** Error details if validation failed */
  error?: {
    /** Error code for programmatic handling */
    code: string;
    /** Human-readable error message */
    message: string;
    /** Path to the field that failed validation */
    path: string[];
  };
  /** Processed/transformed data if validation succeeded */
  data?: any;
}
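/*
 * The size constraints above accept either raw byte counts or strings like
 * '10MB'. The parser pushduck actually uses is private (_parseSize on
 * S3FileSchema); this is only an illustrative equivalent for reasoning about
 * the accepted values:
 *
 * ```typescript
 * const UNITS: Record<string, number> = { B: 1, KB: 1024, MB: 1024 ** 2, GB: 1024 ** 3 };
 *
 * function toBytes(size: string | number): number {
 *   if (typeof size === 'number') return size; // already bytes
 *   const match = /^(\d+(?:\.\d+)?)\s*(B|KB|MB|GB)$/i.exec(size.trim());
 *   if (!match) throw new Error(`Unrecognized size: ${size}`);
 *   return Math.round(parseFloat(match[1]) * UNITS[match[2].toUpperCase()]);
 * }
 *
 * toBytes('10MB'); // 10485760 — matches the documented byte equivalence
 * ```
 */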
/**
 * Context object provided to transform functions.
 * Contains the file and metadata for data transformation operations.
 *
 * @template T - Type of the original data being transformed
 * @interface S3TransformContext
 *
 * @example
 * ```typescript
 * const addTimestamp = async (ctx: S3TransformContext<File>) => {
 *   return {
 *     file: ctx.file,
 *     uploadedAt: new Date().toISOString(),
 *     userId: ctx.metadata?.userId,
 *     originalName: ctx.originalData.name,
 *   };
 * };
 * ```
 */
interface S3TransformContext<T = any> {
  /** The file being transformed */
  file: File;
  /** Additional metadata from the upload context */
  metadata?: Record<string, any>;
  /** The original data before transformation */
  originalData: T;
}

/**
 * Abstract base class for all S3 schema types.
 * Provides the foundation for type-safe file validation and transformation.
 *
 * This class implements the core validation pipeline:
 * 1. Type validation (_parse method)
 * 2. Custom validators (refine method)
 * 3. Data transformation (transform method)
 *
 * @template TInput - The input type expected by this schema
 * @template TOutput - The output type after validation and transformation
 * @abstract
 * @class S3Schema
 *
 * @example Creating a Custom Schema
 * ```typescript
 * class S3VideoSchema extends S3Schema<File, File> {
 *   _type = "video" as const;
 *
 *   _parse(input: unknown): S3ValidationResult {
 *     if (!(input instanceof File)) {
 *       return {
 *         success: false,
 *         error: {
 *           code: 'INVALID_TYPE',
 *           message: 'Expected File object',
 *           path: [],
 *         },
 *       };
 *     }
 *
 *     if (!input.type.startsWith('video/')) {
 *       return {
 *         success: false,
 *         error: {
 *           code: 'INVALID_VIDEO_TYPE',
 *           message: 'File must be a video',
 *           path: [],
 *         },
 *       };
 *     }
 *
 *     return { success: true, data: input };
 *   }
 *
 *   protected _clone(): this {
 *     return new S3VideoSchema() as this;
 *   }
 * }
 * ```
 */
declare abstract class S3Schema<TInput = any, TOutput = TInput> {
  protected _constraints: Record<string, any>;
  protected _transforms: Array<(ctx: S3TransformContext<TInput>) => Promise<any> | any>;
  protected _validators: Array<(ctx: S3ValidationContext) => S3ValidationResult | Promise<S3ValidationResult>>;
  protected _optional: boolean;
  /** Schema type identifier */
  abstract _type: string;

  /**
   * Abstract method for parsing and validating input data.
   * Must be implemented by concrete schema classes.
   *
   * @param input - The input data to validate
   * @returns Validation result indicating success or failure
   * @abstract
   */
  abstract _parse(input: unknown): S3ValidationResult | Promise<S3ValidationResult>;

  /**
   * Core validation method that orchestrates the entire validation pipeline.
   * Handles optional values, type validation, custom validators, and transformations.
   *
   * @param input - The input data to validate
   * @param context - Optional validation context
   * @returns Promise resolving to validation result
   *
   * @example
   * ```typescript
   * const schema = new S3FileSchema({ maxSize: '10MB' });
   * const result = await schema.validate(file, {
   *   fieldName: 'avatar',
   *   allFiles: { avatar: file },
   * });
   *
   * if (result.success) {
   *   console.log('File is valid:', result.data);
   * } else {
   *   console.error('Validation failed:', result.error);
   * }
   * ```
   */
  validate(input: unknown, context?: Partial<S3ValidationContext>): Promise<S3ValidationResult>;
  /**
   * Makes this schema optional, allowing undefined or null values.
   * Optional schemas will pass validation when no value is provided.
   *
   * @returns New schema instance that accepts optional values
   *
   * @example
   * ```typescript
   * const optionalImage = s3.image().maxFileSize('5MB').optional();
   *
   * // Both of these will pass validation
   * await optionalImage.validate(undefined); // ✅ Success
   * await optionalImage.validate(imageFile); // ✅ Success (if valid)
   * ```
   */
  optional(): S3Schema<TInput, TOutput | undefined>;

  /**
   * Adds a transformation function to process validated data.
   * Transformations are applied after validation succeeds and can modify the output.
   *
   * @template TNewOutput - The type of the transformed output
   * @param transformer - Function to transform the validated data
   * @returns New schema instance with the transformation applied
   *
   * @example Adding Metadata
   * ```typescript
   * const enhancedSchema = s3.file()
   *   .maxFileSize('10MB')
   *   .transform(async ({ file, metadata }) => ({
   *     originalName: file.name,
   *     size: file.size,
   *     uploadedBy: metadata.userId,
   *     uploadedAt: new Date().toISOString(),
   *   }));
   * ```
   *
   * @example Processing File Data
   * ```typescript
   * const processedSchema = s3.image()
   *   .transform(async ({ file }) => {
   *     const buffer = await file.arrayBuffer();
   *     const hash = await crypto.subtle.digest('SHA-256', buffer);
   *     return {
   *       file,
   *       hash: Array.from(new Uint8Array(hash))
   *         .map(b => b.toString(16).padStart(2, '0'))
   *         .join(''),
   *     };
   *   });
   * ```
   */
  transform<TNewOutput>(transformer: (ctx: S3TransformContext<TOutput>) => Promise<TNewOutput> | TNewOutput): S3Schema<TInput, TNewOutput>;

  /**
   * Adds a custom validation function with a custom error message.
   * Refinements are executed after basic type validation but before transformations.
   *
   * @param validator - Function that returns true if validation passes
   * @param message - Error message to show if validation fails
   * @returns New schema instance with the custom validation
   *
   * @example File Name Validation
   * ```typescript
   * const strictSchema = s3.file()
   *   .refine(
   *     async ({ file }) => !file.name.includes(' '),
   *     'File name cannot contain spaces'
   *   )
   *   .refine(
   *     async ({ file }) => file.name.length <= 50,
   *     'File name must be 50 characters or less'
   *   );
   * ```
   *
   * @example Cross-File Validation
   * ```typescript
   * const totalSizeSchema = s3.file()
   *   .refine(
   *     async ({ allFiles }) => {
   *       if (!allFiles) return true;
   *       const totalSize = Object.values(allFiles)
   *         .flat()
   *         .reduce((sum, file) => sum + file.size, 0);
   *       return totalSize <= 100 * 1024 * 1024; // 100MB total
   *     },
   *     'Total upload size cannot exceed 100MB'
   *   );
   * ```
   */
  refine(validator: (ctx: S3ValidationContext) => boolean | Promise<boolean>, message: string): this;

  /**
   * Creates a deep clone of this schema instance.
   * Used internally to ensure immutability when chaining methods.
   *
   * @returns Cloned schema instance
   * @protected
   * @abstract
   */
  protected abstract _clone(): this;
}
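/*
 * A sketch of the pipeline order documented on S3Schema — _parse, then refine()
 * validators, then transform() — using only the public API above (schema names
 * and messages are illustrative):
 *
 * ```typescript
 * const schema = s3.file()
 *   .maxFileSize('10MB')                                              // 1. enforced during _parse
 *   .refine(({ file }) => file.name.length <= 100, 'Name too long')   // 2. custom validator
 *   .transform(({ file }) => ({ file, checkedAt: Date.now() }));      // 3. runs only on success
 *
 * const result = await schema.validate(someFile, { fieldName: 'document' });
 * if (result.success) {
 *   // result.data is the transformed value: { file, checkedAt }
 * }
 * ```
 */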
/**
 * Schema for validating individual File objects with comprehensive constraints.
 * This is the core schema for handling file uploads with size, type, and extension validation.
 *
 * @class S3FileSchema
 * @extends S3Schema<File, File>
 *
 * @example Basic File Validation
 * ```typescript
 * const documentSchema = new S3FileSchema({
 *   maxSize: '10MB',
 *   allowedTypes: ['application/pdf', 'application/msword'],
 *   allowedExtensions: ['.pdf', '.doc', '.docx'],
 * });
 *
 * // Use in router
 * const router = s3.createRouter({
 *   document: documentSchema,
 * });
 * ```
 *
 * @example Chainable API
 * ```typescript
 * const imageSchema = s3.file()
 *   .maxFileSize('5MB')
 *   .types(['image/jpeg', 'image/png', 'image/webp'])
 *   .extensions(['.jpg', '.jpeg', '.png', '.webp'])
 *   .refine(
 *     async ({ file }) => file.name.length <= 100,
 *     'Filename must be 100 characters or less'
 *   );
 * ```
 *
 * @example With Lifecycle Hooks
 * ```typescript
 * const trackedSchema = new S3FileSchema({ maxSize: '50MB' })
 *   .onUploadStart(async ({ file, metadata }) => {
 *     console.log(`Starting upload: ${file.name}`);
 *     await logUploadStart(file.name, metadata.userId);
 *   })
 *   .onUploadComplete(async ({ file, url, key }) => {
 *     console.log(`Upload complete: ${file.name} -> ${url}`);
 *     await notifyUploadComplete(file.name, url);
 *   })
 *   .onUploadError(async ({ file, error }) => {
 *     console.error(`Upload failed: ${file.name}`, error);
 *     await logUploadError(file.name, error);
 *   });
 * ```
 */
declare class S3FileSchema extends S3Schema<File, File> {
  protected constraints: S3FileConstraints;
  _type: "file";

  /**
   * Creates a new S3FileSchema instance with the specified constraints.
   *
   * @param constraints - File validation constraints
   *
   * @example
   * ```typescript
   * const schema = new S3FileSchema({
   *   maxSize: '10MB',
   *   minSize: '1KB',
   *   allowedTypes: ['image/jpeg', 'image/png'],
   *   allowedExtensions: ['.jpg', '.jpeg', '.png'],
   *   required: true,
   * });
   * ```
   */
  constructor(constraints?: S3FileConstraints);

  _parse(input: unknown): S3ValidationResult;

  /**
   * Sets the maximum file size constraint.
   *
   * @deprecated Use `maxFileSize()` instead. This method will be removed in a future version.
   * @param size - Maximum size as string (e.g., '10MB', '500KB') or number (bytes)
   * @returns New schema instance with max size constraint
   *
   * @example
   * ```typescript
   * const schema = s3.file().maxFileSize('10MB');
   * const schema2 = s3.file().maxFileSize(10485760); // 10MB in bytes
   * ```
   */
  max(size: string | number): S3FileSchema;

  /**
   * Sets the maximum file size constraint.
   *
   * @param size - Maximum size as string (e.g., '10MB', '500KB') or number (bytes)
   * @returns New schema instance with max size constraint
   *
   * @example
   * ```typescript
   * const schema = s3.file().maxFileSize('10MB');
   * const schema2 = s3.file().maxFileSize(10485760); // 10MB in bytes
   * ```
   */
  maxFileSize(size: string | number): S3FileSchema;

  /**
   * Sets the minimum file size constraint.
   *
   * @param size - Minimum size as string (e.g., '1KB', '100B') or number (bytes)
   * @returns New schema instance with min size constraint
   *
   * @example
   * ```typescript
   * const schema = s3.file().min('1KB');
   * const schema2 = s3.file().min(1024); // 1KB in bytes
   * ```
   */
  min(size: string | number): S3FileSchema;
  /**
   * Sets the allowed MIME types constraint.
   *
   * @param allowedTypes - Array of allowed MIME types
   * @returns New schema instance with MIME type constraint
   *
   * @example
   * ```typescript
   * const imageSchema = s3.file().types([
   *   'image/jpeg',
   *   'image/png',
   *   'image/webp'
   * ]);
   *
   * const documentSchema = s3.file().types([
   *   'application/pdf',
   *   'application/msword',
   *   'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
   * ]);
   * ```
   */
  types(allowedTypes: string[]): S3FileSchema;

  /**
   * Sets the allowed file extensions constraint.
   *
   * @param allowedExtensions - Array of allowed file extensions (with or without dots)
   * @returns New schema instance with extension constraint
   *
   * @example
   * ```typescript
   * const imageSchema = s3.file().extensions(['.jpg', '.jpeg', '.png']);
   * const docSchema = s3.file().extensions(['pdf', 'doc', 'docx']); // dots optional
   * ```
   */
  extensions(allowedExtensions: string[]): S3FileSchema;

  /**
   * Creates an array schema that validates multiple files of this type with a maximum count.
   * This is a convenience method for creating arrays with a maximum file limit.
   *
   * @param maxCount - Maximum number of files allowed
   * @returns New array schema instance with maximum constraint
   *
   * @example
   * ```typescript
   * const gallerySchema = s3.image()
   *   .maxFileSize('2MB')
   *   .maxFiles(6); // Maximum 6 images, each max 2MB
   *
   * const documentsSchema = s3.file()
   *   .types(['application/pdf'])
   *   .maxFiles(5); // Maximum 5 PDF files
   * ```
   */
  maxFiles(maxCount: number): S3ArraySchema<this>;

  protected _clone(): this;

  /**
   * Adds middleware to process requests before file upload.
   * Middleware can modify metadata, perform authentication, or add custom logic.
   *
   * @template TMetadata - Type of metadata returned by middleware
   * @param middleware - Function to process the request and return metadata
   * @returns S3Route instance with middleware applied
   *
   * @example Authentication Middleware
   * ```typescript
   * const authenticatedUpload = s3.file()
   *   .maxFileSize('10MB')
   *   .middleware(async ({ req }) => {
   *     const user = await authenticateRequest(req);
   *     if (!user) throw new Error('Unauthorized');
   *
   *     return {
   *       userId: user.id,
   *       organizationId: user.organizationId,
   *     };
   *   });
   * ```
   *
   * @example Rate Limiting Middleware
   * ```typescript
   * const rateLimitedUpload = s3.file()
   *   .middleware(async ({ req, file }) => {
   *     const clientId = getClientId(req);
   *     await checkRateLimit(clientId, file.size);
   *
   *     return { uploadedBy: clientId };
   *   });
   * ```
   */
  middleware<TMetadata$1>(middleware: (ctx: {
    req: any;
    file: {
      name: string;
      size: number;
      type: string;
    };
    metadata: any;
  }) => Promise<TMetadata$1> | TMetadata$1): S3Route<this, TMetadata$1>;
  /**
   * Adds a hook that executes when file upload starts.
   * Useful for logging, notifications, or initializing upload tracking.
   *
   * @param hook - Function to execute on upload start
   * @returns S3Route instance with upload start hook
   *
   * @example Upload Logging
   * ```typescript
   * const trackedUpload = s3.file()
   *   .onUploadStart(async ({ file, metadata }) => {
   *     console.log(`Upload started: ${file.name} (${file.size} bytes)`);
   *     await logUploadStart({
   *       fileName: file.name,
   *       fileSize: file.size,
   *       userId: metadata.userId,
   *       timestamp: new Date(),
   *     });
   *   });
   * ```
   *
   * @example Progress Initialization
   * ```typescript
   * const progressTrackedUpload = s3.file()
   *   .onUploadStart(async ({ file, metadata }) => {
   *     await initializeUploadProgress(metadata.uploadId, {
   *       fileName: file.name,
   *       totalSize: file.size,
   *       status: 'started',
   *     });
   *   });
   * ```
   */
  onUploadStart(hook: (ctx: {
    file: {
      name: string;
      size: number;
      type: string;
    };
    metadata: any;
  }) => Promise<void> | void): S3Route<this, any>;

  /**
   * Adds a hook that executes when file upload completes successfully.
   * Useful for post-processing, notifications, or updating databases.
   *
   * @param hook - Function to execute on upload completion
   * @returns S3Route instance with upload complete hook
   *
   * @example Database Update
   * ```typescript
   * const dbTrackedUpload = s3.file()
   *   .onUploadComplete(async ({ file, url, key, metadata }) => {
   *     await db.files.create({
   *       name: file.name,
   *       size: file.size,
   *       type: file.type,
   *       url: url,
   *       key: key,
   *       uploadedBy: metadata.userId,
   *       uploadedAt: new Date(),
   *     });
   *   });
   * ```
   *
   * @example Notification System
   * ```typescript
   * const notificationUpload = s3.file()
   *   .onUploadComplete(async ({ file, url, metadata }) => {
   *     await sendNotification({
   *       userId: metadata.userId,
   *       message: `File "${file.name}" uploaded successfully`,
   *       fileUrl: url,
   *     });
   *   });
   * ```
   */
  onUploadComplete(hook: (ctx: {
    file: {
      name: string;
      size: number;
      type: string;
    };
    metadata: any;
    url?: string;
    key?: string;
  }) => Promise<void> | void): S3Route<this, any>;

  /**
   * Adds a hook that executes when file upload fails.
   * Useful for error logging, cleanup, or user notifications.
   *
   * @param hook - Function to execute on upload error
   * @returns S3Route instance with upload error hook
   *
   * @example Error Logging
   * ```typescript
   * const errorLoggedUpload = s3.file()
   *   .onUploadError(async ({ file, error, metadata }) => {
   *     console.error(`Upload failed: ${file.name}`, error);
   *     await logUploadError({
   *       fileName: file.name,
   *       error: error.message,
   *       userId: metadata.userId,
   *       timestamp: new Date(),
   *     });
   *   });
   * ```
   *
   * @example User Notification
   * ```typescript
   * const userNotifiedUpload = s3.file()
   *   .onUploadError(async ({ file, error, metadata }) => {
   *     await sendErrorNotification({
   *       userId: metadata.userId,
   *       message: `Failed to upload "${file.name}": ${error.message}`,
   *     });
   *   });
   * ```
   */
  onUploadError(hook: (ctx: {
    file: {
      name: string;
      size: number;
      type: string;
    };
    metadata: any;
    error: Error;
  }) => Promise<void> | void): S3Route<this, any>;

  private _parseSize;
  private _formatSize;
}

declare class S3ImageSchema extends S3FileSchema {
  constructor(constraints?: S3FileConstraints);
  formats(formats: string[]): S3ImageSchema;

  /**
   * @deprecated Use `maxFileSize()` instead. This method will be removed in a future version.
   */
  max(size: string | number): S3ImageSchema;
  /**
   * Sets the maximum file size constraint.
   *
   * @param size - Maximum size as string (e.g., '10MB', '500KB') or number (bytes)
   * @returns New schema instance with max size constraint
   *
   * @example
   * ```typescript
   * const schema = s3.image().maxFileSize('10MB');
   * const schema2 = s3.image().maxFileSize(10485760); // 10MB in bytes
   * ```
   */
  maxFileSize(size: string | number): S3ImageSchema;

  min(size: string | number): S3ImageSchema;
  types(allowedTypes: string[]): S3ImageSchema;
  extensions(allowedExtensions: string[]): S3ImageSchema;

  /**
   * Creates an array schema that validates multiple images with a maximum count.
   * This is a convenience method for creating image arrays with a maximum file limit.
   *
   * @param maxCount - Maximum number of images allowed
   * @returns New array schema instance with maximum constraint
   *
   * @example
   * ```typescript
   * const gallerySchema = s3.image()
   *   .maxFileSize('2MB')
   *   .formats(['jpeg', 'png'])
   *   .maxFiles(6); // Maximum 6 images, each max 2MB
   * ```
   */
  maxFiles(maxCount: number): S3ArraySchema<this>;

  protected _clone(): this;
}

declare class S3ArraySchema<T extends S3Schema> extends S3Schema<File[], File[]> {
  private elementSchema;
  private arrayConstraints;
  _type: "array";
  constructor(elementSchema: T, arrayConstraints?: S3ArrayConstraints);
  _parse(input: unknown): Promise<S3ValidationResult>;
  min(count: number): S3ArraySchema<T>;
  max(count: number): S3ArraySchema<T>;
  length(count: number): S3ArraySchema<T>;
  protected _clone(): this;
}

declare class S3ObjectSchema<T extends Record<string, S3Schema>> extends S3Schema<{
  [K in keyof T]: T[K] extends S3Schema<any, infer U> ? U : never;
}, {
  [K in keyof T]: T[K] extends S3Schema<any, infer U> ? U : never;
}> {
  private shape;
  _type: "object";
  constructor(shape: T);
  _parse(input: unknown): Promise<S3ValidationResult>;
  protected _clone(): this;
}

type InferS3Input<T extends S3Schema> = T extends S3Schema<infer I, any> ? I : never;
type InferS3Output<T extends S3Schema> = T extends S3Schema<any, infer O> ? O : never;

//#endregion
//#region src/core/storage/client.d.ts

/**
 * Creates and caches an AWS client instance using aws4fetch.
 * This function creates a lightweight S3-compatible client that works with multiple providers.
 * The client is cached for performance and reused across requests.
 *
 * @param uploadConfig - Optional upload configuration. If not provided, uses global config.
 * @returns Configured AwsClient instance
 * @throws {Error} If configuration is missing or invalid
 *
 * @example Basic Usage
 * ```typescript
 * const client = createS3Client(config);
 *
 * // Use with aws4fetch methods
 * const response = await client.fetch('https://bucket.s3.amazonaws.com/file.jpg');
 * ```
 *
 * @example Provider-Specific Clients
 * ```typescript
 * // AWS S3 client
 * const awsClient = createS3Client(awsConfig);
 *
 * // Cloudflare R2 client
 * const r2Client = createS3Client(r2Config);
 *
 * // Both use the same interface
 * ```
 */
declare function createS3Client(uploadConfig?: UploadConfig): AwsClient;

/**
 * Resets the AWS client instance (useful for testing).
 */
declare function resetS3Client(): void;
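/*
 * Because createS3Client() caches its AwsClient, tests that switch providers
 * should reset between cases. A sketch — the test framework and the config
 * fixture are illustrative assumptions, not part of pushduck:
 *
 * ```typescript
 * import { afterEach, test } from 'vitest';
 *
 * afterEach(() => {
 *   resetS3Client(); // drop the cached client so the next test builds a fresh one
 * });
 *
 * test('uses the MinIO config, not a client cached from a previous test', () => {
 *   const client = createS3Client(minioUploadConfig); // hypothetical fixture
 *   // ...assertions against the MinIO endpoint...
 * });
 * ```
 */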
/**
 * Options for generating presigned URLs for file uploads.
 * These URLs allow clients to upload files directly to S3 without exposing credentials.
 *
 * @interface PresignedUrlOptions
 *
 * @example
 * ```typescript
 * const options: PresignedUrlOptions = {
 *   key: 'uploads/user-123/avatar.jpg',
 *   contentType: 'image/jpeg',
 *   contentLength: 1024000, // ~1MB
 *   expiresIn: 3600, // 1 hour
 *   metadata: {
 *     userId: '123',
 *     uploadedBy: 'web-app',
 *   },
 * };
 * ```
 */
interface PresignedUrlOptions {
  /** S3 object key (file path) where the file will be stored */
  key: string;
  /** MIME type of the file (optional to avoid signing issues) */
  contentType?: string;
  /** Expected file size in bytes (for validation) */
  contentLength?: number;
  /** URL expiration time in seconds (default: 3600 = 1 hour) */
  expiresIn?: number;
  /** Custom metadata to attach to the uploaded object */
  metadata?: Record<string, string>;
}

/**
 * Result object returned from presigned URL generation.
 * Contains the URL and metadata needed for uploading files.
 *
 * @interface PresignedUrlResult
 *
 * @example
 * ```typescript
 * const result: PresignedUrlResult = {
 *   url: 'https://bucket.s3.amazonaws.com/uploads/file.jpg?AWSAccessKeyId=...',
 *   key: 'uploads/file.jpg',
 *   fields: {
 *     'Content-Type': 'image/jpeg',
 *     'x-amz-meta-user-id': '123',
 *   },
 * };
 *
 * // Use the URL for direct upload
 * await fetch(result.url, {
 *   method: 'PUT',
 *   headers: result.fields,
 *   body: file,
 * });
 * ```
 */
interface PresignedUrlResult {
  /** The presigned URL for uploading the file */
  url: string;
  /** The S3 object key where the file will be stored */
  key: string;
  /** Additional form fields required for the upload (for POST uploads) */
  fields?: Record<string, string>;
}

interface FileKeyOptions {
  originalName: string;
  userId?: string;
  prefix?: string;
  preserveExtension?: boolean;
  addTimestamp?: boolean;
  addRandomId?: boolean;
}

interface UploadProgress {
  loaded: number;
  total: number;
  percentage: number;
  key: string;
  progress?: number;
  uploadSpeed?: number;
  eta?: number;
}

type ProgressCallback = (progress: UploadProgress) => void;

interface ListFilesOptions {
  prefix?: string;
  maxFiles?: number;
  includeMetadata?: boolean;
  sortBy?: "key" | "size" | "modified";
  sortOrder?: "asc" | "desc";
}

interface PaginatedListOptions extends ListFilesOptions {
  pageSize?: number;
  continuationToken?: string;
}

interface FileInfo {
  key: string;
  url: string;
  size: number;
  contentType: string;
  lastModified: Date;
  etag: string;
  metadata?: Record<string, string>;
}

interface ListFilesResult {
  files: FileInfo[];
  continuationToken?: string;
  isTruncated: boolean;
  totalCount?: number;
}

interface FileInfoResult {
  key: string;
  info: FileInfo | null;
  error?: string;
}

interface FileValidationResult {
  valid: boolean;
  errors: string[];
  warnings: string[];
  info: FileInfo;
}

interface ValidationRules {
  maxSize?: number;
  minSize?: number;
  allowedTypes?: string[];
  requiredExtensions?: string[];
  customValidators?: ((info: FileInfo) => boolean | string)[];
}

interface DeleteFilesResult {
  deleted: string[];
  errors: DeleteError[];
}

interface DeleteError {
  key: string;
  code: string;
  message: string;
}

interface DeleteByPrefixResult {
  filesFound: number;
  deleted: string[];
  errors: DeleteError[];
  dryRun: boolean;
}

//#endregion
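/*
 * The progress types above carry both byte counts and derived fields. A sketch
 * of a ProgressCallback consumer (formatting choices and the bytes-per-second
 * unit for uploadSpeed are illustrative assumptions):
 *
 * ```typescript
 * const onProgress: ProgressCallback = ({ key, loaded, total, percentage, uploadSpeed, eta }) => {
 *   const speed = uploadSpeed !== undefined
 *     ? `${(uploadSpeed / 1024 / 1024).toFixed(2)} MB/s` // assuming bytes per second
 *     : 'n/a';
 *   const remaining = eta !== undefined ? `${Math.ceil(eta)}s left` : '';
 *   console.log(`${key}: ${percentage.toFixed(1)}% (${loaded}/${total} bytes, ${speed}) ${remaining}`);
 * };
 * ```
 */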
| "telnyx-storage" | "tigris-data" | "cloudian-hyperstore" | "gcs" | "s3-compatible"; bucket: string; region: string | undefined; }; list: { files: (options?: ListFilesOptions) => Promise<FileInfo[]>; paginated: (options?: PaginatedListOptions) => Promise<ListFilesResult>; byExtension: (extension: string, prefix?: string) => Promise<FileInfo[]>; bySize: (minSize?: number, maxSize?: number, prefix?: string) => Promise<FileInfo[]>; byDate: (fromDate?: Date, toDate?: Date, prefix?: string) => Promise<FileInfo[]>; directories: (prefix?: string) => Promise<string[]>; paginatedGenerator: (options?: PaginatedListOptions) => AsyncGenerator<FileInfo[], any, any>; }; metadata: { getInfo: (key: string) => Promise<FileInfo>; getBatch: (keys: string[]) => Promise<FileInfoResult[]>; getSize: (key: string) => Promise<number>; getContentType: (key: string) => Promise<string>;