@supabase/storage-js — type declarations (index.d.ts)
import { IcebergError, IcebergRestCatalog } from "iceberg-js";

//#region src/lib/errors.d.ts
declare class StorageError extends Error {
  protected __isStorageError: boolean;
  constructor(message: string);
}
declare function isStorageError(error: unknown): error is StorageError;
declare class StorageApiError extends StorageError {
  status: number;
  statusCode: string;
  constructor(message: string, status: number, statusCode: string);
  toJSON(): {
    name: string;
    message: string;
    status: number;
    statusCode: string;
  };
}
declare class StorageUnknownError extends StorageError {
  originalError: unknown;
  constructor(message: string, originalError: unknown);
}
//#endregion

//#region src/lib/types.d.ts
/**
 * Type of storage bucket
 * - STANDARD: Regular file storage buckets
 * - ANALYTICS: Iceberg table-based buckets for analytical workloads
 */
type BucketType = 'STANDARD' | 'ANALYTICS';
interface Bucket {
  id: string;
  type?: BucketType;
  name: string;
  owner: string;
  file_size_limit?: number;
  allowed_mime_types?: string[];
  created_at: string;
  updated_at: string;
  public: boolean;
}
interface ListBucketOptions {
  limit?: number;
  offset?: number;
  sortColumn?: 'id' | 'name' | 'created_at' | 'updated_at';
  sortOrder?: 'asc' | 'desc';
  search?: string;
}
/**
 * Represents an Analytics Bucket using Apache Iceberg table format.
 * Analytics buckets are optimized for analytical queries and data processing.
 */
interface AnalyticBucket {
  /** Unique identifier for the bucket */
  name: string;
  /** Bucket type - always 'ANALYTICS' for analytics buckets */
  type: 'ANALYTICS';
  /** Storage format used (e.g., 'iceberg') */
  format: string;
  /** ISO 8601 timestamp of bucket creation */
  created_at: string;
  /** ISO 8601 timestamp of last update */
  updated_at: string;
}
interface FileObject {
  name: string;
  bucket_id: string;
  owner: string;
  id: string;
  updated_at: string;
  created_at: string;
  /** @deprecated */
  last_accessed_at: string;
  metadata: Record<string, any>;
  buckets: Bucket;
}
interface FileObjectV2 {
  id: string;
  version: string;
  name: string;
  bucket_id: string;
  updated_at: string;
  created_at: string;
  /** @deprecated */
  last_accessed_at: string;
  size?: number;
  cache_control?: string;
  content_type?: string;
  etag?: string;
  last_modified?: string;
  metadata?: Record<string, any>;
}
interface SortBy {
  column?: string;
  order?: string;
}
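/*
 * Usage sketch, not part of the published typings: narrowing a returned
 * error with the guards above. Assumes a `supabase` client and bucket names
 * as in the @example blocks further down this file.
 *
 * ```js
 * const { data, error } = await supabase.storage.from('avatars').download('missing.png')
 * if (error && isStorageError(error)) {
 *   if (error instanceof StorageApiError) {
 *     // API errors carry the HTTP status plus a service status code
 *     console.error(`API error ${error.status} (${error.statusCode}): ${error.message}`)
 *   } else if (error instanceof StorageUnknownError) {
 *     console.error('Unexpected failure:', error.originalError)
 *   }
 * }
 * ```
 */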
interface FileOptions {
  /**
   * The number of seconds the asset is cached in the browser and in the Supabase CDN. This is set in the `Cache-Control: max-age=<seconds>` header. Defaults to 3600 seconds.
   */
  cacheControl?: string;
  /**
   * The `Content-Type` header value. Should be specified if using a `fileBody` that is neither `Blob` nor `File` nor `FormData`; otherwise it will default to `text/plain;charset=UTF-8`.
   */
  contentType?: string;
  /**
   * When upsert is set to true, the file is overwritten if it exists. When set to false, an error is thrown if the object already exists. Defaults to false.
   */
  upsert?: boolean;
  /**
   * The duplex option is a string parameter that enables or disables duplex streaming, allowing for both reading and writing data in the same stream. It can be passed as an option to the fetch() method.
   */
  duplex?: string;
  /**
   * The metadata option is an object that allows you to store additional information about the file. This information can be used to filter and search for files. The metadata object can contain any key-value pairs you want to store.
   */
  metadata?: Record<string, any>;
  /**
   * Optionally add extra headers.
   */
  headers?: Record<string, string>;
}
interface DestinationOptions {
  destinationBucket?: string;
}
interface SearchOptions {
  /**
   * The number of files you want to be returned.
   * @default 100
   */
  limit?: number;
  /**
   * The starting position.
   */
  offset?: number;
  /**
   * The column to sort by. Can be any column inside a FileObject.
   */
  sortBy?: SortBy;
  /**
   * The search string to filter files by.
   */
  search?: string;
}
interface SortByV2 {
  column: 'name' | 'updated_at' | 'created_at';
  order?: 'asc' | 'desc';
}
interface SearchV2Options {
  /**
   * The number of files you want to be returned.
   * @default 1000
   */
  limit?: number;
  /**
   * The prefix search string to filter files by.
   */
  prefix?: string;
  /**
   * The cursor used for pagination. Pass the value received from nextCursor of the previous request.
   */
  cursor?: string;
  /**
   * Whether to emulate a hierarchical listing of objects using delimiters.
   *
   * - When `false` (default), all objects are listed as flat key/value pairs.
   * - When `true`, the response groups objects by delimiter, making it appear
   *   like a file/folder hierarchy.
   *
   * @default false
   */
  with_delimiter?: boolean;
  /**
   * The column and order to sort by.
   * @default 'name asc'
   */
  sortBy?: SortByV2;
}
interface SearchV2Object {
  id: string;
  key: string;
  name: string;
  updated_at: string;
  created_at: string;
  metadata: Record<string, any>;
  /** @deprecated */
  last_accessed_at: string;
}
type SearchV2Folder = Omit<SearchV2Object, 'id' | 'metadata' | 'last_accessed_at'>;
interface SearchV2Result {
  hasNext: boolean;
  folders: SearchV2Folder[];
  objects: SearchV2Object[];
  nextCursor?: string;
}
interface FetchParameters {
  /**
   * Pass in an AbortController's signal to cancel the request.
   */
  signal?: AbortSignal;
}
interface Metadata {
  name: string;
}
interface TransformOptions {
  /**
   * The width of the image in pixels.
   */
  width?: number;
  /**
   * The height of the image in pixels.
   */
  height?: number;
  /**
   * The resize mode can be cover, contain or fill. Defaults to cover.
   * Cover resizes the image to maintain its aspect ratio while filling the entire width and height.
   * Contain resizes the image to maintain its aspect ratio while fitting the entire image within the width and height.
   * Fill resizes the image to fill the entire width and height. If the object's aspect ratio does not match the width and height, the image will be stretched to fit.
   */
  resize?: 'cover' | 'contain' | 'fill';
  /**
   * Set the quality of the returned image.
   * A number from 20 to 100, with 100 being the highest quality.
   * Defaults to 80.
   */
  quality?: number;
  /**
   * Specify the format of the image requested.
   *
   * When using 'origin' we force the format to be the same as the original image.
   * When this option is not passed in, images are optimized to modern image formats like WebP.
   */
  format?: 'origin';
}
type CamelCase<S extends string> = S extends `${infer P1}_${infer P2}${infer P3}` ? `${Lowercase<P1>}${Uppercase<P2>}${CamelCase<P3>}` : S;
type Camelize<T> = { [K in keyof T as CamelCase<Extract<K, string>>]: T[K] };
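/*
 * Type-level sketch, not part of the published typings: Camelize rewrites
 * snake_case keys to camelCase, which is why info() below resolves with
 * e.g. `bucketId` rather than `bucket_id`.
 *
 * ```ts
 * type Example = Camelize<{ bucket_id: string; created_at: string }>
 * // => { bucketId: string; createdAt: string }
 * ```
 */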
type DownloadResult<T> = {
  data: T;
  error: null;
} | {
  data: null;
  error: StorageError;
};
//#endregion

//#region src/lib/fetch.d.ts
type Fetch$1 = typeof fetch;
//#endregion

//#region src/packages/StreamDownloadBuilder.d.ts
declare class StreamDownloadBuilder implements PromiseLike<DownloadResult<ReadableStream>> {
  private downloadFn;
  private shouldThrowOnError;
  constructor(downloadFn: () => Promise<Response>, shouldThrowOnError: boolean);
  then<TResult1 = DownloadResult<ReadableStream>, TResult2 = never>(
    onfulfilled?: ((value: DownloadResult<ReadableStream>) => TResult1 | PromiseLike<TResult1>) | null,
    onrejected?: ((reason: any) => TResult2 | PromiseLike<TResult2>) | null
  ): Promise<TResult1 | TResult2>;
  private execute;
}
//#endregion

//#region src/packages/BlobDownloadBuilder.d.ts
declare class BlobDownloadBuilder implements Promise<DownloadResult<Blob>> {
  private downloadFn;
  private shouldThrowOnError;
  readonly [Symbol.toStringTag]: string;
  private promise;
  constructor(downloadFn: () => Promise<Response>, shouldThrowOnError: boolean);
  asStream(): StreamDownloadBuilder;
  then<TResult1 = DownloadResult<Blob>, TResult2 = never>(
    onfulfilled?: ((value: DownloadResult<Blob>) => TResult1 | PromiseLike<TResult1>) | null,
    onrejected?: ((reason: any) => TResult2 | PromiseLike<TResult2>) | null
  ): Promise<TResult1 | TResult2>;
  catch<TResult = never>(onrejected?: ((reason: any) => TResult | PromiseLike<TResult>) | null): Promise<DownloadResult<Blob> | TResult>;
  finally(onfinally?: (() => void) | null): Promise<DownloadResult<Blob>>;
  private getPromise;
  private execute;
}
//#endregion
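/*
 * Usage sketch, not part of the published typings: download() resolves to a
 * Blob by default; calling .asStream() on the builder before awaiting yields
 * a ReadableStream instead, avoiding buffering large files in memory.
 * `videos` is a hypothetical bucket name.
 *
 * ```js
 * const { data, error } = await supabase
 *   .storage
 *   .from('videos')
 *   .download('large/recording.mp4')
 *   .asStream()
 * if (!error) {
 *   const reader = data.getReader()
 *   // consume chunks with reader.read() ...
 * }
 * ```
 */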
//#region src/packages/StorageFileApi.d.ts
type FileBody = ArrayBuffer | ArrayBufferView | Blob | Buffer | File | FormData | NodeJS.ReadableStream | ReadableStream<Uint8Array> | URLSearchParams | string;
declare class StorageFileApi {
  protected url: string;
  protected headers: { [key: string]: string };
  protected bucketId?: string;
  protected fetch: Fetch$1;
  protected shouldThrowOnError: boolean;
  constructor(url: string, headers?: { [key: string]: string }, bucketId?: string, fetch?: Fetch$1);
  /**
   * Enable throwing errors instead of returning them.
   *
   * @category File Buckets
   */
  throwOnError(): this;
  /**
   * Uploads a file to an existing bucket or replaces an existing file at the specified path with a new one.
   *
   * @param method HTTP method.
   * @param path The relative file path. Should be of the format `folder/subfolder/filename.png`. The bucket must already exist before attempting to upload.
   * @param fileBody The body of the file to be stored in the bucket.
   */
  private uploadOrUpdate;
  /**
   * Uploads a file to an existing bucket.
   *
   * @category File Buckets
   * @param path The file path, including the file name. Should be of the format `folder/subfolder/filename.png`. The bucket must already exist before attempting to upload.
   * @param fileBody The body of the file to be stored in the bucket.
   * @param fileOptions Optional file upload options including cacheControl, contentType, upsert, and metadata.
   * @returns Promise with response containing file path, id, and fullPath or error
   *
   * @example Upload file
   * ```js
   * const avatarFile = event.target.files[0]
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .upload('public/avatar1.png', avatarFile, {
   *     cacheControl: '3600',
   *     upsert: false
   *   })
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "path": "public/avatar1.png",
   *     "fullPath": "avatars/public/avatar1.png"
   *   },
   *   "error": null
   * }
   * ```
   *
   * @example Upload file using `ArrayBuffer` from base64 file data
   * ```js
   * import { decode } from 'base64-arraybuffer'
   *
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .upload('public/avatar1.png', decode('base64FileData'), {
   *     contentType: 'image/png'
   *   })
   * ```
   */
  upload(path: string, fileBody: FileBody, fileOptions?: FileOptions): Promise<{ data: { id: string; path: string; fullPath: string }; error: null } | { data: null; error: StorageError }>;
  /**
   * Upload a file with a token generated from `createSignedUploadUrl`.
   *
   * @category File Buckets
   * @param path The file path, including the file name. Should be of the format `folder/subfolder/filename.png`. The bucket must already exist before attempting to upload.
   * @param token The token generated from `createSignedUploadUrl`
   * @param fileBody The body of the file to be stored in the bucket.
   * @param fileOptions HTTP headers (cacheControl, contentType, etc.).
   * **Note:** The `upsert` option has no effect here. To enable upsert behavior,
   * pass `{ upsert: true }` when calling `createSignedUploadUrl()` instead.
   * @returns Promise with response containing file path and fullPath or error
   *
   * @example Upload to a signed URL
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .uploadToSignedUrl('folder/cat.jpg', 'token-from-createSignedUploadUrl', file)
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "path": "folder/cat.jpg",
   *     "fullPath": "avatars/folder/cat.jpg"
   *   },
   *   "error": null
   * }
   * ```
   */
  uploadToSignedUrl(path: string, token: string, fileBody: FileBody, fileOptions?: FileOptions): Promise<{ data: { path: string; fullPath: any }; error: null } | { data: null; error: StorageError }>;
  /**
   * Creates a signed upload URL.
   * Signed upload URLs can be used to upload files to the bucket without further authentication.
   * They are valid for 2 hours.
   *
   * @category File Buckets
   * @param path The file path, including the file name. For example `folder/image.png`.
   * @param options.upsert If set to true, allows the file to be overwritten if it already exists.
   * @returns Promise with response containing signed upload URL, token, and path or error
   *
   * @example Create Signed Upload URL
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .createSignedUploadUrl('folder/cat.jpg')
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "signedUrl": "https://example.supabase.co/storage/v1/object/upload/sign/avatars/folder/cat.jpg?token=<TOKEN>",
   *     "path": "folder/cat.jpg",
   *     "token": "<TOKEN>"
   *   },
   *   "error": null
   * }
   * ```
   */
  createSignedUploadUrl(path: string, options?: { upsert: boolean }): Promise<{ data: { signedUrl: string; token: string; path: string }; error: null } | { data: null; error: StorageError }>;
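  /*
   * Usage sketch, not part of the published typings: the two-step signed
   * upload flow. A privileged context mints the token with
   * createSignedUploadUrl; a client then redeems it with uploadToSignedUrl
   * without further authentication. Note that upsert is decided when the
   * token is created, not when it is redeemed.
   *
   * ```js
   * // server side
   * const { data: signed } = await supabase
   *   .storage
   *   .from('avatars')
   *   .createSignedUploadUrl('folder/cat.jpg', { upsert: true })
   *
   * // client side
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .uploadToSignedUrl(signed.path, signed.token, file, { contentType: 'image/jpeg' })
   * ```
   */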
  /**
   * Replaces an existing file at the specified path with a new one.
   *
   * @category File Buckets
   * @param path The relative file path. Should be of the format `folder/subfolder/filename.png`. The bucket must already exist before attempting to update.
   * @param fileBody The body of the file to be stored in the bucket.
   * @param fileOptions Optional file upload options including cacheControl, contentType, upsert, and metadata.
   * @returns Promise with response containing file path, id, and fullPath or error
   *
   * @example Update file
   * ```js
   * const avatarFile = event.target.files[0]
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .update('public/avatar1.png', avatarFile, {
   *     cacheControl: '3600',
   *     upsert: true
   *   })
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "path": "public/avatar1.png",
   *     "fullPath": "avatars/public/avatar1.png"
   *   },
   *   "error": null
   * }
   * ```
   *
   * @example Update file using `ArrayBuffer` from base64 file data
   * ```js
   * import { decode } from 'base64-arraybuffer'
   *
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .update('public/avatar1.png', decode('base64FileData'), {
   *     contentType: 'image/png'
   *   })
   * ```
   */
  update(path: string, fileBody: ArrayBuffer | ArrayBufferView | Blob | Buffer | File | FormData | NodeJS.ReadableStream | ReadableStream<Uint8Array> | URLSearchParams | string, fileOptions?: FileOptions): Promise<{ data: { id: string; path: string; fullPath: string }; error: null } | { data: null; error: StorageError }>;
  /**
   * Moves an existing file to a new path in the same bucket.
   *
   * @category File Buckets
   * @param fromPath The original file path, including the current file name. For example `folder/image.png`.
   * @param toPath The new file path, including the new file name. For example `folder/image-new.png`.
   * @param options The destination options.
   * @returns Promise with response containing success message or error
   *
   * @example Move file
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .move('public/avatar1.png', 'private/avatar2.png')
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "message": "Successfully moved"
   *   },
   *   "error": null
   * }
   * ```
   */
  move(fromPath: string, toPath: string, options?: DestinationOptions): Promise<{ data: { message: string }; error: null } | { data: null; error: StorageError }>;
  /**
   * Copies an existing file to a new path in the same bucket.
   *
   * @category File Buckets
   * @param fromPath The original file path, including the current file name. For example `folder/image.png`.
   * @param toPath The new file path, including the new file name. For example `folder/image-copy.png`.
   * @param options The destination options.
   * @returns Promise with response containing copied file path or error
   *
   * @example Copy file
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .copy('public/avatar1.png', 'private/avatar2.png')
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "path": "avatars/private/avatar2.png"
   *   },
   *   "error": null
   * }
   * ```
   */
  copy(fromPath: string, toPath: string, options?: DestinationOptions): Promise<{ data: { path: string }; error: null } | { data: null; error: StorageError }>;
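  /*
   * Usage sketch, not part of the published typings: move() and copy() stay
   * within the bucket by default; DestinationOptions retargets either to
   * another bucket ('backups' here is a hypothetical bucket name).
   *
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .copy('public/avatar1.png', 'public/avatar1.png', { destinationBucket: 'backups' })
   * ```
   */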
  /**
   * Creates a signed URL. Use a signed URL to share a file for a fixed amount of time.
   *
   * @category File Buckets
   * @param path The file path, including the current file name. For example `folder/image.png`.
   * @param expiresIn The number of seconds until the signed URL expires. For example, `60` for a URL which is valid for one minute.
   * @param options.download Triggers the file as a download if set to true. Set this parameter as the name of the file if you want to trigger the download with a different filename.
   * @param options.transform Transform the asset before serving it to the client.
   * @returns Promise with response containing signed URL or error
   *
   * @example Create Signed URL
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .createSignedUrl('folder/avatar1.png', 60)
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "signedUrl": "https://example.supabase.co/storage/v1/object/sign/avatars/folder/avatar1.png?token=<TOKEN>"
   *   },
   *   "error": null
   * }
   * ```
   *
   * @example Create a signed URL for an asset with transformations
   * ```js
   * const { data } = await supabase
   *   .storage
   *   .from('avatars')
   *   .createSignedUrl('folder/avatar1.png', 60, {
   *     transform: {
   *       width: 100,
   *       height: 100,
   *     }
   *   })
   * ```
   *
   * @example Create a signed URL which triggers the download of the asset
   * ```js
   * const { data } = await supabase
   *   .storage
   *   .from('avatars')
   *   .createSignedUrl('folder/avatar1.png', 60, {
   *     download: true,
   *   })
   * ```
   */
  createSignedUrl(path: string, expiresIn: number, options?: { download?: string | boolean; transform?: TransformOptions }): Promise<{ data: { signedUrl: string }; error: null } | { data: null; error: StorageError }>;
  /**
   * Creates multiple signed URLs. Use a signed URL to share a file for a fixed amount of time.
   *
   * @category File Buckets
   * @param paths The file paths to be downloaded, including the current file names. For example `['folder/image.png', 'folder2/image2.png']`.
   * @param expiresIn The number of seconds until the signed URLs expire. For example, `60` for URLs which are valid for one minute.
   * @param options.download Triggers the file as a download if set to true. Set this parameter as the name of the file if you want to trigger the download with a different filename.
   * @returns Promise with response containing array of objects with signedUrl, path, and error or error
   *
   * @example Create Signed URLs
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .createSignedUrls(['folder/avatar1.png', 'folder/avatar2.png'], 60)
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": [
   *     {
   *       "error": null,
   *       "path": "folder/avatar1.png",
   *       "signedURL": "/object/sign/avatars/folder/avatar1.png?token=<TOKEN>",
   *       "signedUrl": "https://example.supabase.co/storage/v1/object/sign/avatars/folder/avatar1.png?token=<TOKEN>"
   *     },
   *     {
   *       "error": null,
   *       "path": "folder/avatar2.png",
   *       "signedURL": "/object/sign/avatars/folder/avatar2.png?token=<TOKEN>",
   *       "signedUrl": "https://example.supabase.co/storage/v1/object/sign/avatars/folder/avatar2.png?token=<TOKEN>"
   *     }
   *   ],
   *   "error": null
   * }
   * ```
   */
  createSignedUrls(paths: string[], expiresIn: number, options?: { download: string | boolean }): Promise<{ data: { error: string | null; path: string | null; signedUrl: string }[]; error: null } | { data: null; error: StorageError }>;
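  /*
   * Usage sketch, not part of the published typings: createSignedUrls
   * reports failures per path (each item's `error` is a string) in addition
   * to the request-level error, so check both.
   *
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .createSignedUrls(['folder/avatar1.png', 'folder/missing.png'], 3600)
   * if (!error) {
   *   const urls = data.filter((item) => item.error === null).map((item) => item.signedUrl)
   * }
   * ```
   */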
  /**
   * Downloads a file from a private bucket. For public buckets, make a request to the URL returned from `getPublicUrl` instead.
   *
   * @category File Buckets
   * @param path The full path and file name of the file to be downloaded. For example `folder/image.png`.
   * @param options.transform Transform the asset before serving it to the client.
   * @returns BlobDownloadBuilder instance for downloading the file
   *
   * @example Download file
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .download('folder/avatar1.png')
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": <BLOB>,
   *   "error": null
   * }
   * ```
   *
   * @example Download file with transformations
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .download('folder/avatar1.png', {
   *     transform: {
   *       width: 100,
   *       height: 100,
   *       quality: 80
   *     }
   *   })
   * ```
   */
  download<Options extends { transform?: TransformOptions }>(path: string, options?: Options): BlobDownloadBuilder;
  /**
   * Retrieves the details of an existing file.
   *
   * @category File Buckets
   * @param path The file path, including the file name. For example `folder/image.png`.
   * @returns Promise with response containing file metadata or error
   *
   * @example Get file info
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .info('folder/avatar1.png')
   * ```
   */
  info(path: string): Promise<{ data: Camelize<FileObjectV2>; error: null } | { data: null; error: StorageError }>;
  /**
   * Checks the existence of a file.
   *
   * @category File Buckets
   * @param path The file path, including the file name. For example `folder/image.png`.
   * @returns Promise with response containing boolean indicating file existence or error
   *
   * @example Check file existence
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .exists('folder/avatar1.png')
   * ```
   */
  exists(path: string): Promise<{ data: boolean; error: null } | { data: boolean; error: StorageError }>;
  /**
   * A simple convenience function to get the URL for an asset in a public bucket. If you do not want to use this function, you can construct the public URL by concatenating the bucket URL with the path to the asset.
   * This function does not verify if the bucket is public. If a public URL is created for a bucket which is not public, you will not be able to download the asset.
   *
   * @category File Buckets
   * @param path The path and name of the file to generate the public URL for. For example `folder/image.png`.
   * @param options.download Triggers the file as a download if set to true. Set this parameter as the name of the file if you want to trigger the download with a different filename.
   * @param options.transform Transform the asset before serving it to the client.
   * @returns Object with public URL
   *
   * @example Returns the URL for an asset in a public bucket
   * ```js
   * const { data } = supabase
   *   .storage
   *   .from('public-bucket')
   *   .getPublicUrl('folder/avatar1.png')
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "publicUrl": "https://example.supabase.co/storage/v1/object/public/public-bucket/folder/avatar1.png"
   *   }
   * }
   * ```
   *
   * @example Returns the URL for an asset in a public bucket with transformations
   * ```js
   * const { data } = supabase
   *   .storage
   *   .from('public-bucket')
   *   .getPublicUrl('folder/avatar1.png', {
   *     transform: {
   *       width: 100,
   *       height: 100,
   *     }
   *   })
   * ```
   *
   * @example Returns the URL which triggers the download of an asset in a public bucket
   * ```js
   * const { data } = supabase
   *   .storage
   *   .from('public-bucket')
   *   .getPublicUrl('folder/avatar1.png', {
   *     download: true,
   *   })
   * ```
   */
  getPublicUrl(path: string, options?: { download?: string | boolean; transform?: TransformOptions }): { data: { publicUrl: string } };
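  /*
   * Usage sketch, not part of the published typings: getPublicUrl only
   * builds a string and never checks the object or the bucket's visibility,
   * so exists() makes a cheap pre-flight check.
   *
   * ```js
   * const { data: found } = await supabase.storage.from('public-bucket').exists('folder/avatar1.png')
   * if (found) {
   *   const { data } = supabase
   *     .storage
   *     .from('public-bucket')
   *     .getPublicUrl('folder/avatar1.png', { transform: { width: 256, height: 256 } })
   *   console.log(data.publicUrl)
   * }
   * ```
   */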
  /**
   * Deletes files within the same bucket.
   *
   * @category File Buckets
   * @param paths An array of files to delete, including the path and file name. For example [`'folder/image.png'`].
   * @returns Promise with response containing array of deleted file objects or error
   *
   * @example Delete file
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .remove(['folder/avatar1.png'])
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": [],
   *   "error": null
   * }
   * ```
   */
  remove(paths: string[]): Promise<{ data: FileObject[]; error: null } | { data: null; error: StorageError }>;
  /**
   * Get file metadata
   * @param id the file id to retrieve metadata
   */
  /**
   * Update file metadata
   * @param id the file id to update metadata
   * @param meta the new file metadata
   */
  /**
   * Lists all the files and folders within a path of the bucket.
   *
   * @category File Buckets
   * @param path The folder path.
   * @param options Search options including limit (defaults to 100), offset, sortBy, and search
   * @param parameters Optional fetch parameters including signal for cancellation
   * @returns Promise with response containing array of files or error
   *
   * @example List files in a bucket
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .list('folder', {
   *     limit: 100,
   *     offset: 0,
   *     sortBy: { column: 'name', order: 'asc' },
   *   })
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": [
   *     {
   *       "name": "avatar1.png",
   *       "id": "e668cf7f-821b-4a2f-9dce-7dfa5dd1cfd2",
   *       "updated_at": "2024-05-22T23:06:05.580Z",
   *       "created_at": "2024-05-22T23:04:34.443Z",
   *       "last_accessed_at": "2024-05-22T23:04:34.443Z",
   *       "metadata": {
   *         "eTag": "\"c5e8c553235d9af30ef4f6e280790b92\"",
   *         "size": 32175,
   *         "mimetype": "image/png",
   *         "cacheControl": "max-age=3600",
   *         "lastModified": "2024-05-22T23:06:05.574Z",
   *         "contentLength": 32175,
   *         "httpStatusCode": 200
   *       }
   *     }
   *   ],
   *   "error": null
   * }
   * ```
   *
   * @example Search files in a bucket
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .from('avatars')
   *   .list('folder', {
   *     limit: 100,
   *     offset: 0,
   *     sortBy: { column: 'name', order: 'asc' },
   *     search: 'jon'
   *   })
   * ```
   */
  list(path?: string, options?: SearchOptions, parameters?: FetchParameters): Promise<{ data: FileObject[]; error: null } | { data: null; error: StorageError }>;
  /**
   * @experimental This method signature might change in the future.
   *
   * @category File Buckets
   * @param options search options
   * @param parameters
   */
  listV2(options?: SearchV2Options, parameters?: FetchParameters): Promise<{ data: SearchV2Result; error: null } | { data: null; error: StorageError }>;
  protected encodeMetadata(metadata: Record<string, any>): string;
  toBase64(data: string): string;
  private _getFinalPath;
  private _removeEmptyFolders;
  private transformOptsToQueryString;
}
//#endregion
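/*
 * Usage sketch, not part of the published typings: cursor pagination with
 * the experimental listV2. Keep passing nextCursor back until hasNext is
 * false.
 *
 * ```js
 * let cursor
 * const objects = []
 * do {
 *   const { data, error } = await supabase
 *     .storage
 *     .from('avatars')
 *     .listV2({ prefix: 'folder/', limit: 1000, cursor })
 *   if (error) throw error
 *   objects.push(...data.objects)
 *   cursor = data.nextCursor
 *   if (!data.hasNext) break
 * } while (cursor)
 * ```
 */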
//#region src/packages/StorageBucketApi.d.ts
declare class StorageBucketApi {
  protected url: string;
  protected headers: { [key: string]: string };
  protected fetch: Fetch$1;
  protected shouldThrowOnError: boolean;
  constructor(url: string, headers?: { [key: string]: string }, fetch?: Fetch$1, opts?: StorageClientOptions);
  /**
   * Enable throwing errors instead of returning them.
   *
   * @category File Buckets
   */
  throwOnError(): this;
  /**
   * Retrieves the details of all Storage buckets within an existing project.
   *
   * @category File Buckets
   * @param options Query parameters for listing buckets
   * @param options.limit Maximum number of buckets to return
   * @param options.offset Number of buckets to skip
   * @param options.sortColumn Column to sort by ('id', 'name', 'created_at', 'updated_at')
   * @param options.sortOrder Sort order ('asc' or 'desc')
   * @param options.search Search term to filter bucket names
   * @returns Promise with response containing array of buckets or error
   *
   * @example List buckets
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .listBuckets()
   * ```
   *
   * @example List buckets with options
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .listBuckets({
   *     limit: 10,
   *     offset: 0,
   *     sortColumn: 'created_at',
   *     sortOrder: 'desc',
   *     search: 'prod'
   *   })
   * ```
   */
  listBuckets(options?: ListBucketOptions): Promise<{ data: Bucket[]; error: null } | { data: null; error: StorageError }>;
  /**
   * Retrieves the details of an existing Storage bucket.
   *
   * @category File Buckets
   * @param id The unique identifier of the bucket you would like to retrieve.
   * @returns Promise with response containing bucket details or error
   *
   * @example Get bucket
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .getBucket('avatars')
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "id": "avatars",
   *     "name": "avatars",
   *     "owner": "",
   *     "public": false,
   *     "file_size_limit": 1024,
   *     "allowed_mime_types": [
   *       "image/png"
   *     ],
   *     "created_at": "2024-05-22T22:26:05.100Z",
   *     "updated_at": "2024-05-22T22:26:05.100Z"
   *   },
   *   "error": null
   * }
   * ```
   */
  getBucket(id: string): Promise<{ data: Bucket; error: null } | { data: null; error: StorageError }>;
  /**
   * Creates a new Storage bucket.
   *
   * @category File Buckets
   * @param id A unique identifier for the bucket you are creating.
   * @param options.public The visibility of the bucket. Public buckets don't require an authorization token to download objects, but still require a valid token for all other operations. By default, buckets are private.
   * @param options.fileSizeLimit Specifies the max file size in bytes that can be uploaded to this bucket.
   * The global file size limit takes precedence over this value.
   * The default value is null, which doesn't set a per bucket file size limit.
   * @param options.allowedMimeTypes Specifies the allowed mime types that this bucket can accept during upload.
   * The default value is null, which allows files with all mime types to be uploaded.
   * Each mime type specified can be a wildcard, e.g. image/*, or a specific mime type, e.g. image/png.
   * @param options.type (private-beta) specifies the bucket type. See `BucketType` for more details.
   * - default bucket type is `STANDARD`
   * @returns Promise with response containing newly created bucket name or error
   *
   * @example Create bucket
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .createBucket('avatars', {
   *     public: false,
   *     allowedMimeTypes: ['image/png'],
   *     fileSizeLimit: 1024
   *   })
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "name": "avatars"
   *   },
   *   "error": null
   * }
   * ```
   */
  createBucket(id: string, options?: { public: boolean; fileSizeLimit?: number | string | null; allowedMimeTypes?: string[] | null; type?: BucketType }): Promise<{ data: Pick<Bucket, 'name'>; error: null } | { data: null; error: StorageError }>;
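  /*
   * Usage sketch, not part of the published typings: creating a restricted
   * bucket and reading its settings back.
   *
   * ```js
   * const { error: createError } = await supabase.storage.createBucket('avatars', {
   *   public: false,
   *   allowedMimeTypes: ['image/*'],
   *   fileSizeLimit: 1024 * 1024 // 1 MiB per object
   * })
   * if (!createError) {
   *   const { data: bucket } = await supabase.storage.getBucket('avatars')
   * }
   * ```
   */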
  /**
   * Updates a Storage bucket.
   *
   * @category File Buckets
   * @param id A unique identifier for the bucket you are updating.
   * @param options.public The visibility of the bucket. Public buckets don't require an authorization token to download objects, but still require a valid token for all other operations.
   * @param options.fileSizeLimit Specifies the max file size in bytes that can be uploaded to this bucket.
   * The global file size limit takes precedence over this value.
   * The default value is null, which doesn't set a per bucket file size limit.
   * @param options.allowedMimeTypes Specifies the allowed mime types that this bucket can accept during upload.
   * The default value is null, which allows files with all mime types to be uploaded.
   * Each mime type specified can be a wildcard, e.g. image/*, or a specific mime type, e.g. image/png.
   * @returns Promise with response containing success message or error
   *
   * @example Update bucket
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .updateBucket('avatars', {
   *     public: false,
   *     allowedMimeTypes: ['image/png'],
   *     fileSizeLimit: 1024
   *   })
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "message": "Successfully updated"
   *   },
   *   "error": null
   * }
   * ```
   */
  updateBucket(id: string, options: { public: boolean; fileSizeLimit?: number | string | null; allowedMimeTypes?: string[] | null }): Promise<{ data: { message: string }; error: null } | { data: null; error: StorageError }>;
  /**
   * Removes all objects inside a single bucket.
   *
   * @category File Buckets
   * @param id The unique identifier of the bucket you would like to empty.
   * @returns Promise with success message or error
   *
   * @example Empty bucket
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .emptyBucket('avatars')
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "message": "Successfully emptied"
   *   },
   *   "error": null
   * }
   * ```
   */
  emptyBucket(id: string): Promise<{ data: { message: string }; error: null } | { data: null; error: StorageError }>;
  /**
   * Deletes an existing bucket. A bucket can't be deleted with existing objects inside it.
   * You must first `empty()` the bucket.
   *
   * @category File Buckets
   * @param id The unique identifier of the bucket you would like to delete.
   * @returns Promise with success message or error
   *
   * @example Delete bucket
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .deleteBucket('avatars')
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "message": "Successfully deleted"
   *   },
   *   "error": null
   * }
   * ```
   */
  deleteBucket(id: string): Promise<{ data: { message: string }; error: null } | { data: null; error: StorageError }>;
  private listBucketOptionsToQueryString;
}
//#endregion
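/*
 * Usage sketch, not part of the published typings: deleteBucket refuses to
 * remove a non-empty bucket, so a full teardown is emptyBucket followed by
 * deleteBucket. throwOnError() switches both calls to exception-style
 * handling.
 *
 * ```js
 * const storage = supabase.storage.throwOnError()
 * await storage.emptyBucket('avatars') // throws on failure instead of returning { error }
 * await storage.deleteBucket('avatars')
 * ```
 */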
//#region src/packages/StorageAnalyticsClient.d.ts
type WrapAsyncMethod<T> = T extends ((...args: infer A) => Promise<infer R>) ? (...args: A) => Promise<{ data: R; error: null } | { data: null; error: IcebergError }> : T;
type WrappedIcebergRestCatalog = { [K in keyof IcebergRestCatalog]: WrapAsyncMethod<IcebergRestCatalog[K]> };
/**
 * Client class for managing Analytics Buckets using Iceberg tables
 * Provides methods for creating, listing, and deleting analytics buckets
 */
declare class StorageAnalyticsClient {
  protected url: string;
  protected headers: { [key: string]: string };
  protected fetch: Fetch$1;
  protected shouldThrowOnError: boolean;
  /**
   * @alpha
   *
   * Creates a new StorageAnalyticsClient instance
   *
   * **Public alpha:** This API is part of a public alpha release and may not be available to your account type.
   *
   * @category Analytics Buckets
   * @param url - The base URL for the storage API
   * @param headers - HTTP headers to include in requests
   * @param fetch - Optional custom fetch implementation
   *
   * @example
   * ```typescript
   * const client = new StorageAnalyticsClient(url, headers)
   * ```
   */
  constructor(url: string, headers?: { [key: string]: string }, fetch?: Fetch$1);
  /**
   * @alpha
   *
   * Enable throwing errors instead of returning them in the response
   * When enabled, failed operations will throw instead of returning { data: null, error }
   *
   * **Public alpha:** This API is part of a public alpha release and may not be available to your account type.
   *
   * @category Analytics Buckets
   * @returns This instance for method chaining
   */
  throwOnError(): this;
  /**
   * @alpha
   *
   * Creates a new analytics bucket using Iceberg tables
   * Analytics buckets are optimized for analytical queries and data processing
   *
   * **Public alpha:** This API is part of a public alpha release and may not be available to your account type.
   *
   * @category Analytics Buckets
   * @param name A unique name for the bucket you are creating
   * @returns Promise with response containing newly created analytics bucket or error
   *
   * @example Create analytics bucket
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .analytics
   *   .createBucket('analytics-data')
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "name": "analytics-data",
   *     "type": "ANALYTICS",
   *     "format": "iceberg",
   *     "created_at": "2024-05-22T22:26:05.100Z",
   *     "updated_at": "2024-05-22T22:26:05.100Z"
   *   },
   *   "error": null
   * }
   * ```
   */
  createBucket(name: string): Promise<{ data: AnalyticBucket; error: null } | { data: null; error: StorageError }>;
  /**
   * @alpha
   *
   * Retrieves the details of all Analytics Storage buckets within an existing project
   * Only returns buckets of type 'ANALYTICS'
   *
   * **Public alpha:** This API is part of a public alpha release and may not be available to your account type.
   *
   * @category Analytics Buckets
   * @param options Query parameters for listing buckets
   * @param options.limit Maximum number of buckets to return
   * @param options.offset Number of buckets to skip
   * @param options.sortColumn Column to sort by ('name', 'created_at', 'updated_at')
   * @param options.sortOrder Sort order ('asc' or 'desc')
   * @param options.search Search term to filter bucket names
   * @returns Promise with response containing array of analytics buckets or error
   *
   * @example List analytics buckets
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .analytics
   *   .listBuckets({
   *     limit: 10,
   *     offset: 0,
   *     sortColumn: 'created_at',
   *     sortOrder: 'desc'
   *   })
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": [
   *     {
   *       "name": "analytics-data",
   *       "type": "ANALYTICS",
   *       "format": "iceberg",
   *       "created_at": "2024-05-22T22:26:05.100Z",
   *       "updated_at": "2024-05-22T22:26:05.100Z"
   *     }
   *   ],
   *   "error": null
   * }
   * ```
   */
  listBuckets(options?: { limit?: number; offset?: number; sortColumn?: 'name' | 'created_at' | 'updated_at'; sortOrder?: 'asc' | 'desc'; search?: string }): Promise<{ data: AnalyticBucket[]; error: null } | { data: null; error: StorageError }>;
  /**
   * @alpha
   *
   * Deletes an existing analytics bucket
   * A bucket can't be deleted with existing objects inside it
   * You must first empty the bucket before deletion
   *
   * **Public alpha:** This API is part of a public alpha release and may not be available to your account type.
   *
   * @category Analytics Buckets
   * @param bucketName The unique identifier of the bucket you would like to delete
   * @returns Promise with response containing success message or error
   *
   * @example Delete analytics bucket
   * ```js
   * const { data, error } = await supabase
   *   .storage
   *   .analytics
   *   .deleteBucket('analytics-data')
   * ```
   *
   * Response:
   * ```json
   * {
   *   "data": {
   *     "message": "Successfully deleted"
   *   },
   *   "error": null
   * }
   * ```
   */
  deleteBucket(bucketName: string): Promise<{ data: { message: string }; error: null } | { data: null; error: StorageError }>;
  /**
   * @alpha
   *
   * Get an Iceberg REST Catalog client configured for a specific analytics bucket
   * Use this to perform advanced table and namespace operations within the bucket
   * The returned client provides full access to the Apache Iceberg REST Catalog API
   * with the Supabase `{ data, error }` pattern for consistent error handling on all operations.
   *
   * **Public alpha:** This API is part of a public alpha release and may not be available to your account type.
   *
   * @category Analytics Buckets
   * @param bucketName - The name of the analytics bucket (warehouse) to connect to
   * @returns The wrapped Iceberg catalog client
   * @throws {StorageError} If the bucket name is invalid
   *
   * @example Get catalog and create table
   * ```js
   * // First, create an analytics bucket
   * const { data: bucket, error: bucketError } = await supabase
   *   .storage
   *   .analytics
   *   .createBucket('analytics-data')
   *
   * // Get the Iceberg catalog for that bucket
   * const catalog = supabase.storage.analytics.from('analytics-data')
   *
   * // Create a namespace
   * const { error: nsError } = await catalog.createNamespace({ namespace: ['default'] })
   *
   * // Create a table with schema
   * const { data: tableMetadata, error: tableError } = await catalog.createTable(
   *   { namespace: ['default'] },
   *   {
   *     name: 'events',
   *     schema: {
   *       type: 'struct',
   *       fields: [
   *         { id: 1, name: 'id', type: 'long', required: true },
   *         { id: 2, name: 'timestamp', type: 'timestamp', required: true },
   *         { id: 3, name: 'user_id', type: 'string', required: false }
   *       ],
   *       'schema-id': 0,
   *       'identifier-field-ids': [1]
   *     },
   *     'partition-spec': {
   *       'spec-id': 0,
   *       fields: []
   *     },
   *     'write-order': {
   *       'order-id': 0,
   *       fields: []
   *     },
   *     properties: {
   *       'write.format.default': 'parquet'
   *     }
   *   }
   * )
   * ```
   *
   * @example List tables in namespace
   * ```js
   * const catalog = supabase.storage.analytics.from('analytics-data')
   *
   * // List all tables in the default namespace
   * const { data: tables, error: listError } = await catalog.listTables({ namespace: ['default'] })
   * if (listError) {
   *   if (listError.isNotFound()) {
   *     console.log('Namespace not found')
   *   }
   *   return
   * }
   * console.log(tables) // [{ namespace: ['default'], name: 'events' }]
   * ```
   *
   * @example Working with namespaces
   * ```js
   * const catalog = supabase.storage.analytics.from('analytics-data')
   *
   * // List all namespaces
   * const { data: namespaces } = await catalog.listNamespaces()
   *
   * // Create namespace with properties
   * await catalog.createNamespace(
   *   { namespace: ['production'] },
   *   { properties: { owner: 'data-team', env: 'prod' } }
   * )
   * ```
   *
   * @example Cleanup operations
   * ```js
   * const catalog = supabase.storage.analytics.from('analytics-data')
   *
   * // Drop table with purge option (removes all data)
   * const { error: dropError } = await catalog.dropTable(
   *   { namespace: ['default'], name: 'events' },
   *   { purge: true }
   * )
   *
   * if (dropError?.isNotFound()) {
   *   console.log('Table does not exist')
   * }
   *
   * // Drop namespace (must be empty)
   * await catalog.dropNamespace({ namespace: ['default'] })
   * ```
   *
   * @remarks
   * This method provides a bridge between Supabase's bucket management and the standard
   * Apache Iceberg REST Catalog API. The bucket name maps to the Iceberg warehouse parameter.
   * All authentication and configuration is handled automatically using your Supabase credentials.
   *
   * **Error Handling**: Invalid bucket names throw immediately. All catalog
   * operations return `{ data, error }` where errors are `IcebergError` instances from iceberg-js.
   * Use helper methods like `error.isNotFound()` or check `error.status` for specific error handling.
   * Use `.throwOnError()` on the analytics client if you prefer exceptions for catalog operations.
   *
   * **Cleanup Operations**: When using `dropTable`, the `purge: true` option permanently
   * deletes all table data. Without it, the table is marked as deleted but data remains.
   *
   * **Library Dependency**: The returned catalog wraps `IcebergRestCatalog` from iceberg-js.
   * For complete API documentation and advanced usage, refer to the
   * [iceberg-js documentation](https://supabase.github.io/iceberg-js/).
   */
  from(bucketName: string): WrappedIcebergRestCatalog;
}
//#endregion
//#region src/lib/vectors/errors.d.ts
/**
 * Base error class for all Storage Vectors errors
 */
declare class StorageVectorsError extends Error {
  protected __isStorageVectorsError: boolean;
  constructor(message: string);
}
/**
 * Type guard to check if an error is a StorageVectorsError
 * @param error - The error to check
 * @returns True if the error is a StorageVectorsError
 */
declare function isStorageVectorsError(error: unknown): error is StorageVectorsError;
/**
 * API error returned from S3 Vectors service
 * Includes HTTP status code and service-specific error code
 */
declare class StorageVectorsApiError extends StorageVectorsError {
  status: number;
  statusCode: string;
  constructor(message: string, status: number, statusCode: string);
  toJSON(): {
    name: string;
    message: string;
    status: number;
    statusCode: string;
  };
}
/**
 * Unknown error that doesn't match expected error patterns
 * Wraps the original error for debugging
 */
declare class StorageVectorsUnknownError extends StorageVectorsError {
  originalError: unknown;
  constructor(message: string, originalError: unknown);
}
/**
 * Error codes specific to S3 Vectors API
 * Maps AWS service errors to application-friendly error codes
 */
declare enum StorageVectorsErrorCode {
  /** Internal server fault (HTTP 500) */
  InternalError = "InternalError",
  /** Resource already exists / conflict (HTTP 409) */
  S3VectorConflictException = "S3VectorConflictException",
  /** Resource not found (HTTP 404) */
  S3VectorNotFoundException = "S3VectorNotFoundException",
  /** Delete bucket while not empty (HTTP 400) */
  S3VectorBucketNotEmpty = "S3VectorBucketNotEmpty",
  /** Exceeds bucket quota/limit (HTTP 400) */
  S3VectorMaxBucketsExceeded = "S3VectorMaxBucketsExceeded",
  /** Exceeds index quota/limit (HTTP 400) */
  S3VectorMaxIndexesExceeded = "S3VectorMaxIndexesExceeded",
}
//#endregion
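/*
 * Usage sketch, not part of the published typings: mapping S3 Vectors error
 * codes to application behavior with the guard and enum above. `err` stands
 * in for an error value returned by a vectors operation.
 *
 * ```js
 * if (isStorageVectorsError(err) && err instanceof StorageVectorsApiError) {
 *   switch (err.statusCode) {
 *     case StorageVectorsErrorCode.S3VectorNotFoundException:
 *       console.log('Bucket or index does not exist')
 *       break
 *     case StorageVectorsErrorCode.S3VectorBucketNotEmpty:
 *       console.log('Empty the vector bucket before deleting it')
 *       break
 *     default:
 *       console.log(`Vectors API error ${err.status}: ${err.message}`)
 *   }
 * }
 * ```
 */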
//#region src/lib/vectors/types.d.ts
/**
 * Configuration for encryption at rest
 * @property kmsKeyArn - ARN of the KMS key used for encryption
 * @property sseType - Server-side encryption type (e.g., 'KMS')
 */
interface EncryptionConfiguration {
  kmsKeyArn?: string;
  sseType?: string;
}
/**
 * Vector bucket metadata
 * @property vectorBucketName - Unique name of the vector bucket
 * @property creationTime - Unix timestamp of when the bucket was created
 * @property encryptionConfiguration - Optional encryption settings
 */
interface VectorBucket {
  vectorBucketName: string;
  creationTime?: number;
  encryptionConfiguration?: EncryptionConfiguration;
}
/**
 * Metadata configuration for vector index
 * Defines which metadata keys should not be indexed for filtering
 * @property nonFilterableMetadataKeys - Array of metadata keys that cannot be used in filters
 */
interface MetadataConfiguration {
  nonFilterableMetadataKeys?: string[];
}
/**
 * Supported data types for vectors
 * Currently only float32 is supported
 */
type VectorDataType = 'float32';
/**
 * Distance metrics for vector similarity search
 */
type DistanceMetric = 'cosine' | 'euclidean' | 'dotproduct';
/**
 * Vector index configuration and metadata
 * @property indexName - Unique name of the index within the bucket
 * @property vectorBucketName - Name of the parent vector bucket
 * @property dataType - Data type of vector components (currently only 'float32')
 * @property dimension - Dimensionality of vectors (e.g., 384, 768, 1536)
 * @property distanc