/** @packageDocumentation
* @module SQLiteDb
*/
import { AccessToken, Constructor, GuidString, OpenMode, Optional, PickAsyncMethods, PickMethods } from "@itwin/core-bentley";
import { LocalDirName, LocalFileName } from "@itwin/core-common";
import { BlobContainer } from "./BlobContainerService";
import type { SQLiteDb, VersionedSqliteDb } from "./SQLiteDb";
/**
* Types for accessing SQLite databases stored in cloud containers.
* @beta
*/
export declare namespace CloudSqlite {
export type RequestTokenArgs = Optional<BlobContainer.RequestTokenProps, "userToken">;
/** Add (or replace) a non-enumerable property on an object.
* This is important so this member will be skipped when the object is the target of
* [structuredClone](https://developer.mozilla.org/docs/Web/API/Web_Workers_API/Structured_clone_algorithm)
* (e.g. when the object is part of an exception that is marshalled across process boundaries.)
*/
export function addHiddenProperty<T>(o: T, p: PropertyKey, value?: any): T;
export function getBlobService(): BlobContainer.ContainerService;
/**
* Request a new AccessToken for a cloud container using the [[BlobContainer]] service.
* If the service is unavailable or returns an error, an empty token is returned.
*/
export function requestToken(args: RequestTokenArgs): Promise<AccessToken>;
export function noLeadingOrTrailingSpaces(name: string, msg: string): void;
export function validateDbName(dbName: DbName): void;
/**
* Create a new CloudContainer from a ContainerAccessProps. For non-public containers, a valid accessToken must be provided before the container
* can be used (e.g. via [[CloudSqlite.requestToken]]).
* @note After the container is successfully connected to a CloudCache, it will begin auto-refreshing its accessToken every `tokenRefreshSeconds` seconds (default is 1 hour)
* until it is disconnected. However, if the container is public, or if `tokenRefreshSeconds` is <=0, auto-refresh is not enabled.
*/
export function createCloudContainer(args: ContainerAccessProps & {
accessLevel?: BlobContainer.RequestAccessLevel;
tokenFn?: (args: RequestTokenArgs) => Promise<AccessToken>;
}): CloudContainer;
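/* Example — a minimal sketch of creating a writeable CloudContainer. The `baseUri`, `containerId`,
 * and initial token values are hypothetical placeholders; a real application obtains them from its
 * own configuration or from the BlobContainer service.
 * ```ts
 * import { CloudSqlite } from "@itwin/core-backend";
 *
 * const initialToken = process.env.CONTAINER_SAS ?? ""; // a previously obtained access token
 * const container = CloudSqlite.createCloudContainer({
 *   storageType: "azure",
 *   baseUri: "https://example.blob.core.windows.net",
 *   containerId: "my-container",
 *   writeable: true,
 *   accessToken: initialToken,
 *   tokenFn: CloudSqlite.requestToken, // used for auto-refresh while connected
 * });
 * ```
 */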
/** Begin prefetching all blocks for a database in a CloudContainer in the background. */
export function startCloudPrefetch(container: CloudContainer, dbName: string, args?: PrefetchProps): CloudPrefetch;
export interface ContainerProps {
/** The type of storage provider. */
readonly storageType: "azure" | "google";
/** The base URI for the container. */
readonly baseUri: string;
/** The name of the container. */
readonly containerId: string;
/** true if the container is public (doesn't require authorization) */
readonly isPublic?: boolean;
/** access token for container. If not present uses `CloudSqlite.requestToken` */
accessToken?: string;
}
/** Properties to access a CloudContainer. */
export interface ContainerAccessProps extends ContainerProps {
/** an alias for the container. Defaults to `containerId` */
readonly alias?: string;
/** SAS token that grants access to the container. */
accessToken: string;
/** if true, container is allowed to request the write lock. */
readonly writeable?: boolean;
/** if true, container is attached in "secure" mode (blocks are encrypted). Only supported in daemon mode. */
readonly secure?: boolean;
/** string attached to log messages from CloudSQLite. This is most useful for identifying usage from daemon mode. */
readonly logId?: string;
/** Duration for holding write lock, in seconds. After this time the write lock expires if not refreshed. Default is one hour. */
readonly lockExpireSeconds?: number;
/** number of seconds between auto-refresh of access token. If <=0 no auto-refresh. Default is 1 hour (60*60) */
readonly tokenRefreshSeconds?: number;
}
/** Returned from `CloudContainer.queryDatabase` describing one database in the container */
export interface CachedDbProps {
/** The total number of blocks in the database. */
readonly totalBlocks: number;
/** the number of blocks of the database that have been downloaded into the CloudCache */
readonly localBlocks: number;
/** the number of blocks from this database that have been modified in the CloudCache and need to be uploaded. */
readonly dirtyBlocks: number;
/** If true, the database currently has transactions in the WAL file and may not be uploaded until they have been checkpointed. */
readonly transactions: boolean;
/** the state of this database. Indicates whether the database is new or deleted since last upload */
readonly state: "" | "copied" | "deleted";
/** current number of clients that have this database open. */
readonly nClient: number;
/** current number of ongoing prefetches on this database. */
readonly nPrefetch: number;
}
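/* Example — a sketch of reading these properties for one database. `container` is assumed to be a
 * connected CloudContainer (see createCloudContainer and connect) and "parts.db" is a hypothetical name.
 * ```ts
 * const stats = container.queryDatabase("parts.db");
 * if (stats) {
 *   const pctLocal = (100 * stats.localBlocks) / stats.totalBlocks;
 *   console.log(`${pctLocal.toFixed(1)}% downloaded, ${stats.dirtyBlocks} blocks awaiting upload`);
 * }
 * ```
 */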
/** Filter options passed to CloudContainer.queryHttpLog
* @internal
*/
export interface BcvHttpLogFilterOptions {
/** only return rows whose ID is >= the provided id */
startFromId?: number;
/** only return rows whose endTime is null OR >= the provided endTime. */
finishedAtOrAfterTime?: string;
/** only return rows with a non-null end_time. */
showOnlyFinished?: boolean;
}
/** Returned from 'CloudContainer.queryHttpLog' describing a row in the bcv_http_log table.
* @internal
*/
export interface BcvHttpLog {
/** Unique, monotonically increasing id value */
readonly id: number;
/** Time request was made, as iso-8601 */
readonly startTime: string;
/** Time reply received, as iso-8601 (may be undefined) */
readonly endTime: string | undefined;
/** "PUT", "GET", etc. */
readonly method: string;
/** LogId of client that caused this request. Will be "prefetch" for prefetch requests. */
readonly logId: string;
/** Log message associated with request */
readonly logmsg: string;
/** URI of request */
readonly uri: string;
/** HTTP response code (e.g. 200) */
readonly httpcode: number;
}
/** Filter options passed to 'CloudContainer.queryBcvStats'
* @internal
*/
interface BcvStatsFilterOptions {
/** if true, adds activeClients, totalClients, ongoingPrefetches, and attachedContainers to the result. */
addClientInformation?: boolean;
}
/** Returned from 'CloudContainer.queryBcvStats' describing the rows in the bcv_stat table.
* Also gathers additional statistics (totalClients, ongoingPrefetches, activeClients, and attachedContainers) from the bcv_container and bcv_database virtual tables.
* @internal
*/
export interface BcvStats {
/** The total number of cache slots that are currently in use or 'locked' by ongoing client read transactions. In daemonless mode, this value is always 0.
* A locked cache slot implies that it is not eligible for eviction in the event of a full cachefile.
*/
readonly lockedCacheslots: number;
/** The current number of slots with data in them in the cache. */
readonly populatedCacheslots: number;
/** The configured size of the cache, in number of slots. */
readonly totalCacheslots: number;
/** The total number of clients opened on this cache */
readonly totalClients?: number;
/** The total number of ongoing prefetches on this cache */
readonly ongoingPrefetches?: number;
/** The total number of active clients on this cache. An active client is one which has an open read txn. */
readonly activeClients?: number;
/** The total number of attached containers on this cache. */
readonly attachedContainers?: number;
}
/** The base name of a CloudSqlite database, without any version information.
* The name must conform to the following constraints:
* - Case-insensitively unique among all databases in the same [[CloudSqlite.CloudContainer]]
* - Between 1 and 255 characters in length.
* - A legal filename on both [Windows](https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file#naming-conventions) and UNIX.
* - Contain none of the following characters: forward or backward slash, period, single or double quote, backtick, colon, and "#".
* - Not begin or end with a whitespace character.
* @see [[CloudSqlite.DbFullName]] for the fully-specified name, including version information.
*/
export type DbName = string;
/** The fully-specified name of a CloudSqlite database, combining its [[CloudSqlite.DbName]] and [[CloudSqlite.DbVersion]] in the format "name:version".
*/
export type DbFullName = string;
/** A [semver](https://github.com/npm/node-semver) string describing the version of a database, e.g., "4.2.11".
*/
export type DbVersion = string;
/** A [semver string](https://github.com/npm/node-semver?tab=readme-ov-file#ranges) describing a range of acceptable versions,
* e.g., ">=1.2.7 <1.3.0".
*/
export type DbVersionRange = string;
/** Specifies the name and version of a CloudSqlite database.
*/
export interface DbNameAndVersion {
/** The name of the database */
readonly dbName: DbName;
/** The range of acceptable versions of the database of the specified [[dbName]].
* If omitted, it defaults to the newest available version.
*/
readonly version?: DbVersionRange;
}
export interface LoadProps extends DbNameAndVersion {
readonly container: CloudContainer;
/** If true, allow semver [prerelease versions](https://github.com/npm/node-semver?tab=readme-ov-file#prerelease-tags), e.g., "1.4.2-beta.0".
* By default, only released versions are allowed.
*/
readonly includePrerelease?: boolean;
/** If true, start a prefetch operation whenever this database is opened, to begin downloading pages of the database before they are needed. */
readonly prefetch?: boolean;
}
/**
* The release increment for a version number, used as part of [[CloudSqlite.CreateNewDbVersionArgs]] to specify the kind of version to create.
* @see [semver.ReleaseType](https://www.npmjs.com/package/semver)
*/
export type SemverIncrement = "major" | "minor" | "patch" | "premajor" | "preminor" | "prepatch" | "prerelease";
/**
* Arguments supplied to [[CloudSqlite.createNewDbVersion]].
*/
export interface CreateNewDbVersionArgs {
readonly fromDb: DbNameAndVersion;
/** The type of version increment to apply to the source version. */
readonly versionType: SemverIncrement;
/** For prerelease versions, a string that becomes part of the version name. */
readonly identifier?: string;
}
/** The name of a CloudSqlite database within a CloudContainer. */
export interface DbNameProp {
/** the name of the database within the CloudContainer.
* @note names of databases within a CloudContainer are always **case sensitive** on all platforms.*/
dbName: DbFullName;
}
export type TransferDirection = "upload" | "download";
export interface TransferProgress {
/** a user-supplied progress function called during the transfer operation. Return a non-0 value to abort the transfer. */
onProgress?: (loaded: number, total: number) => number;
}
export interface CloudHttpProps {
/** The number of simultaneous HTTP requests. Default is 6. */
nRequests?: number;
}
export interface PrefetchProps extends CloudHttpProps {
/** timeout between requests, in milliseconds. Default is 100. */
timeout?: number;
/** The number of prefetch requests to issue while there is foreground activity. Default is 3. */
minRequests?: number;
}
export interface TransferDbProps extends DbNameProp, TransferProgress, CloudHttpProps {
/** the name of the local file to access the database for uploading and downloading */
localFileName: LocalFileName;
}
/** Properties for creating a CloudCache. */
export interface CacheProps extends CloudHttpProps {
/** full path of directory for cache to store its files. Must be on a (preferably fast) local drive, and must be empty when the cache is first created. */
rootDir: string;
/** name of this cache. It is possible to have more than one CloudCache in the same session, but each must have a unique name. */
name: string;
/** maximum cache size. Must be a number followed by either M (for megabytes) or G (for gigabytes). Default is 1G */
cacheSize?: string;
/** turn on diagnostics for `curl` (outputs to stderr) */
curlDiagnostics?: boolean;
}
/** Parameters used to obtain the write lock on a cloud container */
export interface ObtainLockParams {
/** a string that identifies me to others if I hold the lock while they attempt to acquire it. */
user?: string;
/** number of times to retry in the event the lock is currently held by someone else.
* After this number of attempts, `onFailure` is called. Default is 20.
*/
nRetries: number;
/** Delay between retries, in milliseconds. Default is 100. */
retryDelayMs: number;
/** function called if the lock cannot be obtained after all retries. It is called with the name of the user currently holding the lock, and
* it is generally expected that the user will be consulted about whether to wait further.
* If this function returns "stop", an exception will be thrown. Otherwise the retry cycle is restarted.
*/
onFailure?: WriteLockBusyHandler;
}
/** @internal */
export interface LockAndOpenArgs extends SQLiteDb.WithOpenDbArgs {
/** a string that identifies me to others if I hold the lock while they attempt to acquire it. */
user: string;
/** the name of the database within the container */
dbName: string;
/** the CloudContainer on which the operation will be performed */
container: CloudContainer;
/** if present, function called when the write lock is currently held by another user. */
busyHandler?: WriteLockBusyHandler;
/** if present, open mode for Db. Default is ReadWrite */
openMode?: OpenMode;
}
/** Logging categories for `CloudCache.setLogMask` */
export enum LoggingMask {
/** log all HTTP requests and responses */
HTTP = 1,
/** log as blocks become dirty and must be uploaded */
DirtyBlocks = 2,
/** log as blocks are added to the delete list */
AddToDelete = 4,
/** log container lifecycle events (e.g. authorization requests, disconnects, and state transitions) */
LifecycleEvents = 8,
/** Turn on all logging categories */
All = 255,
/** Disable logging */
None = 0
}
/**
* A local cache for storing data downloaded from many CloudSqlite databases. This object refers to a directory on the local filesystem
* and is used to **connect** CloudContainers so they may be accessed. It maintains the state of the local copy of
* the downloaded data from SQLiteDbs in CloudContainers across sessions.
*
* Notes:
* - CloudCaches have a name, used internally by CloudSqlite, that must be unique. CloudCaches are created and maintained via [[CloudCaches.getCache]].
* - All CloudContainers connected to a given CloudCache must have the same block size, as determined by the first CloudContainer connected.
* - Each CloudCache has a maximum size that limits the amount of disk space it can consume. When the maximum size of a CloudCache is reached,
* the least recently used blocks are removed to make room for new blocks.
* - CloudCaches may only be used by a single process at a time. An exception is thrown if you attempt to access a CloudCache from a
* second process if it is already in use by another process. Note: for a readonly CloudCache, a "daemon" process can be used to
* share a CloudCache across processes. See its documentation for details.
* - Generally, it is expected that there be only a few CloudCaches and that they be shared by all applications. Each CloudCache can consume
* its maximum disk space, so controlling system-wide disk usage is complicated. The only reason to make a new CloudCache is either
* for containers with a different block size, or to purposely control local disk space usage for a specific set of containers.
* - The contents of the cache directory are entirely controlled by CloudSqlite and should be empty when the cache is
* first created and never modified directly thereafter.
*/
export interface CloudCache {
/** `true` if this CloudCache is connected to a daemon process */
get isDaemon(): boolean;
/** The name for this CloudCache. */
get name(): string;
/** The root directory of this CloudCache on a local drive. */
get rootDir(): LocalDirName;
/** A guid for this CloudCache. It is assigned when the CloudCache is first created and used for acquiring write locks. */
get guid(): GuidString;
/** Configure logging for this CloudCache.
* @param mask A bitmask of `LoggingMask` values
* @note this method does nothing if [[isDaemon]] is true. Daemon logging is configured when the daemon is started.
* @note HTTP logging can happen on multiple threads and may be buffered. To see buffered log messages, periodically call
* [[IModelHost.flushLog]].
*/
setLogMask(mask: number): void;
/**
* destroy this CloudCache to end this session. All currently connected CloudContainers are disconnected first.
* @note this does *not* delete the local directory. Its contents are maintained so it can be used in future sessions.
* @note this function is automatically called on [[IModelHost.shutdown]], so it is only called directly for tests.
* @internal
*/
destroy(): void;
}
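/* Example — a sketch of enabling CloudSqlite logging on a cache. The cache name is hypothetical;
 * note that setLogMask has no effect when the cache is attached to a daemon.
 * ```ts
 * import { CloudSqlite } from "@itwin/core-backend";
 *
 * const cache = CloudSqlite.CloudCaches.getCache({ cacheName: "app-cache" });
 * cache.setLogMask(CloudSqlite.LoggingMask.HTTP | CloudSqlite.LoggingMask.DirtyBlocks);
 * ```
 */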
export interface CleanDeletedBlocksOptions {
/**
Any block that was marked as unused more than this number of seconds ago will be deleted. Specifying a non-zero
* value gives other clients a period of time to refresh their manifests and stop using the now-garbage blocks; otherwise they may get
* a 404 error. Default is 1 hour.
*/
nSeconds?: number;
/** if enabled, outputs verbose logs about the cleanup process. Output includes blocks determined eligible for deletion.
* @default false
*/
debugLogging?: boolean;
/** If true, iterates over all blobs in the cloud container to add blocks that are 'orphaned' to the delete list in the manifest.
* Orphaned blocks are created when a client abruptly halts, is disconnected, or encounters an error while uploading a change.
* If false, the search for 'orphaned' blocks is skipped and only any blocks which are already on the delete list are deleted.
* @default true
*/
findOrphanedBlocks?: boolean;
/**
* a user-supplied progress function called during the cleanup operation. While the search for orphaned blocks occurs, nDeleted will be 0 and nTotalToDelete will be 1.
* Once the search is complete and orphaned blocks begin being deleted, nDeleted will be the number of blocks deleted and nTotalToDelete will be the total number of blocks to delete.
* Return 1 to cancel the job and save progress: if one or more blocks have already been deleted, a new manifest file is uploaded recording the progress of the delete job.
* Return any other non-0 value to cancel the job without saving progress.
*/
onProgress?: (nDeleted: number, nTotalToDelete: number) => Promise<number>;
}
/**
* A CloudSqlite container that may be connected to a CloudCache. A CloudContainer maps a container in a cloud blob-storage
* account to a local cache, so that the contents of a database in the container may be accessed as if it were a local file.
*
* Notes:
* - all methods and accessors of this interface (other than `initializeContainer`) require that the `connect` method be successfully called first.
* Otherwise they will throw an exception or return meaningless values.
* - before a SQLiteDb in a container may be opened for write access, the container's write lock must be held (see [[acquireWriteLock]].)
* - a single CloudContainer may hold more than one SQLiteDb, but often they are 1:1.
* - the write lock is per-Container, not per-SQLiteDb (which is the reason they are often 1:1)
* - the accessToken (a SAS key) member provides time-limited, restricted access to the container. It must be refreshed before it expires.
* - when a CloudContainer is created, it may either be readonly or writeable. If a container is never meant to be used for writes,
* it is slightly more efficient to indicate that by passing `writeable: false`
*/
export interface CloudContainer {
onConnect?: (container: CloudContainer, cache: CloudCache) => void;
onConnected?: (container: CloudContainer) => void;
onDisconnect?: (container: CloudContainer, detach: boolean) => void;
onDisconnected?: (container: CloudContainer, detach: boolean) => void;
readonly cache?: CloudCache;
/** the baseUri of this container */
get baseUri(): string;
/** the storageType of this container */
get storageType(): string;
/** The ContainerId within a storage account. */
get containerId(): string;
/** The *alias* to identify this CloudContainer in a CloudCache. Usually just the ContainerId. */
get alias(): string;
/** The logId. */
get logId(): string;
/** The time that the write lock expires. Of the form 'YYYY-MM-DDTHH:MM:SS.000Z' in UTC.
* Returns empty string if write lock is not held.
*/
get writeLockExpires(): string;
/** true if this CloudContainer is currently connected to a CloudCache via the `connect` method. */
get isConnected(): boolean;
/** true if this CloudContainer was created with the `writeable` flag (and its `accessToken` supplies write access). */
get isWriteable(): boolean;
/** true if this container is public (doesn't require authorization ). */
get isPublic(): boolean;
/** true if this CloudContainer currently holds the write lock for its container in the cloud. */
get hasWriteLock(): boolean;
/** true if this CloudContainer has local changes that have not been uploaded to its container in the cloud. */
get hasLocalChanges(): boolean;
/** The current accessToken providing access to the cloud container */
get accessToken(): string;
set accessToken(val: string);
/** Get the number of garbage blocks in this container that can be purged. */
get garbageBlocks(): number;
/** The block size for this CloudContainer. */
get blockSize(): number;
/**
* initialize a cloud blob-store container to be used as a new CloudContainer. This creates the container's manifest of its contents, and should be
* performed on an empty container. If an existing manifest is present, it is destroyed and a new one is created (essentially emptying the container.)
*/
initializeContainer(args: {
checksumBlockNames?: boolean;
blockSize: number;
}): void;
/**
* Connect this CloudContainer to a CloudCache for accessing and/or modifying its contents.
* @note A CloudCache is a local directory holding copies of information from the cloud. It is persistent across sessions,
* but this method must be called each session to (re)establish the connection to the CloudCache. If the CloudCache was previously populated,
* this method may be called and will succeed *even when offline* or without a valid `accessToken`.
*/
connect(cache: CloudCache): void;
/**
* Attempt to acquire the write lock for this CloudContainer. For this to succeed:
* 1. it must be connected to a `CloudCache`
* 2. this CloudContainer must have been constructed with `writeable: true`
* 3. the `accessToken` must authorize write access
* 4. no other process may be holding an unexpired write lock
* @throws if any of the above conditions fail
* @note Write locks *expire* after the duration specified in the `durationSeconds` property of the constructor argument, in case a process
* crashes or otherwise fails to release the lock. Calling `acquireWriteLock` with the lock already held resets the lock duration from the current time,
* so long running processes should call this method periodically to ensure their lock doesn't expire (they should also make sure their accessToken is refreshed
* before it expires.)
* @note on success, the container is synchronized with its contents in the cloud before the promise resolves.
* @param user An identifier of the process/user locking the CloudContainer. In the event of a write lock
* collision, this string will be included in the exception string of the *other* process attempting to obtain a write lock so that users may identify who currently holds
* the lock.
*/
acquireWriteLock(user: string): void;
/**
* Release the write lock if it is currently held.
*
* Notes:
* - if there are local changes that have not been uploaded, they are automatically uploaded before the write lock is released.
* - if the write lock is not held, this method does nothing.
*/
releaseWriteLock(): void;
/**
* Destroy any currently valid write lock from this or any other process. This is obviously very dangerous and defeats the purpose of write locking.
* This method exists only for administrator tools to clear a failed process without waiting for the expiration period. It can also be useful for tests.
* For this to succeed, all of the conditions of `acquireWriteLock` must be true other than #4.
*/
clearWriteLock(): void;
/**
* Abandon any local changes in this container. If the write lock is currently held, it is released.
* This function fails with BE_SQLITE_BUSY if there are any open read or write transactions on *any* database in the container.
*/
abandonChanges(): void;
/**
* Disconnect this CloudContainer from its CloudCache. There must be no open databases from this container. Leaves the container's contents in the
* CloudCache so it is available for future sessions.
* @note This function does nothing (and does not throw) if the CloudContainer is not connected to a CloudCache.
*/
disconnect(args?: {
/** if true, removes the container from the CloudCache; otherwise leaves the container in the CloudCache so it is available for future sessions. */
detach?: boolean;
}): void;
/**
* Poll cloud storage for changes from other processes.
*
* Notes:
* - no changes made by other processes are visible to this CloudContainer unless/until this method is called.
* - this is automatically called whenever the write lock is obtained to ensure all changes are against the latest version.
* - any existing transactions on databases within the container will continue to use the old version of the manifest and therefore see no new changes pulled in.
*/
checkForChanges(): void;
/**
* Upload any changed blocks from the databases in this CloudContainer.
* @note this is called automatically from `releaseWriteLock` before the write lock is released. It is only necessary to call this directly if you
* wish to upload changes while the write lock is still held.
* @see hasLocalChanges
*/
uploadChanges(): Promise<void>;
/**
* Create a copy of an existing database within this CloudContainer with a new name.
* @note CloudSqlite uses copy-on-write semantics for this operation. That is, this method merely makes a
* new entry in the manifest with the new name that *shares* all of its blocks with the original database.
* If either database subsequently changes, only the modified blocks become unshared.
*/
copyDatabase(dbName: string, toAlias: string): Promise<void>;
/** Remove a database from this CloudContainer. Unused blocks are moved to the delete list in the manifest.
* @see [[CloudSqlite.cleanDeletedBlocks]] to actually delete the blocks from the delete list.
*/
deleteDatabase(dbName: string): Promise<void>;
/** Get the list of database names in this CloudContainer.
* @param globArg if present, filter the results with SQLite [GLOB](https://www.sqlite.org/lang_expr.html#glob) operator.
*/
queryDatabases(globArg?: string): string[];
/**
* Get the status of a specific database in this CloudContainer.
* @param dbName the name of the database of interest
*/
queryDatabase(dbName: string): CachedDbProps | undefined;
/**
* query the bcv_http_log table
* @note the bcv_http_log table contains one row for each HTTP request made by the VFS or connected daemon.
* @note Entries are automatically removed from the table on a FIFO basis. By default, entries more than 1 hour old are removed.
* @internal
*/
queryHttpLog(filterOptions?: BcvHttpLogFilterOptions): CloudSqlite.BcvHttpLog[];
/**
* query the bcv_stat table.
* @internal
*/
queryBcvStats(filterOptions?: BcvStatsFilterOptions): CloudSqlite.BcvStats;
/**
* Get the SHA1 hash of the content of a database.
* @param dbName the name of the database of interest
* @note the hash will be empty if the database does not exist
*/
queryDatabaseHash(dbName: string): string;
}
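/* Example — a sketch of a typical read session: connect a container to a cache, pick up changes made
 * by other processes, enumerate its databases, then disconnect. `container` is assumed to come from
 * createCloudContainer and `cache` from CloudCaches.getCache.
 * ```ts
 * container.connect(cache);
 * container.checkForChanges(); // see changes uploaded by others since the last poll
 * for (const dbName of container.queryDatabases())
 *   console.log(dbName, container.queryDatabaseHash(dbName));
 * container.disconnect();
 * ```
 */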
/**
* Object returned by [[CloudSqlite.startCloudPrefetch]].
* It holds a promise that is fulfilled when a Prefetch is completed. May also be used to cancel an in-progress prefetch.
*/
export interface CloudPrefetch {
readonly cloudContainer: CloudContainer;
readonly dbName: string;
/** Cancel a currently pending prefetch. The promise will be resolved immediately after this call. */
cancel(): void;
/**
* Promise that is resolved when the prefetch completes or is cancelled. Await this promise to ensure that the
* database has been fully downloaded before going offline, for example.
*
* Notes:
* - resolves to `true` if the prefetch completed and the entire database is local, or `false` if it was aborted or failed.
* - it is *not* rejected on `cancel`. Some progress may (or may not) have been made by the request.
* - To monitor the progress being made during prefetch, call `CloudContainer.queryDatabase` periodically.
*/
promise: Promise<boolean>;
}
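/* Example — a sketch of prefetching a database before going offline. `container` is assumed to be a
 * connected CloudContainer and "parts.db" a hypothetical database name; run inside an async function.
 * ```ts
 * const prefetch = CloudSqlite.startCloudPrefetch(container, "parts.db");
 * const fullyLocal = await prefetch.promise; // true when every block is in the local cache
 * if (!fullyLocal) {
 *   // aborted or failed; remaining blocks will be downloaded on demand
 * }
 * ```
 */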
/**
* Clean any unused deleted blocks from cloud storage. Unused deleted blocks can accumulate in cloud storage in a couple of ways:
* 1) When a database is updated, a subset of its blocks are replaced by new versions, sometimes leaving the originals unused.
* 2) A database is deleted with [[CloudContainer.deleteDatabase]]
* In both cases, the blocks are not deleted immediately. Instead, they are scheduled for deletion at some later time.
* Calling this method deletes all blocks in the cloud container for which the scheduled deletion time has passed.
* @param container the CloudContainer to be cleaned. Must be connected and hold the write lock.
* @param options options for the cleanup operation. @see CloudSqlite.CleanDeletedBlocksOptions
*/
export function cleanDeletedBlocks(container: CloudContainer, options: CleanDeletedBlocksOptions): Promise<void>;
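/* Example — a sketch of purging deleted blocks. The container is assumed to be connected and to hold
 * the write lock (e.g. inside withWriteLock); the option values are illustrative only.
 * ```ts
 * await CloudSqlite.cleanDeletedBlocks(container, {
 *   nSeconds: 60 * 60,        // keep blocks marked unused within the last hour
 *   findOrphanedBlocks: true, // also scan for blocks left behind by interrupted uploads
 * });
 * ```
 */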
/** @internal */
export function transferDb(direction: TransferDirection, container: CloudContainer, props: TransferDbProps): Promise<void>;
/** Upload a local SQLite database file into a CloudContainer.
* @param container the CloudContainer holding the database. Must be connected.
* @param props the properties that describe the database to be uploaded, plus optionally an `onProgress` function.
* @note this function requires that the write lock be held on the container
*/
export function uploadDb(container: CloudContainer, props: TransferDbProps): Promise<void>;
/** Download a database from a CloudContainer.
* @param container the CloudContainer holding the database. Must be connected.
* @param props the properties that describe the database to be downloaded, plus optionally an `onProgress` function.
* @returns a Promise that is resolved when the download completes.
* @note the download is "restartable." If the transfer is aborted and then re-requested, it will continue from where
* it left off rather than re-downloading the entire file.
*/
export function downloadDb(container: CloudContainer, props: TransferDbProps): Promise<void>;
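/* Example — a sketch of downloading a database to a local file with progress reporting. The database
 * name and local path are hypothetical; run inside an async function with a connected container.
 * ```ts
 * await CloudSqlite.downloadDb(container, {
 *   dbName: "parts.db",
 *   localFileName: "/tmp/parts.db",
 *   onProgress: (loaded, total) => {
 *     console.log(`downloaded ${loaded} of ${total}`);
 *     return 0; // return a non-0 value to abort
 *   },
 * });
 * ```
 */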
/** Optional method to be called when an attempt to acquire the write lock fails because another user currently holds it.
* @param lockedBy The identifier supplied by the application/user that currently holds the lock.
* @param expires a stringified Date (in local time) indicating when the lock will expire.
* @return "stop" to give up and stop retrying. Generally, it's a good idea to wait for some time before returning.
*/
export type WriteLockBusyHandler = (lockedBy: string, expires: string) => Promise<void | "stop">;
/**
* Attempt to acquire the write lock for a container, with retries.
* If the write lock is held by another user, the busyHandler is called, if supplied. If there is no busyHandler, or if the handler returns "stop", an exception is thrown. Otherwise the attempt is retried.
* @note if write lock is already held by the same user, this function will refresh the write lock's expiry time.
* @param user the name to be displayed to other users in the event they attempt to obtain the lock while it is held by us
* @param container the CloudContainer for which the lock is to be acquired
* @param busyHandler if present, function called when the write lock is currently held by another user.
* @throws if [[container]] is not connected to a CloudCache.
*/
export function acquireWriteLock(args: {
user: string;
container: CloudContainer;
busyHandler?: WriteLockBusyHandler;
}): Promise<void>;
export function getWriteLockHeldBy(container: CloudContainer): string | undefined;
/** release the write lock on a container. */
export function releaseWriteLock(container: CloudContainer): void;
/**
* Perform an asynchronous write operation on a CloudContainer with the write lock held.
* 1. if write lock is already held by the current user, refresh write lock's expiry time, call operation and return.
* 2. attempt to acquire the write lock, with retries. Throw if unable to obtain write lock.
* 3. perform the operation
* 3.a if the operation throws, abandon all changes and re-throw
* 4. release the write lock.
* 5. return value from operation
* @param user the name to be displayed to other users in the event they attempt to obtain the lock while it is held by us
* @param container the CloudContainer for which the lock is to be acquired
* @param operation an asynchronous operation performed with the write lock held.
* @param busyHandler if present, function called when the write lock is currently held by another user.
* @returns a Promise with the result of `operation`
*/
export function withWriteLock<T>(args: {
user: string;
container: CloudContainer;
busyHandler?: WriteLockBusyHandler;
}, operation: () => Promise<T>): Promise<T>;
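/* Example — a sketch of performing a write operation with the lock held. The user name is
 * hypothetical; on success local changes are uploaded and the lock is released, and if `operation`
 * throws, all changes are abandoned.
 * ```ts
 * await CloudSqlite.withWriteLock(
 *   {
 *     user: "jane@example.com",
 *     container,
 *     busyHandler: async (lockedBy, expires) => {
 *       console.log(`write lock held by ${lockedBy} until ${expires}`);
 *       return "stop"; // give up; returning void would retry
 *     },
 *   },
 *   async () => {
 *     // ...open a database in the container and modify it...
 *   },
 * );
 * ```
 */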
/**
* Parse the name of a Db stored in a CloudContainer into the dbName and version number. A single CloudContainer may hold
* many versions of the same Db. The name of the Db in the CloudContainer is in the format "name:version". This
* function splits them into separate strings.
*/
export function parseDbFileName(dbFileName: DbFullName): {
dbName: DbName;
version: DbVersion;
};
export function validateDbVersion(version?: DbVersion): string;
export function isSemverPrerelease(version: string): true | readonly (string | number)[] | null;
export function isSemverEditable(dbFullName: string, container: CloudContainer): boolean | readonly (string | number)[];
/** Create a dbName for a database from its base name and version. This will be in the format "name:version" */
export function makeSemverName(dbName: DbName, version?: DbVersion): DbName;
/** query the databases in the supplied container for the highest SemVer match according to the version range. Throws if no version available for the range. */
export function querySemverMatch(props: LoadProps): DbFullName;
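/* Example — a sketch of the version-name helpers. A container may hold several versions of the same
 * database, e.g. "parts:1.2.0" and "parts:1.3.0"; the names here are hypothetical.
 * ```ts
 * const full = CloudSqlite.makeSemverName("parts", "1.2.0");     // "parts:1.2.0"
 * const { dbName, version } = CloudSqlite.parseDbFileName(full); // { dbName: "parts", version: "1.2.0" }
 * const newest = CloudSqlite.querySemverMatch({ container, dbName: "parts" }); // highest available version
 * ```
 */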
export function createNewDbVersion(container: CloudContainer, args: CreateNewDbVersionArgs): Promise<{
oldDb: DbNameAndVersion;
newDb: DbNameAndVersion;
}>;
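/* Example — a sketch of creating a new minor version from the newest existing version of a database.
 * The database name is hypothetical; the write lock must be held (see withWriteLock).
 * ```ts
 * const { oldDb, newDb } = await CloudSqlite.createNewDbVersion(container, {
 *   fromDb: { dbName: "parts" }, // version omitted: use the newest available version
 *   versionType: "minor",
 * });
 * console.log(`copied ${oldDb.dbName}:${oldDb.version} to ${newDb.dbName}:${newDb.version}`);
 * ```
 */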
/** Arguments to create or find a CloudCache */
export interface CreateCloudCacheArg {
/** The name of the CloudCache. CloudCache names must be unique. */
cacheName: string;
/** A string that specifies the maximum size of the CloudCache. It should be a number followed by "K",
* "M" "G", or "T". Default is "10G". */
cacheSize?: string;
/** A local directory in temporary storage for the CloudCache. If not supplied, it is a subdirectory called `cacheName`
* in the `CloudCaches` temporary directory.
* If the directory does not exist, it is created. */
cacheDir?: string;
}
/** The collection of currently extant `CloudCache`s, by name. */
export class CloudCaches {
private static readonly cloudCaches;
/** create a new CloudCache */
private static makeCache;
/** find a CloudCache by name, if it exists */
static findCache(cacheName: string): CloudCache | undefined;
/** @internal */
static dropCache(cacheName: string): CloudCache | undefined;
/** called by IModelHost after shutdown.
* @internal
*/
static destroy(): void;
/** Get a CloudCache by name. If the CloudCache doesn't yet exist, it is created. */
static getCache(args: CreateCloudCacheArg): CloudCache;
}
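/* Example — a sketch of obtaining (or creating) a named cache with a larger size limit. The name and
 * size are illustrative; when `cacheDir` is omitted a subdirectory of the CloudCaches temporary
 * directory is used.
 * ```ts
 * const cache = CloudSqlite.CloudCaches.getCache({ cacheName: "app-cache", cacheSize: "20G" });
 * ```
 */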
/** Class that provides convenient local access to a SQLite database in a CloudContainer. */
export class DbAccess<DbType extends VersionedSqliteDb, ReadMethods = DbType, WriteMethods = DbType> {
/** The name of the database within the cloud container. */
readonly dbName: string;
/** Parameters for obtaining the write lock for this container. */
readonly lockParams: ObtainLockParams;
protected static _cacheName: string;
protected _container: CloudContainer;
protected _cloudDb: DbType;
private _writeLockProxy?;
private _readerProxy?;
private get _ctor();
/** @internal */
static getCacheForClass(): CloudCache;
private _cache?;
/** only for tests
* @internal
*/
setCache(cache: CloudCache): void;
/** @internal */
getCache(): CloudCache;
/** @internal */
getCloudDb(): DbType;
/**
* The token that grants access to the cloud container for this DbAccess. If it does not grant write permissions, all
* write operations will fail. It should be refreshed (via a timer) before it expires.
*/
get sasToken(): AccessToken;
set sasToken(token: AccessToken);
/** the container for this DbAccess. It is automatically connected to the CloudCache whenever it is accessed. */
get container(): CloudContainer;
/** Start a prefetch operation to download all the blocks for the VersionedSqliteDb */
startPrefetch(): CloudPrefetch;
/** Create a new DbAccess for a database stored in a cloud container. */
constructor(args: {
/** The Constructor for DbType. */
dbType: Constructor<DbType>;
/** The properties of the cloud container holding the database. */
props: ContainerAccessProps;
/** The name of the database within the container. */
dbName: string;
});
/** Close the database for this DbAccess, if it is open */
closeDb(): void;
/** Close the database for this DbAccess if it is open, and disconnect this `DbAccess` from its CloudContainer. */
close(): void;
/**
* Initialize a cloud container to hold VersionedSqliteDbs. The container must first be created by [[createBlobContainer]].
* This function creates and uploads an empty database into the container.
* @note this deletes any existing content in the container.
*/
protected static _initializeDb(args: {
dbType: typeof VersionedSqliteDb;
props: ContainerProps;
dbName: string;
blockSize?: "64K" | "4M";
}): Promise<void>;
/**
* Create a new BlobContainer from the BlobContainer service to hold one or more VersionedSqliteDbs.
* @returns A ContainerProps that describes the newly created container.
* @note the current user must have administrator rights to create containers.
*/
protected static createBlobContainer(args: Omit<BlobContainer.CreateNewContainerProps, "userToken">): Promise<CloudSqlite.ContainerProps>;
/**
* Synchronize the local cache of this database with any changes made by others.
* @note This is called automatically whenever any write operation is performed on this DbAccess. It is only necessary to
* call this directly if you have not changed the database recently, but wish to perform a readonly operation and want to
* ensure it is up-to-date as of now.
* @note There is no guarantee that the database is up-to-date even immediately after calling this method, since others
* may be modifying it at any time.
*/
synchronizeWithCloud(): void;
/**
* Ensure that the database controlled by this `DbAccess` is open for read access and return the database object.
* @note if the database is already open (either for read or write), this method merely returns the database object.
*/
openForRead(): DbType;
/**
* Perform an operation on this database with the lock held and the database opened for write
* @param operationName the name of the operation. Only used for logging.
* @param operation a function called with the lock held and the database open for write.
* @returns A promise that resolves to the return value of `operation`.
* @see `SQLiteDb.withLockedContainer`
* @note Most uses of `CloudSqliteDbAccess` require that the lock not be held by any operation for long. Make sure you don't
* do any avoidable or time-consuming work in your operation function.
*/
withLockedDb<T>(args: {
operationName: string;
openMode?: OpenMode;
user?: string;
}, operation: () => Promise<T>): Promise<T>;
/** get a method member, by name, from the database object. Throws if not a Function. */
private getDbMethod;
/**
* A Proxy Object to call a writeable async method on the cloud database controlled by this `DbAccess`.
*
* Whenever a method is called through this Proxy, it will:
* - attempt to acquire the write lock on the container
* - open the database for write
* - call the method
* - close the database
* - upload changes
* - release the write lock.
*
* @see [[withLockedDb]]
*/
get writeLocker(): PickAsyncMethods<WriteMethods>;
/**
* A Proxy Object to call a synchronous readonly method on the database controlled by this `DbAccess`.
* Whenever a method is called through this Proxy, it will first ensure that the database is opened for at least read access.
*/
get reader(): PickMethods<ReadMethods>;
}
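/* Example — a sketch of a concrete DbAccess. `SettingsDb` is a hypothetical VersionedSqliteDb subclass
 * with an async `addSetting` method and a synchronous `getSetting` method; `containerProps` is a
 * previously obtained ContainerAccessProps.
 * ```ts
 * class SettingsDbAccess extends CloudSqlite.DbAccess<SettingsDb> {
 *   public constructor(props: CloudSqlite.ContainerAccessProps) {
 *     super({ dbType: SettingsDb, props, dbName: "settings-db" });
 *   }
 * }
 *
 * const access = new SettingsDbAccess(containerProps);
 * await access.writeLocker.addSetting("theme", "dark"); // acquires the lock, opens for write, uploads, releases
 * const theme = access.reader.getSetting("theme");      // ensures the db is open for at least read access
 * ```
 */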
export {};
}
//# sourceMappingURL=CloudSqlite.d.ts.map