// typeorm: Data-Mapper ORM for TypeScript and ES2021+. Supports MySQL/MariaDB,
// PostgreSQL, MS SQL Server, Oracle, SAP HANA, SQLite, and MongoDB.
//
// Bundled MongoDB driver type declarations.
import { BSON, BSONRegExp, BSONSymbol, BSONType, Binary, Code, DBRef, Decimal128, DeserializeOptions, Document, Double, Int32, Long, MaxKey, MinKey, ObjectId, ObjectIdLike, SerializeOptions, Timestamp, deserialize, serialize } from "./bson.typings";
import type { ConnectionOptions as ConnectionOptions_2 } from "tls";
import type { Socket } from "net";
import type { SrvRecord } from "dns";
import type { TcpNetConnectOpts } from "net";
import type { TLSSocket } from "tls";
import type { TLSSocketOptions } from "tls";
import { Readable, EventEmitter } from "../../platform/PlatformTools";

/** @public */
export declare abstract class AbstractCursor<TSchema = any, CursorEvents extends AbstractCursorEvents = AbstractCursorEvents> extends TypedEventEmitter<CursorEvents> {
    /** @event */
    static readonly CLOSE: "close";
    get id(): Long | undefined;
    get namespace(): MongoDBNamespace;
    get readPreference(): ReadPreference;
    get readConcern(): ReadConcern | undefined;
    get closed(): boolean;
    get killed(): boolean;
    get loadBalanced(): boolean;
    /** Returns current buffered documents length */
    bufferedCount(): number;
    /** Returns current buffered documents */
    readBufferedDocuments(number?: number): TSchema[];
    [Symbol.asyncIterator](): AsyncGenerator<TSchema, void, void>;
    stream(options?: CursorStreamOptions): Readable & AsyncIterable<TSchema>;
    hasNext(): Promise<boolean>;
    /** Get the next available document from the cursor, returns null if no more documents are available. */
    next(): Promise<TSchema | null>;
    /**
     * Try to get the next available document from the cursor or `null` if an empty batch is returned
     */
    tryNext(): Promise<TSchema | null>;
    /**
     * Iterates over all the documents for this cursor using the iterator, callback pattern.
     *
     * If the iterator returns `false`, iteration will stop.
     *
     * @param iterator - The iteration callback.
     */
    forEach(iterator: (doc: TSchema) => boolean | void): Promise<void>;
    close(): Promise<void>;
    /**
     * Returns an array of documents. The caller is responsible for making sure that there
     * is enough memory to store the results. Note that the array only contains partial
     * results when this cursor had been previously accessed. In that case,
     * cursor.rewind() can be used to reset the cursor.
     */
    toArray(): Promise<TSchema[]>;
    /**
     * Add a cursor flag to the cursor
     *
     * @param flag - The flag to set, must be one of: ['tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'partial'].
     * @param value - The flag boolean value.
     */
    addCursorFlag(flag: CursorFlag, value: boolean): this;
    /**
     * Map all documents using the provided function.
     * If there is a transform set on the cursor, that will be called first and the result passed to
     * this function's transform.
     *
     * @remarks
     *
     * **Note** Cursors use `null` internally to indicate that there are no more documents in the cursor. Providing a mapping
     * function that maps values to `null` will result in the cursor closing itself before it has finished iterating
     * all documents. This will **not** result in a memory leak, just surprising behavior. For example:
     *
     * ```typescript
     * const cursor = collection.find({});
     * cursor.map(() => null);
     *
     * const documents = await cursor.toArray();
     * // documents is always [], regardless of how many documents are in the collection.
     * ```
     *
     * Other falsey values are allowed:
     *
     * ```typescript
     * const cursor = collection.find({});
     * cursor.map(() => '');
     *
     * const documents = await cursor.toArray();
     * // documents is now an array of empty strings
     * ```
     *
     * **Note for Typescript Users:** adding a transform changes the return type of the iteration of this cursor,
     * it **does not** return a new instance of a cursor. This means when calling map,
     * you should always assign the result to a new variable in order to get a correctly typed cursor variable.
     * Take note of the following example:
     *
     * @example
     * ```typescript
     * const cursor: FindCursor<Document> = coll.find();
     * const mappedCursor: FindCursor<number> = cursor.map(doc => Object.keys(doc).length);
     * const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[]
     * ```
     * @param transform - The mapping transformation method.
     */
    map<T = any>(transform: (doc: TSchema) => T): AbstractCursor<T>;
    /**
     * Set the ReadPreference for the cursor.
     *
     * @param readPreference - The new read preference for the cursor.
     */
    withReadPreference(readPreference: ReadPreferenceLike): this;
    /**
     * Set the ReadConcern for the cursor.
     *
     * @param readConcern - The new read concern for the cursor.
     */
    withReadConcern(readConcern: ReadConcernLike): this;
    /**
     * Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (only supported on MongoDB 2.6 or higher)
     *
     * @param value - Number of milliseconds to wait before aborting the query.
     */
    maxTimeMS(value: number): this;
    /**
     * Set the batch size for the cursor.
     *
     * @param value - The number of documents to return per batch. See {@link https://www.mongodb.com/docs/manual/reference/command/find/|find command documentation}.
     */
    batchSize(value: number): this;
    /**
     * Rewind this cursor to its uninitialized state. Any options that are present on the cursor will
     * remain in effect. Iterating this cursor will cause new queries to be sent to the server, even
     * if the resultant data has already been retrieved by this cursor.
     */
    rewind(): void;
    /**
     * Returns a new uninitialized copy of this cursor, with options matching those that have been set on the current instance
     */
    abstract clone(): AbstractCursor<TSchema>;
}

/** @public */
export declare type AbstractCursorEvents = {
    [AbstractCursor.CLOSE](): void;
};
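/**
 * Usage sketch (editor's illustration, not part of the driver's API surface):
 * the common ways to consume an AbstractCursor. Assumes `collection` is an
 * already-obtained Collection instance.
 *
 * ```typescript
 * const cursor = collection.find({});
 *
 * // 1. Async iteration (calls next() under the hood)
 * for await (const doc of cursor) {
 *   console.log(doc);
 * }
 *
 * // 2. Manual iteration; next() resolves null once the cursor is exhausted
 * cursor.rewind(); // documented to reset the cursor so it re-queries the server
 * while (await cursor.hasNext()) {
 *   console.log(await cursor.next());
 * }
 *
 * // 3. Drain into an array (caller must ensure results fit in memory)
 * cursor.rewind();
 * const docs = await cursor.toArray();
 * ```
 */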
/** @public */
export declare interface AbstractCursorOptions extends BSONSerializeOptions {
    session?: ClientSession;
    readPreference?: ReadPreferenceLike;
    readConcern?: ReadConcernLike;
    /**
     * Specifies the number of documents to return in each response from MongoDB
     */
    batchSize?: number;
    /**
     * When applicable `maxTimeMS` controls the amount of time the initial command
     * that constructs a cursor should take. (ex. find, aggregate, listCollections)
     */
    maxTimeMS?: number;
    /**
     * When applicable `maxAwaitTimeMS` controls the amount of time subsequent getMores
     * that a cursor uses to fetch more data should take. (ex. cursor.next())
     */
    maxAwaitTimeMS?: number;
    /**
     * Comment to apply to the operation.
     *
     * In server versions pre-4.4, 'comment' must be string. A server
     * error will be thrown if any other type is provided.
     *
     * In server versions 4.4 and above, 'comment' can be any valid BSON type.
     */
    comment?: unknown;
    /**
     * By default, MongoDB will automatically close a cursor when the
     * client has exhausted all results in the cursor. However, for
     * [capped collections](https://www.mongodb.com/docs/manual/core/capped-collections)
     * you may use a Tailable Cursor that remains open after the client exhausts
     * the results in the initial cursor.
     */
    tailable?: boolean;
    /**
     * If awaitData is set to true, when the cursor reaches the end of the capped collection,
     * MongoDB blocks the query thread for a period of time waiting for new data to arrive.
     * When new data is inserted into the capped collection, the blocked thread is signaled
     * to wake up and return the next batch to the client.
     */
    awaitData?: boolean;
    noCursorTimeout?: boolean;
}

/** @public */
export declare type AcceptedFields<TSchema, FieldType, AssignableType> = {
    readonly [key in KeysOfAType<TSchema, FieldType>]?: AssignableType;
};

/** @public */
export declare type AddToSetOperators<Type> = {
    $each?: Array<Flatten<Type>>;
};

/** @public */
export declare interface AddUserOptions extends CommandOperationOptions {
    /** Roles associated with the created user */
    roles?: string | string[] | RoleSpecification | RoleSpecification[];
    /** Custom data associated with the user (only MongoDB 2.6 or higher) */
    customData?: Document;
}
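/**
 * Usage sketch (editor's illustration): tailing a capped collection with the
 * cursor options above. Assumes `db` is a Db instance and `'events'` is a
 * capped collection.
 *
 * ```typescript
 * const cursor = db.collection('events').find(
 *   {},
 *   { tailable: true, awaitData: true, maxAwaitTimeMS: 1000 }
 * );
 * for await (const event of cursor) {
 *   console.log(event); // keeps waiting for new inserts instead of closing
 * }
 * ```
 */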
/**
 * The **Admin** class is an internal class that allows convenient access to
 * the admin functionality and commands for MongoDB.
 *
 * **ADMIN Cannot directly be instantiated**
 * @public
 *
 * @example
 * ```ts
 * import { MongoClient } from 'mongodb';
 *
 * const client = new MongoClient('mongodb://localhost:27017');
 * const admin = client.db().admin();
 * const dbInfo = await admin.listDatabases();
 * for (const db of dbInfo.databases) {
 *   console.log(db.name);
 * }
 * ```
 */
export declare class Admin {
    /**
     * Execute a command
     *
     * @param command - The command to execute
     * @param options - Optional settings for the command
     */
    command(command: Document, options?: RunCommandOptions): Promise<Document>;
    /**
     * Retrieve the server build information
     *
     * @param options - Optional settings for the command
     */
    buildInfo(options?: CommandOperationOptions): Promise<Document>;
    /**
     * Retrieve the server build information
     *
     * @param options - Optional settings for the command
     */
    serverInfo(options?: CommandOperationOptions): Promise<Document>;
    /**
     * Retrieve this db's server status.
     *
     * @param options - Optional settings for the command
     */
    serverStatus(options?: CommandOperationOptions): Promise<Document>;
    /**
     * Ping the MongoDB server and retrieve results
     *
     * @param options - Optional settings for the command
     */
    ping(options?: CommandOperationOptions): Promise<Document>;
    /**
     * Add a user to the database
     *
     * @param username - The username for the new user
     * @param passwordOrOptions - An optional password for the new user, or the options for the command
     * @param options - Optional settings for the command
     */
    addUser(username: string, passwordOrOptions?: string | AddUserOptions, options?: AddUserOptions): Promise<Document>;
    /**
     * Remove a user from a database
     *
     * @param username - The username to remove
     * @param options - Optional settings for the command
     */
    removeUser(username: string, options?: RemoveUserOptions): Promise<boolean>;
    /**
     * Validate an existing collection
     *
     * @param collectionName - The name of the collection to validate.
     * @param options - Optional settings for the command
     */
    validateCollection(collectionName: string, options?: ValidateCollectionOptions): Promise<Document>;
    /**
     * List the available databases
     *
     * @param options - Optional settings for the command
     */
    listDatabases(options?: ListDatabasesOptions): Promise<ListDatabasesResult>;
    /**
     * Get ReplicaSet status
     *
     * @param options - Optional settings for the command
     */
    replSetGetStatus(options?: CommandOperationOptions): Promise<Document>;
}

/** @public */
export declare interface AggregateOptions extends CommandOperationOptions {
    /** allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires MongoDB 2.6 or higher). */
    allowDiskUse?: boolean;
    /** The number of documents to return per batch. See [aggregation documentation](https://www.mongodb.com/docs/manual/reference/command/aggregate). */
    batchSize?: number;
    /** Allow driver to bypass schema validation in MongoDB 3.2 or higher. */
    bypassDocumentValidation?: boolean;
    /** Return the query as a cursor; on MongoDB 2.6 or higher this is a real server-side cursor, on earlier versions it is emulated by the driver. */
    cursor?: Document;
    /** Specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point. */
    maxTimeMS?: number;
    /** The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. */
    maxAwaitTimeMS?: number;
    /** Specify collation. */
    collation?: CollationOptions;
    /** Add an index selection hint to an aggregation command */
    hint?: Hint;
    /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
    let?: Document;
    out?: string;
}
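/**
 * Usage sketch (editor's illustration): running an aggregation with some of
 * the AggregateOptions above. Assumes `collection` is a Collection instance.
 *
 * ```typescript
 * const cursor = collection.aggregate(
 *   [
 *     { $match: { status: 'active' } },
 *     { $group: { _id: '$region', total: { $sum: 1 } } },
 *   ],
 *   { allowDiskUse: true, maxTimeMS: 10_000 }
 * );
 * const totals = await cursor.toArray();
 * ```
 */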
/**
 * The **AggregationCursor** class is an internal class that embodies an aggregation cursor on MongoDB,
 * allowing for iteration over the results returned from the underlying query. It supports
 * one-by-one document iteration, conversion to an array, and consumption as a Node.js stream.
 * @public
 */
export declare class AggregationCursor<TSchema = any> extends AbstractCursor<TSchema> {
    get pipeline(): Document[];
    clone(): AggregationCursor<TSchema>;
    map<T>(transform: (doc: TSchema) => T): AggregationCursor<T>;
    /** Execute the explain for the cursor */
    explain(verbosity?: ExplainVerbosityLike): Promise<Document>;
    /** Add a group stage to the aggregation pipeline */
    group<T = TSchema>($group: Document): AggregationCursor<T>;
    /** Add a limit stage to the aggregation pipeline */
    limit($limit: number): this;
    /** Add a match stage to the aggregation pipeline */
    match($match: Document): this;
    /** Add an out stage to the aggregation pipeline */
    out($out: {
        db: string;
        coll: string;
    } | string): this;
    /**
     * Add a project stage to the aggregation pipeline
     *
     * @remarks
     * In order to strictly type this function you must provide an interface
     * that represents the effect of your projection on the result documents.
     *
     * By default chaining a projection to your cursor changes the returned type to the generic {@link Document} type.
     * You should specify a parameterized type to have assertions on your final results.
     *
     * @example
     * ```typescript
     * // Best way
     * const docs: AggregationCursor<{ a: number }> = cursor.project<{ a: number }>({ _id: 0, a: true });
     * // Flexible way
     * const docs: AggregationCursor<Document> = cursor.project({ _id: 0, a: true });
     * ```
     *
     * @remarks
     * **Note for Typescript Users:** adding a transform changes the return type of the iteration of this cursor,
     * it **does not** return a new instance of a cursor. This means when calling project,
     * you should always assign the result to a new variable in order to get a correctly typed cursor variable.
     * Take note of the following example:
     *
     * @example
     * ```typescript
     * const cursor: AggregationCursor<{ a: number; b: string }> = coll.aggregate([]);
     * const projectCursor = cursor.project<{ a: number }>({ _id: 0, a: true });
     * const aPropOnlyArray: { a: number }[] = await projectCursor.toArray();
     *
     * // or always use chaining and save the final cursor
     *
     * const cursor = coll.aggregate().project<{ a: string }>({
     *   _id: 0,
     *   a: { $convert: { input: '$a', to: 'string' } }
     * });
     * ```
     */
    project<T extends Document = Document>($project: Document): AggregationCursor<T>;
    /** Add a lookup stage to the aggregation pipeline */
    lookup($lookup: Document): this;
    /** Add a redact stage to the aggregation pipeline */
    redact($redact: Document): this;
    /** Add a skip stage to the aggregation pipeline */
    skip($skip: number): this;
    /** Add a sort stage to the aggregation pipeline */
    sort($sort: Sort): this;
    /** Add an unwind stage to the aggregation pipeline */
    unwind($unwind: Document | string): this;
    /** Add a geoNear stage to the aggregation pipeline */
    geoNear($geoNear: Document): this;
}

/** @public */
export declare interface AggregationCursorOptions extends AbstractCursorOptions, AggregateOptions {
}

/**
 * It is possible to search using alternative types in mongodb e.g.
 * string types can be searched using a regex in mongo
 * array types can be searched using their element type
 * @public
 */
export declare type AlternativeType<T> = T extends ReadonlyArray<infer U> ? T | RegExpOrString<U> : RegExpOrString<T>;

/** @public */
export declare type AnyBulkWriteOperation<TSchema extends Document = Document> = {
    insertOne: InsertOneModel<TSchema>;
} | {
    replaceOne: ReplaceOneModel<TSchema>;
} | {
    updateOne: UpdateOneModel<TSchema>;
} | {
    updateMany: UpdateManyModel<TSchema>;
} | {
    deleteOne: DeleteOneModel<TSchema>;
} | {
    deleteMany: DeleteManyModel<TSchema>;
};

/** @public */
export declare type AnyError = MongoError | Error;

/** @public */
export declare type ArrayElement<Type> = Type extends ReadonlyArray<infer Item> ? Item : never;
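/**
 * Usage sketch (editor's illustration): AnyBulkWriteOperation is the shape of
 * each element passed to Collection.bulkWrite(). Assumes `collection` is a
 * Collection<{ name: string; qty: number }>.
 *
 * ```typescript
 * const result = await collection.bulkWrite([
 *   { insertOne: { document: { name: 'widget', qty: 5 } } },
 *   { updateOne: { filter: { name: 'gadget' }, update: { $inc: { qty: 1 } }, upsert: true } },
 *   { deleteMany: { filter: { qty: { $lte: 0 } } } },
 * ]);
 * console.log(result.insertedCount, result.modifiedCount, result.deletedCount);
 * ```
 */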
/** @public */
export declare type ArrayOperator<Type> = {
    $each?: Array<Flatten<Type>>;
    $slice?: number;
    $position?: number;
    $sort?: Sort;
};

/** @public */
export declare interface Auth {
    /** The username for auth */
    username?: string;
    /** The password for auth */
    password?: string;
}

/** @public */
export declare const AuthMechanism: Readonly<{
    readonly MONGODB_AWS: "MONGODB-AWS";
    readonly MONGODB_CR: "MONGODB-CR";
    readonly MONGODB_DEFAULT: "DEFAULT";
    readonly MONGODB_GSSAPI: "GSSAPI";
    readonly MONGODB_PLAIN: "PLAIN";
    readonly MONGODB_SCRAM_SHA1: "SCRAM-SHA-1";
    readonly MONGODB_SCRAM_SHA256: "SCRAM-SHA-256";
    readonly MONGODB_X509: "MONGODB-X509";
    /** @experimental */
    readonly MONGODB_OIDC: "MONGODB-OIDC";
}>;

/** @public */
export declare type AuthMechanism = (typeof AuthMechanism)[keyof typeof AuthMechanism];

/** @public */
export declare interface AuthMechanismProperties extends Document {
    SERVICE_HOST?: string;
    SERVICE_NAME?: string;
    SERVICE_REALM?: string;
    CANONICALIZE_HOST_NAME?: GSSAPICanonicalizationValue;
    AWS_SESSION_TOKEN?: string;
    /** @experimental */
    REQUEST_TOKEN_CALLBACK?: OIDCRequestFunction;
    /** @experimental */
    REFRESH_TOKEN_CALLBACK?: OIDCRefreshFunction;
    /** @experimental */
    PROVIDER_NAME?: "aws";
}

/** @public */
export declare interface AutoEncrypter {
    new (client: MongoClient, options: AutoEncryptionOptions): AutoEncrypter;
    init(cb: Callback): void;
    teardown(force: boolean, callback: Callback): void;
    encrypt(ns: string, cmd: Document, options: any, callback: Callback<Document>): void;
    decrypt(cmd: Document, options: any, callback: Callback<Document>): void;
    /** @experimental */
    readonly cryptSharedLibVersionInfo: {
        version: bigint;
        versionStr: string;
    } | null;
}

/** @public */
export declare const AutoEncryptionLoggerLevel: Readonly<{
    readonly FatalError: 0;
    readonly Error: 1;
    readonly Warning: 2;
    readonly Info: 3;
    readonly Trace: 4;
}>;

/** @public */
export declare type AutoEncryptionLoggerLevel = (typeof AutoEncryptionLoggerLevel)[keyof typeof AutoEncryptionLoggerLevel];
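/**
 * Usage sketch (editor's illustration): selecting one of the AuthMechanism
 * values above through the connection string. Host and credentials are
 * placeholders.
 *
 * ```typescript
 * const client = new MongoClient(
 *   'mongodb://appUser:appPass@localhost:27017/?authMechanism=SCRAM-SHA-256&authSource=admin'
 * );
 * await client.connect();
 * ```
 */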
/** @public */
export declare interface AutoEncryptionOptions {
    /** A `MongoClient` used to fetch keys from a key vault */
    keyVaultClient?: MongoClient;
    /** The namespace where keys are stored in the key vault */
    keyVaultNamespace?: string;
    /** Configuration options that are used by specific KMS providers during key generation, encryption, and decryption. */
    kmsProviders?: {
        /** Configuration options for using 'aws' as your KMS provider */
        aws?: {
            /** The access key used for the AWS KMS provider */
            accessKeyId: string;
            /** The secret access key used for the AWS KMS provider */
            secretAccessKey: string;
            /**
             * An optional AWS session token that will be used as the
             * X-Amz-Security-Token header for AWS requests.
             */
            sessionToken?: string;
        } | Record<string, never>;
        /** Configuration options for using 'local' as your KMS provider */
        local?: {
            /**
             * The master key used to encrypt/decrypt data keys.
             * A 96-byte long Buffer or base64 encoded string.
             */
            key: Buffer | string;
        };
        /** Configuration options for using 'azure' as your KMS provider */
        azure?: {
            /** The tenant ID identifies the organization for the account */
            tenantId: string;
            /** The client ID to authenticate a registered application */
            clientId: string;
            /** The client secret to authenticate a registered application */
            clientSecret: string;
            /**
             * If present, a host with optional port. E.g. "example.com" or "example.com:443".
             * This is optional, and only needed if the customer is using a non-commercial Azure instance
             * (e.g. a government or China account, which use different URLs).
             * Defaults to "login.microsoftonline.com"
             */
            identityPlatformEndpoint?: string | undefined;
        } | {
            /** If present, an access token to authenticate with Azure. */
            accessToken: string;
        } | Record<string, never>;
        /** Configuration options for using 'gcp' as your KMS provider */
        gcp?: {
            /** The service account email to authenticate */
            email: string;
            /** A PKCS#8 encrypted key. This can either be a base64 string or a binary representation */
            privateKey: string | Buffer;
            /**
             * If present, a host with optional port. E.g. "example.com" or "example.com:443".
             * Defaults to "oauth2.googleapis.com"
             */
            endpoint?: string | undefined;
        } | {
            /** If present, an access token to authenticate with GCP. */
            accessToken: string;
        } | Record<string, never>;
        /** Configuration options for using 'kmip' as your KMS provider */
        kmip?: {
            /**
             * The output endpoint string.
             * The endpoint consists of a hostname and port separated by a colon.
             * E.g. "example.com:123". A port is always present.
             */
            endpoint?: string;
        };
    };
    /**
     * A map of namespaces to a local JSON schema for encryption
     *
     * **NOTE**: Supplying options.schemaMap provides more security than relying on JSON Schemas obtained from the server.
     * It protects against a malicious server advertising a false JSON Schema, which could trick the client into sending decrypted data that should be encrypted.
     * Schemas supplied in the schemaMap only apply to configuring automatic encryption for Client-Side Field Level Encryption.
     * Other validation rules in the JSON schema will not be enforced by the driver and will result in an error.
     */
    schemaMap?: Document;
    /** @experimental Public Technical Preview: Supply a schema for the encrypted fields in the document */
    encryptedFieldsMap?: Document;
    /** Allows the user to bypass auto encryption, maintaining implicit decryption */
    bypassAutoEncryption?: boolean;
    /** @experimental Public Technical Preview: Allows users to bypass query analysis */
    bypassQueryAnalysis?: boolean;
    options?: {
        /** An optional hook to catch logging messages from the underlying encryption engine */
        logger?: (level: AutoEncryptionLoggerLevel, message: string) => void;
    };
    extraOptions?: {
        /**
         * A local process the driver communicates with to determine how to encrypt values in a command.
         * Defaults to "mongodb://%2Fvar%2Fmongocryptd.sock" if domain sockets are available or "mongodb://localhost:27020" otherwise
         */
        mongocryptdURI?: string;
        /** If true, autoEncryption will not attempt to spawn a mongocryptd before connecting */
        mongocryptdBypassSpawn?: boolean;
        /** The path to the mongocryptd executable on the system */
        mongocryptdSpawnPath?: string;
        /** Command line arguments to use when auto-spawning a mongocryptd */
        mongocryptdSpawnArgs?: string[];
        /**
         * Full path to a MongoDB Crypt shared library to be used (instead of mongocryptd).
         *
         * This needs to be the path to the file itself, not a directory.
         * It can be an absolute or relative path. If the path is relative and
         * its first component is `$ORIGIN`, it will be replaced by the directory
         * containing the mongodb-client-encryption native addon file. Otherwise,
         * the path will be interpreted relative to the current working directory.
         *
         * Currently, loading different MongoDB Crypt shared library files from different
         * MongoClients in the same process is not supported.
         *
         * If this option is provided and no MongoDB Crypt shared library could be loaded
         * from the specified location, creating the MongoClient will fail.
         *
         * If this option is not provided and `cryptSharedLibRequired` is not specified,
         * the AutoEncrypter will attempt to spawn and/or use mongocryptd according
         * to the mongocryptd-specific `extraOptions` options.
         *
         * Specifying a path prevents mongocryptd from being used as a fallback.
         *
         * Requires the MongoDB Crypt shared library, available in MongoDB 6.0 or higher.
         */
        cryptSharedLibPath?: string;
        /**
         * If specified, never use mongocryptd and instead fail when the MongoDB Crypt
         * shared library could not be loaded.
         *
         * This is always true when `cryptSharedLibPath` is specified.
         *
         * Requires the MongoDB Crypt shared library, available in MongoDB 6.0 or higher.
         */
        cryptSharedLibRequired?: boolean;
    };
    proxyOptions?: ProxyOptions;
    /** The TLS options to use connecting to the KMS provider */
    tlsOptions?: {
        aws?: AutoEncryptionTlsOptions;
        local?: AutoEncryptionTlsOptions;
        azure?: AutoEncryptionTlsOptions;
        gcp?: AutoEncryptionTlsOptions;
        kmip?: AutoEncryptionTlsOptions;
    };
}
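/**
 * Usage sketch (editor's illustration): enabling automatic client-side field
 * level encryption with the 'local' KMS provider. Key material and namespace
 * names are placeholders.
 *
 * ```typescript
 * import { randomBytes } from 'crypto';
 *
 * const localMasterKey = randomBytes(96); // persist this key in real use
 * const client = new MongoClient('mongodb://localhost:27017', {
 *   autoEncryption: {
 *     keyVaultNamespace: 'encryption.__keyVault',
 *     kmsProviders: { local: { key: localMasterKey } },
 *   },
 * });
 * ```
 */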
/** @public */
export declare interface AutoEncryptionTlsOptions {
    /**
     * Specifies the location of a local .pem file that contains
     * either the client's TLS/SSL certificate and key or only the
     * client's TLS/SSL key when tlsCertificateFile is used to
     * provide the certificate.
     */
    tlsCertificateKeyFile?: string;
    /** Specifies the password to decrypt the tlsCertificateKeyFile. */
    tlsCertificateKeyFilePassword?: string;
    /**
     * Specifies the location of a local .pem file that contains the
     * root certificate chain from the Certificate Authority.
     * This file is used to validate the certificate presented by the
     * KMS provider.
     */
    tlsCAFile?: string;
}

/**
 * Keeps the state of an unordered batch so we can rewrite the results
 * correctly after command execution
 *
 * @public
 */
export declare class Batch<T = Document> {
    originalZeroIndex: number;
    currentIndex: number;
    originalIndexes: number[];
    batchType: BatchType;
    operations: T[];
    size: number;
    sizeBytes: number;
    constructor(batchType: BatchType, originalZeroIndex: number);
}

/** @public */
export declare const BatchType: Readonly<{
    readonly INSERT: 1;
    readonly UPDATE: 2;
    readonly DELETE: 3;
}>;

/** @public */
export declare type BatchType = (typeof BatchType)[keyof typeof BatchType];

export { Binary };

/** @public */
export declare type BitwiseFilter = number /** numeric bit mask */ | Binary /** BinData bit mask */ | ReadonlyArray<number>;

export { BSON };
export { BSONRegExp };
/**
 * BSON Serialization options.
 * @public
 */
export declare interface BSONSerializeOptions extends Omit<SerializeOptions, "index">, Omit<DeserializeOptions, "evalFunctions" | "cacheFunctions" | "cacheFunctionsCrc32" | "allowObjectSmallerThanBufferSize" | "index" | "validation"> {
    /**
     * Enabling the raw option will return a [Node.js Buffer](https://nodejs.org/api/buffer.html)
     * which is allocated using [allocUnsafe API](https://nodejs.org/api/buffer.html#static-method-bufferallocunsafesize).
     * See this section from the [Node.js Docs here](https://nodejs.org/api/buffer.html#what-makes-bufferallocunsafe-and-bufferallocunsafeslow-unsafe)
     * for more detail about what "unsafe" refers to in this context.
     * If you need to maintain your own editable clone of the bytes returned for an extended life time of the process, it is recommended you allocate
     * your own buffer and clone the contents:
     *
     * @example
     * ```ts
     * const raw = await collection.findOne({}, { raw: true });
     * const myBuffer = Buffer.alloc(raw.byteLength);
     * myBuffer.set(raw, 0);
     * // Only save and use `myBuffer` beyond this point
     * ```
     *
     * @remarks
     * Please note there is a known limitation where this option cannot be used at the MongoClient level (see [NODE-3946](https://jira.mongodb.org/browse/NODE-3946)).
     * It does correctly work at `Db`, `Collection`, and per operation the same as other BSON options work.
     */
    raw?: boolean;
    /** Enable utf8 validation when deserializing BSON documents. Defaults to true. */
    enableUtf8Validation?: boolean;
}

export { BSONSymbol };
export { BSONType };

/** @public */
export declare type BSONTypeAlias = keyof typeof BSONType;

/** @public */
export declare abstract class BulkOperationBase {
    isOrdered: boolean;
    operationId?: number;
    /**
     * Add a single insert document to the bulk operation
     *
     * @example
     * ```ts
     * const bulkOp = collection.initializeOrderedBulkOp();
     *
     * // Adds three inserts to the bulkOp.
     * bulkOp
     *   .insert({ a: 1 })
     *   .insert({ b: 2 })
     *   .insert({ c: 3 });
     * await bulkOp.execute();
     * ```
     */
    insert(document: Document): BulkOperationBase;
    /**
     * Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne.
     * Returns a builder object used to complete the definition of the operation.
     *
     * @example
     * ```ts
     * const bulkOp = collection.initializeOrderedBulkOp();
     *
     * // Add an updateOne to the bulkOp
     * bulkOp.find({ a: 1 }).updateOne({ $set: { b: 2 } });
     *
     * // Add an updateMany to the bulkOp
     * bulkOp.find({ c: 3 }).update({ $set: { d: 4 } });
     *
     * // Add an upsert
     * bulkOp.find({ e: 5 }).upsert().updateOne({ $set: { f: 6 } });
     *
     * // Add a deletion
     * bulkOp.find({ g: 7 }).deleteOne();
     *
     * // Add a multi deletion
     * bulkOp.find({ h: 8 }).delete();
     *
     * // Add a replaceOne
     * bulkOp.find({ i: 9 }).replaceOne({ writeConcern: { j: 10 } });
     *
     * // Update using a pipeline (requires MongoDB 4.2 or higher)
     * bulkOp.find({ k: 11, y: { $exists: true }, z: { $exists: true } }).updateOne([
     *   { $set: { total: { $sum: ['$y', '$z'] } } }
     * ]);
     *
     * // All of the ops will now be executed
     * await bulkOp.execute();
     * ```
     */
    find(selector: Document): FindOperators;
    /** Specifies a raw operation to perform in the bulk write. */
    raw(op: AnyBulkWriteOperation): this;
    get bsonOptions(): BSONSerializeOptions;
    get writeConcern(): WriteConcern | undefined;
    get batches(): Batch[];
    execute(options?: BulkWriteOptions): Promise<BulkWriteResult>;
    abstract addToOperationsList(batchType: BatchType, document: Document | UpdateStatement | DeleteStatement): this;
}

/** @public */
export declare interface BulkWriteOperationError {
    index: number;
    code: number;
    errmsg: string;
    errInfo: Document;
    op: Document | UpdateStatement | DeleteStatement;
}

/** @public */
export declare interface BulkWriteOptions extends CommandOperationOptions {
    /** Allow driver to bypass schema validation in MongoDB 3.2 or higher. */
    bypassDocumentValidation?: boolean;
    /** If true, when an insert fails, don't execute the remaining writes. If false, continue with remaining inserts when one fails. */
    ordered?: boolean;
    /** Force server to assign _id values instead of driver. */
    forceServerObjectId?: boolean;
    /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
    let?: Document;
}
/**
 * @public
 * The result of a bulk write.
 */
export declare class BulkWriteResult {
    private readonly result;
    /** Number of documents inserted. */
    readonly insertedCount: number;
    /** Number of documents matched for update. */
    readonly matchedCount: number;
    /** Number of documents modified. */
    readonly modifiedCount: number;
    /** Number of documents deleted. */
    readonly deletedCount: number;
    /** Number of documents upserted. */
    readonly upsertedCount: number;
    /** Generated ids of upserted documents; the hash key is the index of the originating operation */
    readonly upsertedIds: {
        [key: number]: any;
    };
    /** Generated ids of inserted documents; the hash key is the index of the originating operation */
    readonly insertedIds: {
        [key: number]: any;
    };
    private static generateIdMap;
    /** Evaluates to true if the bulk operation correctly executes */
    get ok(): number;
    /** The number of inserted documents */
    get nInserted(): number;
    /** Number of upserted documents */
    get nUpserted(): number;
    /** Number of matched documents */
    get nMatched(): number;
    /** Number of documents updated physically on disk */
    get nModified(): number;
    /** Number of removed documents */
    get nRemoved(): number;
    /** Returns an array of all inserted ids */
    getInsertedIds(): Document[];
    /** Returns an array of all upserted ids */
    getUpsertedIds(): Document[];
    /** Returns the upserted id at the given index */
    getUpsertedIdAt(index: number): Document | undefined;
    /** Returns raw internal result */
    getRawResponse(): Document;
    /** Returns true if the bulk operation contains a write error */
    hasWriteErrors(): boolean;
    /** Returns the number of write errors from the bulk operation */
    getWriteErrorCount(): number;
    /** Returns a specific write error object */
    getWriteErrorAt(index: number): WriteError | undefined;
    /** Retrieve all write errors */
    getWriteErrors(): WriteError[];
    /** Retrieve the write concern error if one exists */
    getWriteConcernError(): WriteConcernError | undefined;
    toString(): string;
    isOk(): boolean;
}

/**
 * MongoDB Driver style callback
 * @public
 */
export declare type Callback<T = any> = (error?: AnyError, result?: T) => void;

/** @public */
export declare class CancellationToken extends TypedEventEmitter<{
    cancel(): void;
}> {
}
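/**
 * Usage sketch (editor's illustration): inspecting a BulkWriteResult after a
 * bulk operation. Assumes `bulkOp` was built as in the examples above.
 *
 * ```typescript
 * const result = await bulkOp.execute();
 * console.log(`inserted=${result.insertedCount} upserted=${result.upsertedCount}`);
 * console.log('upserted ids:', result.getUpsertedIds());
 * ```
 */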
/**
 * Creates a new Change Stream instance. Normally created using {@link Collection#watch|Collection.watch()}.
 * @public
 */
export declare class ChangeStream<TSchema extends Document = Document, TChange extends Document = ChangeStreamDocument<TSchema>> extends TypedEventEmitter<ChangeStreamEvents<TSchema, TChange>> {
    pipeline: Document[];
    /**
     * @remarks WriteConcern can still be present on the options because
     * we inherit options from the client/db/collection. The
     * key must be present on the options in order to delete it.
     * This allows typescript to delete the key but will
     * not allow a writeConcern to be assigned as a property on options.
     */
    options: ChangeStreamOptions & {
        writeConcern?: never;
    };
    parent: MongoClient | Db | Collection;
    namespace: MongoDBNamespace;
    type: symbol;
    streamOptions?: CursorStreamOptions;
    /** @event */
    static readonly RESPONSE: "response";
    /** @event */
    static readonly MORE: "more";
    /** @event */
    static readonly INIT: "init";
    /** @event */
    static readonly CLOSE: "close";
    /**
     * Fired for each new matching change in the specified namespace. Attaching a `change`
     * event listener to a Change Stream will switch the stream into flowing mode. Data will
     * then be passed as soon as it is available.
     * @event
     */
    static readonly CHANGE: "change";
    /** @event */
    static readonly END: "end";
    /** @event */
    static readonly ERROR: "error";
    /**
     * Emitted each time the change stream stores a new resume token.
     * @event
     */
    static readonly RESUME_TOKEN_CHANGED: "resumeTokenChanged";
    /** The cached resume token that is used to resume after the most recently returned change. */
    get resumeToken(): ResumeToken;
    /** Check if there is any document still available in the Change Stream */
    hasNext(): Promise<boolean>;
    /** Get the next available document from the Change Stream. */
    next(): Promise<TChange>;
    /**
     * Try to get the next available document from the Change Stream's cursor or `null` if an empty batch is returned
     */
    tryNext(): Promise<Document | null>;
    [Symbol.asyncIterator](): AsyncGenerator<TChange, void, void>;
    /** Is the cursor closed */
    get closed(): boolean;
    /** Close the Change Stream */
    close(): Promise<void>;
    /**
     * Return a modified Readable stream including a possible transform method.
     *
     * NOTE: When using a Stream to process change stream events, the stream will
     * NOT automatically resume in the case a resumable error is encountered.
     *
     * @throws MongoChangeStreamError if the underlying cursor or the change stream is closed
     */
    stream(options?: CursorStreamOptions): Readable & AsyncIterable<TChange>;
}
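/**
 * Usage sketch (editor's illustration): consuming a change stream either as an
 * async iterable or in flowing mode via the 'change' event. Assumes
 * `collection` is a Collection instance on a replica set or sharded cluster.
 *
 * ```typescript
 * const changeStream = collection.watch();
 *
 * // Pull-based consumption
 * for await (const change of changeStream) {
 *   console.log(change.operationType, changeStream.resumeToken);
 * }
 *
 * // ...or push-based (attaching a 'change' listener switches to flowing mode)
 * collection.watch().on('change', change => console.log(change));
 * ```
 */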
/**
 * Only present when the `showExpandedEvents` flag is enabled.
 * @public
 * @see https://www.mongodb.com/docs/manual/reference/change-events/
 */
export declare interface ChangeStreamCollModDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID {
    /** Describes the type of operation represented in this change notification */
    operationType: "modify";
}

/**
 * @public
 * @see https://www.mongodb.com/docs/manual/reference/change-events/
 */
export declare interface ChangeStreamCreateDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID {
    /** Describes the type of operation represented in this change notification */
    operationType: "create";
}

/**
 * Only present when the `showExpandedEvents` flag is enabled.
 * @public
 * @see https://www.mongodb.com/docs/manual/reference/change-events/
 */
export declare interface ChangeStreamCreateIndexDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID, ChangeStreamDocumentOperationDescription {
    /** Describes the type of operation represented in this change notification */
    operationType: "createIndexes";
}

/**
 * @public
 * @see https://www.mongodb.com/docs/manual/reference/change-events/#delete-event
 */
export declare interface ChangeStreamDeleteDocument<TSchema extends Document = Document> extends ChangeStreamDocumentCommon, ChangeStreamDocumentKey<TSchema>, ChangeStreamDocumentCollectionUUID {
    /** Describes the type of operation represented in this change notification */
    operationType: "delete";
    /** Namespace the delete event occurred on */
    ns: ChangeStreamNameSpace;
    /**
     * Contains the pre-image of the modified or deleted document if the
     * pre-image is available for the change event and either 'required' or
     * 'whenAvailable' was specified for the 'fullDocumentBeforeChange' option
     * when creating the change stream. If 'whenAvailable' was specified but the
     * pre-image is unavailable, this will be explicitly set to null.
     */
    fullDocumentBeforeChange?: TSchema;
}

/** @public */
export declare type ChangeStreamDocument<TSchema extends Document = Document> = ChangeStreamInsertDocument<TSchema> | ChangeStreamUpdateDocument<TSchema> | ChangeStreamReplaceDocument<TSchema> | ChangeStreamDeleteDocument<TSchema> | ChangeStreamDropDocument | ChangeStreamRenameDocument | ChangeStreamDropDatabaseDocument | ChangeStreamInvalidateDocument | ChangeStreamCreateIndexDocument | ChangeStreamCreateDocument | ChangeStreamCollModDocument | ChangeStreamDropIndexDocument | ChangeStreamShardCollectionDocument | ChangeStreamReshardCollectionDocument | ChangeStreamRefineCollectionShardKeyDocument;

/** @public */
export declare interface ChangeStreamDocumentCollectionUUID {
    /**
     * The UUID (Binary subtype 4) of the collection that the operation was performed on.
     *
     * Only present when the `showExpandedEvents` flag is enabled.
     *
     * **NOTE:** collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers
     * flag is enabled.
     *
     * @sinceServerVersion 6.1.0
     */
    collectionUUID: Binary;
}

/** @public */
export declare interface ChangeStreamDocumentCommon {
    /**
     * The id functions as an opaque token for use when resuming an interrupted
     * change stream.
     */
    _id: ResumeToken;
    /**
     * The timestamp from the oplog entry associated with the event.
     * For events that happened as part of a multi-document transaction, the associated change stream
     * notifications will have the same clusterTime value, namely the time when the transaction was committed.
     * On a sharded cluster, events that occur on different shards can have the same clusterTime but be
     * associated with different transactions or even not be associated with any transaction.
     * To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.
     */
    clusterTime?: Timestamp;
    /**
     * The transaction number.
     * Only present if the operation is part of a multi-document transaction.
     *
     * **NOTE:** txnNumber can be a Long if promoteLongs is set to false
     */
    txnNumber?: number;
    /**
     * The identifier for the session associated with the transaction.
     * Only present if the operation is part of a multi-document transaction.
     */
    lsid?: ServerSessionId;
}

/** @public */
export declare interface ChangeStreamDocumentKey<TSchema extends Document = Document> {
    /**
     * For unsharded collections this contains a single field `_id`.
     * For sharded collections, this will contain all the components of the shard key
     */
    documentKey: {
        _id: InferIdType<TSchema>;
        [shardKey: string]: any;
    };
}

/** @public */
export declare interface ChangeStreamDocumentOperationDescription {
    /**
     * A description of the operation.
     *
     * Only present when the `showExpandedEvents` flag is enabled.
     *
     * @sinceServerVersion 6.1.0
     */
    operationDescription?: Document;
}
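/**
 * Usage sketch (editor's illustration): ChangeStreamDocument is a
 * discriminated union on `operationType`, so a switch narrows each case to
 * its specific event interface.
 *
 * ```typescript
 * function handleChange(change: ChangeStreamDocument<{ name: string }>) {
 *   switch (change.operationType) {
 *     case 'insert':
 *       return console.log('inserted', change.fullDocument.name); // fullDocument is typed here
 *     case 'delete':
 *       return console.log('deleted', change.documentKey._id);
 *     case 'drop':
 *       return console.log('collection dropped:', change.ns.coll);
 *     default:
 *       return console.log(change.operationType);
 *   }
 * }
 * ```
 */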
/**
 * @public
 * @see https://www.mongodb.com/docs/manual/reference/change-events/#dropdatabase-event
 */
export declare interface ChangeStreamDropDatabaseDocument extends ChangeStreamDocumentCommon {
    /** Describes the type of operation represented in this change notification */
    operationType: "dropDatabase";
    /** The database dropped */
    ns: {
        db: string;
    };
}

/**
 * @public
 * @see https://www.mongodb.com/docs/manual/reference/change-events/#drop-event
 */
export declare interface ChangeStreamDropDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID {
    /** Describes the type of operation represented in this change notification */
    operationType: "drop";
    /** Namespace the drop event occurred on */
    ns: ChangeStreamNameSpace;
}

/**
 * Only present when the `showExpandedEvents` flag is enabled.
 * @public
 * @see https://www.mongodb.com/docs/manual/reference/change-events/
 */
export declare interface ChangeStreamDropIndexDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID, ChangeStreamDocumentOperationDescription {
    /** Describes the type of operation represented in this change notification */
    operationType: "dropIndexes";
}

/** @public */
export declare type ChangeStreamEvents<TSchema extends Document = Document, TChange extends Document = ChangeStreamDocument<TSchema>> = {
    resumeTokenChanged(token: ResumeToken): void;
    init(response: any): void;
    more(response?: any): void;
    response(): void;
    end(): void;
    error(error: Error): void;
    change(change: TChange): void;
} & AbstractCursorEvents;

/**
 * @public
 * @see https://www.mongodb.com/docs/manual/reference/change-events/#insert-event
 */
export declare interface ChangeStreamInsertDocument<TSchema extends Document = Document> extends ChangeStreamDocumentCommon, ChangeStreamDocumentKey<TSchema>, ChangeStreamDocumentCollectionUUID {
    /** Describes the type of operation represented in this change notification */
    operationType: "insert";
    /** This key will contain the document being inserted */
    fullDocument: TSchema;
    /** Namespace the insert event occurred on */
    ns: ChangeStreamNameSpace;
}

/**
 * @public
 * @see https://www.mongodb.com/docs/manual/reference/change-events/#invalidate-event
 */
export declare interface ChangeStreamInvalidateDocument extends ChangeStreamDocumentCommon {
    /** Describes the type of operation represented in this change notification */
    operationType: "invalidate";
}

/** @public */
export declare interface ChangeStreamNameSpace {
    db: string;
    coll: string;
}

/**
 * Options that can be passed to a ChangeStream. Note that startAfter, resumeAfter, and startAtOperationTime are all mutually exclusive, and the server will error if more than one is specified.
 * @public
 */
export declare interface ChangeStreamOptions extends Omit<AggregateOptions, "writeConcern"> {
    /**
     * Allowed values: 'updateLookup', 'whenAvailable', 'required'.
     *
     * When set to 'updateLookup', the change notification for partial updates
     * will include both a delta describing the changes to the document as well
     * as a copy of the entire document that was changed from some time after
     * the change occurred.
     *
     * When set to 'whenAvailable', configures the change stream to return the
     * post-image of the modified document for replace and update change events
     * if the post-image for this event is available.
     *
     * When set to 'required', the same behavior as 'whenAvailable' except that
     * an error is raised if the post-image is not available.
     */
    fullDocument?: string;
    /**
     * Allowed values: 'whenAvailable', 'required', 'off'.
     *
     * The default is to not send a value, which is equivalent to 'off'.
     *
     * When set to 'whenAvailable', configures the change stream to return the
     * pre-image of the modified document for replace, update, and delete change
     * events if it is available.
     *
     * When set to 'required', the same behavior as 'whenAvailable' except that
     * an error is raised if the pre-image is not available.
     */
    fullDocumentBeforeChange?: string;
    /** The maximum amount of time for the server to wait on new documents to satisfy a change stream query. */
    maxAwaitTimeMS?: number;
    /**
     * Allows you to start a changeStream after a specified event.
     * @see https://www.mongodb.com/docs/manual/changeStreams/#resumeafter-for-change-streams
     */
    resumeAfter?: ResumeToken;
    /**
     * Similar to resumeAfter, but will allow you to start after an invalidated event.
     * @see https://www.mongodb.com/docs/manual/changeStreams/#startafter-for-change-streams
     */
    startAfter?: ResumeToken;
    /** Will start the changeStream after the specified operationTime. */
    startAtOperationTime?: OperationTime;
    /**
     * The number of documents to return per batch.
     * @see https://www.mongodb.com/docs/manual/reference/command/aggregate
     */
    batchSize?: number;
    /**
     * When enabled, configures the change stream to include extra change events.
     *
     * - createIndexes
     * - dropIndexes
     * - modify
     * - create
     * - shardCollection
     * - reshardCollection
     * - refineCollectionShardKey
     */
    showExpandedEvents?: boolean;
}
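/**
 * Usage sketch (editor's illustration): requesting post-images and
 * resumability via ChangeStreamOptions. Assumes `collection` is a Collection
 * instance, `savedToken` is a previously persisted resume token (or
 * undefined), and `persist` is a hypothetical storage helper.
 *
 * ```typescript
 * const changeStream = collection.watch([], {
 *   fullDocument: 'updateLookup',
 *   resumeAfter: savedToken,
 * });
 * changeStream.on('resumeTokenChanged', token => persist(token)); // `persist` is not a driver API
 * ```
 */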
/**
 * @public
 * @see https://www.mongodb.com/docs/manual/reference/change-events/
 */
export declare interface ChangeStreamRefineCollectionShardKeyDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID, ChangeStreamDocumentOperationDescription {
    /** Describes the type of operation represented in this change notification */
    operationType: "refineCollectionShardKey";
}

/**
 * @public
 * @see https://www.mongodb.com/docs/manual/reference/change-events/#rename-event
 */
export declare interface ChangeStreamRenameDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID {
    /** Describes the type of operation represented in this change notification */
    operationType: "rename";
    /** The new name for the `ns.coll` collection */
    to: {
        db: string;
        coll: string;
    };
    /** The "from" namespace that the rename occurred on */
    ns: ChangeStreamNameSpace;
}

/**
 * @public
 * @see https:/