// mongodb: The official MongoDB driver for Node.js (public type declarations)
import type { DeserializeOptions } from 'bson';
import type { ObjectIdLike } from 'bson';
import type { SerializeOptions } from 'bson';
import { Binary } from 'bson';
import { BSON } from 'bson';
import { BSONRegExp } from 'bson';
import { BSONSymbol } from 'bson';
import { BSONType } from 'bson';
import { Code } from 'bson';
import { DBRef } from 'bson';
import { Decimal128 } from 'bson';
import { deserialize } from 'bson';
import { Document } from 'bson';
import { Double } from 'bson';
import { Int32 } from 'bson';
import { Long } from 'bson';
import { MaxKey } from 'bson';
import { MinKey } from 'bson';
import { ObjectId } from 'bson';
import { serialize } from 'bson';
import { Timestamp } from 'bson';
import { UUID } from 'bson';
import type { SrvRecord } from 'dns';
import { EventEmitter } from 'events';
import type { Socket } from 'net';
import type { TcpNetConnectOpts } from 'net';
import { Readable } from 'stream';
import { Writable } from 'stream';
import type { ConnectionOptions as ConnectionOptions_2 } from 'tls';
import type { TLSSocket } from 'tls';
import type { TLSSocketOptions } from 'tls';
/** @public */
export declare type Abortable = {
/**
* @experimental
* When provided, the corresponding `AbortController` can be used to abort an asynchronous action.
*
* The `signal.reason` value is used as the error thrown.
*
* @remarks
* **NOTE:** If an abort signal aborts an operation while the driver is writing to the underlying
* socket or reading the response from the server, the socket will be closed.
* If signals are aborted at a high rate during socket read/writes this can lead to a high rate of connection reestablishment.
*
* We plan to mitigate this in a future release, please follow NODE-6062 (`timeoutMS` expiration suffers the same limitation).
*
* AbortSignals are likely best suited to human-interactive interruption (ex. ctrl-C), where the frequency
* of cancellation is reasonably low. If signals are programmatically aborted for hundreds of operations,
* the driver's connection pool can be emptied.
*
* @example
* ```js
* const controller = new AbortController();
* const { signal } = controller;
* process.on('SIGINT', () => controller.abort(new Error('^C pressed')));
*
* try {
*   const res = await fetch('...', { signal });
*   await collection.findOne(await res.json(), { signal });
* } catch (error) {
*   if (error === signal.reason) {
*     // signal abort error handling
*   }
* }
* ```
*/
signal?: AbortSignal | undefined;
};
/** @public */
export declare abstract class AbstractCursor<TSchema = any, CursorEvents extends AbstractCursorEvents = AbstractCursorEvents> extends TypedEventEmitter<CursorEvents> implements AsyncDisposable_2 {
/* Excluded from this release type: cursorId */
/* Excluded from this release type: cursorSession */
/* Excluded from this release type: selectedServer */
/* Excluded from this release type: cursorNamespace */
/* Excluded from this release type: documents */
/* Excluded from this release type: cursorClient */
/* Excluded from this release type: transform */
/* Excluded from this release type: initialized */
/* Excluded from this release type: isClosed */
/* Excluded from this release type: isKilled */
/* Excluded from this release type: cursorOptions */
/* Excluded from this release type: timeoutContext */
/** @event */
static readonly CLOSE: "close";
/* Excluded from this release type: deserializationOptions */
protected signal: AbortSignal | undefined;
private abortListener;
/* Excluded from this release type: __constructor */
/**
* The cursor has no id until it receives a response from the initial cursor-creating command.
*
* It is non-zero for as long as the database has an open cursor.
*
* The initiating command may receive a zero id if the entire result is in the `firstBatch`.
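*
* @example
* A minimal sketch, assuming `collection` is an initialized Collection:
* ```ts
* const cursor = collection.find({});
* await cursor.hasNext(); // runs the initial find command
* if (cursor.id != null && !cursor.id.isZero()) {
*   // the server still holds an open cursor for this query
* }
* ```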
*/
get id(): Long | undefined;
/* Excluded from this release type: isDead */
/* Excluded from this release type: client */
/* Excluded from this release type: server */
get namespace(): MongoDBNamespace;
get readPreference(): ReadPreference;
get readConcern(): ReadConcern | undefined;
/* Excluded from this release type: session */
/* Excluded from this release type: session */
/**
* The cursor is closed and all remaining locally buffered documents have been iterated.
*/
get closed(): boolean;
/**
* A `killCursors` command was attempted on this cursor.
* This is performed if the cursor id is non-zero.
*/
get killed(): boolean;
get loadBalanced(): boolean;
/**
* @beta
* @experimental
* An alias for {@link AbstractCursor.close|AbstractCursor.close()}.
*/
[Symbol.asyncDispose]: () => Promise<void>;
/* Excluded from this release type: asyncDispose */
/** Adds cursor to client's tracking so it will be closed by MongoClient.close() */
private trackCursor;
/** Returns the number of documents currently buffered by the cursor */
bufferedCount(): number;
/** Returns the documents currently buffered by the cursor */
readBufferedDocuments(number?: number): NonNullable<TSchema>[];
[Symbol.asyncIterator](): AsyncGenerator<TSchema, void, void>;
stream(options?: CursorStreamOptions): Readable & AsyncIterable<TSchema>;
hasNext(): Promise<boolean>;
/** Get the next available document from the cursor, returns null if no more documents are available. */
next(): Promise<TSchema | null>;
/**
* Try to get the next available document from the cursor or `null` if an empty batch is returned
*/
tryNext(): Promise<TSchema | null>;
/**
* Iterates over all the documents for this cursor using the iterator callback pattern.
*
* If the iterator returns `false`, iteration will stop.
*
* @param iterator - The iteration callback.
* @deprecated - Will be removed in a future release. Use for await...of instead.
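*
* @example
* A sketch of the suggested replacement; `shouldContinue` is an illustrative predicate, not part of the driver:
* ```ts
* for await (const doc of cursor) {
*   if (!shouldContinue(doc)) break; // same effect as returning false from the iterator
* }
* ```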
*/
forEach(iterator: (doc: TSchema) => boolean | void): Promise<void>;
/**
* Frees any client-side resources used by the cursor.
*/
close(options?: {
timeoutMS?: number;
}): Promise<void>;
/**
* Returns an array of documents. The caller is responsible for making sure that there
* is enough memory to store the results. Note that the array only contains partial
* results if this cursor has been previously accessed. In that case,
* cursor.rewind() can be used to reset the cursor.
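*
* @example
* A minimal sketch of re-reading a cursor:
* ```ts
* const firstPass = await cursor.toArray();
* cursor.rewind(); // reset so iteration re-queries from the start
* const secondPass = await cursor.toArray();
* ```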
*/
toArray(): Promise<TSchema[]>;
/**
* Add a cursor flag to the cursor
*
* @param flag - The flag to set, must be one of: 'tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'partial'.
* @param value - The flag boolean value.
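*
* @example
* ```ts
* // a sketch: keep the server-side cursor alive during slow client-side processing
* const cursor = collection.find({}).addCursorFlag('noCursorTimeout', true);
* ```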
*/
addCursorFlag(flag: CursorFlag, value: boolean): this;
/**
* Map all documents using the provided function.
* If there is a transform already set on the cursor, it will be called first and its result
* passed to this function's transform.
*
* @remarks
*
* **Note** Cursors use `null` internally to indicate that there are no more documents in the cursor. Providing a mapping
* function that maps values to `null` will result in the cursor closing itself before it has finished iterating
* all documents. This will **not** result in a memory leak, just surprising behavior. For example:
*
* ```typescript
* const cursor = collection.find({});
* cursor.map(() => null);
*
* const documents = await cursor.toArray();
* // documents is always [], regardless of how many documents are in the collection.
* ```
*
* Other falsy values are allowed:
*
* ```typescript
* const cursor = collection.find({});
* cursor.map(() => '');
*
* const documents = await cursor.toArray();
* // documents is now an array of empty strings
* ```
*
* **Note for Typescript Users:** adding a transform changes the return type of the iteration of this cursor,
* it **does not** return a new instance of a cursor. This means when calling map,
* you should always assign the result to a new variable in order to get a correctly typed cursor variable.
* Take note of the following example:
*
* @example
* ```typescript
* const cursor: FindCursor<Document> = coll.find();
* const mappedCursor: FindCursor<number> = cursor.map(doc => Object.keys(doc).length);
* const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[]
* ```
* @param transform - The mapping transformation method.
*/
map<T = any>(transform: (doc: TSchema) => T): AbstractCursor<T>;
/**
* Set the ReadPreference for the cursor.
*
* @param readPreference - The new read preference for the cursor.
*/
withReadPreference(readPreference: ReadPreferenceLike): this;
/**
* Set the ReadConcern for the cursor.
*
* @param readConcern - The new read concern for the cursor.
*/
withReadConcern(readConcern: ReadConcernLike): this;
/**
* Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)
*
* @param value - Number of milliseconds to wait before aborting the query.
*/
maxTimeMS(value: number): this;
/**
* Set the batch size for the cursor.
*
* @param value - The number of documents to return per batch. See {@link https://www.mongodb.com/docs/manual/reference/command/find/|find command documentation}.
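*
* @example
* ```ts
* // a sketch: fetch up to 100 documents per getMore round trip
* const cursor = collection.find({}).batchSize(100);
* ```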
*/
batchSize(value: number): this;
/**
* Rewind this cursor to its uninitialized state. Any options that are present on the cursor will
* remain in effect. Iterating this cursor will cause new queries to be sent to the server, even
* if the resultant data has already been retrieved by this cursor.
*/
rewind(): void;
/**
* Returns a new uninitialized copy of this cursor, with options matching those that have been set on the current instance
*/
abstract clone(): AbstractCursor<TSchema>;
/* Excluded from this release type: _initialize */
/* Excluded from this release type: getMore */
/* Excluded from this release type: cursorInit */
/* Excluded from this release type: fetchBatch */
/* Excluded from this release type: cleanup */
/* Excluded from this release type: hasEmittedClose */
/* Excluded from this release type: emitClose */
/* Excluded from this release type: transformDocument */
/* Excluded from this release type: throwIfInitialized */
}
/** @public */
export declare type AbstractCursorEvents = {
[AbstractCursor.CLOSE](): void;
};
/** @public */
export declare interface AbstractCursorOptions extends BSONSerializeOptions {
session?: ClientSession;
readPreference?: ReadPreferenceLike;
readConcern?: ReadConcernLike;
/**
* Specifies the number of documents to return in each response from MongoDB
*/
batchSize?: number;
/**
* When applicable, `maxTimeMS` controls the amount of time the initial command
* that constructs a cursor is allowed to take. (ex. find, aggregate, listCollections)
*/
maxTimeMS?: number;
/**
* When applicable, `maxAwaitTimeMS` controls the amount of time each subsequent getMore
* a cursor uses to fetch more data is allowed to take. (ex. cursor.next())
*/
maxAwaitTimeMS?: number;
/**
* Comment to apply to the operation.
*
* In server versions pre-4.4, 'comment' must be a string. A server
* error will be thrown if any other type is provided.
*
* In server versions 4.4 and above, 'comment' can be any valid BSON type.
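*
* @example
* A sketch; the `requestId` field is illustrative:
* ```ts
* // any BSON value works on server 4.4+; use a string on older servers
* const doc = await collection.findOne({}, { comment: { requestId: 42 } });
* ```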
*/
comment?: unknown;
/**
* By default, MongoDB will automatically close a cursor when the
* client has exhausted all results in the cursor. However, for [capped collections](https://www.mongodb.com/docs/manual/core/capped-collections)
* you may use a Tailable Cursor that remains open after the client exhausts
* the results in the initial cursor.
*/
tailable?: boolean;
/**
* If awaitData is set to true, when the cursor reaches the end of the capped collection,
* MongoDB blocks the query thread for a period of time waiting for new data to arrive.
* When new data is inserted into the capped collection, the blocked thread is signaled
* to wake up and return the next batch to the client.
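*
* @example
* A minimal sketch, assuming `db` is a connected Db and `log` is a capped collection:
* ```ts
* const cursor = db.collection('log').find({}, { tailable: true, awaitData: true });
* for await (const entry of cursor) {
*   console.log(entry); // the server blocks awaiting new data between batches
* }
* ```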
*/
awaitData?: boolean;
noCursorTimeout?: boolean;
/** Specifies the time an operation will run until it throws a timeout error. See {@link AbstractCursorOptions.timeoutMode} for more details on how this option applies to cursors. */
timeoutMS?: number;
/**
* @public
* @experimental
* Specifies how `timeoutMS` is applied to the cursor. Can be either `'cursorLifetime'` or `'iteration'`.
* When set to `'iteration'`, the deadline specified by `timeoutMS` applies to each call of
* `cursor.next()`.
* When set to `'cursorLifetime'`, the deadline applies to the life of the entire cursor.
*
* Depending on the type of cursor being used, this option has different default values.
* For non-tailable cursors, this value defaults to `'cursorLifetime'`.
* For tailable cursors, this value defaults to `'iteration'`, since tailable cursors, by
* definition, can have an arbitrarily long lifetime.
*
* @example
* ```ts
* const cursor = collection.find({}, {timeoutMS: 100, timeoutMode: 'iteration'});
* for await (const doc of cursor) {
* // process doc
* // This will throw a timeout error if any of the iterator's `next()` calls takes more than 100ms, but
* // will continue to iterate successfully otherwise, regardless of the number of batches.
* }
* ```
*
* @example
* ```ts
* const cursor = collection.find({}, { timeoutMS: 1000, timeoutMode: 'cursorLifetime' });
* const docs = await cursor.toArray(); // This entire line will throw a timeout error if all batches are not fetched and returned within 1000ms.
* ```
*/
timeoutMode?: CursorTimeoutMode;
/* Excluded from this release type: timeoutContext */
}
/* Excluded from this release type: AbstractOperation */
/** @public */
export declare type AcceptedFields<TSchema, FieldType, AssignableType> = {
readonly [key in KeysOfAType<TSchema, FieldType>]?: AssignableType;
};
/** @public */
export declare type AddToSetOperators<Type> = {
$each?: Array<Flatten<Type>>;
};
/**
* The **Admin** class is an internal class that allows convenient access to
* the admin functionality and commands for MongoDB.
*
* **Admin cannot be instantiated directly.**
* @public
*
* @example
* ```ts
* import { MongoClient } from 'mongodb';
*
* const client = new MongoClient('mongodb://localhost:27017');
* const admin = client.db().admin();
* const dbInfo = await admin.listDatabases();
* for (const db of dbInfo.databases) {
* console.log(db.name);
* }
* ```
*/
export declare class Admin {
/* Excluded from this release type: s */
/* Excluded from this release type: __constructor */
/**
* Execute a command
*
* The driver will ensure the following fields are attached to the command sent to the server:
* - `lsid` - sourced from an implicit session or options.session
* - `$readPreference` - defaults to primary or can be configured by options.readPreference
* - `$db` - sourced from the name of this database
*
* If the client has a serverApi setting:
* - `apiVersion`
* - `apiStrict`
* - `apiDeprecationErrors`
*
* When in a transaction:
* - `readConcern` - sourced from readConcern set on the TransactionOptions
* - `writeConcern` - sourced from writeConcern set on the TransactionOptions
*
* Attaching any of the above fields to the command will have no effect as the driver will overwrite the value.
*
* @param command - The command to execute
* @param options - Optional settings for the command
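*
* @example
* A minimal sketch, assuming `client` is a connected MongoClient:
* ```ts
* const result = await client.db().admin().command({ ping: 1 });
* ```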
*/
command(command: Document, options?: RunCommandOptions): Promise<Document>;
/**
* Retrieve the server build information
*
* @param options - Optional settings for the command
*/
buildInfo(options?: CommandOperationOptions): Promise<Document>;
/**
* Retrieve the server build information
*
* @param options - Optional settings for the command
*/
serverInfo(options?: CommandOperationOptions): Promise<Document>;
/**
* Retrieve this db's server status.
*
* @param options - Optional settings for the command
*/
serverStatus(options?: CommandOperationOptions): Promise<Document>;
/**
* Ping the MongoDB server and retrieve results
*
* @param options - Optional settings for the command
*/
ping(options?: CommandOperationOptions): Promise<Document>;
/**
* Remove a user from a database
*
* @param username - The username to remove
* @param options - Optional settings for the command
*/
removeUser(username: string, options?: RemoveUserOptions): Promise<boolean>;
/**
* Validate an existing collection
*
* @param collectionName - The name of the collection to validate.
* @param options - Optional settings for the command
*/
validateCollection(collectionName: string, options?: ValidateCollectionOptions): Promise<Document>;
/**
* List the available databases
*
* @param options - Optional settings for the command
*/
listDatabases(options?: ListDatabasesOptions): Promise<ListDatabasesResult>;
/**
* Get ReplicaSet status
*
* @param options - Optional settings for the command
*/
replSetGetStatus(options?: CommandOperationOptions): Promise<Document>;
}
/* Excluded from this release type: AdminPrivate */
/* Excluded from this release type: AggregateOperation */
/** @public */
export declare interface AggregateOptions extends Omit<CommandOperationOptions, 'explain'> {
/** allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires MongoDB 2.6 or higher). */
allowDiskUse?: boolean;
/** The number of documents to return per batch. See [aggregation documentation](https://www.mongodb.com/docs/manual/reference/command/aggregate). */
batchSize?: number;
/** Allow driver to bypass schema validation. */
bypassDocumentValidation?: boolean;
/** Return the query as a cursor; on MongoDB 2.6 or higher it returns a real cursor, while on pre-2.6 servers it returns an emulated cursor. */
cursor?: Document;
/**
* Specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point.
*/
maxTimeMS?: number;
/** The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. */
maxAwaitTimeMS?: number;
/** Specify collation. */
collation?: CollationOptions;
/** Add an index selection hint to an aggregation command */
hint?: Hint;
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
let?: Document;
out?: string;
/**
* Specifies the verbosity mode for the explain output.
* @deprecated This API is deprecated in favor of `collection.aggregate().explain()`
* or `db.aggregate().explain()`.
*/
explain?: ExplainOptions['explain'];
/* Excluded from this release type: timeoutMode */
}
/**
* The **AggregationCursor** class is an internal class that embodies an aggregation cursor on MongoDB,
* allowing for iteration over the results returned from the underlying query. It supports
* one-by-one document iteration, conversion to an array, and iteration as a Node.js stream.
* @public
*/
export declare class AggregationCursor<TSchema = any> extends ExplainableCursor<TSchema> {
readonly pipeline: Document[];
/* Excluded from this release type: aggregateOptions */
/* Excluded from this release type: __constructor */
clone(): AggregationCursor<TSchema>;
map<T>(transform: (doc: TSchema) => T): AggregationCursor<T>;
/* Excluded from this release type: _initialize */
/** Execute the explain for the cursor */
explain(): Promise<Document>;
explain(verbosity: ExplainVerbosityLike | ExplainCommandOptions): Promise<Document>;
explain(options: {
timeoutMS?: number;
}): Promise<Document>;
explain(verbosity: ExplainVerbosityLike | ExplainCommandOptions, options: {
timeoutMS?: number;
}): Promise<Document>;
/** Add a stage to the aggregation pipeline
* @example
* ```
* const documents = await users.aggregate().addStage({ $match: { name: /Mike/ } }).toArray();
* ```
* @example
* ```
* const documents = await users.aggregate()
* .addStage<{ name: string }>({ $project: { name: true } })
* .toArray(); // type of documents is { name: string }[]
* ```
*/
addStage(stage: Document): this;
addStage<T = Document>(stage: Document): AggregationCursor<T>;
/** Add a group stage to the aggregation pipeline */
group<T = TSchema>($group: Document): AggregationCursor<T>;
/** Add a limit stage to the aggregation pipeline */
limit($limit: number): this;
/** Add a match stage to the aggregation pipeline */
match($match: Document): this;
/** Add an out stage to the aggregation pipeline */
out($out: {
db: string;
coll: string;
} | string): this;
/**
* Add a project stage to the aggregation pipeline
*
* @remarks
* In order to strictly type this function you must provide an interface
* that represents the effect of your projection on the result documents.
*
* By default chaining a projection to your cursor changes the returned type to the generic {@link Document} type.
* You should specify a parameterized type to have assertions on your final results.
*
* @example
* ```typescript
* // Best way
* const docs: AggregationCursor<{ a: number }> = cursor.project<{ a: number }>({ _id: 0, a: true });
* // Flexible way
* const docs: AggregationCursor<Document> = cursor.project({ _id: 0, a: true });
* ```
*
* @remarks
* In order to strictly type this function you must provide an interface
* that represents the effect of your projection on the result documents.
*
* **Note for Typescript Users:** adding a transform changes the return type of the iteration of this cursor,
* it **does not** return a new instance of a cursor. This means when calling project,
* you should always assign the result to a new variable in order to get a correctly typed cursor variable.
* Take note of the following example:
*
* @example
* ```typescript
* const cursor: AggregationCursor<{ a: number; b: string }> = coll.aggregate([]);
* const projectCursor = cursor.project<{ a: number }>({ _id: 0, a: true });
* const aPropOnlyArray: {a: number}[] = await projectCursor.toArray();
*
* // or always use chaining and save the final cursor
*
* const cursor = coll.aggregate().project<{ a: string }>({
* _id: 0,
* a: { $convert: { input: '$a', to: 'string' } }
* });
* ```
*/
project<T extends Document = Document>($project: Document): AggregationCursor<T>;
/** Add a lookup stage to the aggregation pipeline */
lookup($lookup: Document): this;
/** Add a redact stage to the aggregation pipeline */
redact($redact: Document): this;
/** Add a skip stage to the aggregation pipeline */
skip($skip: number): this;
/** Add a sort stage to the aggregation pipeline */
sort($sort: Sort): this;
/** Add an unwind stage to the aggregation pipeline */
unwind($unwind: Document | string): this;
/** Add a geoNear stage to the aggregation pipeline */
geoNear($geoNear: Document): this;
}
/** @public */
export declare interface AggregationCursorOptions extends AbstractCursorOptions, AggregateOptions {
}
/**
* It is possible to search using alternative types in MongoDB:
* string fields can be matched using a regex, and
* array fields can be matched using their element type.
* @public
*/
export declare type AlternativeType<T> = T extends ReadonlyArray<infer U> ? T | RegExpOrString<U> : RegExpOrString<T>;
/** @public */
export declare type AnyBulkWriteOperation<TSchema extends Document = Document> = {
insertOne: InsertOneModel<TSchema>;
} | {
replaceOne: ReplaceOneModel<TSchema>;
} | {
updateOne: UpdateOneModel<TSchema>;
} | {
updateMany: UpdateManyModel<TSchema>;
} | {
deleteOne: DeleteOneModel<TSchema>;
} | {
deleteMany: DeleteManyModel<TSchema>;
};
/**
* Used to represent any of the client bulk write models that can be passed as an array
* to MongoClient#bulkWrite.
* @public
*/
export declare type AnyClientBulkWriteModel<TSchema extends Document> = ClientInsertOneModel<TSchema> | ClientReplaceOneModel<TSchema> | ClientUpdateOneModel<TSchema> | ClientUpdateManyModel<TSchema> | ClientDeleteOneModel<TSchema> | ClientDeleteManyModel<TSchema>;
/** @public */
export declare type AnyError = MongoError | Error;
/** @public */
export declare type ArrayElement<Type> = Type extends ReadonlyArray<infer Item> ? Item : never;
/** @public */
export declare type ArrayOperator<Type> = {
$each?: Array<Flatten<Type>>;
$slice?: number;
$position?: number;
$sort?: Sort;
};
/**
* @public
*/
declare interface AsyncDisposable_2 {
/**
* @beta
* @experimental
*/
[Symbol.asyncDispose](): Promise<void>;
/* Excluded from this release type: asyncDispose */
}
export { AsyncDisposable_2 as AsyncDisposable }
/** @public */
export declare interface Auth {
/** The username for auth */
username?: string;
/** The password for auth */
password?: string;
}
/* Excluded from this release type: AuthContext */
/** @public */
export declare const AuthMechanism: Readonly<{
readonly MONGODB_AWS: "MONGODB-AWS";
readonly MONGODB_CR: "MONGODB-CR";
readonly MONGODB_DEFAULT: "DEFAULT";
readonly MONGODB_GSSAPI: "GSSAPI";
readonly MONGODB_PLAIN: "PLAIN";
readonly MONGODB_SCRAM_SHA1: "SCRAM-SHA-1";
readonly MONGODB_SCRAM_SHA256: "SCRAM-SHA-256";
readonly MONGODB_X509: "MONGODB-X509";
readonly MONGODB_OIDC: "MONGODB-OIDC";
}>;
/** @public */
export declare type AuthMechanism = (typeof AuthMechanism)[keyof typeof AuthMechanism];
/** @public */
export declare interface AuthMechanismProperties extends Document {
SERVICE_HOST?: string;
SERVICE_NAME?: string;
SERVICE_REALM?: string;
CANONICALIZE_HOST_NAME?: GSSAPICanonicalizationValue;
AWS_SESSION_TOKEN?: string;
/** A user provided OIDC machine callback function. */
OIDC_CALLBACK?: OIDCCallbackFunction;
/** A user provided OIDC human interacted callback function. */
OIDC_HUMAN_CALLBACK?: OIDCCallbackFunction;
/** The OIDC environment. Note that 'test' is for internal use only. */
ENVIRONMENT?: 'test' | 'azure' | 'gcp' | 'k8s';
/** Allowed hosts that OIDC auth can connect to. */
ALLOWED_HOSTS?: string[];
/** The resource token for OIDC auth in Azure and GCP. */
TOKEN_RESOURCE?: string;
/**
* A custom AWS credential provider to use. An example using the AWS SDK default provider chain:
*
* ```ts
* const client = new MongoClient(process.env.MONGODB_URI, {
* authMechanismProperties: {
* AWS_CREDENTIAL_PROVIDER: fromNodeProviderChain()
* }
* });
* ```
*
* Using a custom function that returns AWS credentials:
*
* ```ts
* const client = new MongoClient(process.env.MONGODB_URI, {
* authMechanismProperties: {
* AWS_CREDENTIAL_PROVIDER: async () => {
* return {
* accessKeyId: process.env.ACCESS_KEY_ID,
* secretAccessKey: process.env.SECRET_ACCESS_KEY
* }
* }
* }
* });
* ```
*/
AWS_CREDENTIAL_PROVIDER?: AWSCredentialProvider;
}
/* Excluded from this release type: AuthProvider */
/* Excluded from this release type: AutoEncrypter */
/**
* @public
*
* Extra options related to the mongocryptd process
* _Available in MongoDB 6.0 or higher._
*/
export declare type AutoEncryptionExtraOptions = NonNullable<AutoEncryptionOptions['extraOptions']>;
/** @public */
export declare const AutoEncryptionLoggerLevel: Readonly<{
readonly FatalError: 0;
readonly Error: 1;
readonly Warning: 2;
readonly Info: 3;
readonly Trace: 4;
}>;
/**
* @public
* The level of severity of the log message
*
* | Value | Level |
* |-------|-------|
* | 0 | Fatal Error |
* | 1 | Error |
* | 2 | Warning |
* | 3 | Info |
* | 4 | Trace |
*/
export declare type AutoEncryptionLoggerLevel = (typeof AutoEncryptionLoggerLevel)[keyof typeof AutoEncryptionLoggerLevel];
/** @public */
export declare interface AutoEncryptionOptions {
/* Excluded from this release type: metadataClient */
/** A `MongoClient` used to fetch keys from a key vault */
keyVaultClient?: MongoClient;
/** The namespace where keys are stored in the key vault */
keyVaultNamespace?: string;
/** Configuration options that are used by specific KMS providers during key generation, encryption, and decryption. */
kmsProviders?: KMSProviders;
/** Configuration options for custom credential providers. */
credentialProviders?: CredentialProviders;
/**
* A map of namespaces to a local JSON schema for encryption
*
* **NOTE**: Supplying options.schemaMap provides more security than relying on JSON Schemas obtained from the server.
* It protects against a malicious server advertising a false JSON Schema, which could trick the client into sending decrypted data that should be encrypted.
* Schemas supplied in the schemaMap only apply to configuring automatic encryption for Client-Side Field Level Encryption.
* Other validation rules in the JSON schema will not be enforced by the driver and will result in an error.
*/
schemaMap?: Document;
/** Supply a schema for the encrypted fields in the document */
encryptedFieldsMap?: Document;
/** Allows the user to bypass auto encryption, maintaining implicit decryption */
bypassAutoEncryption?: boolean;
/** Allows users to bypass query analysis */
bypassQueryAnalysis?: boolean;
options?: {
/** An optional hook to catch logging messages from the underlying encryption engine */
logger?: (level: AutoEncryptionLoggerLevel, message: string) => void;
};
extraOptions?: {
/**
* A local process the driver communicates with to determine how to encrypt values in a command.
* Defaults to "mongodb://%2Fvar%2Fmongocryptd.sock" if domain sockets are available or "mongodb://localhost:27020" otherwise
*/
mongocryptdURI?: string;
/** If true, autoEncryption will not attempt to spawn a mongocryptd before connecting */
mongocryptdBypassSpawn?: boolean;
/** The path to the mongocryptd executable on the system */
mongocryptdSpawnPath?: string;
/** Command line arguments to use when auto-spawning a mongocryptd */
mongocryptdSpawnArgs?: string[];
/**
* Full path to a MongoDB Crypt shared library to be used (instead of mongocryptd).
*
* This needs to be the path to the file itself, not a directory.
* It can be an absolute or relative path. If the path is relative and
* its first component is `$ORIGIN`, it will be replaced by the directory
* containing the mongodb-client-encryption native addon file. Otherwise,
* the path will be interpreted relative to the current working directory.
*
* Currently, loading different MongoDB Crypt shared library files from different
* MongoClients in the same process is not supported.
*
* If this option is provided and no MongoDB Crypt shared library could be loaded
* from the specified location, creating the MongoClient will fail.
*
* If this option is not provided and `cryptSharedLibRequired` is not specified,
* the AutoEncrypter will attempt to spawn and/or use mongocryptd according
* to the mongocryptd-specific `extraOptions` options.
*
* Specifying a path prevents mongocryptd from being used as a fallback.
*
* Requires the MongoDB Crypt shared library, available in MongoDB 6.0 or higher.
*/
cryptSharedLibPath?: string;
/**
* If specified, never use mongocryptd and instead fail when the MongoDB Crypt
* shared library could not be loaded.
*
* This is always true when `cryptSharedLibPath` is specified.
*
* Requires the MongoDB Crypt shared library, available in MongoDB 6.0 or higher.
*/
cryptSharedLibRequired?: boolean;
/* Excluded from this release type: cryptSharedLibSearchPaths */
};
proxyOptions?: ProxyOptions;
/** The TLS options to use connecting to the KMS provider */
tlsOptions?: CSFLEKMSTlsOptions;
}
/** @public */
export declare type AWSCredentialProvider = () => Promise<AWSCredentials>;
/**
* @public
* Copy of the AwsCredentialIdentityProvider interface from [`smithy/types`](https://socket.dev/npm/package/\@smithy/types/files/1.1.1/dist-types/identity/awsCredentialIdentity.d.ts),
* the return type of the aws-sdk's `fromNodeProviderChain().provider()`.
*/
export declare interface AWSCredentials {
accessKeyId: string;
secretAccessKey: string;
sessionToken?: string;
expiration?: Date;
}
/**
* @public
* Configuration options for making an AWS encryption key
*/
export declare interface AWSEncryptionKeyOptions {
/**
* The AWS region of the KMS
*/
region: string;
/**
* The Amazon Resource Name (ARN) to the AWS customer master key (CMK)
*/
key: string;
/**
* An alternate host to send KMS requests to. May include port number.
*/
endpoint?: string | undefined;
}
/** @public */
export declare interface AWSKMSProviderConfiguration {
/**
* The access key used for the AWS KMS provider
*/
accessKeyId: string;
/**
* The secret access key used for the AWS KMS provider
*/
secretAccessKey: string;
/**
* An optional AWS session token that will be used as the
* X-Amz-Security-Token header for AWS requests.
*/
sessionToken?: string;
}
/**
* @public
* Configuration options for making an Azure encryption key
*/
export declare interface AzureEncryptionKeyOptions {
/**
* Key name
*/
keyName: string;
/**
* Key vault URL, typically `<name>.vault.azure.net`
*/
keyVaultEndpoint: string;
/**
* Key version
*/
keyVersion?: string | undefined;
}
/** @public */
export declare type AzureKMSProviderConfiguration = {
/**
* The tenant ID identifies the organization for the account
*/
tenantId: string;
/**
* The client ID to authenticate a registered application
*/
clientId: string;
/**
* The client secret to authenticate a registered application
*/
clientSecret: string;
/**
* If present, a host with optional port. E.g. "example.com" or "example.com:443".
* This is optional, and only needed if customer is using a non-commercial Azure instance
* (e.g. a government or China account, which use different URLs).
* Defaults to "login.microsoftonline.com"
*/
identityPlatformEndpoint?: string | undefined;
} | {
/**
* If present, an access token to authenticate with Azure.
*/
accessToken: string;
};
/**
* Keeps the state of an unordered batch so we can rewrite the results
* correctly after command execution
*
* @public
*/
export declare class Batch<T = Document> {
originalZeroIndex: number;
currentIndex: number;
originalIndexes: number[];
batchType: BatchType;
operations: T[];
size: number;
sizeBytes: number;
constructor(batchType: BatchType, originalZeroIndex: number);
}
/** @public */
export declare const BatchType: Readonly<{
readonly INSERT: 1;
readonly UPDATE: 2;
readonly DELETE: 3;
}>;
/** @public */
export declare type BatchType = (typeof BatchType)[keyof typeof BatchType];
export { Binary }
/** @public */
export declare type BitwiseFilter = number /** numeric bit mask */ | Binary /** BinData bit mask */ | ReadonlyArray<number>;
export { BSON }
/* Excluded from this release type: BSONElement */
export { BSONRegExp }
/**
* BSON Serialization options.
* @public
*/
export declare interface BSONSerializeOptions extends Omit<SerializeOptions, 'index'>, Omit<DeserializeOptions, 'evalFunctions' | 'cacheFunctions' | 'cacheFunctionsCrc32' | 'allowObjectSmallerThanBufferSize' | 'index' | 'validation'> {
/**
* Enabling the raw option will return a [Node.js Buffer](https://nodejs.org/api/buffer.html)
* which is allocated using [allocUnsafe API](https://nodejs.org/api/buffer.html#static-method-bufferallocunsafesize).
* See this section from the [Node.js Docs here](https://nodejs.org/api/buffer.html#what-makes-bufferallocunsafe-and-bufferallocunsafeslow-unsafe)
* for more detail about what "unsafe" refers to in this context.
* If you need to maintain your own editable clone of the bytes returned for an extended lifetime of the process, it is recommended you allocate
* your own buffer and clone the contents:
*
* @example
* ```ts
* const raw = await collection.findOne({}, { raw: true });
* const myBuffer = Buffer.alloc(raw.byteLength);
* myBuffer.set(raw, 0);
* // Only save and use `myBuffer` beyond this point
* ```
*
* @remarks
* Please note there is a known limitation where this option cannot be used at the MongoClient level (see [NODE-3946](https://jira.mongodb.org/browse/NODE-3946)).
* It does correctly work at `Db`, `Collection`, and per operation the same as other BSON options work.
*/
raw?: boolean;
/** Enable utf8 validation when deserializing BSON documents. Defaults to true. */
enableUtf8Validation?: boolean;
}
export { BSONSymbol }
export { BSONType }
/** @public */
export declare type BSONTypeAlias = keyof typeof BSONType;
/* Excluded from this release type: BufferPool */
/** @public */
export declare abstract class BulkOperationBase {
private collection;
isOrdered: boolean;
/* Excluded from this release type: s */
operationId?: number;
/* Excluded from this release type: __constructor */
/**
* Add a single insert document to the bulk operation
*
* @example
* ```ts
* const bulkOp = collection.initializeOrderedBulkOp();
*
* // Adds three inserts to the bulkOp.
* bulkOp
* .insert({ a: 1 })
* .insert({ b: 2 })
* .insert({ c: 3 });
* await bulkOp.execute();
* ```
*/
insert(document: Document): BulkOperationBase;
/**
* Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne.
* Returns a builder object used to complete the definition of the operation.
*
* @example
* ```ts
* const bulkOp = collection.initializeOrderedBulkOp();
*
* // Add an updateOne to the bulkOp
* bulkOp.find({ a: 1 }).updateOne({ $set: { b: 2 } });
*
* // Add an updateMany to the bulkOp
* bulkOp.find({ c: 3 }).update({ $set: { d: 4 } });
*
* // Add an upsert
* bulkOp.find({ e: 5 }).upsert().updateOne({ $set: { f: 6 } });
*
* // Add a deletion
* bulkOp.find({ g: 7 }).deleteOne();
*
* // Add a multi deletion
* bulkOp.find({ h: 8 }).delete();
*
* // Add a replaceOne
* bulkOp.find({ i: 9 }).replaceOne({ writeConcern: { j: 10 } });
*
* // Update using a pipeline (requires MongoDB 4.2 or higher)
* bulkOp.find({ k: 11, y: { $exists: true }, z: { $exists: true } }).updateOne([
* { $set: { total: { $sum: [ '$y', '$z' ] } } }
* ]);
*
* // All of the ops will now be executed
* await bulkOp.execute();
* ```
*/
find(selector: Document): FindOperators;
/** Specifies a raw operation to perform in the bulk write. */
raw(op: AnyBulkWriteOperation): this;
get length(): number;
get bsonOptions(): BSONSerializeOptions;
get writeConcern(): WriteConcern | undefined;
get batches(): Batch[];
execute(options?: BulkWriteOptions): Promise<BulkWriteResult>;
/* Excluded from this release type: handleWriteError */
abstract addToOperationsList(batchType: BatchType, document: Document | UpdateStatement | DeleteStatement): this;
private shouldForceServerObjectId;
}
/* Excluded from this release type: BulkOperationPrivate */
/* Excluded from this release type: BulkResult */
/** @public */
export declare interface BulkWriteOperationError {
index: number;
code: number;
errmsg: string;
errInfo: Document;
op: Document | UpdateStatement | DeleteStatement;
}
/** @public */
export declare interface BulkWriteOptions extends CommandOperationOptions {
/**
* Allow driver to bypass schema validation.
* @defaultValue `false` - documents will be validated by default
**/
bypassDocumentValidation?: boolean;
/**
* If true, when a write fails, don't execute the remaining writes.
* If false, continue with the remaining writes when one fails.
* @defaultValue `true` - writes are ordered by default
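*
* @example
* A sketch, where `ops` is an illustrative array of write models:
* ```ts
* // continue past individual write failures instead of stopping at the first one
* await collection.bulkWrite(ops, { ordered: false });
* ```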
*/
ordered?: boolean;
/**
* Force server to assign _id values instead of driver.
* @defaultValue `false` - the driver generates `_id` fields by default
**/
forceServerObjectId?: boolean;
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
let?: Document;
/* Excluded from this release type: timeoutContext */
}
/**
* @public
* The result of a bulk write.
*/
export declare class BulkWriteResult {
private readonly result;
/** Number of documents inserted. */
readonly insertedCount: number;
/** Number of documents matched for update. */
readonly matchedCount: number;
/** Number of documents modified. */
readonly modifiedCount: number;
/** Number of documents deleted. */
readonly deletedCount: number;
/** Number of documents upserted. */
readonly upsertedCount: number;
/** Generated ids of upserted documents, keyed by the index of the originating operation */
readonly upsertedIds: {
[key: number]: any;
};
/** Generated ids of inserted documents, keyed by the index of the originating operation */
readonly insertedIds: {
[key: number]: any;
};
private static generateIdMap;
/* Excluded from this release type: __constructor */
/** Evaluates to true if the bulk operation correctly executes */
get ok(): number;
/* Excluded from this release type: getSuccessfullyInsertedIds */
/** Returns the upserted id at the given index */
getUpsertedIdAt(index: number): Document | undefined;
/** Returns raw internal result */
getRawResponse(): Document;
/** Returns true if the bulk operation contains a write error */
hasWriteErrors(): boolean;
/** Returns the number of write errors from the bulk operation */
getWriteErrorCount(): number;
/** Returns a specific write error object */
getWriteErrorAt(index: number): WriteError | undefined;
/** Retrieve all write errors */
getWriteErrors(): WriteError[];
/** Retrieve the write concern error if one exists */
getWriteConcernError(): WriteConcernError | undefined;
toString(): string;
isOk(): boolean;
}
/**
* MongoDB Driver style callback
* @public
*/
export declare type Callback<T = any> = (error?: AnyError, result?: T) => void;
/** @public */
export declare class CancellationToken extends TypedEventEmitter<{
cancel(): void;
}> {
constructor(...args: any[]);
}
/**
* Creates a new Change Stream instance. Normally created using {@link Collection#watch|Collection.watch()}.
* @public
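*
* @example
* A minimal sketch, assuming `collection` is a Collection on a replica set deployment:
* ```ts
* const changeStream = collection.watch([{ $match: { operationType: 'insert' } }]);
* for await (const change of changeStream) {
*   console.log(change);
* }
* ```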
*/
export declare class ChangeStream<TSchema extends Document = Document, TChange extends Document = ChangeStreamDocument<TSchema>> extends TypedEventEmitter<ChangeStreamEvents<TSchema, TChange>> implements AsyncDisposable_2 {
/**
* @beta
* @experimental
* An alias for {@link ChangeStream.close|ChangeStream.close()}.
*/
[Symbol.asyncDispose]: () => Promise<void>;
/* Excluded from this release type: asyncDispose */
pipeline: Document[];
/**
* @remarks WriteConcern can still be present on the options because
* we inherit options from the client/db/collection. The
* key must be present on the options in order to delete it.
* This allows typescript to delete the key but will
* not allow a writeConcern to be assigned as a property on options.
*/
options: ChangeStreamOptions & {
writeConcern?: never;
};
parent: MongoClient | Db | Collection;
namespace: MongoDBNamespace;
type: symbol;
/* Excluded from this release type: cursor */
streamOptions?: CursorStreamOptions;
/* Excluded from this release type: cursorStream */
/* Excluded from this release type: isClosed */
/* Excluded from this release type: mode */
/** @event */
static readonly RESPONSE: "response";
/** @event */
static readonly MORE: "more";
/** @event */
static readonly INIT: "init";
/** @event */
static readonly CLOSE: "close";
/**
* Fired for each new matching change in the specified namespace. Attaching a `change`
* event listener to a Change Stream will switch the stream into flowing mode. Data will
* then be passed as soon as it is available.
* @event
*/
static readonly CHANGE: "change";
/** @event */
static readonly END: "end";
/** @event */
static readonly ERROR: "error";
/**
* Emitted each time the change stream stores a new resume token.
* @event
*/
static readonly RESUME_TOKEN_CHANGED: "resumeTokenChanged";
private timeoutContext?;
/**
* Note that this property is here to uniquely identify a ChangeStream instance as the owner of
* the {@link CursorTimeoutContext} instance (see {@link ChangeStream._createChangeStreamCursor}) to ensure
* that {@link AbstractCursor.close} does not mutate the timeoutContext.
*/
private contextOwner;
/* Excluded from this release type: __constructor */
/** The cached resume token that is used to resume after the most recently returned change. */
get resumeToken(): ResumeToken;
/** Check if there is any document still available in the Change Stream */
hasNext(): Promise<boolean>;
/** Get the next available document from the Change Stream. */
next(): Promise<TChange>;
/**
* Try to get the next available document from the Change Stream's cursor or `null` if an empty batch is returned
*/
tryNext(): Promise<TChange | null>;
[Symbol.asyncIterator](): AsyncGenerator<TChange, void, void>;
/** Is the change stream closed */
get closed(): boolean;
/**
* Frees the internal resources used by the change stream.
*/
close(): Promise<void>;
/**
* Return a modified Readable stream including a possible transform method.
*
* NOTE: When using a Stream to process change stream events, the stream will
* NOT automatically resume in the case a resumable error is encountered.
*
* @throws MongoChangeStreamError if the underlying cursor or the change stream is closed
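*
* @example
* A sketch of stream mode; note that resumable errors are not retried here:
* ```ts
* const readable = collection.watch().stream();
* readable.on('data', change => console.log(change));
* readable.on('error', error => console.error('stream ended, no auto-resume:', error));
* ```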
*/
stream(options?: CursorStreamOptions): Readable & AsyncIterable<TChange>;
/* Excluded from this release type: _setIsEmitter */
/* Excluded from this release type: _setIsIterator */
/* Excluded from this release type: _createChangeStreamCursor */
/* Excluded from this release type: _closeEmitterModeWithError */
/* Excluded from this release type: _streamEvents */
/* Excluded from this release type: _endStream */
/* Excluded from this release type: _processChange */
/* Excluded from this release type: _processErrorStreamMode */
/* Excluded from this release type: _processErrorIteratorMode */
private _resume;
}
/**
* Only present when the `showExpandedEvents` flag is enabled.
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/modify/#mongodb-data-modify
*/
export declare interface ChangeStreamCollModDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID {
/** Describes the type of operation represented in this change notification */
operationType: 'modify';
}
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/create/#mongodb-data-create
*/
export declare interface ChangeStreamCreateDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID {
/** Describes the type of operation represented in this change notification */
operationType: 'create';