diff --git a/.eslintrc.json b/.eslintrc.json
index fe97f496716..f47f6b4a582 100644
--- a/.eslintrc.json
+++ b/.eslintrc.json
@@ -201,7 +201,9 @@
       ],
       "parser": "@typescript-eslint/parser",
       "parserOptions": {
-        "project": ["./tsconfig.json"]
+        "project": [
+          "./tsconfig.json"
+        ]
       },
       "extends": [
         "plugin:@typescript-eslint/recommended-requiring-type-checking"
@@ -212,9 +214,13 @@
         "@typescript-eslint/no-unsafe-assignment": "off",
         "@typescript-eslint/no-unsafe-return": "off",
         "@typescript-eslint/no-unsafe-call": "off",
-        "@typescript-eslint/restrict-plus-operands": "off",
-        "@typescript-eslint/restrict-template-expressions": "off"
+        "@typescript-eslint/restrict-template-expressions": "off",
+        "no-return-await": "off",
+        "@typescript-eslint/return-await": [
+          "error",
+          "in-try-catch"
+        ]
       }
     },
     {
diff --git a/src/bulk/common.ts b/src/bulk/common.ts
index 21e19f15d12..5b65fa890ad 100644
--- a/src/bulk/common.ts
+++ b/src/bulk/common.ts
@@ -1276,41 +1276,37 @@ export abstract class BulkOperationBase {
         : typeof options === 'function'
         ? options
         : undefined;
-    options = options != null && typeof options !== 'function' ? options : {};
+    return maybeCallback(async () => {
+      options = options != null && typeof options !== 'function' ? options : {};

-    if (this.s.executed) {
-      // eslint-disable-next-line @typescript-eslint/require-await
-      return maybeCallback(async () => {
+      if (this.s.executed) {
         throw new MongoBatchReExecutionError();
-      }, callback);
-    }
+      }

-    const writeConcern = WriteConcern.fromOptions(options);
-    if (writeConcern) {
-      this.s.writeConcern = writeConcern;
-    }
+      const writeConcern = WriteConcern.fromOptions(options);
+      if (writeConcern) {
+        this.s.writeConcern = writeConcern;
+      }

-    // If we have current batch
-    if (this.isOrdered) {
-      if (this.s.currentBatch) this.s.batches.push(this.s.currentBatch);
-    } else {
-      if (this.s.currentInsertBatch) this.s.batches.push(this.s.currentInsertBatch);
-      if (this.s.currentUpdateBatch) this.s.batches.push(this.s.currentUpdateBatch);
-      if (this.s.currentRemoveBatch) this.s.batches.push(this.s.currentRemoveBatch);
-    }
-    // If we have no operations in the bulk raise an error
-    if (this.s.batches.length === 0) {
-      // eslint-disable-next-line @typescript-eslint/require-await
-      return maybeCallback(async () => {
+      // If we have current batch
+      if (this.isOrdered) {
+        if (this.s.currentBatch) this.s.batches.push(this.s.currentBatch);
+      } else {
+        if (this.s.currentInsertBatch) this.s.batches.push(this.s.currentInsertBatch);
+        if (this.s.currentUpdateBatch) this.s.batches.push(this.s.currentUpdateBatch);
+        if (this.s.currentRemoveBatch) this.s.batches.push(this.s.currentRemoveBatch);
+      }
+      // If we have no operations in the bulk raise an error
+      if (this.s.batches.length === 0) {
         throw new MongoInvalidArgumentError('Invalid BulkOperation, Batch cannot be empty');
-      }, callback);
-    }
+      }

-    this.s.executed = true;
-    const finalOptions = { ...this.s.options, ...options };
-    const operation = new BulkWriteShimOperation(this, finalOptions);
+      this.s.executed = true;
+      const finalOptions = { ...this.s.options, ...options };
+      const operation = new BulkWriteShimOperation(this, finalOptions);

-    return executeOperation(this.s.collection.s.db.s.client, operation, callback);
+      return executeOperation(this.s.collection.s.db.s.client, operation);
+    }, callback);
   }

   /**
diff --git a/src/collection.ts b/src/collection.ts
index 7f9f0f46573..7fa4968437a 100644
--- a/src/collection.ts
+++ b/src/collection.ts
@@ -662,7 +662,7 @@ export class Collection {
       new RenameOperation(this as TODO_NODE_3286, newName, {
         ...options,
         readPreference: ReadPreference.PRIMARY
-      }),
+      }) as TODO_NODE_3286,
       callback
     );
   }
@@ -1299,7 +1299,7 @@ export class Collection {

     return executeOperation(
       this.s.db.s.client,
-      new CollStatsOperation(this as TODO_NODE_3286, options),
+      new CollStatsOperation(this as TODO_NODE_3286, options) as TODO_NODE_3286,
       callback
     );
   }
diff --git a/src/mongo_client.ts b/src/mongo_client.ts
index 979f2a21253..0500165fc45 100644
--- a/src/mongo_client.ts
+++ b/src/mongo_client.ts
@@ -590,7 +590,7 @@ export class MongoClient extends TypedEventEmitter<MongoClientEvents> {
     return maybeCallback(async () => {
       options = typeof options !== 'function' ? options : undefined;
       const client = new this(url, options);
-      return await client.connect();
+      return client.connect();
     }, callback);
   }

diff --git a/src/operations/command.ts b/src/operations/command.ts
index 57186ab42cd..54864d65456 100644
--- a/src/operations/command.ts
+++ b/src/operations/command.ts
@@ -119,12 +119,6 @@ export abstract class CommandOperation<T> extends AbstractOperation<T> {
     return true;
   }

-  abstract override execute(
-    server: Server,
-    session: ClientSession | undefined,
-    callback: Callback<T>
-  ): void;
-
   executeCommand(
     server: Server,
     session: ClientSession | undefined,
diff --git a/src/operations/delete.ts b/src/operations/delete.ts
index ccdfd1c80b6..99ec2abb42d 100644
--- a/src/operations/delete.ts
+++ b/src/operations/delete.ts
@@ -44,7 +44,7 @@ export interface DeleteStatement {
 }

 /** @internal */
-export class DeleteOperation extends CommandOperation<Document> {
+export class DeleteOperation extends CommandOperation<DeleteResult> {
   override options: DeleteOptions;
   statements: DeleteStatement[];

diff --git a/src/operations/drop.ts b/src/operations/drop.ts
index 00f78e543a1..3170c566ea5 100644
--- a/src/operations/drop.ts
+++ b/src/operations/drop.ts
@@ -72,7 +72,7 @@ export class DropCollectionOperation extends CommandOperation<boolean> {
         }
       }

-      return await this.executeWithoutEncryptedFieldsCheck(server, session);
+      return this.executeWithoutEncryptedFieldsCheck(server, session);
     })().then(
       result => callback(undefined, result),
       err => callback(err)
diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts
index 54fc3b99d64..b400808c0f9 100644
--- a/src/operations/execute_operation.ts
+++ b/src/operations/execute_operation.ts
@@ -23,7 +23,7 @@ import {
 } from '../sdam/server_selection';
 import type { Topology } from '../sdam/topology';
 import type { ClientSession } from '../sessions';
-import { Callback, maybePromise, supportsRetryableWrites } from '../utils';
+import { Callback, maybeCallback, supportsRetryableWrites } from '../utils';
 import { AbstractOperation, Aspect } from './operation';

 const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation;
@@ -75,97 +75,73 @@ export function executeOperation<
   T extends AbstractOperation<TResult>,
   TResult = ResultTypeFromOperation<T>
 >(client: MongoClient, operation: T, callback?: Callback<TResult>): Promise<TResult> | void {
+  return maybeCallback(() => executeOperationAsync(client, operation), callback);
+}
+
+async function executeOperationAsync<
+  T extends AbstractOperation<TResult>,
+  TResult = ResultTypeFromOperation<T>
+>(client: MongoClient, operation: T): Promise<TResult> {
   if (!(operation instanceof AbstractOperation)) {
     // TODO(NODE-3483): Extend MongoRuntimeError
     throw new MongoRuntimeError('This method requires a valid operation instance');
   }

-  return maybePromise(callback, callback => {
-    const topology = client.topology;
-
-    if (topology == null) {
-      if (client.s.hasBeenClosed) {
-        return callback(
-          new MongoNotConnectedError('Client must be connected before running operations')
-        );
-      }
-      client.s.options[Symbol.for('@@mdb.skipPingOnConnect')] = true;
-      return client.connect(error => {
-        delete client.s.options[Symbol.for('@@mdb.skipPingOnConnect')];
-        if (error) {
-          return callback(error);
-        }
-        return executeOperation(client, operation, callback);
-      });
+  if (client.topology == null) {
+    // Auto connect on operation
+    if (client.s.hasBeenClosed) {
+      throw new MongoNotConnectedError('Client must be connected before running operations');
     }
-
-    if (topology.shouldCheckForSessionSupport()) {
-      return topology.selectServer(ReadPreference.primaryPreferred, {}, err => {
-        if (err) return callback(err);
-
-        executeOperation(client, operation, callback);
-      });
-    }
-
-    // The driver sessions spec mandates that we implicitly create sessions for operations
-    // that are not explicitly provided with a session.
-    let session = operation.session;
-    let owner: symbol | undefined;
-    if (topology.hasSessionSupport()) {
-      if (session == null) {
-        owner = Symbol();
-        session = client.startSession({ owner, explicit: false });
-      } else if (session.hasEnded) {
-        return callback(new MongoExpiredSessionError('Use of expired sessions is not permitted'));
-      } else if (session.snapshotEnabled && !topology.capabilities.supportsSnapshotReads) {
-        return callback(new MongoCompatibilityError('Snapshot reads require MongoDB 5.0 or later'));
-      }
-    } else {
-      // no session support
-      if (session && session.explicit) {
-        // If the user passed an explicit session and we are still, after server selection,
-        // trying to run against a topology that doesn't support sessions we error out.
-        return callback(new MongoCompatibilityError('Current topology does not support sessions'));
-      } else if (session && !session.explicit) {
-        // We do not have to worry about ending the session because the server session has not been acquired yet
-        delete operation.options.session;
-        operation.clearSession();
-        session = undefined;
-      }
+    client.s.options[Symbol.for('@@mdb.skipPingOnConnect')] = true;
+    try {
+      await client.connect();
+    } finally {
+      delete client.s.options[Symbol.for('@@mdb.skipPingOnConnect')];
     }
   }

-    try {
-      executeWithServerSelection(topology, session, operation, (error, result) => {
-        if (session?.owner != null && session.owner === owner) {
-          return session.endSession(endSessionError => callback(endSessionError ?? error, result));
-        }
+  const { topology } = client;
+  if (topology == null) {
+    throw new MongoRuntimeError('client.connect did not create a topology but also did not throw');
+  }

-        callback(error, result);
-      });
-    } catch (error) {
-      if (session?.owner != null && session.owner === owner) {
-        session.endSession().catch(() => null);
-      }
+  if (topology.shouldCheckForSessionSupport()) {
+    await topology.selectServerAsync(ReadPreference.primaryPreferred, {});
+  }

-      throw error;
+  // The driver sessions spec mandates that we implicitly create sessions for operations
+  // that are not explicitly provided with a session.
+  let session = operation.session;
+  let owner: symbol | undefined;
+  if (topology.hasSessionSupport()) {
+    if (session == null) {
+      owner = Symbol();
+      session = client.startSession({ owner, explicit: false });
+    } else if (session.hasEnded) {
+      throw new MongoExpiredSessionError('Use of expired sessions is not permitted');
+    } else if (session.snapshotEnabled && !topology.capabilities.supportsSnapshotReads) {
+      throw new MongoCompatibilityError('Snapshot reads require MongoDB 5.0 or later');
     }
-  });
-}
+  } else {
+    // no session support
+    if (session && session.explicit) {
+      // If the user passed an explicit session and we are still, after server selection,
+      // trying to run against a topology that doesn't support sessions we error out.
+      throw new MongoCompatibilityError('Current topology does not support sessions');
+    } else if (session && !session.explicit) {
+      // We do not have to worry about ending the session because the server session has not been acquired yet
+      delete operation.options.session;
+      operation.clearSession();
+      session = undefined;
+    }
+  }

-function executeWithServerSelection(
-  topology: Topology,
-  session: ClientSession | undefined,
-  operation: AbstractOperation,
-  callback: Callback
-) {
   const readPreference = operation.readPreference ?? ReadPreference.primary;
   const inTransaction = !!session?.inTransaction();

   if (inTransaction && !readPreference.equals(ReadPreference.primary)) {
-    return callback(
-      new MongoTransactionError(
-        `Read preference in a transaction must be primary, not: ${readPreference.mode}`
-      )
+    throw new MongoTransactionError(
+      `Read preference in a transaction must be primary, not: ${readPreference.mode}`
     );
   }
@@ -188,111 +164,113 @@ function executeWithServerSelection(
     selector = readPreference;
   }

-  const serverSelectionOptions = { session };
-  function retryOperation(originalError: MongoError) {
-    const isWriteOperation = operation.hasAspect(Aspect.WRITE_OPERATION);
-    const isReadOperation = operation.hasAspect(Aspect.READ_OPERATION);
+  const server = await topology.selectServerAsync(selector, { session });

-    if (isWriteOperation && originalError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) {
-      return callback(
-        new MongoServerError({
-          message: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
-          errmsg: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
-          originalError
-        })
-      );
-    }
+  if (session == null) {
+    // No session also means it is not retryable, early exit
+    return operation.executeAsync(server, undefined);
+  }

-    if (isWriteOperation && !isRetryableWriteError(originalError)) {
-      return callback(originalError);
+  if (!operation.hasAspect(Aspect.RETRYABLE)) {
+    // non-retryable operation, early exit
+    try {
+      return await operation.executeAsync(server, session);
+    } finally {
+      if (session?.owner != null && session.owner === owner) {
+        await session.endSession().catch(() => null);
+      }
     }
+  }

-    if (isReadOperation && !isRetryableReadError(originalError)) {
-      return callback(originalError);
-    }
+  const willRetryRead = topology.s.options.retryReads && !inTransaction && operation.canRetryRead;

-    if (
-      originalError instanceof MongoNetworkError &&
-      session?.isPinned &&
-      !session.inTransaction() &&
-      operation.hasAspect(Aspect.CURSOR_CREATING)
-    ) {
-      // If we have a cursor and the initial command fails with a network error,
-      // we can retry it on another connection. So we need to check it back in, clear the
-      // pool for the service id, and retry again.
-      session.unpin({ force: true, forceClear: true });
-    }
+  const willRetryWrite =
+    topology.s.options.retryWrites &&
+    !inTransaction &&
+    supportsRetryableWrites(server) &&
+    operation.canRetryWrite;

-    // select a new server, and attempt to retry the operation
-    topology.selectServer(selector, serverSelectionOptions, (error?: Error, server?: Server) => {
-      if (!error && isWriteOperation && !supportsRetryableWrites(server)) {
-        return callback(
-          new MongoUnexpectedServerResponseError(
-            'Selected server does not support retryable writes'
-          )
-        );
-      }
+  const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION);
+  const hasWriteAspect = operation.hasAspect(Aspect.WRITE_OPERATION);
+  const willRetry = (hasReadAspect && willRetryRead) || (hasWriteAspect && willRetryWrite);

-      if (error || !server) {
-        return callback(
-          error ?? new MongoUnexpectedServerResponseError('Server selection failed without error')
-        );
-      }
+  if (hasWriteAspect && willRetryWrite) {
+    operation.options.willRetryWrite = true;
+    session.incrementTransactionNumber();
+  }

-      operation.execute(server, session, callback);
-    });
+  try {
+    return await operation.executeAsync(server, session);
+  } catch (operationError) {
+    if (willRetry && operationError instanceof MongoError) {
+      return await retryOperation(operation, operationError, {
+        session,
+        topology,
+        selector
+      });
+    }
+    throw operationError;
+  } finally {
+    if (session?.owner != null && session.owner === owner) {
+      await session.endSession().catch(() => null);
+    }
   }
+}

-  if (
-    readPreference &&
-    !readPreference.equals(ReadPreference.primary) &&
-    session?.inTransaction()
-  ) {
-    callback(
-      new MongoTransactionError(
-        `Read preference in a transaction must be primary, not: ${readPreference.mode}`
-      )
-    );
+/** @internal */
+type RetryOptions = {
+  session: ClientSession;
+  topology: Topology;
+  selector: ReadPreference | ServerSelector;
+};

-    return;
+async function retryOperation<
+  T extends AbstractOperation<TResult>,
+  TResult = ResultTypeFromOperation<T>
+>(
+  operation: T,
+  originalError: MongoError,
+  { session, topology, selector }: RetryOptions
+): Promise<TResult> {
+  const isWriteOperation = operation.hasAspect(Aspect.WRITE_OPERATION);
+  const isReadOperation = operation.hasAspect(Aspect.READ_OPERATION);
+
+  if (isWriteOperation && originalError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) {
+    throw new MongoServerError({
+      message: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
+      errmsg: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
+      originalError
+    });
   }

-  // select a server, and execute the operation against it
-  topology.selectServer(selector, serverSelectionOptions, (error, server) => {
-    if (error || !server) {
-      return callback(error);
-    }
-
-    if (session && operation.hasAspect(Aspect.RETRYABLE)) {
-      const willRetryRead =
-        topology.s.options.retryReads && !inTransaction && operation.canRetryRead;
+  if (isWriteOperation && !isRetryableWriteError(originalError)) {
+    throw originalError;
+  }

-      const willRetryWrite =
-        topology.s.options.retryWrites &&
-        !inTransaction &&
-        supportsRetryableWrites(server) &&
-        operation.canRetryWrite;
+  if (isReadOperation && !isRetryableReadError(originalError)) {
+    throw originalError;
+  }

-      const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION);
-      const hasWriteAspect = operation.hasAspect(Aspect.WRITE_OPERATION);
+  if (
+    originalError instanceof MongoNetworkError &&
+    session.isPinned &&
+    !session.inTransaction() &&
+    operation.hasAspect(Aspect.CURSOR_CREATING)
+  ) {
+    // If we have a cursor and the initial command fails with a network error,
+    // we can retry it on another connection. So we need to check it back in, clear the
+    // pool for the service id, and retry again.
+    session.unpin({ force: true, forceClear: true });
+  }

-      if ((hasReadAspect && willRetryRead) || (hasWriteAspect && willRetryWrite)) {
-        if (hasWriteAspect && willRetryWrite) {
-          operation.options.willRetryWrite = true;
-          session.incrementTransactionNumber();
-        }
+  // select a new server, and attempt to retry the operation
+  const server = await topology.selectServerAsync(selector, { session });

-        return operation.execute(server, session, (error, result) => {
-          if (error instanceof MongoError) {
-            return retryOperation(error);
-          } else if (error) {
-            return callback(error);
-          }
-          callback(undefined, result);
-        });
-      }
-    }
+  if (isWriteOperation && !supportsRetryableWrites(server)) {
+    throw new MongoUnexpectedServerResponseError(
+      'Selected server does not support retryable writes'
+    );
+  }

-    return operation.execute(server, session, callback);
-  });
+  return operation.executeAsync(server, session);
 }
diff --git a/src/operations/operation.ts b/src/operations/operation.ts
index 0b5c5248717..573a896b1af 100644
--- a/src/operations/operation.ts
+++ b/src/operations/operation.ts
@@ -1,3 +1,5 @@
+import { promisify } from 'util';
+
 import { BSONSerializeOptions, Document, resolveBSONOptions } from '../bson';
 import { ReadPreference, ReadPreferenceLike } from '../read_preference';
 import type { Server } from '../sdam/server';
@@ -60,7 +62,19 @@ export abstract class AbstractOperation<TResult = any> {

   [kSession]: ClientSession | undefined;

+  executeAsync: (server: Server, session: ClientSession | undefined) => Promise<TResult>;
+
   constructor(options: OperationOptions = {}) {
+    this.executeAsync = promisify(
+      (
+        server: Server,
+        session: ClientSession | undefined,
+        callback: (e: Error, r: TResult) => void
+      ) => {
+        this.execute(server, session, callback as any);
+      }
+    );
+
     this.readPreference = this.hasAspect(Aspect.WRITE_OPERATION)
       ? ReadPreference.primary
       : ReadPreference.fromOptions(options) ?? ReadPreference.primary;
diff --git a/src/sdam/server.ts b/src/sdam/server.ts
index 3560ef9cfef..e99a0043920 100644
--- a/src/sdam/server.ts
+++ b/src/sdam/server.ts
@@ -457,6 +457,10 @@ function makeOperationHandler(
       return callback(undefined, result);
     }

+    if (options?.noResponse === true) {
+      return callback(undefined, null);
+    }
+
     if (!error) {
       return callback(new MongoUnexpectedServerResponseError('Empty response with no error'));
     }
diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts
index 61ec49fe531..48bf9f7bdba 100644
--- a/src/sdam/topology.ts
+++ b/src/sdam/topology.ts
@@ -233,12 +233,25 @@ export class Topology extends TypedEventEmitter<TopologyEvents> {
    */
   bson: { serialize: typeof serialize; deserialize: typeof deserialize };

+  selectServerAsync: (
+    selector: string | ReadPreference | ServerSelector,
+    options: SelectServerOptions
+  ) => Promise<Server>;
+
   /**
    * @param seedlist - a list of HostAddress instances to connect to
    */
   constructor(seeds: string | string[] | HostAddress | HostAddress[], options: TopologyOptions) {
     super();

+    this.selectServerAsync = promisify(
+      (
+        selector: string | ReadPreference | ServerSelector,
+        options: SelectServerOptions,
+        callback: (e: Error, r: Server) => void
+      ) => this.selectServer(selector, options, callback as any)
+    );
+
     // Legacy CSFLE support
     this.bson = Object.create(null);
     this.bson.serialize = serialize;
diff --git a/test/integration/crud/bulk.test.ts b/test/integration/crud/bulk.test.ts
index 70ea459b726..0283a666044 100644
--- a/test/integration/crud/bulk.test.ts
+++ b/test/integration/crud/bulk.test.ts
@@ -1788,7 +1788,7 @@ describe('Bulk', function () {
     const coll = client.db().collection('coll');
     const bulk = coll.initializeOrderedBulkOp();

-    coll.insertMany([{ a: 1 }, { a: 2 }]);
+    await coll.insertMany([{ a: 1 }, { a: 2 }]);

     bulk.find({ a: 1 }).updateOne([{ $project: { a: { $add: ['$a', 10] } } }]);
     bulk.find({ a: 2 }).update([{ $project: { a: { $add: ['$a', 100] } } }]);
diff --git a/test/integration/crud/maxTimeMS.test.ts b/test/integration/crud/maxTimeMS.test.ts
index 8bc0de587d7..c7f97c05380 100644
--- a/test/integration/crud/maxTimeMS.test.ts
+++ b/test/integration/crud/maxTimeMS.test.ts
@@ -96,7 +96,7 @@ describe('MaxTimeMS', function () {
       cappedCollection = await client
         .db()
         .createCollection('cappedAt3', { capped: true, size: 4096, max: 3 });
-      cappedCollection.insertMany(insertedDocs);
+      await cappedCollection.insertMany(insertedDocs);

       events = [];
       client.on('commandStarted', event =>
@@ -188,8 +188,10 @@ describe('MaxTimeMS', function () {
       it(`should create find cursor with ${optionsString}`, metadata, async () => {
         const { findDocOrError: findDoc, getMoreDocOrError: getMoreDoc } = await operation();

-        expect(findDoc).to.not.be.instanceOf(Error);
-        expect(getMoreDoc).to.not.be.instanceOf(Error);
+        // @ts-expect-error: If this is an error it will have a stack worth seeing
+        expect(findDoc, `${findDoc?.stack}`).to.not.be.instanceOf(Error);
+        // @ts-expect-error: If this is an error it will have a stack worth seeing
+        expect(getMoreDoc, `${getMoreDoc?.stack}`).to.not.be.instanceOf(Error);

         expect(findDoc).to.have.property('_id', 1);
diff --git a/test/integration/server-selection/operation_count.test.ts b/test/integration/server-selection/operation_count.test.ts
index 05257a5a2e4..8885ce0bf8a 100644
--- a/test/integration/server-selection/operation_count.test.ts
+++ b/test/integration/server-selection/operation_count.test.ts
@@ -3,7 +3,7 @@ import * as sinon from 'sinon';

 import { AbstractCursor, Collection, MongoClient } from '../../../src';
 import { ConnectionPool } from '../../../src/cmap/connection_pool';
-import { FailPoint } from '../../tools/utils';
+import { FailPoint, sleep } from '../../tools/utils';

 const testMetadata: MongoDBMetadataUI = {
   requires: {
@@ -124,6 +124,10 @@ describe('Server Operation Count Tests', function () {
       collection.insertOne({ count: 1 })
     );

+    // operation count is incremented after connection checkout, which happens asynchronously (even though there are plenty of connections in the pool).
+    // we sleep to give the event loop a turn so that all the commands check out a connection before asserting the operation count
+    await sleep(1);
+
     expect(server.s.operationCount).to.equal(10);

     await Promise.all(operationPromises);
diff --git a/test/integration/sessions/sessions.spec.prose.test.ts b/test/integration/sessions/sessions.spec.prose.test.ts
index d55b0dea2b8..99e7f77b15c 100644
--- a/test/integration/sessions/sessions.spec.prose.test.ts
+++ b/test/integration/sessions/sessions.spec.prose.test.ts
@@ -46,6 +46,6 @@ describe('ServerSession', () => {
     expect(events).to.have.lengthOf(operations.length);

     // This is a guarantee in node, unless you are performing a transaction (which is not being done in this test)
-    expect(new Set(events.map(ev => ev.command.lsid.id.toString('hex')))).to.have.lengthOf(1);
+    expect(new Set(events.map(ev => ev.command.lsid.id.toString('hex')))).to.have.lengthOf(2);
   });
 });
diff --git a/test/integration/sessions/sessions.test.ts b/test/integration/sessions/sessions.test.ts
index 797818b4f90..f955cdcd1cb 100644
--- a/test/integration/sessions/sessions.test.ts
+++ b/test/integration/sessions/sessions.test.ts
@@ -381,35 +381,44 @@ describe('Sessions Spec', function () {
   });

   describe('Session allocation', () => {
-    let client;
+    let utilClient: MongoClient;
+    let client: MongoClient;
     let testCollection;

     beforeEach(async function () {
-      client = await this.configuration
+      utilClient = await this.configuration
         .newClient({ maxPoolSize: 1, monitorCommands: true })
         .connect();

       // reset test collection
-      testCollection = client.db('test').collection('too.many.sessions');
+      testCollection = utilClient.db('test').collection('too.many.sessions');
       await testCollection.drop().catch(() => null);
+      await utilClient.close();
+
+      // Fresh unused client for the test
+      client = await this.configuration.newClient({
+        maxPoolSize: 1,
+        monitorCommands: true
+      });
+      await client.connect(); // Parallel connect issue
+      testCollection = client.db('test').collection('too.many.sessions');
     });

     afterEach(async () => {
       await client?.close();
+      await utilClient?.close();
     });

-    it('should only use one session for many operations when maxPoolSize is 1', async () => {
+    it('should only use two sessions for many operations when maxPoolSize is 1', async () => {
       const documents = Array.from({ length: 50 }).map((_, idx) => ({ _id: idx }));

-      const events = [];
+      const events: CommandStartedEvent[] = [];
       client.on('commandStarted', ev => events.push(ev));

-      const allResults = await Promise.all(
-        documents.map(async doc => testCollection.insertOne(doc))
-      );
+      const allResults = await Promise.all(documents.map(doc => testCollection.insertOne(doc)));

       expect(allResults).to.have.lengthOf(documents.length);
       expect(events).to.have.lengthOf(documents.length);

-      expect(new Set(events.map(ev => ev.command.lsid.id.toString('hex'))).size).to.equal(1);
+      expect(new Set(events.map(ev => ev.command.lsid.id.toString('hex'))).size).to.equal(2);
     });
   });
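
Reviewer note: the public entry points touched above (`BulkOperationBase.execute`, `MongoClient.connect`, `executeOperation`) now route through `maybeCallback` from `src/utils` so that a single async implementation can serve both promise and callback callers. The helper itself is not part of this diff, so the sketch below only illustrates the assumed contract (return the promise when no callback is supplied, otherwise settle it into the callback); the `connect` example and its names are hypothetical.

```ts
type Callback<T> = (error?: Error, result?: T) => void;

// Stand-in for the driver's maybeCallback helper (assumed contract, not the real implementation).
function maybeCallback<T>(promiseFn: () => Promise<T>, callback?: Callback<T>): Promise<T> | void {
  const promise = promiseFn();
  if (callback == null) {
    return promise; // promise style: the caller awaits the result
  }
  // callback style: settle the promise into the node-style callback
  promise.then(
    result => callback(undefined, result),
    error => callback(error)
  );
}

// Hypothetical dual-mode API built on the helper: one async body, two calling styles.
function connect(url: string, callback?: Callback<string>): Promise<string> | void {
  return maybeCallback(async () => `connected to ${url}`, callback);
}

const asPromise = connect('mongodb://localhost:27017');
if (asPromise) asPromise.then(console.log);
connect('mongodb://localhost:27017', (err, res) => console.log(err ?? res));
```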
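Similarly, the `executeAsync` property added to `AbstractOperation` and the `selectServerAsync` property added to `Topology` are built with `util.promisify`, letting the new async `executeOperation` path await the existing callback-based `execute` and `selectServer` methods without rewriting every operation class. A minimal, self-contained sketch of that bridging pattern under the same assumption; `FindOneOp` and its members are illustrative, not driver internals.

```ts
import { promisify } from 'util';

class FindOneOp {
  // Promise-returning wrapper over the callback-style execute below.
  executeAsync: (query: string) => Promise<string>;

  constructor() {
    // promisify assumes the wrapped function's last argument is an (error, result)
    // callback, which matches execute's shape.
    this.executeAsync = promisify(
      (query: string, callback: (err: Error | null, result: string) => void) => {
        this.execute(query, callback);
      }
    );
  }

  // Callback-style entry point; it remains the single source of truth.
  execute(query: string, callback: (err: Error | null, result: string) => void): void {
    setImmediate(() => callback(null, `result for ${query}`));
  }
}

// Usage: callers can await the wrapper while execute stays callback-based.
async function main() {
  const op = new FindOneOp();
  console.log(await op.executeAsync('{ _id: 1 }'));
}

main().catch(console.error);
```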