diff --git a/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java index 52006c1177a..b3b32b919cc 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java @@ -18,7 +18,6 @@ import com.mongodb.Function; import com.mongodb.WriteConcern; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; @@ -32,8 +31,8 @@ public class AbortTransactionOperation extends TransactionOperation { private BsonDocument recoveryToken; - public AbortTransactionOperation(final TimeoutSettings timeoutSettings, final WriteConcern writeConcern) { - super(timeoutSettings, writeConcern); + public AbortTransactionOperation(final WriteConcern writeConcern) { + super(writeConcern); } public AbortTransactionOperation recoveryToken(@Nullable final BsonDocument recoveryToken) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java index b0b6d63f69c..07bf7037b55 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java @@ -19,7 +19,6 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -39,19 +38,12 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ abstract class AbstractWriteSearchIndexOperation implements AsyncWriteOperation, WriteOperation { - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; - AbstractWriteSearchIndexOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace) { - this.timeoutSettings = timeoutSettings; + AbstractWriteSearchIndexOperation(final MongoNamespace namespace) { this.namespace = namespace; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Void execute(final WriteBinding binding) { return withConnection(binding, connection -> { diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java index e6f64665666..07943560b40 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java @@ -20,7 +20,6 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -44,14 +43,13 @@ public class AggregateOperation implements AsyncExplainableReadOperation>, ExplainableReadOperation> { private final AggregateOperationImpl wrapped; - public AggregateOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final List pipeline, final Decoder decoder) { - this(timeoutSettings, namespace, pipeline, decoder, AggregationLevel.COLLECTION); + public AggregateOperation(final MongoNamespace namespace, final List pipeline, final Decoder decoder) { + this(namespace, pipeline, decoder, AggregationLevel.COLLECTION); } - public AggregateOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final List pipeline, final Decoder decoder, final AggregationLevel aggregationLevel) { - this.wrapped = new AggregateOperationImpl<>(timeoutSettings, namespace, pipeline, decoder, aggregationLevel); + public AggregateOperation(final MongoNamespace namespace, final List pipeline, final Decoder decoder, + final AggregationLevel aggregationLevel) { + this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, decoder, aggregationLevel); } public List getPipeline() { @@ -131,11 +129,6 @@ public AggregateOperation hint(@Nullable final BsonValue hint) { return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return wrapped.getTimeoutSettings(); - } - public AggregateOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { wrapped.timeoutMode(timeoutMode); return this; @@ -161,7 +154,7 @@ public AsyncReadOperation asAsyncExplainableOperation(@Nullable final Exp } CommandReadOperation createExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { - return new CommandReadOperation<>(wrapped.getTimeoutSettings(), getNamespace().getDatabaseName(), + return new CommandReadOperation<>(getNamespace().getDatabaseName(), (operationContext, serverDescription, connectionDescription) -> asExplainCommand(wrapped.getCommand(operationContext, MIN_WIRE_VERSION), verbosity), resultDecoder); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java 
b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java index 77338afd3b1..0edc474d9da 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java @@ -19,7 +19,6 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -55,7 +54,6 @@ class AggregateOperationImpl implements AsyncReadOperation FIELD_NAMES_WITH_RESULT = Arrays.asList(RESULT, FIRST_BATCH); - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final List pipeline; private final Decoder decoder; @@ -71,18 +69,17 @@ class AggregateOperationImpl implements AsyncReadOperation pipeline, final Decoder decoder, final AggregationLevel aggregationLevel) { - this(timeoutSettings, namespace, pipeline, decoder, + this(namespace, pipeline, decoder, defaultAggregateTarget(notNull("aggregationLevel", aggregationLevel), notNull("namespace", namespace).getCollectionName()), defaultPipelineCreator(pipeline)); } - AggregateOperationImpl(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, + AggregateOperationImpl(final MongoNamespace namespace, final List pipeline, final Decoder decoder, final AggregateTarget aggregateTarget, final PipelineCreator pipelineCreator) { - this.timeoutSettings = timeoutSettings; this.namespace = notNull("namespace", namespace); this.pipeline = notNull("pipeline", pipeline); this.decoder = notNull("decoder", decoder); @@ -158,13 +155,7 @@ BsonValue getHint() { return hint; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - public AggregateOperationImpl timeoutMode(@Nullable final TimeoutMode timeoutMode) { - isTrueArgument("timeoutMode requires timeoutMS.", timeoutMode == null || timeoutSettings.getTimeoutMS() != null); if (timeoutMode != null) { this.timeoutMode = timeoutMode; } @@ -229,14 +220,14 @@ BsonDocument getCommand(final OperationContext operationContext, final int maxWi private CommandReadTransformer> transformer() { return (result, source, connection) -> - new CommandBatchCursor<>(getTimeoutMode(), result, batchSize != null ? batchSize : 0, getMaxTimeForCursor(), decoder, - comment, source, connection); + new CommandBatchCursor<>(getTimeoutMode(), result, batchSize != null ? batchSize : 0, + getMaxTimeForCursor(source.getOperationContext()), decoder, comment, source, connection); } private CommandReadTransformerAsync> asyncTransformer() { return (result, source, connection) -> - new AsyncCommandBatchCursor<>(getTimeoutMode(), result, batchSize != null ? batchSize : 0, getMaxTimeForCursor(), decoder, - comment, source, connection); + new AsyncCommandBatchCursor<>(getTimeoutMode(), result, batchSize != null ? 
batchSize : 0, + getMaxTimeForCursor(source.getOperationContext()), decoder, comment, source, connection); } private TimeoutMode getTimeoutMode() { @@ -247,8 +238,8 @@ private TimeoutMode getTimeoutMode() { return localTimeoutMode; } - private long getMaxTimeForCursor() { - return timeoutSettings.getMaxAwaitTimeMS(); + private long getMaxTimeForCursor(final OperationContext operationContext) { + return operationContext.getTimeoutContext().getMaxAwaitTimeMS(); } interface AggregateTarget { diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java index 1654cbd5aaa..ae0f8de10cd 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java @@ -22,7 +22,6 @@ import com.mongodb.WriteConcern; import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -57,7 +56,6 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public class AggregateToCollectionOperation implements AsyncReadOperation, ReadOperation { - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final List pipeline; private final WriteConcern writeConcern; @@ -71,15 +69,13 @@ public class AggregateToCollectionOperation implements AsyncReadOperation, private BsonValue hint; private BsonDocument variables; - public AggregateToCollectionOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final List pipeline, final ReadConcern readConcern, final WriteConcern writeConcern) { - this(timeoutSettings, namespace, pipeline, readConcern, writeConcern, AggregationLevel.COLLECTION); + public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, final ReadConcern readConcern, + final WriteConcern writeConcern) { + this(namespace, pipeline, readConcern, writeConcern, AggregationLevel.COLLECTION); } - public AggregateToCollectionOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final List pipeline, @Nullable final ReadConcern readConcern, @Nullable final WriteConcern writeConcern, - final AggregationLevel aggregationLevel) { - this.timeoutSettings = timeoutSettings; + public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, + @Nullable final ReadConcern readConcern, @Nullable final WriteConcern writeConcern, final AggregationLevel aggregationLevel) { this.namespace = notNull("namespace", namespace); this.pipeline = notNull("pipeline", pipeline); this.writeConcern = writeConcern; @@ -156,11 +152,6 @@ public AggregateToCollectionOperation timeoutMode(@Nullable final TimeoutMode ti return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Void execute(final ReadBinding binding) { return executeRetryableRead(binding, diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java index 28141f1ff02..5f6e3c9fbb9 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java @@ -62,18 +62,21 @@ import java.util.List; import static com.mongodb.assertions.Assertions.assertNotNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; /** *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public final class AsyncOperations { private final Operations operations; + private final TimeoutSettings timeoutSettings; public AsyncOperations(final MongoNamespace namespace, final Class documentClass, final ReadPreference readPreference, final CodecRegistry codecRegistry, final ReadConcern readConcern, final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads, final TimeoutSettings timeoutSettings) { this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, - retryWrites, retryReads, timeoutSettings); + retryWrites, retryReads); + this.timeoutSettings = timeoutSettings; } public MongoNamespace getNamespace() { @@ -100,6 +103,10 @@ public WriteConcern getWriteConcern() { return operations.getWriteConcern(); } + public TimeoutSettings getTimeoutSettings() { + return timeoutSettings; + } + public boolean isRetryWrites() { return operations.isRetryWrites(); } @@ -108,8 +115,42 @@ public boolean isRetryReads() { return operations.isRetryReads(); } - public TimeoutSettings getTimeoutSettings() { - return operations.getTimeoutSettings(); + public TimeoutSettings createTimeoutSettings(final long maxTimeMS) { + return timeoutSettings.withMaxTimeMS(maxTimeMS); + } + + public TimeoutSettings createTimeoutSettings(final long maxTimeMS, final long maxAwaitTimeMS) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final CountOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final EstimatedDocumentCountOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOptions options) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndDeleteOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndReplaceOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndUpdateOptions options) { + return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); } public AsyncReadOperation countDocuments(final Bson filter, final CountOptions options) { @@ -136,14 +177,13 @@ public AsyncReadOperation> find(final MongoN } public AsyncReadOperation> distinct(final String fieldName, final Bson filter, - final Class resultClass, final long maxTimeMS, - final Collation collation, final BsonValue comment) { - return operations.distinct(fieldName, filter, resultClass, maxTimeMS, collation, comment); + final Class resultClass, final Collation collation, final BsonValue comment) { + return operations.distinct(fieldName, filter, resultClass, collation, comment); } - public AsyncExplainableReadOperation> aggregate(final List pipeline, + public AsyncExplainableReadOperation> aggregate( + final List pipeline, final Class resultClass, - final long maxTimeMS, final long 
maxAwaitTimeMS, @Nullable final TimeoutMode timeoutMode, @Nullable final Integer batchSize, final Collation collation, final Bson hint, @@ -152,15 +192,15 @@ public AsyncExplainableReadOperation> aggreg final Bson variables, final Boolean allowDiskUse, final AggregationLevel aggregationLevel) { - return operations.aggregate(pipeline, resultClass, maxTimeMS, maxAwaitTimeMS, timeoutMode, batchSize, collation, hint, hintString, + return operations.aggregate(pipeline, resultClass, timeoutMode, batchSize, collation, hint, hintString, comment, variables, allowDiskUse, aggregationLevel); } - public AsyncReadOperation aggregateToCollection(final List pipeline, final long maxTimeMS, + public AsyncReadOperation aggregateToCollection(final List pipeline, @Nullable final TimeoutMode timeoutMode, final Boolean allowDiskUse, final Boolean bypassDocumentValidation, final Collation collation, final Bson hint, final String hintString, final BsonValue comment, final Bson variables, final AggregationLevel aggregationLevel) { - return operations.aggregateToCollection(pipeline, maxTimeMS, timeoutMode, allowDiskUse, bypassDocumentValidation, collation, hint, + return operations.aggregateToCollection(pipeline, timeoutMode, allowDiskUse, bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel); } @@ -168,21 +208,21 @@ public AsyncReadOperation aggregateToCollection(final List public AsyncWriteOperation mapReduceToCollection(final String databaseName, final String collectionName, final String mapFunction, final String reduceFunction, final String finalizeFunction, final Bson filter, final int limit, - final long maxTimeMS, final boolean jsMode, final Bson scope, + final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, final com.mongodb.client.model.MapReduceAction action, final Boolean bypassDocumentValidation, final Collation collation) { return operations.mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, limit, - maxTimeMS, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation); + jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation); } public AsyncReadOperation> mapReduce(final String mapFunction, final String reduceFunction, final String finalizeFunction, final Class resultClass, final Bson filter, final int limit, - final long maxTimeMS, final boolean jsMode, final Bson scope, + final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, final Collation collation) { - return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, maxTimeMS, jsMode, scope, + return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, jsMode, scope, sort, verbose, collation); } @@ -295,14 +335,9 @@ public AsyncWriteOperation dropSearchIndex(final String indexName) { } public AsyncExplainableReadOperation> listSearchIndexes(final Class resultClass, - final long maxTimeMS, - @Nullable final String indexName, - @Nullable final Integer batchSize, - @Nullable final Collation collation, - @Nullable final BsonValue comment, - @Nullable final Boolean allowDiskUse) { - return operations.listSearchIndexes(resultClass, maxTimeMS, indexName, batchSize, collation, - comment, allowDiskUse); + @Nullable final String indexName, @Nullable final Integer batchSize, @Nullable final Collation collation, + @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse) { + return 
operations.listSearchIndexes(resultClass, indexName, batchSize, collation, comment, allowDiskUse); } public AsyncWriteOperation dropIndex(final String indexName, final DropIndexOptions options) { @@ -315,27 +350,27 @@ public AsyncWriteOperation dropIndex(final Bson keys, final DropIndexOptio public AsyncReadOperation> listCollections(final String databaseName, final Class resultClass, final Bson filter, final boolean collectionNamesOnly, final boolean authorizedCollections, - @Nullable final Integer batchSize, final long maxTimeMS, final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { + @Nullable final Integer batchSize, final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { return operations.listCollections(databaseName, resultClass, filter, collectionNamesOnly, authorizedCollections, - batchSize, maxTimeMS, comment, timeoutMode); + batchSize, comment, timeoutMode); } public AsyncReadOperation> listDatabases(final Class resultClass, final Bson filter, - final Boolean nameOnly, final long maxTimeMS, final Boolean authorizedDatabases, final BsonValue comment) { - return operations.listDatabases(resultClass, filter, nameOnly, maxTimeMS, authorizedDatabases, comment); + final Boolean nameOnly, final Boolean authorizedDatabases, final BsonValue comment) { + return operations.listDatabases(resultClass, filter, nameOnly, authorizedDatabases, comment); } public AsyncReadOperation> listIndexes(final Class resultClass, - @Nullable final Integer batchSize, final long maxTimeMS, final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { - return operations.listIndexes(resultClass, batchSize, maxTimeMS, comment, timeoutMode); + @Nullable final Integer batchSize, final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { + return operations.listIndexes(resultClass, batchSize, comment, timeoutMode); } public AsyncReadOperation> changeStream(final FullDocument fullDocument, final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder, final ChangeStreamLevel changeStreamLevel, final Integer batchSize, final Collation collation, - final BsonValue comment, final long maxAwaitTimeMS, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime, + final BsonValue comment, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime, final BsonDocument startAfter, final boolean showExpandedEvents) { return operations.changeStream(fullDocument, fullDocumentBeforeChange, pipeline, decoder, changeStreamLevel, batchSize, - collation, comment, maxAwaitTimeMS, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); + collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncReadOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncReadOperation.java index 324e7c5132a..75b18f5cb00 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncReadOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncReadOperation.java @@ -16,7 +16,6 @@ package com.mongodb.internal.operation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -29,11 +28,6 @@ */ public interface AsyncReadOperation { - /** - * @return the timeout settings for this operation - */ - TimeoutSettings getTimeoutSettings(); - /** * General execute which can return anything of type T 
* diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncWriteOperation.java index 460a02a8be1..334c3bde8ac 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncWriteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncWriteOperation.java @@ -16,7 +16,6 @@ package com.mongodb.internal.operation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; @@ -29,11 +28,6 @@ */ public interface AsyncWriteOperation { - /** - * @return the timeout settings for this operation - */ - TimeoutSettings getTimeoutSettings(); - /** * General execute which can return anything of type T * diff --git a/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java b/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java index 57687fcf528..e5c8f2710fd 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java @@ -20,7 +20,6 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -49,7 +48,6 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public abstract class BaseFindAndModifyOperation implements AsyncWriteOperation, WriteOperation { - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final WriteConcern writeConcern; private final boolean retryWrites; @@ -64,9 +62,8 @@ public abstract class BaseFindAndModifyOperation implements AsyncWriteOperati private BsonValue comment; private BsonDocument variables; - protected BaseFindAndModifyOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder) { - this.timeoutSettings = timeoutSettings; + protected BaseFindAndModifyOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, + final Decoder decoder) { this.namespace = notNull("namespace", namespace); this.writeConcern = notNull("writeConcern", writeConcern); this.retryWrites = retryWrites; @@ -181,11 +178,6 @@ public BaseFindAndModifyOperation let(@Nullable final BsonDocument variables) return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - protected abstract FieldNameValidator getFieldNameValidator(); protected abstract void specializeCommand(BsonDocument initialCommand, ConnectionDescription connectionDescription); diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java index b76b0a1867d..0afc4c5f1d0 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java @@ -20,7 +20,6 @@ import com.mongodb.client.model.Collation; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -63,17 +62,16 @@ public class ChangeStreamOperation implements AsyncReadOperation pipeline, final Decoder decoder) { - this(timeoutSettings, namespace, fullDocument, fullDocumentBeforeChange, pipeline, decoder, ChangeStreamLevel.COLLECTION); + public ChangeStreamOperation(final MongoNamespace namespace, final FullDocument fullDocument, + final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder) { + this(namespace, fullDocument, fullDocumentBeforeChange, pipeline, decoder, ChangeStreamLevel.COLLECTION); } - public ChangeStreamOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final FullDocument fullDocument, final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, - final Decoder decoder, final ChangeStreamLevel changeStreamLevel) { - this.wrapped = new AggregateOperationImpl<>(timeoutSettings, namespace, pipeline, RAW_BSON_DOCUMENT_CODEC, - getAggregateTarget(), getPipelineCreator()); + public ChangeStreamOperation(final MongoNamespace namespace, final FullDocument fullDocument, + final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder, + final ChangeStreamLevel changeStreamLevel) { + this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, RAW_BSON_DOCUMENT_CODEC, getAggregateTarget(), + getPipelineCreator()); this.fullDocument = notNull("fullDocument", 
fullDocument); this.fullDocumentBeforeChange = notNull("fullDocumentBeforeChange", fullDocumentBeforeChange); this.decoder = notNull("decoder", decoder); @@ -169,11 +167,6 @@ public ChangeStreamOperation showExpandedEvents(final boolean showExpandedEve return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return wrapped.getTimeoutSettings(); - } - @Override public BatchCursor execute(final ReadBinding binding) { CommandBatchCursor cursor = (CommandBatchCursor) wrapped.execute(binding); diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java index a8b1674b362..ea89dfb303e 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java @@ -16,7 +16,6 @@ package com.mongodb.internal.operation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -34,29 +33,20 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public class CommandReadOperation implements AsyncReadOperation, ReadOperation { - private final TimeoutSettings timeoutSettings; private final String databaseName; private final CommandCreator commandCreator; private final Decoder decoder; - public CommandReadOperation(final TimeoutSettings timeoutSettings, final String databaseName, - final BsonDocument command, final Decoder decoder) { - this(timeoutSettings, databaseName, (operationContext, serverDescription, connectionDescription) -> command, decoder); + public CommandReadOperation(final String databaseName, final BsonDocument command, final Decoder decoder) { + this(databaseName, (operationContext, serverDescription, connectionDescription) -> command, decoder); } - public CommandReadOperation(final TimeoutSettings timeoutSettings, final String databaseName, - final CommandCreator commandCreator, final Decoder decoder) { - this.timeoutSettings = timeoutSettings; + public CommandReadOperation(final String databaseName, final CommandCreator commandCreator, final Decoder decoder) { this.databaseName = notNull("databaseName", databaseName); this.commandCreator = notNull("commandCreator", commandCreator); this.decoder = notNull("decoder", decoder); } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public T execute(final ReadBinding binding) { return executeRetryableRead(binding, databaseName, commandCreator, decoder, diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java index 77b53787db4..933776af3e0 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java @@ -25,7 +25,6 @@ import com.mongodb.MongoTimeoutException; import com.mongodb.MongoWriteConcernException; import com.mongodb.WriteConcern; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -50,13 +49,12 @@ public class CommitTransactionOperation extends TransactionOperation { private final boolean alreadyCommitted; private BsonDocument recoveryToken; - public CommitTransactionOperation(final TimeoutSettings timeoutSettings, final WriteConcern writeConcern) { - this(timeoutSettings, writeConcern, false); + public CommitTransactionOperation(final WriteConcern writeConcern) { + this(writeConcern, false); } - public CommitTransactionOperation(final TimeoutSettings timeoutSettings, final WriteConcern writeConcern, - final boolean alreadyCommitted) { - super(timeoutSettings, writeConcern); + public CommitTransactionOperation(final WriteConcern writeConcern, final boolean alreadyCommitted) { + super(writeConcern); this.alreadyCommitted = alreadyCommitted; } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java index e93f7848d1c..1095dd44508 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java @@ -18,7 +18,6 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; -import com.mongodb.internal.TimeoutSettings; import 
com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -40,7 +39,6 @@ */ public class CountDocumentsOperation implements AsyncReadOperation, ReadOperation { private static final Decoder DECODER = new BsonDocumentCodec(); - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private boolean retryReads; private BsonDocument filter; @@ -50,8 +48,7 @@ public class CountDocumentsOperation implements AsyncReadOperation, ReadOp private long limit; private Collation collation; - public CountDocumentsOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace) { - this.timeoutSettings = timeoutSettings; + public CountDocumentsOperation(final MongoNamespace namespace) { this.namespace = notNull("namespace", namespace); } @@ -122,11 +119,6 @@ public CountDocumentsOperation comment(@Nullable final BsonValue comment) { return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Long execute(final ReadBinding binding) { try (BatchCursor cursor = getAggregateOperation().execute(binding)) { @@ -152,7 +144,7 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb } private AggregateOperation getAggregateOperation() { - return new AggregateOperation<>(timeoutSettings, namespace, getPipeline(), DECODER) + return new AggregateOperation<>(namespace, getPipeline(), DECODER) .retryReads(retryReads) .collation(collation) .comment(comment) @@ -173,7 +165,7 @@ private List getPipeline() { return pipeline; } - private Long getCountFromAggregateResults(final List results) { + private Long getCountFromAggregateResults(@Nullable final List results) { if (results == null || results.isEmpty()) { return 0L; } else { diff --git a/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java index 7fc3c8a612d..076fa6f0abe 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java @@ -18,7 +18,6 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -44,7 +43,6 @@ */ public class CountOperation implements AsyncReadOperation, ReadOperation { private static final Decoder DECODER = new BsonDocumentCodec(); - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private boolean retryReads; private BsonDocument filter; @@ -53,8 +51,7 @@ public class CountOperation implements AsyncReadOperation, ReadOperation, Wri private static final BsonDocument ENCRYPT_CLUSTERED_INDEX = BsonDocument.parse("{key: {_id: 1}, unique: true}"); private static final BsonArray SAFE_CONTENT_ARRAY = new BsonArray( singletonList(BsonDocument.parse("{key: {__safeContent__: 1}, name: '__safeContent___1'}"))); - private final TimeoutSettings timeoutSettings; private final String databaseName; private final String collectionName; private final WriteConcern writeConcern; @@ -94,9 +92,7 @@ public class CreateCollectionOperation implements AsyncWriteOperation, Wri private String clusteredIndexName; private BsonDocument encryptedFields; - public 
CreateCollectionOperation(final TimeoutSettings timeoutSettings, final String databaseName, - final String collectionName, @Nullable final WriteConcern writeConcern) { - this.timeoutSettings = timeoutSettings; + public CreateCollectionOperation(final String databaseName, final String collectionName, @Nullable final WriteConcern writeConcern) { this.databaseName = notNull("databaseName", databaseName); this.collectionName = notNull("collectionName", collectionName); this.writeConcern = writeConcern; @@ -235,11 +231,6 @@ public CreateCollectionOperation encryptedFields(@Nullable final BsonDocument en return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Void execute(final WriteBinding binding) { return withConnection(binding, connection -> { diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java index 5b75bcc7b1c..5e02ca55bc7 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java @@ -25,7 +25,6 @@ import com.mongodb.MongoNamespace; import com.mongodb.WriteConcern; import com.mongodb.WriteConcernResult; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -60,15 +59,13 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public class CreateIndexesOperation implements AsyncWriteOperation, WriteOperation { - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final List requests; private final WriteConcern writeConcern; private CreateIndexCommitQuorum commitQuorum; - public CreateIndexesOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final List requests, @Nullable final WriteConcern writeConcern) { - this.timeoutSettings = timeoutSettings; + public CreateIndexesOperation(final MongoNamespace namespace, final List requests, + @Nullable final WriteConcern writeConcern) { this.namespace = notNull("namespace", namespace); this.requests = notNull("indexRequests", requests); this.writeConcern = writeConcern; @@ -103,11 +100,6 @@ public CreateIndexesOperation commitQuorum(@Nullable final CreateIndexCommitQuor return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Void execute(final WriteBinding binding) { try { diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateSearchIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateSearchIndexesOperation.java index 64eeaa522a0..1a44d887586 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CreateSearchIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateSearchIndexesOperation.java @@ -17,7 +17,6 @@ package com.mongodb.internal.operation; import com.mongodb.MongoNamespace; -import com.mongodb.internal.TimeoutSettings; import org.bson.BsonArray; import org.bson.BsonDocument; import org.bson.BsonString; @@ -36,9 +35,8 @@ final class CreateSearchIndexesOperation extends AbstractWriteSearchIndexOperati private static final String COMMAND_NAME = "createSearchIndexes"; private final List indexRequests; - CreateSearchIndexesOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final List indexRequests) { - super(timeoutSettings, namespace); + CreateSearchIndexesOperation(final MongoNamespace namespace, final List indexRequests) { + super(namespace); this.indexRequests = assertNotNull(indexRequests); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java index 624ab3cf844..1fe3b21fc51 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java @@ -18,7 +18,6 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -48,7 +47,6 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public class CreateViewOperation implements AsyncWriteOperation, WriteOperation { - private final TimeoutSettings timeoutSettings; private final String databaseName; private final String viewName; private final String viewOn; @@ -56,9 +54,8 @@ public class CreateViewOperation implements AsyncWriteOperation, WriteOper private final WriteConcern writeConcern; private Collation collation; - public CreateViewOperation(final TimeoutSettings timeoutSettings, final String databaseName, - final String viewName, final String viewOn, final List pipeline, final WriteConcern writeConcern) { - this.timeoutSettings = timeoutSettings; + public CreateViewOperation(final String databaseName, final String viewName, final String viewOn, final List pipeline, + final WriteConcern writeConcern) { this.databaseName = notNull("databaseName", databaseName); this.viewName = notNull("viewName", viewName); this.viewOn = notNull("viewOn", viewOn); @@ -126,11 +123,6 @@ public CreateViewOperation collation(@Nullable final Collation collation) { return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Void execute(final WriteBinding binding) { return withConnection(binding, connection -> { diff --git a/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java index 0f4733b15e1..a1190ec3db5 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java @@ -18,7 +18,6 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -49,8 +48,6 @@ */ public class DistinctOperation implements AsyncReadOperation>, ReadOperation> { private static final String VALUES = "values"; - - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final String fieldName; private final Decoder decoder; @@ -59,9 +56,7 @@ public class DistinctOperation implements AsyncReadOperation decoder) { - this.timeoutSettings = timeoutSettings; + public DistinctOperation(final MongoNamespace namespace, final String fieldName, final Decoder decoder) { this.namespace = notNull("namespace", namespace); this.fieldName = notNull("fieldName", fieldName); this.decoder = notNull("decoder", decoder); @@ -103,11 +98,6 @@ public DistinctOperation comment(final BsonValue comment) { return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public BatchCursor execute(final ReadBinding binding) { return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(), createCommandDecoder(), diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java index 275cf781f0a..baf4fb8ab0a 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java @@ -21,7 +21,6 @@ import com.mongodb.MongoOperationTimeoutException; import com.mongodb.WriteConcern; import com.mongodb.internal.TimeoutContext; -import com.mongodb.internal.TimeoutSettings; import 
com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadWriteBinding; import com.mongodb.internal.binding.AsyncWriteBinding; @@ -66,15 +65,12 @@ public class DropCollectionOperation implements AsyncWriteOperation, WriteOperation { private static final String ENCRYPT_PREFIX = "enxcol_."; private static final BsonValueCodec BSON_VALUE_CODEC = new BsonValueCodec(); - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final WriteConcern writeConcern; private BsonDocument encryptedFields; private boolean autoEncryptedFields; - public DropCollectionOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - @Nullable final WriteConcern writeConcern) { - this.timeoutSettings = timeoutSettings; + public DropCollectionOperation(final MongoNamespace namespace, @Nullable final WriteConcern writeConcern) { this.namespace = notNull("namespace", namespace); this.writeConcern = writeConcern; } @@ -93,11 +89,6 @@ public DropCollectionOperation autoEncryptedFields(final boolean autoEncryptedFi return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Void execute(final WriteBinding binding) { BsonDocument localEncryptedFields = getEncryptedFields((ReadWriteBinding) binding); @@ -229,7 +220,7 @@ private BsonDocument getCollectionEncryptedFields(final BsonDocument defaultEncr } private ListCollectionsOperation listCollectionOperation() { - return new ListCollectionsOperation<>(timeoutSettings, namespace.getDatabaseName(), BSON_VALUE_CODEC) + return new ListCollectionsOperation<>(namespace.getDatabaseName(), BSON_VALUE_CODEC) .filter(new BsonDocument("name", new BsonString(namespace.getCollectionName()))) .batchSize(1); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java index d62420e6dcf..de125a261ee 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java @@ -17,7 +17,6 @@ package com.mongodb.internal.operation; import com.mongodb.WriteConcern; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -44,13 +43,10 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public class DropDatabaseOperation implements AsyncWriteOperation, WriteOperation { - private final TimeoutSettings timeoutSettings; private final String databaseName; private final WriteConcern writeConcern; - public DropDatabaseOperation(final TimeoutSettings timeoutSettings, - final String databaseName, @Nullable final WriteConcern writeConcern) { - this.timeoutSettings = timeoutSettings; + public DropDatabaseOperation(final String databaseName, @Nullable final WriteConcern writeConcern) { this.databaseName = notNull("databaseName", databaseName); this.writeConcern = writeConcern; } @@ -59,11 +55,6 @@ public WriteConcern getWriteConcern() { return writeConcern; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Void execute(final WriteBinding binding) { return withConnection(binding, connection -> { diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java index 6c4df4902d7..2bd315e002a 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java @@ -19,7 +19,6 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; import com.mongodb.WriteConcern; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -43,24 +42,19 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public class DropIndexOperation implements AsyncWriteOperation, WriteOperation { - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final String indexName; private final BsonDocument indexKeys; private final WriteConcern writeConcern; - public DropIndexOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final String indexName, @Nullable final WriteConcern writeConcern) { - this.timeoutSettings = timeoutSettings; + public DropIndexOperation(final MongoNamespace namespace, final String indexName, @Nullable final WriteConcern writeConcern) { this.namespace = notNull("namespace", namespace); this.indexName = notNull("indexName", indexName); this.indexKeys = null; this.writeConcern = writeConcern; } - public DropIndexOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final BsonDocument indexKeys, @Nullable final WriteConcern writeConcern) { - this.timeoutSettings = timeoutSettings; + public DropIndexOperation(final MongoNamespace namespace, final BsonDocument indexKeys, @Nullable final WriteConcern writeConcern) { this.namespace = notNull("namespace", namespace); this.indexKeys = notNull("indexKeys", indexKeys); this.indexName = null; @@ -71,11 +65,6 @@ public WriteConcern getWriteConcern() { return writeConcern; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Void execute(final WriteBinding binding) { try { diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropSearchIndexOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropSearchIndexOperation.java index f92fcb8608d..657dedca942 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DropSearchIndexOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DropSearchIndexOperation.java @@ -17,7 +17,6 @@ package com.mongodb.internal.operation; import com.mongodb.MongoNamespace; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonString; @@ -33,8 +32,8 @@ final class DropSearchIndexOperation extends AbstractWriteSearchIndexOperation { private static final String COMMAND_NAME = "dropSearchIndex"; private final String indexName; - DropSearchIndexOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, final String indexName) { - super(timeoutSettings, namespace); + DropSearchIndexOperation(final MongoNamespace namespace, final String indexName) { + super(namespace); this.indexName = indexName; } diff --git a/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java b/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java index b3184b6ed2b..6610b853e7b 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java @@ -19,7 +19,6 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; import com.mongodb.connection.ConnectionDescription; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -48,13 +47,11 @@ */ public class EstimatedDocumentCountOperation implements AsyncReadOperation, ReadOperation { private static final Decoder DECODER = new 
BsonDocumentCodec(); - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private boolean retryReads; private BsonValue comment; - public EstimatedDocumentCountOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace) { - this.timeoutSettings = timeoutSettings; + public EstimatedDocumentCountOperation(final MongoNamespace namespace) { this.namespace = notNull("namespace", namespace); } @@ -73,11 +70,6 @@ public EstimatedDocumentCountOperation comment(@Nullable final BsonValue comment return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Long execute(final ReadBinding binding) { try { diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java index 6d217b08977..0a2a8352ff8 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java @@ -20,7 +20,6 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonBoolean; @@ -37,9 +36,9 @@ */ public class FindAndDeleteOperation extends BaseFindAndModifyOperation { - public FindAndDeleteOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder) { - super(timeoutSettings, namespace, writeConcern, retryWrites, decoder); + public FindAndDeleteOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, + final Decoder decoder) { + super(namespace, writeConcern, retryWrites, decoder); } @Override diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java index 6b048b33132..8ef66cbf8a5 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java @@ -20,7 +20,6 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.validator.MappedFieldNameValidator; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.internal.validator.ReplacingDocumentFieldNameValidator; @@ -49,9 +48,9 @@ public class FindAndReplaceOperation extends BaseFindAndModifyOperation { private boolean upsert; private Boolean bypassDocumentValidation; - public FindAndReplaceOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder, final BsonDocument replacement) { - super(timeoutSettings, namespace, writeConcern, retryWrites, decoder); + public FindAndReplaceOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, + final Decoder decoder, final BsonDocument replacement) { + super(namespace, writeConcern, retryWrites, decoder); this.replacement = notNull("replacement", replacement); } diff --git 
a/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java index ec9ca3e2ecf..796e63d0edc 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java @@ -20,7 +20,6 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.validator.MappedFieldNameValidator; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.internal.validator.UpdateFieldNameValidator; @@ -54,16 +53,16 @@ public class FindAndUpdateOperation extends BaseFindAndModifyOperation { private Boolean bypassDocumentValidation; private List arrayFilters; - public FindAndUpdateOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, + public FindAndUpdateOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder, final BsonDocument update) { - super(timeoutSettings, namespace, writeConcern, retryWrites, decoder); + super(namespace, writeConcern, retryWrites, decoder); this.update = notNull("update", update); this.updatePipeline = null; } - public FindAndUpdateOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder, final List update) { - super(timeoutSettings, namespace, writeConcern, retryWrites, decoder); + public FindAndUpdateOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, + final Decoder decoder, final List update) { + super(namespace, writeConcern, retryWrites, decoder); this.updatePipeline = update; this.update = null; } diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java index 2e54a043758..4a231241219 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java @@ -23,7 +23,6 @@ import com.mongodb.MongoQueryException; import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackSupplier; @@ -41,7 +40,6 @@ import java.util.function.Supplier; -import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; @@ -71,7 +69,6 @@ public class FindOperation implements AsyncExplainableReadOperation>, ExplainableReadOperation> { private static final String FIRST_BATCH = "firstBatch"; - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final Decoder decoder; private boolean retryReads; @@ -95,9 +92,7 @@ public class FindOperation implements AsyncExplainableReadOperation decoder) { - this.timeoutSettings = timeoutSettings; + public FindOperation(final MongoNamespace namespace, 
final Decoder decoder) { this.namespace = notNull("namespace", namespace); this.decoder = notNull("decoder", decoder); } @@ -174,7 +169,6 @@ public FindOperation cursorType(final CursorType cursorType) { } public FindOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { - isTrueArgument("timeoutMode requires timeoutMS.", timeoutMode == null || timeoutSettings.getTimeoutMS() != null); if (timeoutMode != null) { this.timeoutMode = timeoutMode; } @@ -289,11 +283,6 @@ public FindOperation allowDiskUse(@Nullable final Boolean allowDiskUse) { return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public BatchCursor execute(final ReadBinding binding) { IllegalStateException invalidTimeoutModeException = invalidTimeoutModeException(); @@ -374,7 +363,7 @@ public AsyncReadOperation asAsyncExplainableOperation(@Nullable final Exp } CommandReadOperation createExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { - return new CommandReadOperation<>(timeoutSettings, getNamespace().getDatabaseName(), + return new CommandReadOperation<>(getNamespace().getDatabaseName(), (operationContext, serverDescription, connectionDescription) -> asExplainCommand(getCommand(operationContext, MIN_WIRE_VERSION), verbosity), resultDecoder); } @@ -469,16 +458,18 @@ private TimeoutMode getTimeoutMode() { private CommandReadTransformer> transformer() { return (result, source, connection) -> - new CommandBatchCursor<>(getTimeoutMode(), result, batchSize, getMaxTimeForCursor(), decoder, comment, source, connection); + new CommandBatchCursor<>(getTimeoutMode(), result, batchSize, getMaxTimeForCursor(source.getOperationContext()), decoder, + comment, source, connection); } private CommandReadTransformerAsync> asyncTransformer() { return (result, source, connection) -> - new AsyncCommandBatchCursor<>(getTimeoutMode(), result, batchSize, getMaxTimeForCursor(), decoder, comment, source, connection); + new AsyncCommandBatchCursor<>(getTimeoutMode(), result, batchSize, getMaxTimeForCursor(source.getOperationContext()), decoder, + comment, source, connection); } - private long getMaxTimeForCursor() { - return cursorType == CursorType.TailableAwait ? timeoutSettings.getMaxAwaitTimeMS() : 0; + private long getMaxTimeForCursor(final OperationContext operationContext) { + return cursorType == CursorType.TailableAwait ? 
operationContext.getTimeoutContext().getMaxAwaitTimeMS() : 0; } @Nullable diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java index 151955b8446..86844c89aec 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java @@ -18,7 +18,6 @@ import com.mongodb.MongoCommandException; import com.mongodb.client.cursor.TimeoutMode; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; @@ -35,7 +34,6 @@ import java.util.function.Supplier; -import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; @@ -72,7 +70,6 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public class ListCollectionsOperation implements AsyncReadOperation>, ReadOperation> { - private final TimeoutSettings timeoutSettings; private final String databaseName; private final Decoder decoder; private boolean retryReads; @@ -83,9 +80,7 @@ public class ListCollectionsOperation implements AsyncReadOperation decoder) { - this.timeoutSettings = timeoutSettings; + public ListCollectionsOperation(final String databaseName, final Decoder decoder) { this.databaseName = notNull("databaseName", databaseName); this.decoder = notNull("decoder", decoder); } @@ -150,17 +145,12 @@ public boolean isAuthorizedCollections() { return authorizedCollections; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } public TimeoutMode getTimeoutMode() { return timeoutMode; } public ListCollectionsOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { - isTrueArgument("timeoutMode requires timeoutMS.", timeoutMode == null || timeoutSettings.getTimeoutMS() != null); if (timeoutMode != null) { this.timeoutMode = timeoutMode; } diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java index 3cdc2027ecf..47ca72b0038 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java @@ -16,7 +16,6 @@ package com.mongodb.internal.operation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -45,7 +44,6 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public class ListDatabasesOperation implements AsyncReadOperation>, ReadOperation> { - private final TimeoutSettings timeoutSettings; private static final String DATABASES = "databases"; private final Decoder decoder; private boolean retryReads; @@ -54,8 +52,7 @@ public class ListDatabasesOperation implements AsyncReadOperation decoder) { - this.timeoutSettings = timeoutSettings; + public ListDatabasesOperation(final Decoder decoder) { this.decoder = notNull("decoder", decoder); } @@ -105,11 +102,6 @@ public ListDatabasesOperation comment(@Nullable final BsonValue comment) { return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public BatchCursor execute(final ReadBinding binding) { return executeRetryableRead(binding, "admin", getCommandCreator(), CommandResultDocumentCodec.create(decoder, DATABASES), diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java index d97211c2d10..72adf66fabf 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java @@ -19,7 +19,6 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; import com.mongodb.client.cursor.TimeoutMode; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackSupplier; @@ -35,7 +34,6 @@ import java.util.function.Supplier; -import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; @@ -67,7 +65,6 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public class ListIndexesOperation implements AsyncReadOperation>, ReadOperation> { - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final Decoder decoder; private boolean retryReads; @@ -75,8 +72,7 @@ public class ListIndexesOperation implements AsyncReadOperation decoder) { - this.timeoutSettings = timeoutSettings; + public ListIndexesOperation(final MongoNamespace namespace, final Decoder decoder) { this.namespace = notNull("namespace", namespace); this.decoder = notNull("decoder", decoder); } @@ -114,18 +110,12 @@ public TimeoutMode getTimeoutMode() { } public ListIndexesOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { - isTrueArgument("timeoutMode requires timeoutMS.", timeoutMode == null || timeoutSettings.getTimeoutMS() != null); if (timeoutMode != null) { this.timeoutMode = timeoutMode; } return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public BatchCursor execute(final ReadBinding binding) { RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java index aad26e47f8e..0f9a81dbf19 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java @@ -20,7 +20,6 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -46,7 +45,6 @@ final class ListSearchIndexesOperation implements AsyncExplainableReadOperation>, ExplainableReadOperation> { private static final String STAGE_LIST_SEARCH_INDEXES = "$listSearchIndexes"; - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final Decoder decoder; @Nullable @@ -61,11 +59,9 @@ final class ListSearchIndexesOperation private final String indexName; private final boolean retryReads; - ListSearchIndexesOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final Decoder decoder, @Nullable final String indexName, @Nullable final Integer batchSize, - @Nullable final Collation collation, @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse, - final boolean retryReads) { - this.timeoutSettings = timeoutSettings; + ListSearchIndexesOperation(final MongoNamespace namespace, final Decoder decoder, @Nullable final String indexName, + @Nullable final Integer batchSize, @Nullable final Collation collation, @Nullable final BsonValue comment, + @Nullable final Boolean allowDiskUse, final boolean retryReads) { this.namespace = namespace; this.decoder = decoder; this.allowDiskUse = allowDiskUse; @@ -76,11 +72,6 @@ final class ListSearchIndexesOperation this.retryReads = retryReads; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public BatchCursor execute(final ReadBinding binding) { try { @@ -122,7 +113,7 @@ public AsyncReadOperation asAsyncExplainableOperation(@Nullable final Exp private AggregateOperation asAggregateOperation() { BsonDocument searchDefinition = 
getSearchDefinition(); BsonDocument listSearchIndexesStage = new BsonDocument(STAGE_LIST_SEARCH_INDEXES, searchDefinition); - return new AggregateOperation<>(timeoutSettings, namespace, singletonList(listSearchIndexesStage), decoder) + return new AggregateOperation<>(namespace, singletonList(listSearchIndexesStage), decoder) .retryReads(retryReads) .collation(collation) .comment(comment) diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java index 82adfd02405..3dd6110645a 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java @@ -20,7 +20,6 @@ import com.mongodb.MongoNamespace; import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -59,7 +58,6 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public class MapReduceToCollectionOperation implements AsyncWriteOperation, WriteOperation { - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final BsonJavaScript mapFunction; private final BsonJavaScript reduceFunction; @@ -78,10 +76,8 @@ public class MapReduceToCollectionOperation implements AsyncWriteOperation VALID_ACTIONS = asList("replace", "merge", "reduce"); - public MapReduceToCollectionOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final BsonJavaScript mapFunction, final BsonJavaScript reduceFunction, @Nullable final String collectionName, - @Nullable final WriteConcern writeConcern) { - this.timeoutSettings = timeoutSettings; + public MapReduceToCollectionOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction, + final BsonJavaScript reduceFunction, @Nullable final String collectionName, @Nullable final WriteConcern writeConcern) { this.namespace = notNull("namespace", namespace); this.mapFunction = notNull("mapFunction", mapFunction); this.reduceFunction = notNull("reduceFunction", reduceFunction); @@ -211,11 +207,6 @@ public MapReduceToCollectionOperation collation(@Nullable final Collation collat return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public MapReduceStatistics execute(final WriteBinding binding) { return executeCommand(binding, namespace.getDatabaseName(), getCommandCreator(), transformer()); @@ -247,7 +238,7 @@ public AsyncReadOperation asExplainableOperationAsync(final Explai } private CommandReadOperation createExplainableOperation(final ExplainVerbosity explainVerbosity) { - return new CommandReadOperation<>(timeoutSettings, getNamespace().getDatabaseName(), + return new CommandReadOperation<>(getNamespace().getDatabaseName(), (operationContext, serverDescription, connectionDescription) -> asExplainCommand(getCommandCreator().create(operationContext, serverDescription, connectionDescription), explainVerbosity), new BsonDocumentCodec()); diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java index c70ff7a270b..daae59d5c15 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java @@ -19,7 +19,6 @@ import com.mongodb.ExplainVerbosity; import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -56,7 +55,6 @@ */ public class MapReduceWithInlineResultsOperation implements AsyncReadOperation>, ReadOperation> { - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final BsonJavaScript mapFunction; private final BsonJavaScript reduceFunction; @@ -70,9 +68,8 @@ public class MapReduceWithInlineResultsOperation implements AsyncReadOperatio private boolean verbose; private Collation collation; - public MapReduceWithInlineResultsOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final BsonJavaScript mapFunction, final BsonJavaScript reduceFunction, final Decoder decoder) { - this.timeoutSettings = timeoutSettings; + 
public MapReduceWithInlineResultsOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction, + final BsonJavaScript reduceFunction, final Decoder decoder) { this.namespace = notNull("namespace", namespace); this.mapFunction = notNull("mapFunction", mapFunction); this.reduceFunction = notNull("reduceFunction", reduceFunction); @@ -167,11 +164,6 @@ public MapReduceWithInlineResultsOperation collation(@Nullable final Collatio return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public MapReduceBatchCursor execute(final ReadBinding binding) { return executeRetryableRead(binding, namespace.getDatabaseName(), @@ -196,7 +188,7 @@ public AsyncReadOperation asExplainableOperationAsync(final Explai } private CommandReadOperation createExplainableOperation(final ExplainVerbosity explainVerbosity) { - return new CommandReadOperation<>(timeoutSettings, namespace.getDatabaseName(), + return new CommandReadOperation<>(namespace.getDatabaseName(), (operationContext, serverDescription, connectionDescription) -> asExplainCommand(getCommandCreator().create(operationContext, serverDescription, connectionDescription), explainVerbosity), new BsonDocumentCodec()); diff --git a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java index 3fa222e733c..f99f297f96d 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java @@ -23,7 +23,6 @@ import com.mongodb.bulk.BulkWriteResult; import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.TimeoutContext; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackLoop; import com.mongodb.internal.async.function.AsyncCallbackRunnable; @@ -78,7 +77,6 @@ */ public class MixedBulkWriteOperation implements AsyncWriteOperation, WriteOperation { private static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); - private final TimeoutSettings timeoutSettings; private final MongoNamespace namespace; private final List writeRequests; private final boolean ordered; @@ -88,10 +86,8 @@ public class MixedBulkWriteOperation implements AsyncWriteOperation writeRequests, final boolean ordered, final WriteConcern writeConcern, - final boolean retryWrites) { - this.timeoutSettings = timeoutSettings; + public MixedBulkWriteOperation(final MongoNamespace namespace, final List writeRequests, + final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites) { this.namespace = notNull("namespace", namespace); this.writeRequests = notNull("writes", writeRequests); this.ordered = ordered; @@ -178,11 +174,6 @@ private boolean shouldAttemptToRetryWrite(final RetryState retryState, final Thr return decision; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public BulkWriteResult execute(final WriteBinding binding) { TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); diff --git a/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java index 8476f54edf8..b4a321a97c2 100644 --- 
a/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java @@ -201,7 +201,7 @@ static void addMaxTimeMSToNonTailableCursor(final BsonDocument commandDocument, addMaxTimeMSToNonTailableCursor(commandDocument, TimeoutMode.CURSOR_LIFETIME, operationContext); } - static void addMaxTimeMSToNonTailableCursor(final BsonDocument commandDocument, final TimeoutMode timeoutMode, + static void addMaxTimeMSToNonTailableCursor(final BsonDocument commandDocument, @Nullable final TimeoutMode timeoutMode, final OperationContext operationContext) { long maxTimeMS = timeoutMode == TimeoutMode.ITERATION ? 0 : operationContext.getTimeoutContext().getMaxTimeMS(); if (maxTimeMS > 0) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/Operations.java b/driver-core/src/main/com/mongodb/internal/operation/Operations.java index e2d1d352527..64d006c7e45 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/Operations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/Operations.java @@ -55,7 +55,6 @@ import com.mongodb.client.model.WriteModel; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.bulk.DeleteRequest; import com.mongodb.internal.bulk.IndexRequest; import com.mongodb.internal.bulk.InsertRequest; @@ -88,7 +87,6 @@ import static com.mongodb.assertions.Assertions.notNull; import static java.lang.String.format; import static java.util.Collections.singletonList; -import static java.util.concurrent.TimeUnit.MILLISECONDS; final class Operations { private final MongoNamespace namespace; @@ -99,11 +97,10 @@ final class Operations { private final WriteConcern writeConcern; private final boolean retryWrites; private final boolean retryReads; - private final TimeoutSettings timeoutSettings; Operations(@Nullable final MongoNamespace namespace, final Class documentClass, final ReadPreference readPreference, final CodecRegistry codecRegistry, final ReadConcern readConcern, final WriteConcern writeConcern, final boolean retryWrites, - final boolean retryReads, final TimeoutSettings timeoutSettings) { + final boolean retryReads) { this.namespace = namespace; this.documentClass = documentClass; this.readPreference = readPreference; @@ -112,7 +109,6 @@ final class Operations { this.writeConcern = writeConcern; this.retryWrites = retryWrites; this.retryReads = retryReads; - this.timeoutSettings = timeoutSettings; } @Nullable @@ -148,14 +144,9 @@ boolean isRetryReads() { return retryReads; } - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - CountDocumentsOperation countDocuments(final Bson filter, final CountOptions options) { - @SuppressWarnings("deprecation") CountDocumentsOperation operation = new CountDocumentsOperation( - timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)), assertNotNull(namespace)) + assertNotNull(namespace)) .retryReads(retryReads) .filter(toBsonDocument(filter)) .skip(options.getSkip()) @@ -170,9 +161,8 @@ CountDocumentsOperation countDocuments(final Bson filter, final CountOptions opt return operation; } - @SuppressWarnings("deprecation") EstimatedDocumentCountOperation estimatedDocumentCount(final EstimatedDocumentCountOptions options) { - return new EstimatedDocumentCountOperation(timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)), + return new 
EstimatedDocumentCountOperation( assertNotNull(namespace)) .retryReads(retryReads) .comment(options.getComment()); @@ -195,9 +185,7 @@ FindOperation find(final MongoNamespace findNamespace, @Nulla private FindOperation createFindOperation(final MongoNamespace findNamespace, @Nullable final Bson filter, final Class resultClass, final FindOptions options) { - @SuppressWarnings("deprecation") FindOperation operation = new FindOperation<>( - timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS)), findNamespace, codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(filter == null ? new BsonDocument() : filter.toBsonDocument(documentClass, codecRegistry)) @@ -228,8 +216,8 @@ private FindOperation createFindOperation(final MongoNamespac } DistinctOperation distinct(final String fieldName, @Nullable final Bson filter, final Class resultClass, - final long maxTimeMS, final Collation collation, final BsonValue comment) { - return new DistinctOperation<>(timeoutSettings.withMaxTimeMS(maxTimeMS), assertNotNull(namespace), + final Collation collation, final BsonValue comment) { + return new DistinctOperation<>(assertNotNull(namespace), fieldName, codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(filter == null ? null : filter.toBsonDocument(documentClass, codecRegistry)) @@ -238,10 +226,10 @@ DistinctOperation distinct(final String fieldName, @Nullable } AggregateOperation aggregate(final List pipeline, final Class resultClass, - final long maxTimeMS, final long maxAwaitTimeMS, @Nullable final TimeoutMode timeoutMode, @Nullable final Integer batchSize, + @Nullable final TimeoutMode timeoutMode, @Nullable final Integer batchSize, final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, final BsonValue comment, final Bson variables, final Boolean allowDiskUse, final AggregationLevel aggregationLevel) { - return new AggregateOperation<>(timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS), assertNotNull(namespace), + return new AggregateOperation<>(assertNotNull(namespace), assertNotNull(toBsonDocumentList(pipeline)), codecRegistry.get(resultClass), aggregationLevel) .retryReads(retryReads) .allowDiskUse(allowDiskUse) @@ -253,11 +241,10 @@ AggregateOperation aggregate(final List pipel .timeoutMode(timeoutMode); } - AggregateToCollectionOperation aggregateToCollection(final List pipeline, final long maxTimeMS, - @Nullable final TimeoutMode timeoutMode, final Boolean allowDiskUse, final Boolean bypassDocumentValidation, - final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, final BsonValue comment, - final Bson variables, final AggregationLevel aggregationLevel) { - return new AggregateToCollectionOperation(timeoutSettings.withMaxTimeMS(maxTimeMS), assertNotNull(namespace), + AggregateToCollectionOperation aggregateToCollection(final List pipeline, @Nullable final TimeoutMode timeoutMode, + final Boolean allowDiskUse, final Boolean bypassDocumentValidation, final Collation collation, @Nullable final Bson hint, + @Nullable final String hintString, final BsonValue comment, final Bson variables, final AggregationLevel aggregationLevel) { + return new AggregateToCollectionOperation(assertNotNull(namespace), assertNotNull(toBsonDocumentList(pipeline)), readConcern, writeConcern, aggregationLevel) .allowDiskUse(allowDiskUse) .bypassDocumentValidation(bypassDocumentValidation) @@ -272,12 +259,12 @@ AggregateToCollectionOperation 
aggregateToCollection(final List MapReduceToCollectionOperation mapReduceToCollection(final String databaseName, final String collectionName, final String mapFunction, final String reduceFunction, @Nullable final String finalizeFunction, final Bson filter, - final int limit, final long maxTimeMS, final boolean jsMode, + final int limit, final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, final com.mongodb.client.model.MapReduceAction action, final Boolean bypassDocumentValidation, final Collation collation) { MapReduceToCollectionOperation operation = new MapReduceToCollectionOperation( - timeoutSettings.withMaxTimeMS(maxTimeMS), assertNotNull(namespace), new BsonJavaScript(mapFunction), + assertNotNull(namespace), new BsonJavaScript(mapFunction), new BsonJavaScript(reduceFunction), collectionName, writeConcern) .filter(toBsonDocument(filter)) .limit(limit) @@ -298,11 +285,10 @@ MapReduceToCollectionOperation mapReduceToCollection(final String databaseName, MapReduceWithInlineResultsOperation mapReduce(final String mapFunction, final String reduceFunction, @Nullable final String finalizeFunction, final Class resultClass, final Bson filter, final int limit, - final long maxTimeMS, final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, + final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, final Collation collation) { - @SuppressWarnings("deprecation") MapReduceWithInlineResultsOperation operation = - new MapReduceWithInlineResultsOperation<>(timeoutSettings.withMaxTimeMS(maxTimeMS), + new MapReduceWithInlineResultsOperation<>( assertNotNull(namespace), new BsonJavaScript(mapFunction), new BsonJavaScript(reduceFunction), codecRegistry.get(resultClass)) .filter(toBsonDocument(filter)) @@ -318,9 +304,8 @@ MapReduceWithInlineResultsOperation mapReduce(final String ma return operation; } - @SuppressWarnings("deprecation") FindAndDeleteOperation findOneAndDelete(final Bson filter, final FindOneAndDeleteOptions options) { - return new FindAndDeleteOperation<>(timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)), + return new FindAndDeleteOperation<>( assertNotNull(namespace), writeConcern, retryWrites, getCodec()) .filter(toBsonDocument(filter)) .projection(toBsonDocument(options.getProjection())) @@ -332,10 +317,9 @@ FindAndDeleteOperation findOneAndDelete(final Bson filter, final Find .let(toBsonDocument(options.getLet())); } - @SuppressWarnings("deprecation") FindAndReplaceOperation findOneAndReplace(final Bson filter, final TDocument replacement, final FindOneAndReplaceOptions options) { - return new FindAndReplaceOperation<>(timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)), + return new FindAndReplaceOperation<>( assertNotNull(namespace), writeConcern, retryWrites, getCodec(), documentToBsonDocument(replacement)) .filter(toBsonDocument(filter)) .projection(toBsonDocument(options.getProjection())) @@ -350,9 +334,8 @@ FindAndReplaceOperation findOneAndReplace(final Bson filter, final TD .let(toBsonDocument(options.getLet())); } - @SuppressWarnings("deprecation") FindAndUpdateOperation findOneAndUpdate(final Bson filter, final Bson update, final FindOneAndUpdateOptions options) { - return new FindAndUpdateOperation<>(timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)), + return new FindAndUpdateOperation<>( assertNotNull(namespace), writeConcern, retryWrites, getCodec(), assertNotNull(toBsonDocument(update))) .filter(toBsonDocument(filter)) 
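A hedged sketch of where the per-operation maxTime values removed from these factory methods are expected to resurface: the SyncOperations changes further down in this patch add createTimeoutSettings overloads, so the options-derived timeouts can be turned into TimeoutSettings at the caller layer (how those settings then reach the binding is assumed here, not shown in this hunk).

    import com.mongodb.client.model.FindOneAndUpdateOptions;
    import com.mongodb.internal.TimeoutSettings;
    import com.mongodb.internal.operation.SyncOperations;

    final class TimeoutSettingsSketch {
        // Placeholder wiring: how the SyncOperations instance is built is outside this hunk.
        static TimeoutSettings forFindOneAndUpdate(final SyncOperations<?> operations,
                                                   final FindOneAndUpdateOptions options) {
            // The maxTime from the options becomes operation-scoped TimeoutSettings here,
            // instead of being baked into FindAndUpdateOperation's constructor.
            return operations.createTimeoutSettings(options);
        }
    }
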
.projection(toBsonDocument(options.getProjection())) @@ -368,10 +351,9 @@ FindAndUpdateOperation findOneAndUpdate(final Bson filter, final Bson .let(toBsonDocument(options.getLet())); } - @SuppressWarnings("deprecation") FindAndUpdateOperation findOneAndUpdate(final Bson filter, final List update, final FindOneAndUpdateOptions options) { - return new FindAndUpdateOperation<>(timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)), + return new FindAndUpdateOperation<>( assertNotNull(namespace), writeConcern, retryWrites, getCodec(), assertNotNull(toBsonDocumentList(update))) .filter(toBsonDocument(filter)) .projection(toBsonDocument(options.getProjection())) @@ -447,7 +429,7 @@ MixedBulkWriteOperation insertMany(final List documents, fi requests.add(new InsertRequest(documentToBsonDocument(document))); } - return new MixedBulkWriteOperation(timeoutSettings, assertNotNull(namespace), + return new MixedBulkWriteOperation(assertNotNull(namespace), requests, options.isOrdered(), writeConcern, retryWrites) .bypassDocumentValidation(options.getBypassDocumentValidation()) .comment(options.getComment()); @@ -515,7 +497,7 @@ MixedBulkWriteOperation bulkWrite(final List CommandReadOperation commandRead(final Bson command, final Class resultClass) { notNull("command", command); notNull("resultClass", resultClass); - return new CommandReadOperation<>(timeoutSettings, assertNotNull(namespace).getDatabaseName(), + return new CommandReadOperation<>(assertNotNull(namespace).getDatabaseName(), assertNotNull(toBsonDocument(command)), codecRegistry.get(resultClass)); } DropDatabaseOperation dropDatabase() { - return new DropDatabaseOperation(timeoutSettings, assertNotNull(namespace).getDatabaseName(), + return new DropDatabaseOperation(assertNotNull(namespace).getDatabaseName(), getWriteConcern()); } CreateCollectionOperation createCollection(final String collectionName, final CreateCollectionOptions createCollectionOptions, @Nullable final AutoEncryptionSettings autoEncryptionSettings) { - CreateCollectionOperation operation = new CreateCollectionOperation(timeoutSettings, + CreateCollectionOperation operation = new CreateCollectionOperation( assertNotNull(namespace).getDatabaseName(), collectionName, writeConcern) .collation(createCollectionOptions.getCollation()) .capped(createCollectionOptions.isCapped()) @@ -580,7 +562,7 @@ CreateCollectionOperation createCollection(final String collectionName, final Cr DropCollectionOperation dropCollection( final DropCollectionOptions dropCollectionOptions, @Nullable final AutoEncryptionSettings autoEncryptionSettings) { - DropCollectionOperation operation = new DropCollectionOperation(timeoutSettings, + DropCollectionOperation operation = new DropCollectionOperation( assertNotNull(namespace), writeConcern); Bson encryptedFields = dropCollectionOptions.getEncryptedFields(); if (encryptedFields != null) { @@ -598,7 +580,7 @@ DropCollectionOperation dropCollection( RenameCollectionOperation renameCollection(final MongoNamespace newCollectionNamespace, final RenameCollectionOptions renameCollectionOptions) { - return new RenameCollectionOperation(timeoutSettings, assertNotNull(namespace), + return new RenameCollectionOperation(assertNotNull(namespace), newCollectionNamespace, writeConcern).dropTarget(renameCollectionOptions.isDropTarget()); } @@ -606,7 +588,7 @@ CreateViewOperation createView(final String viewName, final String viewOn, final final CreateViewOptions createViewOptions) { notNull("options", createViewOptions); notNull("pipeline", pipeline); - return 
new CreateViewOperation(timeoutSettings, assertNotNull(namespace).getDatabaseName(), viewName, + return new CreateViewOperation(assertNotNull(namespace).getDatabaseName(), viewName, viewOn, assertNotNull(toBsonDocumentList(pipeline)), writeConcern).collation(createViewOptions.getCollation()); } @@ -640,7 +622,7 @@ CreateIndexesOperation createIndexes(final List indexes, final Creat .hidden(model.getOptions().isHidden()) ); } - return new CreateIndexesOperation(timeoutSettings.withMaxTimeMS(createIndexOptions.getMaxTime(MILLISECONDS)), + return new CreateIndexesOperation( assertNotNull(namespace), indexRequests, writeConcern) .commitQuorum(createIndexOptions.getCommitQuorum()); } @@ -649,44 +631,42 @@ CreateSearchIndexesOperation createSearchIndexes(final List in List indexRequests = indexes.stream() .map(this::createSearchIndexRequest) .collect(Collectors.toList()); - return new CreateSearchIndexesOperation(timeoutSettings, assertNotNull(namespace), indexRequests); + return new CreateSearchIndexesOperation(assertNotNull(namespace), indexRequests); } UpdateSearchIndexesOperation updateSearchIndex(final String indexName, final Bson definition) { BsonDocument definitionDocument = assertNotNull(toBsonDocument(definition)); SearchIndexRequest searchIndexRequest = new SearchIndexRequest(definitionDocument, indexName); - return new UpdateSearchIndexesOperation(timeoutSettings, assertNotNull(namespace), searchIndexRequest); + return new UpdateSearchIndexesOperation(assertNotNull(namespace), searchIndexRequest); } DropSearchIndexOperation dropSearchIndex(final String indexName) { - return new DropSearchIndexOperation(timeoutSettings, assertNotNull(namespace), indexName); + return new DropSearchIndexOperation(assertNotNull(namespace), indexName); } - ListSearchIndexesOperation listSearchIndexes(final Class resultClass, final long maxTimeMS, + ListSearchIndexesOperation listSearchIndexes(final Class resultClass, @Nullable final String indexName, @Nullable final Integer batchSize, @Nullable final Collation collation, @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse) { - return new ListSearchIndexesOperation<>(timeoutSettings.withMaxTimeMS(maxTimeMS), assertNotNull(namespace), + return new ListSearchIndexesOperation<>(assertNotNull(namespace), codecRegistry.get(resultClass), indexName, batchSize, collation, comment, allowDiskUse, retryReads); } - DropIndexOperation dropIndex(final String indexName, final DropIndexOptions options) { - return new DropIndexOperation(timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)), - assertNotNull(namespace), indexName, writeConcern); + DropIndexOperation dropIndex(final String indexName, final DropIndexOptions ignoredOptions) { + return new DropIndexOperation(assertNotNull(namespace), indexName, writeConcern); } - DropIndexOperation dropIndex(final Bson keys, final DropIndexOptions options) { - return new DropIndexOperation(timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)), - assertNotNull(namespace), keys.toBsonDocument(BsonDocument.class, codecRegistry), writeConcern); + DropIndexOperation dropIndex(final Bson keys, final DropIndexOptions ignoredOptions) { + return new DropIndexOperation(assertNotNull(namespace), keys.toBsonDocument(BsonDocument.class, codecRegistry), writeConcern); } ListCollectionsOperation listCollections(final String databaseName, final Class resultClass, final Bson filter, final boolean collectionNamesOnly, final boolean authorizedCollections, - @Nullable final Integer batchSize, final long 
maxTimeMS, + @Nullable final Integer batchSize, final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { - return new ListCollectionsOperation<>(timeoutSettings.withMaxTimeMS(maxTimeMS), databaseName, codecRegistry.get(resultClass)) + return new ListCollectionsOperation<>(databaseName, codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(toBsonDocument(filter)) .nameOnly(collectionNamesOnly) @@ -697,9 +677,9 @@ ListCollectionsOperation listCollections(final String databas } ListDatabasesOperation listDatabases(final Class resultClass, final Bson filter, - final Boolean nameOnly, final long maxTimeMS, + final Boolean nameOnly, final Boolean authorizedDatabasesOnly, final BsonValue comment) { - return new ListDatabasesOperation<>(timeoutSettings.withMaxTimeMS(maxTimeMS), codecRegistry.get(resultClass)) + return new ListDatabasesOperation<>(codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(toBsonDocument(filter)) .nameOnly(nameOnly) @@ -708,8 +688,8 @@ ListDatabasesOperation listDatabases(final Class res } ListIndexesOperation listIndexes(final Class resultClass, @Nullable final Integer batchSize, - final long maxTimeMS, final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { - return new ListIndexesOperation<>(timeoutSettings.withMaxTimeMS(maxTimeMS), assertNotNull(namespace), + final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { + return new ListIndexesOperation<>(assertNotNull(namespace), codecRegistry.get(resultClass)) .retryReads(retryReads) .batchSize(batchSize == null ? 0 : batchSize) @@ -720,10 +700,9 @@ ListIndexesOperation listIndexes(final Class resultC ChangeStreamOperation changeStream(final FullDocument fullDocument, final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder, final ChangeStreamLevel changeStreamLevel, @Nullable final Integer batchSize, - final Collation collation, final BsonValue comment, final long maxAwaitTimeMS, final BsonDocument resumeToken, + final Collation collation, final BsonValue comment, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime, final BsonDocument startAfter, final boolean showExpandedEvents) { return new ChangeStreamOperation<>( - timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(0, maxAwaitTimeMS), assertNotNull(namespace), fullDocument, fullDocumentBeforeChange, diff --git a/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java index 20cb25e40b5..aa5d2e7d451 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java @@ -16,7 +16,6 @@ package com.mongodb.internal.operation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.binding.ReadBinding; /** @@ -26,11 +25,6 @@ */ public interface ReadOperation { - /** - * @return the timeout settings for this operation - */ - TimeoutSettings getTimeoutSettings(); - /** * General execute which can return anything of type T * diff --git a/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java index 39c2cf99a4f..3056338cae7 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java @@ -18,7 +18,6 @@ import com.mongodb.MongoNamespace; 
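For illustration, a hypothetical minimal implementer of the trimmed ReadOperation interface shown just above: with getTimeoutSettings() gone, an operation only implements execute, and deadlines are resolved from the binding's operation context. PingOperation and the ping command are illustrative; only the CommandReadOperation constructor shape is taken from this patch.

    import com.mongodb.internal.binding.ReadBinding;
    import com.mongodb.internal.operation.CommandReadOperation;
    import com.mongodb.internal.operation.ReadOperation;
    import org.bson.BsonDocument;
    import org.bson.BsonInt32;
    import org.bson.codecs.BsonDocumentCodec;

    final class PingOperation implements ReadOperation<BsonDocument> {
        @Override
        public BsonDocument execute(final ReadBinding binding) {
            // Delegates to CommandReadOperation, whose constructor likewise no longer takes
            // TimeoutSettings; any timeout comes from binding.getOperationContext().
            return new CommandReadOperation<>("admin",
                    new BsonDocument("ping", new BsonInt32(1)), new BsonDocumentCodec())
                    .execute(binding);
        }
    }
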
import com.mongodb.WriteConcern; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -49,15 +48,13 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public class RenameCollectionOperation implements AsyncWriteOperation, WriteOperation { - private final TimeoutSettings timeoutSettings; private final MongoNamespace originalNamespace; private final MongoNamespace newNamespace; private final WriteConcern writeConcern; private boolean dropTarget; - public RenameCollectionOperation(final TimeoutSettings timeoutSettings, final MongoNamespace originalNamespace, - final MongoNamespace newNamespace, @Nullable final WriteConcern writeConcern) { - this.timeoutSettings = timeoutSettings; + public RenameCollectionOperation(final MongoNamespace originalNamespace, final MongoNamespace newNamespace, + @Nullable final WriteConcern writeConcern) { this.originalNamespace = notNull("originalNamespace", originalNamespace); this.newNamespace = notNull("newNamespace", newNamespace); this.writeConcern = writeConcern; @@ -76,11 +73,6 @@ public RenameCollectionOperation dropTarget(final boolean dropTarget) { return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Void execute(final WriteBinding binding) { return withConnection(binding, connection -> executeCommand(binding, "admin", getCommand(), connection, writeConcernErrorTransformer())); diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java index 2f5b70aa77a..ede32597d85 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java @@ -60,11 +60,14 @@ import java.util.List; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + /** *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public final class SyncOperations { private final Operations operations; + private final TimeoutSettings timeoutSettings; public SyncOperations(final Class documentClass, final ReadPreference readPreference, final CodecRegistry codecRegistry, final boolean retryReads, final TimeoutSettings timeoutSettings) { @@ -80,7 +83,56 @@ public SyncOperations(@Nullable final MongoNamespace namespace, final Class(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, - retryWrites, retryReads, timeoutSettings); + retryWrites, retryReads); + this.timeoutSettings = timeoutSettings; + } + + public TimeoutSettings createTimeoutSettings(final long maxTimeMS) { + return timeoutSettings.withMaxTimeMS(maxTimeMS); + } + + public TimeoutSettings createTimeoutSettings(final long maxTimeMS, final long maxAwaitTimeMS) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final CountOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final EstimatedDocumentCountOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOptions options) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndDeleteOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndReplaceOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndUpdateOptions options) { + return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); + } + + // TODO (CSOT) @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final CreateIndexOptions options) { + return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); + } + + // TODO (CSOT) @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final DropIndexOptions options) { + return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); } public ReadOperation countDocuments(final Bson filter, final CountOptions options) { @@ -97,7 +149,7 @@ public ReadOperation> findFirst(final Bson filter } public ExplainableReadOperation> find(final Bson filter, final Class resultClass, - final FindOptions options) { + final FindOptions options) { return operations.find(filter, resultClass, options); } @@ -107,25 +159,25 @@ public ReadOperation> find(final MongoNamespace f } public ReadOperation> distinct(final String fieldName, final Bson filter, - final Class resultClass, final long maxTimeMS, + final Class resultClass, final Collation collation, final BsonValue comment) { - return operations.distinct(fieldName, filter, resultClass, maxTimeMS, collation, comment); + return operations.distinct(fieldName, filter, resultClass, collation, comment); } public ExplainableReadOperation> aggregate(final List pipeline, - final Class 
resultClass, final long maxTimeMS, final long maxAwaitTimeMS, + final Class resultClass, @Nullable final TimeoutMode timeoutMode, @Nullable final Integer batchSize, final Collation collation, final Bson hint, final String hintString, final BsonValue comment, final Bson variables, final Boolean allowDiskUse, final AggregationLevel aggregationLevel) { - return operations.aggregate(pipeline, resultClass, maxTimeMS, maxAwaitTimeMS, timeoutMode, batchSize, collation, hint, hintString, + return operations.aggregate(pipeline, resultClass, timeoutMode, batchSize, collation, hint, hintString, comment, variables, allowDiskUse, aggregationLevel); } - public AggregateToCollectionOperation aggregateToCollection(final List pipeline, final long maxTimeMS, + public AggregateToCollectionOperation aggregateToCollection(final List pipeline, @Nullable final TimeoutMode timeoutMode, final Boolean allowDiskUse, final Boolean bypassDocumentValidation, final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, final BsonValue comment, final Bson variables, final AggregationLevel aggregationLevel) { - return operations.aggregateToCollection(pipeline, maxTimeMS, timeoutMode, allowDiskUse, bypassDocumentValidation, collation, hint, hintString, + return operations.aggregateToCollection(pipeline, timeoutMode, allowDiskUse, bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel); } @@ -133,21 +185,21 @@ public AggregateToCollectionOperation aggregateToCollection(final List mapReduceToCollection(final String databaseName, final String collectionName, final String mapFunction, final String reduceFunction, final String finalizeFunction, final Bson filter, final int limit, - final long maxTimeMS, final boolean jsMode, final Bson scope, + final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, final com.mongodb.client.model.MapReduceAction action, final Boolean bypassDocumentValidation, final Collation collation) { return operations.mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, limit, - maxTimeMS, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation); + jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation); } public ReadOperation> mapReduce(final String mapFunction, final String reduceFunction, final String finalizeFunction, final Class resultClass, final Bson filter, final int limit, - final long maxTimeMS, final boolean jsMode, final Bson scope, + final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, final Collation collation) { - return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, maxTimeMS, jsMode, scope, + return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, jsMode, scope, sort, verbose, collation); } @@ -259,14 +311,9 @@ public WriteOperation dropSearchIndex(final String indexName) { public ExplainableReadOperation> listSearchIndexes(final Class resultClass, - final long maxTimeMS, - @Nullable final String indexName, - @Nullable final Integer batchSize, - @Nullable final Collation collation, - @Nullable final BsonValue comment, - @Nullable final Boolean allowDiskUse) { - return operations.listSearchIndexes(resultClass, maxTimeMS, indexName, batchSize, collation, - comment, allowDiskUse); + @Nullable final String indexName, @Nullable final Integer batchSize, @Nullable final Collation collation, + 
@Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse) { + return operations.listSearchIndexes(resultClass, indexName, batchSize, collation, comment, allowDiskUse); } public WriteOperation dropIndex(final String indexName, final DropIndexOptions options) { @@ -280,30 +327,30 @@ public WriteOperation dropIndex(final Bson keys, final DropIndexOptions op public ReadOperation> listCollections(final String databaseName, final Class resultClass, final Bson filter, final boolean collectionNamesOnly, final boolean authorizedCollections, - @Nullable final Integer batchSize, final long maxTimeMS, + @Nullable final Integer batchSize, final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { return operations.listCollections(databaseName, resultClass, filter, collectionNamesOnly, authorizedCollections, - batchSize, maxTimeMS, comment, timeoutMode); + batchSize, comment, timeoutMode); } public ReadOperation> listDatabases(final Class resultClass, final Bson filter, - final Boolean nameOnly, final long maxTimeMS, + final Boolean nameOnly, final Boolean authorizedDatabases, final BsonValue comment) { - return operations.listDatabases(resultClass, filter, nameOnly, maxTimeMS, authorizedDatabases, comment); + return operations.listDatabases(resultClass, filter, nameOnly, authorizedDatabases, comment); } public ReadOperation> listIndexes(final Class resultClass, @Nullable final Integer batchSize, - final long maxTimeMS, final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { - return operations.listIndexes(resultClass, batchSize, maxTimeMS, comment, timeoutMode); + final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { + return operations.listIndexes(resultClass, batchSize, comment, timeoutMode); } public ReadOperation> changeStream(final FullDocument fullDocument, final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder, final ChangeStreamLevel changeStreamLevel, @Nullable final Integer batchSize, final Collation collation, - final BsonValue comment, final long maxAwaitTimeMS, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime, + final BsonValue comment, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime, final BsonDocument startAfter, final boolean showExpandedEvents) { return operations.changeStream(fullDocument, fullDocumentBeforeChange, pipeline, decoder, changeStreamLevel, batchSize, - collation, comment, maxAwaitTimeMS, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); + collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java index ff856dd38b1..54c288e4c46 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java @@ -18,7 +18,6 @@ import com.mongodb.Function; import com.mongodb.WriteConcern; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -43,11 +42,9 @@ *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ public abstract class TransactionOperation implements WriteOperation, AsyncWriteOperation { - private final TimeoutSettings timeoutSettings; private final WriteConcern writeConcern; - TransactionOperation(final TimeoutSettings timeoutSettings, final WriteConcern writeConcern) { - this.timeoutSettings = timeoutSettings; + TransactionOperation(final WriteConcern writeConcern) { this.writeConcern = notNull("writeConcern", writeConcern); } @@ -55,11 +52,6 @@ public WriteConcern getWriteConcern() { return writeConcern; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public Void execute(final WriteBinding binding) { isTrue("in transaction", binding.getOperationContext().getSessionContext().hasActiveTransaction()); diff --git a/driver-core/src/main/com/mongodb/internal/operation/UpdateSearchIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/UpdateSearchIndexesOperation.java index cefc5bcc915..7bd33730680 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/UpdateSearchIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/UpdateSearchIndexesOperation.java @@ -17,7 +17,6 @@ package com.mongodb.internal.operation; import com.mongodb.MongoNamespace; -import com.mongodb.internal.TimeoutSettings; import org.bson.BsonDocument; import org.bson.BsonString; @@ -30,8 +29,8 @@ final class UpdateSearchIndexesOperation extends AbstractWriteSearchIndexOperati private static final String COMMAND_NAME = "updateSearchIndex"; private final SearchIndexRequest request; - UpdateSearchIndexesOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, final SearchIndexRequest request) { - super(timeoutSettings, namespace); + UpdateSearchIndexesOperation(final MongoNamespace namespace, final SearchIndexRequest request) { + super(namespace); this.request = request; } diff --git a/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java index 80fe0b0ae1b..1a4fee36e1c 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java @@ -16,7 +16,6 @@ package com.mongodb.internal.operation; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.binding.WriteBinding; /** @@ -26,11 +25,6 @@ */ public interface WriteOperation { - /** - * @return the timeout settings for this operation - */ - TimeoutSettings getTimeoutSettings(); - /** * General execute which can return anything of type T * diff --git a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java index eacc4de6309..d23a9a0bf9f 100644 --- a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java +++ b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java @@ -180,7 +180,7 @@ public static ClusterDescription getClusterDescription(final Cluster cluster) { public static ServerVersion getServerVersion() { if (serverVersion == null) { - serverVersion = getVersion(new CommandReadOperation<>(TIMEOUT_SETTINGS_WITH_TIMEOUT, "admin", + serverVersion = getVersion(new CommandReadOperation<>("admin", new BsonDocument("buildInfo", new BsonInt32(1)), new BsonDocumentCodec()) .execute(new ClusterBinding(getCluster(), ReadPreference.nearest(), ReadConcern.DEFAULT, OPERATION_CONTEXT))); } @@ -241,7 +241,7 @@ public static boolean 
hasEncryptionTestsEnabled() { } public static Document getServerStatus() { - return new CommandReadOperation<>(TIMEOUT_SETTINGS_WITH_TIMEOUT, "admin", new BsonDocument("serverStatus", new BsonInt32(1)), + return new CommandReadOperation<>("admin", new BsonDocument("serverStatus", new BsonInt32(1)), new DocumentCodec()) .execute(getBinding()); } @@ -257,7 +257,7 @@ static class ShutdownHook extends Thread { @Override public void run() { if (cluster != null) { - new DropDatabaseOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, getDefaultDatabaseName(), WriteConcern.ACKNOWLEDGED).execute(getBinding()); + new DropDatabaseOperation(getDefaultDatabaseName(), WriteConcern.ACKNOWLEDGED).execute(getBinding()); cluster.close(); } } @@ -295,7 +295,7 @@ public static synchronized ConnectionString getConnectionString() { Cluster cluster = createCluster(new ConnectionString(DEFAULT_URI), new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), SslSettings.builder().build())); try { - BsonDocument helloResult = new CommandReadOperation<>(TIMEOUT_SETTINGS_WITH_TIMEOUT, "admin", + BsonDocument helloResult = new CommandReadOperation<>("admin", new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new BsonDocumentCodec()) .execute(new ClusterBinding(cluster, ReadPreference.nearest(), ReadConcern.DEFAULT, OPERATION_CONTEXT)); if (helloResult.containsKey("setName")) { @@ -583,7 +583,7 @@ public static MongoCredentialWithCache getCredentialWithCache() { public static BsonDocument getServerParameters() { if (serverParameters == null) { - serverParameters = new CommandReadOperation<>(TIMEOUT_SETTINGS_WITH_TIMEOUT, "admin", + serverParameters = new CommandReadOperation<>("admin", new BsonDocument("getParameter", new BsonString("*")), new BsonDocumentCodec()) .execute(getBinding()); } @@ -648,7 +648,7 @@ public static void configureFailPoint(final BsonDocument failPointDocument) { boolean failsPointsSupported = true; if (!isSharded()) { try { - new CommandReadOperation<>(TIMEOUT_SETTINGS_WITH_TIMEOUT, "admin", failPointDocument, new BsonDocumentCodec()) + new CommandReadOperation<>("admin", failPointDocument, new BsonDocumentCodec()) .execute(getBinding()); } catch (MongoCommandException e) { if (e.getErrorCode() == COMMAND_NOT_FOUND_ERROR_CODE) { @@ -664,7 +664,7 @@ public static void disableFailPoint(final String failPoint) { BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString(failPoint)) .append("mode", new BsonString("off")); try { - new CommandReadOperation<>(TIMEOUT_SETTINGS_WITH_TIMEOUT, "admin", failPointDocument, new BsonDocumentCodec()) + new CommandReadOperation<>("admin", failPointDocument, new BsonDocumentCodec()) .execute(getBinding()); } catch (MongoCommandException e) { // ignore @@ -674,7 +674,7 @@ public static void disableFailPoint(final String failPoint) { @SuppressWarnings("overloads") public static T executeSync(final WriteOperation op) { - return executeSync(op, getBinding(op.getTimeoutSettings())); + return executeSync(op, getBinding()); } @SuppressWarnings("overloads") @@ -684,7 +684,7 @@ public static T executeSync(final WriteOperation op, final ReadWriteBindi @SuppressWarnings("overloads") public static T executeSync(final ReadOperation op) { - return executeSync(op, getBinding(op.getTimeoutSettings())); + return executeSync(op, getBinding()); } @SuppressWarnings("overloads") @@ -694,7 +694,7 @@ public static T executeSync(final ReadOperation op, final ReadWriteBindin @SuppressWarnings("overloads") public static T executeAsync(final 
AsyncWriteOperation op) throws Throwable { - return executeAsync(op, getAsyncBinding(op.getTimeoutSettings())); + return executeAsync(op, getAsyncBinding()); } @SuppressWarnings("overloads") @@ -706,7 +706,7 @@ public static T executeAsync(final AsyncWriteOperation op, final AsyncWri @SuppressWarnings("overloads") public static T executeAsync(final AsyncReadOperation op) throws Throwable { - return executeAsync(op, getAsyncBinding(op.getTimeoutSettings())); + return executeAsync(op, getAsyncBinding()); } @SuppressWarnings("overloads") diff --git a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy index 28a471cd0ef..adf707b9cb7 100644 --- a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy @@ -65,9 +65,7 @@ import java.util.concurrent.TimeUnit import static com.mongodb.ClusterFixture.OPERATION_CONTEXT import static com.mongodb.ClusterFixture.TIMEOUT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget -import static com.mongodb.ClusterFixture.createNewOperationContext import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getAsyncBinding import static com.mongodb.ClusterFixture.getBinding @@ -111,13 +109,13 @@ class OperationFunctionalSpecification extends Specification { } void acknowledgeWrite(final SingleConnectionBinding binding) { - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), [new InsertRequest(new BsonDocument())], true, + new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], true, ACKNOWLEDGED, false).execute(binding) binding.release() } void acknowledgeWrite(final AsyncSingleConnectionBinding binding) { - executeAsync(new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), [new InsertRequest(new BsonDocument())], + executeAsync(new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], true, ACKNOWLEDGED, false), binding) binding.release() } @@ -146,8 +144,8 @@ class OperationFunctionalSpecification extends Specification { def executeWithSession(operation, boolean async) { def executor = async ? ClusterFixture.&executeAsync : ClusterFixture.&executeSync def binding = async ? 
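// Editor's sketch (not part of the patch): with the TimeoutSettings argument gone, the
// ClusterFixture helpers above bind operations through plain getBinding()/getAsyncBinding().
// A caller now runs an operation roughly like this (static imports from ClusterFixture are
// assumed; the serverStatus command mirrors getServerStatus() above, it is not new API):
Document serverStatus = ClusterFixture.executeSync(
        new CommandReadOperation<>("admin",
                new BsonDocument("serverStatus", new BsonInt32(1)),
                new DocumentCodec()));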
- new AsyncSessionBinding(getAsyncBinding(operation.getTimeoutSettings())) - : new SessionBinding(getBinding(operation.getTimeoutSettings())) + new AsyncSessionBinding(getAsyncBinding()) + : new SessionBinding(getBinding()) executor(operation, binding) } @@ -275,7 +273,7 @@ class OperationFunctionalSpecification extends Specification { BsonDocument expectedCommand=null, Boolean checkSecondaryOk=false, ReadPreference readPreference=ReadPreference.primary(), Boolean retryable = false, ServerType serverType = ServerType.STANDALONE, Boolean activeTransaction = false) { - def operationContext = createNewOperationContext(operation.getTimeoutSettings()) + def operationContext = OPERATION_CONTEXT .withSessionContext(Stub(SessionContext) { hasActiveTransaction() >> activeTransaction getReadConcern() >> readConcern @@ -352,7 +350,7 @@ class OperationFunctionalSpecification extends Specification { Boolean checkCommand = true, BsonDocument expectedCommand = null, Boolean checkSecondaryOk = false, ReadPreference readPreference = ReadPreference.primary(), Boolean retryable = false, ServerType serverType = ServerType.STANDALONE, Boolean activeTransaction = false) { - def operationContext = createNewOperationContext(operation.getTimeoutSettings()) + def operationContext = OPERATION_CONTEXT .withSessionContext(Stub(SessionContext) { hasActiveTransaction() >> activeTransaction getReadConcern() >> readConcern diff --git a/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java index 3b89f7816e6..5be40320040 100644 --- a/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java +++ b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java @@ -64,7 +64,6 @@ import java.util.List; import java.util.stream.Collectors; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; import static com.mongodb.ClusterFixture.executeAsync; import static com.mongodb.ClusterFixture.getBinding; import static java.util.Arrays.asList; @@ -82,7 +81,7 @@ public CollectionHelper(final Codec codec, final MongoNamespace namespace) { } public T hello() { - return new CommandReadOperation<>(TIMEOUT_SETTINGS, "admin", BsonDocument.parse("{isMaster: 1}"), codec) + return new CommandReadOperation<>("admin", BsonDocument.parse("{isMaster: 1}"), codec) .execute(getBinding()); } @@ -91,7 +90,7 @@ public static void drop(final MongoNamespace namespace) { } public static void drop(final MongoNamespace namespace, final WriteConcern writeConcern) { - new DropCollectionOperation(TIMEOUT_SETTINGS, namespace, writeConcern).execute(getBinding()); + new DropCollectionOperation(namespace, writeConcern).execute(getBinding()); } public static void dropDatabase(final String name) { @@ -103,7 +102,7 @@ public static void dropDatabase(final String name, final WriteConcern writeConce return; } try { - new DropDatabaseOperation(TIMEOUT_SETTINGS, name, writeConcern).execute(getBinding()); + new DropDatabaseOperation(name, writeConcern).execute(getBinding()); } catch (MongoCommandException e) { if (!e.getErrorMessage().contains("ns not found")) { throw e; @@ -137,7 +136,7 @@ public void create(final String collectionName, final CreateCollectionOptions op public void create(final String collectionName, final CreateCollectionOptions options, final WriteConcern writeConcern) { drop(namespace, writeConcern); - CreateCollectionOperation operation = new CreateCollectionOperation(TIMEOUT_SETTINGS, 
namespace.getDatabaseName(), collectionName, + CreateCollectionOperation operation = new CreateCollectionOperation(namespace.getDatabaseName(), collectionName, writeConcern) .capped(options.isCapped()) .sizeInBytes(options.getSizeInBytes()) @@ -165,7 +164,7 @@ public void killCursor(final MongoNamespace namespace, final ServerCursor server BsonDocument command = new BsonDocument("killCursors", new BsonString(namespace.getCollectionName())) .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))); try { - new CommandReadOperation<>(TIMEOUT_SETTINGS, namespace.getDatabaseName(), command, new BsonDocumentCodec()) + new CommandReadOperation<>(namespace.getDatabaseName(), command, new BsonDocumentCodec()) .execute(getBinding()); } catch (Exception e) { // Ignore any exceptions killing old cursors @@ -194,7 +193,7 @@ public void insertDocuments(final List documents, final WriteConce for (BsonDocument document : documents) { insertRequests.add(new InsertRequest(document)); } - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, insertRequests, true, writeConcern, false).execute(binding); + new MixedBulkWriteOperation(namespace, insertRequests, true, writeConcern, false).execute(binding); } public void insertDocuments(final Document... documents) { @@ -235,7 +234,7 @@ public List find() { } public List find(final Codec codec) { - BatchCursor cursor = new FindOperation<>(TIMEOUT_SETTINGS, namespace, codec) + BatchCursor cursor = new FindOperation<>(namespace, codec) .sort(new BsonDocument("_id", new BsonInt32(1))) .execute(getBinding()); List results = new ArrayList<>(); @@ -250,7 +249,7 @@ public void updateOne(final Bson filter, final Bson update) { } public void updateOne(final Bson filter, final Bson update, final boolean isUpsert) { - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + new MixedBulkWriteOperation(namespace, singletonList(new UpdateRequest(filter.toBsonDocument(Document.class, registry), update.toBsonDocument(Document.class, registry), WriteRequest.Type.UPDATE) @@ -260,7 +259,7 @@ public void updateOne(final Bson filter, final Bson update, final boolean isUpse } public void replaceOne(final Bson filter, final Bson update, final boolean isUpsert) { - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + new MixedBulkWriteOperation(namespace, singletonList(new UpdateRequest(filter.toBsonDocument(Document.class, registry), update.toBsonDocument(Document.class, registry), WriteRequest.Type.REPLACE) @@ -270,7 +269,7 @@ public void replaceOne(final Bson filter, final Bson update, final boolean isUps } public void deleteOne(final Bson filter) { - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + new MixedBulkWriteOperation(namespace, singletonList(new DeleteRequest(filter.toBsonDocument(Document.class, registry))), true, WriteConcern.ACKNOWLEDGED, false) .execute(getBinding()); @@ -297,7 +296,7 @@ private List aggregate(final List pipeline, final Decoder decode for (Bson cur : pipeline) { bsonDocumentPipeline.add(cur.toBsonDocument(Document.class, registry)); } - BatchCursor cursor = new AggregateOperation<>(TIMEOUT_SETTINGS, namespace, bsonDocumentPipeline, decoder, level) + BatchCursor cursor = new AggregateOperation<>(namespace, bsonDocumentPipeline, decoder, level) .execute(getBinding()); List results = new ArrayList<>(); while (cursor.hasNext()) { @@ -332,7 +331,7 @@ public List find(final BsonDocument filter, final BsonDocument sort, fina } public List find(final BsonDocument filter, final BsonDocument sort, final 
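// Editor's sketch (not part of the patch): CollectionHelper now constructs every operation
// without a leading TIMEOUT_SETTINGS argument. The insert/find round trip used by the
// helpers above reduces to roughly the following (namespace, getBinding() and singletonList
// are the helper's existing context, not new API):
new MixedBulkWriteOperation(namespace,
        singletonList(new InsertRequest(new BsonDocument("_id", new BsonInt32(1)))),
        true, WriteConcern.ACKNOWLEDGED, false)
        .execute(getBinding());
BatchCursor<BsonDocument> cursor = new FindOperation<>(namespace, new BsonDocumentCodec())
        .sort(new BsonDocument("_id", new BsonInt32(1)))
        .execute(getBinding());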
BsonDocument projection, final Decoder decoder) { - BatchCursor cursor = new FindOperation<>(TIMEOUT_SETTINGS, namespace, decoder).filter(filter).sort(sort) + BatchCursor cursor = new FindOperation<>(namespace, decoder).filter(filter).sort(sort) .projection(projection).execute(getBinding()); List results = new ArrayList<>(); while (cursor.hasNext()) { @@ -346,15 +345,15 @@ public long count() { } public long count(final ReadBinding binding) { - return new CountDocumentsOperation(TIMEOUT_SETTINGS, namespace).execute(binding); + return new CountDocumentsOperation(namespace).execute(binding); } public long count(final AsyncReadWriteBinding binding) throws Throwable { - return executeAsync(new CountDocumentsOperation(TIMEOUT_SETTINGS, namespace), binding); + return executeAsync(new CountDocumentsOperation(namespace), binding); } public long count(final Bson filter) { - return new CountDocumentsOperation(TIMEOUT_SETTINGS, namespace) + return new CountDocumentsOperation(namespace) .filter(toBsonDocument(filter)).execute(getBinding()); } @@ -367,34 +366,34 @@ public BsonDocument toBsonDocument(final Bson document) { } public void createIndex(final BsonDocument key) { - new CreateIndexesOperation(TIMEOUT_SETTINGS, namespace, singletonList(new IndexRequest(key)), WriteConcern.ACKNOWLEDGED) + new CreateIndexesOperation(namespace, singletonList(new IndexRequest(key)), WriteConcern.ACKNOWLEDGED) .execute(getBinding()); } public void createIndex(final Document key) { - new CreateIndexesOperation(TIMEOUT_SETTINGS, namespace, singletonList(new IndexRequest(wrap(key))), WriteConcern.ACKNOWLEDGED) + new CreateIndexesOperation(namespace, singletonList(new IndexRequest(wrap(key))), WriteConcern.ACKNOWLEDGED) .execute(getBinding()); } public void createUniqueIndex(final Document key) { - new CreateIndexesOperation(TIMEOUT_SETTINGS, namespace, singletonList(new IndexRequest(wrap(key)).unique(true)), + new CreateIndexesOperation(namespace, singletonList(new IndexRequest(wrap(key)).unique(true)), WriteConcern.ACKNOWLEDGED) .execute(getBinding()); } public void createIndex(final Document key, final String defaultLanguage) { - new CreateIndexesOperation(TIMEOUT_SETTINGS, namespace, + new CreateIndexesOperation(namespace, singletonList(new IndexRequest(wrap(key)).defaultLanguage(defaultLanguage)), WriteConcern.ACKNOWLEDGED).execute(getBinding()); } public void createIndex(final Bson key) { - new CreateIndexesOperation(TIMEOUT_SETTINGS, namespace, + new CreateIndexesOperation(namespace, singletonList(new IndexRequest(key.toBsonDocument(Document.class, registry))), WriteConcern.ACKNOWLEDGED).execute(getBinding()); } public List listIndexes(){ List indexes = new ArrayList<>(); - BatchCursor cursor = new ListIndexesOperation<>(TIMEOUT_SETTINGS, namespace, new BsonDocumentCodec()) + BatchCursor cursor = new ListIndexesOperation<>(namespace, new BsonDocumentCodec()) .execute(getBinding()); while (cursor.hasNext()) { indexes.addAll(cursor.next()); @@ -404,7 +403,7 @@ public List listIndexes(){ public void killAllSessions() { try { - new CommandReadOperation<>(TIMEOUT_SETTINGS, "admin", + new CommandReadOperation<>("admin", new BsonDocument("killAllSessions", new BsonArray()), new BsonDocumentCodec()).execute(getBinding()); } catch (MongoCommandException e) { // ignore exception caused by killing the implicit session that the killAllSessions command itself is running in @@ -413,7 +412,7 @@ public void killAllSessions() { public void renameCollection(final MongoNamespace newNamespace) { try { - new 
CommandReadOperation<>(TIMEOUT_SETTINGS, "admin", + new CommandReadOperation<>("admin", new BsonDocument("renameCollection", new BsonString(getNamespace().getFullName())) .append("to", new BsonString(newNamespace.getFullName())), new BsonDocumentCodec()).execute(getBinding()); } catch (MongoCommandException e) { @@ -426,12 +425,12 @@ public void runAdminCommand(final String command) { } public void runAdminCommand(final BsonDocument command) { - new CommandReadOperation<>(TIMEOUT_SETTINGS, "admin", command, new BsonDocumentCodec()) + new CommandReadOperation<>("admin", command, new BsonDocumentCodec()) .execute(getBinding()); } public void runAdminCommand(final BsonDocument command, final ReadPreference readPreference) { - new CommandReadOperation<>(TIMEOUT_SETTINGS, "admin", command, new BsonDocumentCodec()) + new CommandReadOperation<>("admin", command, new BsonDocumentCodec()) .execute(getBinding(readPreference)); } } diff --git a/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy b/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy index db86cc4ec1d..b3da89231e7 100644 --- a/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy @@ -23,7 +23,6 @@ import org.bson.BsonDocument import org.bson.BsonInt32 import org.bson.codecs.BsonDocumentCodec -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.LEGACY_HELLO import static com.mongodb.connection.ConnectionDescription.getDefaultMaxMessageSize @@ -66,7 +65,7 @@ class ConnectionSpecification extends OperationFunctionalSpecification { source?.release() } private static BsonDocument getHelloResult() { - new CommandReadOperation(TIMEOUT_SETTINGS, 'admin', new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), + new CommandReadOperation('admin', new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new BsonDocumentCodec()).execute(getBinding()) } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy index 6cf60bf8fb5..faffded597e 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy @@ -34,7 +34,6 @@ import spock.lang.IgnoreIf import spock.lang.Specification import static com.mongodb.ClusterFixture.OPERATION_CONTEXT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.ClusterFixture.createAsyncCluster import static com.mongodb.ClusterFixture.createCluster import static com.mongodb.ClusterFixture.getBinding @@ -87,13 +86,13 @@ class ScramSha256AuthenticationSpecification extends Specification { .append('pwd', password) .append('roles', ['root']) .append('mechanisms', mechanisms) - new CommandReadOperation<>(TIMEOUT_SETTINGS, 'admin', + new CommandReadOperation<>('admin', new BsonDocumentWrapper(createUserCommand, new DocumentCodec()), new DocumentCodec()) .execute(getBinding()) } def dropUser(final String userName) { - new CommandReadOperation<>(TIMEOUT_SETTINGS, 'admin', new BsonDocument('dropUser', new BsonString(userName)), + new CommandReadOperation<>('admin', new 
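// Editor's sketch (not part of the patch): the same two-argument-plus-codec shape is used
// when a command runs against an explicit cluster binding, as the authentication specs here
// do. Roughly (cluster, OPERATION_CONTEXT and the dbstats command are taken from those
// specs, they are not new API):
new CommandReadOperation<>("admin",
        new BsonDocument("dbstats", new BsonInt32(1)), new BsonDocumentCodec())
        .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT,
                OPERATION_CONTEXT));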
BsonDocument('dropUser', new BsonString(userName)), new BsonDocumentCodec()).execute(getBinding()) } @@ -102,7 +101,7 @@ class ScramSha256AuthenticationSpecification extends Specification { def cluster = createCluster(credential) when: - new CommandReadOperation(TIMEOUT_SETTINGS, 'admin', + new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)) @@ -123,7 +122,7 @@ class ScramSha256AuthenticationSpecification extends Specification { when: // make this synchronous - new CommandReadOperation(TIMEOUT_SETTINGS, 'admin', + new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT), callback) @@ -144,7 +143,7 @@ class ScramSha256AuthenticationSpecification extends Specification { def cluster = createCluster(credential) when: - new CommandReadOperation(TIMEOUT_SETTINGS, 'admin', + new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)) @@ -164,7 +163,7 @@ class ScramSha256AuthenticationSpecification extends Specification { def callback = new FutureResultCallback() when: - new CommandReadOperation(TIMEOUT_SETTINGS, 'admin', + new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT), callback) @@ -185,7 +184,7 @@ class ScramSha256AuthenticationSpecification extends Specification { def cluster = createCluster(credential) when: - new CommandReadOperation(TIMEOUT_SETTINGS, 'admin', + new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)) @@ -205,7 +204,7 @@ class ScramSha256AuthenticationSpecification extends Specification { def callback = new FutureResultCallback() when: - new CommandReadOperation(TIMEOUT_SETTINGS, 'admin', + new CommandReadOperation('admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT), callback) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AbortTransactionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AbortTransactionOperationSpecification.groovy index 73d45fea50a..fe7cd511c0c 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AbortTransactionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AbortTransactionOperationSpecification.groovy @@ -21,7 +21,6 @@ import org.bson.BsonDocument import java.util.concurrent.TimeUnit -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.WriteConcern.ACKNOWLEDGED import static com.mongodb.WriteConcern.MAJORITY @@ -34,13 +33,13 @@ class AbortTransactionOperationSpecification extends OperationFunctionalSpecific def 
expectedCommand = BsonDocument.parse('{abortTransaction: 1}') when: - def operation = new AbortTransactionOperation(TIMEOUT_SETTINGS, ACKNOWLEDGED) + def operation = new AbortTransactionOperation(ACKNOWLEDGED) then: testOperationInTransaction(operation, [4, 0, 0], expectedCommand, async, cannedResult) when: - operation = new AbortTransactionOperation(TIMEOUT_SETTINGS, MAJORITY) + operation = new AbortTransactionOperation(MAJORITY) expectedCommand.put('writeConcern', MAJORITY.asDocument()) then: @@ -57,14 +56,14 @@ class AbortTransactionOperationSpecification extends OperationFunctionalSpecific when: def writeConcern = MAJORITY.withWTimeout(10, TimeUnit.MILLISECONDS) - def operation = new AbortTransactionOperation(TIMEOUT_SETTINGS, writeConcern) + def operation = new AbortTransactionOperation(writeConcern) then: testOperationRetries(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) when: writeConcern = MAJORITY - operation = new AbortTransactionOperation(TIMEOUT_SETTINGS, writeConcern) + operation = new AbortTransactionOperation(writeConcern) expectedCommand.put('writeConcern', writeConcern.asDocument()) then: @@ -72,7 +71,7 @@ class AbortTransactionOperationSpecification extends OperationFunctionalSpecific when: writeConcern = ACKNOWLEDGED - operation = new AbortTransactionOperation(TIMEOUT_SETTINGS, writeConcern) + operation = new AbortTransactionOperation(writeConcern) expectedCommand.remove('writeConcern') then: diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy index 775246df474..a3e309a1f5f 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy @@ -16,7 +16,7 @@ package com.mongodb.internal.operation -import com.mongodb.MongoExecutionTimeoutException + import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadConcern @@ -52,12 +52,7 @@ import spock.lang.IgnoreIf import static TestOperationHelper.getKeyPattern import static com.mongodb.ClusterFixture.OPERATION_CONTEXT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT import static com.mongodb.ClusterFixture.collectCursorResults -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getAsyncCluster import static com.mongodb.ClusterFixture.getBinding @@ -83,7 +78,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should have the correct defaults'() { when: - AggregateOperation operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), [], new DocumentCodec()) + AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) then: operation.getAllowDiskUse() == null @@ -97,7 +92,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def hint = BsonDocument.parse('{a: 1}') when: - AggregateOperation operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), [], new DocumentCodec()) + AggregateOperation 
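// Editor's sketch (not part of the patch): both transaction operations are now built from a
// WriteConcern alone, which is all the spec above (and the CommitTransaction spec at the end
// of this patch) needs to vary per test case:
AbortTransactionOperation abort = new AbortTransactionOperation(WriteConcern.MAJORITY);
CommitTransactionOperation commit = new CommitTransactionOperation(WriteConcern.ACKNOWLEDGED);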
operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) .allowDiskUse(true) .batchSize(10) .collation(defaultCollation) @@ -113,7 +108,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should throw when using invalid hint'() { given: def hint = new BsonString('ok') - def operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), [], new DocumentCodec()).hint(hint) + def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()).hint(hint) when: operation.getHint() @@ -137,7 +132,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should create the expected command'() { when: def pipeline = [new BsonDocument('$match', new BsonDocument('a', new BsonString('A')))] - def operation = new AggregateOperation(TIMEOUT_SETTINGS, helper.namespace, pipeline, new DocumentCodec()) + def operation = new AggregateOperation(helper.namespace, pipeline, new DocumentCodec()) def expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName())) .append('pipeline', new BsonArray(pipeline)) @@ -147,7 +142,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult) when: - operation = new AggregateOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, helper.namespace, pipeline, new DocumentCodec()) + operation = new AggregateOperation(helper.namespace, pipeline, new DocumentCodec()) .allowDiskUse(true) .batchSize(10) .collation(defaultCollation) @@ -157,7 +152,6 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { .append('allowDiskUse', new BsonBoolean(true)) .append('collation', defaultCollation.asDocument()) .append('cursor', new BsonDocument('batchSize', new BsonInt32(10))) - .append('maxTimeMS', new BsonInt64(100)) then: testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult) @@ -172,7 +166,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def document = BsonDocument.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) def pipeline = [BsonDocument.parse('{$match: {str: "FOO"}}')] - def operation = new AggregateOperation(TIMEOUT_SETTINGS, namespace, pipeline, new BsonDocumentCodec()) + def operation = new AggregateOperation(namespace, pipeline, new BsonDocumentCodec()) .collation(caseInsensitiveCollation) when: @@ -190,7 +184,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { given: def expected = [createExpectedChangeNotification(namespace, 0), createExpectedChangeNotification(namespace, 1)] def pipeline = ['{$changeStream: {}}', '{$project: {"_id.clusterTime": 0, "_id.uuid": 0}}'].collect { BsonDocument.parse(it) } - def operation = new AggregateOperation(TIMEOUT_SETTINGS, namespace, pipeline, new BsonDocumentCodec()) + def operation = new AggregateOperation(namespace, pipeline, new BsonDocumentCodec()) def helper = getCollectionHelper() when: @@ -217,7 +211,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should be able to aggregate'() { when: - AggregateOperation operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), [], new DocumentCodec()) + AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) def batchCursor = execute(operation, async) def results = collectCursorResults(batchCursor)*.getString('name') @@ 
-235,11 +229,11 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def viewSuffix = '-view' def viewName = getCollectionName() + viewSuffix def viewNamespace = new MongoNamespace(getDatabaseName(), viewName) - new CreateViewOperation(TIMEOUT_SETTINGS, getDatabaseName(), viewName, getCollectionName(), [], WriteConcern.ACKNOWLEDGED) + new CreateViewOperation(getDatabaseName(), viewName, getCollectionName(), [], WriteConcern.ACKNOWLEDGED) .execute(getBinding(getCluster())) when: - AggregateOperation operation = new AggregateOperation(TIMEOUT_SETTINGS, viewNamespace, [], new DocumentCodec()) + AggregateOperation operation = new AggregateOperation(viewNamespace, [], new DocumentCodec()) def batchCursor = execute(operation, async) def results = collectCursorResults(batchCursor)*.getString('name') @@ -248,7 +242,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { results.containsAll(['Pete', 'Sam']) cleanup: - new DropCollectionOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, viewNamespace, WriteConcern.ACKNOWLEDGED) + new DropCollectionOperation(viewNamespace, WriteConcern.ACKNOWLEDGED) .execute(getBinding(getCluster())) where: @@ -257,7 +251,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should be able to aggregate with pipeline'() { when: - AggregateOperation operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), + AggregateOperation operation = new AggregateOperation(getNamespace(), [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber')))], new DocumentCodec()) def batchCursor = execute(operation, async) def results = collectCursorResults(batchCursor)*.getString('name') @@ -272,7 +266,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should allow disk usage'() { when: - AggregateOperation operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), [], new DocumentCodec()) + AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) .allowDiskUse(allowDiskUse) def cursor = operation.execute(getBinding()) @@ -285,7 +279,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should allow batch size'() { when: - AggregateOperation operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), [], new DocumentCodec()) + AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) .batchSize(batchSize) def cursor = operation.execute(getBinding()) @@ -296,29 +290,10 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { batchSize << [null, 0, 10] } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { - given: - enableMaxTimeFailPoint() - - when: - def operation = new AggregateOperation(timeoutSettings, getNamespace(), [], new DocumentCodec()) - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - [async, timeoutSettings] << [[true, false], [TIMEOUT_SETTINGS_WITH_MAX_TIME, TIMEOUT_SETTINGS_WITH_TIMEOUT]].combinations() - } - @IgnoreIf({ serverVersionLessThan(3, 6) }) def 'should be able to explain an empty pipeline'() { given: - def operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), [], new BsonDocumentCodec()) + def operation = new AggregateOperation(getNamespace(), [], new BsonDocumentCodec()) operation = async ? 
operation.asAsyncExplainableOperation(QUERY_PLANNER, new BsonDocumentCodec()) : operation.asExplainableOperation(QUERY_PLANNER, new BsonDocumentCodec()) @@ -335,7 +310,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { @IgnoreIf({ serverVersionLessThan(3, 4) }) def 'should be able to aggregate with collation'() { when: - AggregateOperation operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), + AggregateOperation operation = new AggregateOperation(getNamespace(), [BsonDocument.parse('{$match: {job : "plumber"}}')], new DocumentCodec() ).collation(options) def batchCursor = execute(operation, async) @@ -356,7 +331,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def index = new BsonDocument('a', new BsonInt32(1)) collectionHelper.createIndex(index) - def operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), [], new DocumentCodec()) + def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) .hint(hint) when: @@ -374,10 +349,10 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should apply comment'() { given: def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile')) - new CommandReadOperation<>(TIMEOUT_SETTINGS, getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec()).execute(getBinding()) def expectedComment = 'this is a comment' - def operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), [], new DocumentCodec()) + def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) .comment(new BsonString(expectedComment)) when: @@ -388,7 +363,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { ((Document) profileDocument.get('command')).get('comment') == expectedComment cleanup: - new CommandReadOperation<>(TIMEOUT_SETTINGS, getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec()).execute(getBinding()) profileCollectionHelper.drop() @@ -413,7 +388,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { .append('cursor', new BsonDocument()) appendReadConcernToCommand(operationContext.getSessionContext(), MIN_WIRE_VERSION, commandDocument) - def operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), [], new DocumentCodec()) + def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) when: operation.execute(binding) @@ -454,7 +429,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { .append('cursor', new BsonDocument()) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new AggregateOperation(TIMEOUT_SETTINGS, getNamespace(), [], new DocumentCodec()) + def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) when: executeAsync(operation, binding) @@ -482,7 +457,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should use the ReadBindings readPreference to set secondaryOk'() { when: - def operation = new AggregateOperation(TIMEOUT_SETTINGS, helper.namespace, [], new BsonDocumentCodec()) + def operation = new AggregateOperation(helper.namespace, [], new 
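// Editor's sketch (not part of the patch): AggregateOperation is created from namespace,
// pipeline and decoder only; maxTimeMS has left the expected command and the fail-point
// timeout test was removed. The fluent options exercised by this spec are unchanged, e.g.
// (getNamespace() and defaultCollation come from OperationFunctionalSpecification):
AggregateOperation<Document> aggregate =
        new AggregateOperation<>(getNamespace(),
                singletonList(BsonDocument.parse("{$match: {job: \"plumber\"}}")),
                new DocumentCodec())
                .allowDiskUse(true)
                .batchSize(10)
                .collation(defaultCollation);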
BsonDocumentCodec()) then: testOperationSecondaryOk(operation, [2, 6, 0], readPreference, async, helper.cursorResult) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy index 059bf309e7d..496e7311949 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy @@ -17,7 +17,6 @@ package com.mongodb.internal.operation import com.mongodb.MongoCommandException -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.MongoWriteConcernException import com.mongodb.OperationFunctionalSpecification @@ -29,7 +28,6 @@ import com.mongodb.client.model.CreateCollectionOptions import com.mongodb.client.model.Filters import com.mongodb.client.model.ValidationOptions import com.mongodb.client.test.CollectionHelper -import com.mongodb.internal.TimeoutSettings import com.mongodb.internal.client.model.AggregationLevel import org.bson.BsonArray import org.bson.BsonBoolean @@ -42,11 +40,6 @@ import org.bson.codecs.BsonValueCodecProvider import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet import static com.mongodb.ClusterFixture.isSharded @@ -73,7 +66,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] when: - AggregateToCollectionOperation operation = createOperation(TIMEOUT_SETTINGS, getNamespace(), pipeline, ACKNOWLEDGED) + AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ACKNOWLEDGED) then: operation.getAllowDiskUse() == null @@ -89,7 +82,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe when: AggregateToCollectionOperation operation = - createOperation(TIMEOUT_SETTINGS, getNamespace(), pipeline, WriteConcern.MAJORITY) + createOperation(getNamespace(), pipeline, WriteConcern.MAJORITY) .allowDiskUse(true) .bypassDocumentValidation(true) .collation(defaultCollation) @@ -106,7 +99,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] when: - AggregateToCollectionOperation operation = createOperation(TIMEOUT_SETTINGS, getNamespace(), pipeline, ReadConcern.DEFAULT) + AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ReadConcern.DEFAULT) .allowDiskUse(true) .bypassDocumentValidation(true) .collation(defaultCollation) @@ -120,7 +113,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should not accept an empty pipeline'() { when: - createOperation(TIMEOUT_SETTINGS, getNamespace(), [], ACKNOWLEDGED) + createOperation(getNamespace(), 
[], ACKNOWLEDGED) then: @@ -129,7 +122,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should be able to output to a collection'() { when: - AggregateToCollectionOperation operation = createOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, getNamespace(), + AggregateToCollectionOperation operation = createOperation(getNamespace(), [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], ACKNOWLEDGED) execute(operation, async) @@ -144,7 +137,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe @IgnoreIf({ serverVersionLessThan(4, 2) }) def 'should be able to merge into a collection'() { when: - AggregateToCollectionOperation operation = createOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, getNamespace(), + AggregateToCollectionOperation operation = createOperation(getNamespace(), [new BsonDocument('$merge', new BsonDocument('into', new BsonString(aggregateCollectionNamespace.collectionName)))]) execute(operation, async) @@ -157,7 +150,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should be able to match then output to a collection'() { when: - AggregateToCollectionOperation operation = createOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, getNamespace(), + AggregateToCollectionOperation operation = createOperation(getNamespace(), [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))), new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], ACKNOWLEDGED) execute(operation, async) @@ -169,31 +162,10 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe async << [true, false] } - def 'should throw execution timeout exception from execute'() { - given: - AggregateToCollectionOperation operation = createOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, getNamespace(), - [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))), - new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], - ACKNOWLEDGED) - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - @IgnoreIf({ serverVersionLessThan(3, 4) || !isDiscoverableReplicaSet() }) def 'should throw on write concern error'() { given: - AggregateToCollectionOperation operation =createOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, getNamespace(), + AggregateToCollectionOperation operation = createOperation(getNamespace(), [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], new WriteConcern(5)) @@ -218,7 +190,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe getCollectionHelper().insertDocuments(BsonDocument.parse('{ level: 9 }')) when: - AggregateToCollectionOperation operation = createOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, getNamespace(), + AggregateToCollectionOperation operation = createOperation(getNamespace(), [BsonDocument.parse('{$out: "collectionOut"}')], ACKNOWLEDGED) execute(operation, async) @@ -247,7 +219,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should create the expected command'() { when: def pipeline = [BsonDocument.parse('{$out: "collectionOut"}')] - AggregateToCollectionOperation operation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, getNamespace(), pipeline, + AggregateToCollectionOperation operation = new 
AggregateToCollectionOperation(getNamespace(), pipeline, ReadConcern.MAJORITY, WriteConcern.MAJORITY) .bypassDocumentValidation(true) def expectedCommand = new BsonDocument('aggregate', new BsonString(getNamespace().getCollectionName())) @@ -290,7 +262,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe getCollectionHelper().insertDocuments(BsonDocument.parse('{_id: 1, str: "foo"}')) def pipeline = [BsonDocument.parse('{$match: {str: "FOO"}}'), new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] - AggregateToCollectionOperation operation = createOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, getNamespace(), pipeline, ACKNOWLEDGED) + AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ACKNOWLEDGED) .collation(caseInsensitiveCollation) when: @@ -307,10 +279,10 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should apply comment'() { given: def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile')) - new CommandReadOperation<>(TIMEOUT_SETTINGS, getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec()).execute(getBinding()) def expectedComment = 'this is a comment' - AggregateToCollectionOperation operation = createOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, getNamespace(), + AggregateToCollectionOperation operation = createOperation(getNamespace(), [Aggregates.out('outputCollection').toBsonDocument(BsonDocument, registry)], ACKNOWLEDGED) .comment(new BsonString(expectedComment)) @@ -322,7 +294,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe ((Document) profileDocument.get('command')).get('comment') == expectedComment cleanup: - new CommandReadOperation<>(TIMEOUT_SETTINGS, getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec()).execute(getBinding()) profileCollectionHelper.drop() @@ -330,19 +302,16 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe async << [true, false] } - def createOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final List pipeline) { - new AggregateToCollectionOperation(timeoutSettings, namespace, pipeline, null, null, AggregationLevel.COLLECTION) + def createOperation(final MongoNamespace namespace, final List pipeline) { + new AggregateToCollectionOperation(namespace, pipeline, null, null, AggregationLevel.COLLECTION) } - def createOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final List pipeline, final WriteConcern writeConcern) { - new AggregateToCollectionOperation(timeoutSettings, namespace, pipeline, null, writeConcern, AggregationLevel.COLLECTION) + def createOperation(final MongoNamespace namespace, final List pipeline, final WriteConcern writeConcern) { + new AggregateToCollectionOperation(namespace, pipeline, null, writeConcern, AggregationLevel.COLLECTION) } - def createOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final List pipeline, final ReadConcern readConcern) { - new AggregateToCollectionOperation(timeoutSettings, namespace, pipeline, readConcern, null, AggregationLevel.COLLECTION) + def createOperation(final MongoNamespace namespace, final List 
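// Editor's sketch (not part of the patch): the spec's createOperation helpers forward to the
// trimmed constructor, whose remaining parameters are namespace, pipeline, read concern,
// write concern and aggregation level (mirroring the helper below; pipeline is assumed):
AggregateToCollectionOperation outOperation =
        new AggregateToCollectionOperation(getNamespace(), pipeline,
                null, WriteConcern.ACKNOWLEDGED, AggregationLevel.COLLECTION);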
pipeline, final ReadConcern readConcern) { + new AggregateToCollectionOperation(namespace, pipeline, readConcern, null, AggregationLevel.COLLECTION) } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationProseTestSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationProseTestSpecification.groovy index d2e54e19b30..f403d4b053b 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationProseTestSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationProseTestSpecification.groovy @@ -33,7 +33,6 @@ import org.bson.Document import org.bson.codecs.BsonDocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.ClusterFixture.getAsyncCluster import static com.mongodb.ClusterFixture.getCluster import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet @@ -54,7 +53,7 @@ class ChangeStreamOperationProseTestSpecification extends OperationFunctionalSpe given: def helper = getHelper() def pipeline = [BsonDocument.parse('{$project: {"_id": 0}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: @@ -91,7 +90,7 @@ class ChangeStreamOperationProseTestSpecification extends OperationFunctionalSpe def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] def failPointDocument = createFailPointDocument('getMore', 10107) - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) def cursor = execute(operation, async) @@ -124,7 +123,7 @@ class ChangeStreamOperationProseTestSpecification extends OperationFunctionalSpe def 'should not resume for aggregation errors'() { given: def pipeline = [BsonDocument.parse('{$unsupportedStage: {_id: 0}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy index 3459426a7a4..3b7671567ea 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy @@ -53,7 +53,6 @@ import org.bson.codecs.ValueCodecProvider import spock.lang.IgnoreIf import static com.mongodb.ClusterFixture.OPERATION_CONTEXT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.ClusterFixture.getAsyncCluster import static com.mongodb.ClusterFixture.getCluster import static com.mongodb.ClusterFixture.isStandalone @@ -69,7 +68,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def 'should have the correct defaults'() { when: - ChangeStreamOperation operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, 
getNamespace(), + ChangeStreamOperation operation = new ChangeStreamOperation(getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec()) then: @@ -82,7 +81,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def 'should set optional values correctly'() { when: - ChangeStreamOperation operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, getNamespace(), + ChangeStreamOperation operation = new ChangeStreamOperation(getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec()) .batchSize(5) .collation(defaultCollation) @@ -112,7 +111,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio .append('cursor', new BsonDocument('id', new BsonInt64(0)).append('ns', new BsonString('db.coll')) .append('firstBatch', new BsonArrayWrapper([]))) - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, namespace, FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(namespace, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, new DocumentCodec(), changeStreamLevel as ChangeStreamLevel) .batchSize(5) .collation(defaultCollation) @@ -148,7 +147,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: @@ -188,7 +187,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) @@ -215,7 +214,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "update"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) @@ -243,7 +242,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "replace"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) @@ -271,7 +270,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def 
helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "delete"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) @@ -299,7 +298,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "invalidate"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) @@ -328,7 +327,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "drop"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) @@ -357,7 +356,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "dropDatabase"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider())), ChangeStreamLevel.DATABASE) @@ -387,7 +386,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "rename"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) def newNamespace = new MongoNamespace('JavaDriverTest', 'newCollectionName') @@ -416,7 +415,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio given: def helper = getHelper() def pipeline = [BsonDocument.parse('{$project: {"_id": 0}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: @@ -440,7 +439,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = 
[BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: @@ -469,7 +468,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: @@ -510,7 +509,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) def cursor = execute(operation, async) @@ -546,7 +545,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) def cursor = execute(operation, async) @@ -583,7 +582,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) def cursor = execute(operation, async) @@ -616,7 +615,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def 'should support hasNext on the sync API'() { given: def helper = getHelper() - def operation = new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) when: @@ -658,7 +657,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio } when: 'set resumeAfter' - new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) .resumeAfter(new BsonDocument()) .execute(binding) @@ -668,7 +667,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio !changeStream.containsKey('startAtOperationTime') when: 'set startAfter' - new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAfter(new BsonDocument()) .execute(binding) @@ -679,7 +678,7 @@ class ChangeStreamOperationSpecification extends 
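// Editor's sketch (not part of the patch): ChangeStreamOperation likewise loses only the
// leading TimeoutSettings argument; namespace, fullDocument, fullDocumentBeforeChange,
// pipeline, decoder and the optional ChangeStreamLevel are unchanged. Roughly (getNamespace()
// comes from the spec's fixture):
ChangeStreamOperation<BsonDocument> changeStream =
        new ChangeStreamOperation<>(getNamespace(), FullDocument.DEFAULT,
                FullDocumentBeforeChange.DEFAULT,
                singletonList(BsonDocument.parse("{$match: {operationType: \"insert\"}}")),
                new BsonDocumentCodec())
                .batchSize(5);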
OperationFunctionalSpecificatio when: 'set startAtOperationTime' def startAtTime = new BsonTimestamp(42) - new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAtOperationTime(startAtTime) .execute(binding) @@ -719,7 +718,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio } when: 'set resumeAfter' - new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) .resumeAfter(new BsonDocument()) .executeAsync(binding, Stub(SingleResultCallback)) @@ -729,7 +728,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio !changeStream.containsKey('startAtOperationTime') when: 'set startAfter' - new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAfter(new BsonDocument()) .executeAsync(binding, Stub(SingleResultCallback)) @@ -740,7 +739,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio when: 'set startAtOperationTime' def startAtTime = new BsonTimestamp(42) - new ChangeStreamOperation(TIMEOUT_SETTINGS, helper.getNamespace(), FullDocument.DEFAULT, + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAtOperationTime(startAtTime) .executeAsync(binding, Stub(SingleResultCallback)) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy index 86b4294f1d0..a9f74ca50b3 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy @@ -16,27 +16,20 @@ package com.mongodb.internal.operation -import com.mongodb.MongoExecutionTimeoutException + import com.mongodb.OperationFunctionalSpecification import org.bson.BsonBinary import org.bson.BsonDocument import org.bson.BsonInt32 import org.bson.BsonString import org.bson.codecs.BsonDocumentCodec -import spock.lang.IgnoreIf import util.spock.annotations.Slow -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.isSharded - class CommandOperationSpecification extends OperationFunctionalSpecification { def 'should execute read command'() { given: - def operation = new CommandReadOperation(TIMEOUT_SETTINGS, getNamespace().databaseName, + def operation = new CommandReadOperation(getNamespace().databaseName, new BsonDocument('count', new BsonString(getCollectionName())), new BsonDocumentCodec()) when: @@ -54,7 +47,7 @@ class CommandOperationSpecification extends OperationFunctionalSpecification { @Slow def 'should execute command larger than 16MB'() { given: - def operation = new CommandReadOperation<>(TIMEOUT_SETTINGS, getNamespace().databaseName, + def operation = new 
CommandReadOperation<>(getNamespace().databaseName, new BsonDocument('findAndModify', new BsonString(getNamespace().fullName)) .append('query', new BsonDocument('_id', new BsonInt32(42))) .append('update', @@ -72,27 +65,4 @@ class CommandOperationSpecification extends OperationFunctionalSpecification { where: async << [true, false] } - - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { - given: - def operation = new CommandReadOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, getNamespace().databaseName, - new BsonDocument('count', new BsonString(getCollectionName())) - .append('maxTimeMS', new BsonInt32(99)), // TODO - JAVA-5098 determine the correct course of action here. - new BsonDocumentCodec()) - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommitTransactionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CommitTransactionOperationSpecification.groovy index 46081df7c5d..0d91963d5bf 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CommitTransactionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommitTransactionOperationSpecification.groovy @@ -21,7 +21,6 @@ import org.bson.BsonDocument import java.util.concurrent.TimeUnit -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.WriteConcern.ACKNOWLEDGED import static com.mongodb.WriteConcern.MAJORITY @@ -34,13 +33,13 @@ class CommitTransactionOperationSpecification extends OperationFunctionalSpecifi def expectedCommand = BsonDocument.parse('{commitTransaction: 1}') when: - def operation = new CommitTransactionOperation(TIMEOUT_SETTINGS, ACKNOWLEDGED) + def operation = new CommitTransactionOperation(ACKNOWLEDGED) then: testOperationInTransaction(operation, [4, 0, 0], expectedCommand, async, cannedResult) when: - operation = new CommitTransactionOperation(TIMEOUT_SETTINGS, MAJORITY) + operation = new CommitTransactionOperation(MAJORITY) expectedCommand.put('writeConcern', MAJORITY.asDocument()) then: @@ -57,14 +56,14 @@ class CommitTransactionOperationSpecification extends OperationFunctionalSpecifi when: def writeConcern = MAJORITY.withWTimeout(10, TimeUnit.MILLISECONDS) - def operation = new CommitTransactionOperation(TIMEOUT_SETTINGS, writeConcern) + def operation = new CommitTransactionOperation(writeConcern) then: testOperationRetries(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) when: writeConcern = MAJORITY - operation = new CommitTransactionOperation(TIMEOUT_SETTINGS, writeConcern) + operation = new CommitTransactionOperation(writeConcern) expectedCommand.put('writeConcern', writeConcern.withWTimeout(10000, TimeUnit.MILLISECONDS).asDocument()) then: @@ -72,7 +71,7 @@ class CommitTransactionOperationSpecification extends OperationFunctionalSpecifi when: writeConcern = ACKNOWLEDGED - operation = new CommitTransactionOperation(TIMEOUT_SETTINGS, writeConcern) + operation = new CommitTransactionOperation(writeConcern) expectedCommand.put('writeConcern', writeConcern.withW('majority').withWTimeout(10000, TimeUnit.MILLISECONDS).asDocument()) then: @@ -88,7 +87,7 @@ class CommitTransactionOperationSpecification extends OperationFunctionalSpecifi def expectedCommand = BsonDocument.parse('{commitTransaction: 
1, writeConcern: {w: "majority", wtimeout: 10000}}') when: - def operation = new CommitTransactionOperation(TIMEOUT_SETTINGS, ACKNOWLEDGED, true) + def operation = new CommitTransactionOperation(ACKNOWLEDGED, true) then: testOperationInTransaction(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy index 9a10dca0490..26d7d11bc6e 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy @@ -17,7 +17,6 @@ package com.mongodb.internal.operation import com.mongodb.MongoException -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadConcern @@ -46,10 +45,6 @@ import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf import static com.mongodb.ClusterFixture.OPERATION_CONTEXT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.serverVersionAtLeast @@ -74,7 +69,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should have the correct defaults'() { when: - CountDocumentsOperation operation = new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()) + CountDocumentsOperation operation = new CountDocumentsOperation(getNamespace()) then: operation.getFilter() == null @@ -89,7 +84,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def hint = new BsonString('hint') when: - CountDocumentsOperation operation = new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()) + CountDocumentsOperation operation = new CountDocumentsOperation(getNamespace()) .filter(filter) .hint(hint) .limit(20) @@ -104,7 +99,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should get the count'() { expect: - execute(new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()), async) == documents.size() + execute(new CountDocumentsOperation(getNamespace()), async) == documents.size() where: async << [true, false] @@ -115,7 +110,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat getCollectionHelper().drop() then: - execute(new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()), async) == 0 + execute(new CountDocumentsOperation(getNamespace()), async) == 0 where: async << [true, false] @@ -127,25 +122,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat getCollectionHelper().create() then: - execute(new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()), async) == 0 - - where: - async << [true, false] - } - - def 'should throw execution timeout exception from execute'() { - given: - def operation = new CountDocumentsOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, getNamespace()) - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - 
thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() + execute(new CountDocumentsOperation(getNamespace()), async) == 0 where: async << [true, false] @@ -153,7 +130,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should use limit with the count'() { when: - def operation = new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()).limit(1) + def operation = new CountDocumentsOperation(getNamespace()).limit(1) then: execute(operation, async) == 1 @@ -164,7 +141,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should use skip with the count'() { when: - def operation = new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()).skip(documents.size() - 2) + def operation = new CountDocumentsOperation(getNamespace()).skip(documents.size() - 2) then: execute(operation, async) @@ -177,9 +154,9 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should use hint with the count'() { given: def indexDefinition = new BsonDocument('y', new BsonInt32(1)) - new CreateIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), [new IndexRequest(indexDefinition).sparse(true)], null) + new CreateIndexesOperation(getNamespace(), [new IndexRequest(indexDefinition).sparse(true)], null) .execute(getBinding()) - def operation = new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()).hint(indexDefinition) + def operation = new CountDocumentsOperation(getNamespace()).hint(indexDefinition) when: def count = execute(operation, async) @@ -194,7 +171,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ !serverVersionAtLeast(3, 6) }) def 'should support hints that are bson documents or strings'() { expect: - execute(new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()).hint(hint), async) == 5 + execute(new CountDocumentsOperation(getNamespace()).hint(hint), async) == 5 where: [async, hint] << [[true, false], [new BsonString('_id_'), BsonDocument.parse('{_id: 1}')]].combinations() @@ -202,7 +179,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should throw with bad hint'() { given: - def operation = new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()) + def operation = new CountDocumentsOperation(getNamespace()) .filter(new BsonDocument('a', new BsonInt32(1))) .hint(new BsonString('BAD HINT')) @@ -218,7 +195,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should use the ReadBindings readPreference to set secondaryOk'() { when: - def operation = new CountDocumentsOperation(TIMEOUT_SETTINGS, helper.namespace) + def operation = new CountDocumentsOperation(helper.namespace) .filter(BsonDocument.parse('{a: 1}')) then: @@ -231,7 +208,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should create the expected aggregation command'() { when: def filter = new BsonDocument('filter', new BsonInt32(1)) - def operation = new CountDocumentsOperation(TIMEOUT_SETTINGS, helper.namespace) + def operation = new CountDocumentsOperation(helper.namespace) def pipeline = [BsonDocument.parse('{ $match: {}}'), BsonDocument.parse('{$group: {_id: 1, n: {$sum: 1}}}')] def expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName())) .append('pipeline', new BsonArray(pipeline)) @@ -241,7 +218,7 @@ class CountDocumentsOperationSpecification extends 
OperationFunctionalSpecificat testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult) when: - operation = new CountDocumentsOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, helper.namespace) + operation = new CountDocumentsOperation(helper.namespace) .filter(filter) .limit(20) .skip(30) @@ -253,7 +230,6 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat new BsonDocument('$skip', new BsonInt64(30)), new BsonDocument('$limit', new BsonInt64(20)), pipeline.last()])) - .append('maxTimeMS', new BsonInt64(100)) .append('collation', defaultCollation.asDocument()) .append('hint', hint) @@ -268,7 +244,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should support collation'() { given: getCollectionHelper().insertDocuments(BsonDocument.parse('{str: "foo"}')) - def operation = new CountDocumentsOperation(TIMEOUT_SETTINGS, namespace) + def operation = new CountDocumentsOperation(namespace) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) @@ -300,7 +276,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat .append('cursor', new BsonDocument()) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()) + def operation = new CountDocumentsOperation(getNamespace()) when: operation.execute(binding) @@ -340,7 +316,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat .append('cursor', new BsonDocument()) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new CountDocumentsOperation(TIMEOUT_SETTINGS, getNamespace()) + def operation = new CountDocumentsOperation(getNamespace()) when: executeAsync(operation, binding) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy index ac5d4ac928f..cddb1925b64 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy @@ -28,7 +28,6 @@ import org.bson.BsonString import org.bson.codecs.BsonDocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet import static com.mongodb.ClusterFixture.serverVersionAtLeast @@ -116,7 +115,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific execute(operation, async) then: - new ListCollectionsOperation(TIMEOUT_SETTINGS, getDatabaseName(), new BsonDocumentCodec()) + new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()) .execute(getBinding()).next().find { it -> it.getString('name').value == getCollectionName() } .getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions @@ -138,7 +137,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific execute(operation, async) then: - new ListCollectionsOperation(TIMEOUT_SETTINGS, getDatabaseName(), new BsonDocumentCodec()) + new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()) .execute(getBinding()).next().find { it -> 
it.getString('name').value == getCollectionName() } .getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions where: @@ -279,7 +278,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific } def getCollectionInfo(String collectionName) { - new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new BsonDocumentCodec()).filter(new BsonDocument('name', + new ListCollectionsOperation(databaseName, new BsonDocumentCodec()).filter(new BsonDocument('name', new BsonString(collectionName))).execute(getBinding()).tryNext()?.head() } @@ -290,12 +289,12 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific BsonDocument storageStats() { if (serverVersionLessThan(6, 2)) { - return new CommandReadOperation<>(TIMEOUT_SETTINGS, getDatabaseName(), + return new CommandReadOperation<>(getDatabaseName(), new BsonDocument('collStats', new BsonString(getCollectionName())), new BsonDocumentCodec()).execute(getBinding()) } BatchCursor cursor = new AggregateOperation( - TIMEOUT_SETTINGS, + getNamespace(), singletonList(new BsonDocument('$collStats', new BsonDocument('storageStats', new BsonDocument()))), new BsonDocumentCodec()).execute(getBinding()) @@ -311,6 +310,6 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific } def createOperation(WriteConcern writeConcern) { - new CreateCollectionOperation(TIMEOUT_SETTINGS, getDatabaseName(), getCollectionName(), writeConcern) + new CreateCollectionOperation(getDatabaseName(), getCollectionName(), writeConcern) } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy index 21e8e38c51b..389f4388b54 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy @@ -20,11 +20,9 @@ import com.mongodb.CreateIndexCommitQuorum import com.mongodb.DuplicateKeyException import com.mongodb.MongoClientException import com.mongodb.MongoCommandException -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoWriteConcernException import com.mongodb.OperationFunctionalSpecification import com.mongodb.WriteConcern -import com.mongodb.internal.TimeoutSettings import com.mongodb.internal.bulk.IndexRequest import org.bson.BsonBoolean import org.bson.BsonDocument @@ -36,13 +34,8 @@ import org.bson.Document import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet -import static com.mongodb.ClusterFixture.isSharded import static com.mongodb.ClusterFixture.serverVersionAtLeast import static com.mongodb.ClusterFixture.serverVersionLessThan import static java.util.concurrent.TimeUnit.SECONDS @@ -82,27 +75,6 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati async << [true, false] } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { - given: - def keys = new 
BsonDocument('field', new BsonInt32(1)) - def operation = createOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, [new IndexRequest(keys)]) - - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - @IgnoreIf({ serverVersionAtLeast(4, 4) }) def 'should throw exception if commit quorum is set where server < 4.4'() { given: @@ -443,7 +415,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should throw on write concern error'() { given: def keys = new BsonDocument('field', new BsonInt32(1)) - def operation = new CreateIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), [new IndexRequest(keys)], new WriteConcern(5)) + def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)], new WriteConcern(5)) when: execute(operation, async) @@ -540,7 +512,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati List getIndexes() { def indexes = [] - def cursor = new ListIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()).execute(getBinding()) + def cursor = new ListIndexesOperation(getNamespace(), new DocumentCodec()).execute(getBinding()) while (cursor.hasNext()) { indexes.addAll(cursor.next()) } @@ -556,11 +528,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati } def createOperation(final List requests) { - createOperation(TIMEOUT_SETTINGS, requests) - } - - def createOperation(final TimeoutSettings timeoutSettings, final List requests) { - new CreateIndexesOperation(timeoutSettings, getNamespace(), requests, null) + new CreateIndexesOperation(getNamespace(), requests, null) } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy index 77b35f39f82..52ad4334493 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy @@ -29,7 +29,6 @@ import org.bson.BsonString import org.bson.codecs.BsonDocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet import static com.mongodb.ClusterFixture.serverVersionAtLeast @@ -52,7 +51,7 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification getCollectionHelper().insertDocuments([trueXDocument, falseXDocument]) def pipeline = [new BsonDocument('$match', trueXDocument)] - def operation = new CreateViewOperation(TIMEOUT_SETTINGS, getDatabaseName(), viewName, viewOn, pipeline, + def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, pipeline, WriteConcern.ACKNOWLEDGED) when: @@ -81,7 +80,7 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification assert !collectionNameExists(viewOn) assert !collectionNameExists(viewName) - def operation = new CreateViewOperation(TIMEOUT_SETTINGS, getDatabaseName(), viewName, viewOn, [], + def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, [], WriteConcern.ACKNOWLEDGED) .collation(defaultCollation) @@ -103,7 +102,7 @@ class CreateViewOperationSpecification extends 
OperationFunctionalSpecification @IgnoreIf({ serverVersionAtLeast(3, 4) }) def 'should throw if server version is not 3.4 or greater'() { given: - def operation = new CreateViewOperation(TIMEOUT_SETTINGS, getDatabaseName(), getCollectionName() + '-view', + def operation = new CreateViewOperation(getDatabaseName(), getCollectionName() + '-view', getCollectionName(), [], WriteConcern.ACKNOWLEDGED) when: @@ -123,7 +122,7 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification def viewNamespace = new MongoNamespace(getDatabaseName(), viewName) assert !collectionNameExists(viewName) - def operation = new CreateViewOperation(TIMEOUT_SETTINGS, getDatabaseName(), viewName, getCollectionName(), [], + def operation = new CreateViewOperation(getDatabaseName(), viewName, getCollectionName(), [], new WriteConcern(5)) when: @@ -142,7 +141,7 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification } def getCollectionInfo(String collectionName) { - new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new BsonDocumentCodec()).filter(new BsonDocument('name', + new ListCollectionsOperation(databaseName, new BsonDocumentCodec()).filter(new BsonDocument('name', new BsonString(collectionName))).execute(getBinding()).tryNext()?.head() } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy index 84a5ad3c050..587e05e1d0c 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.internal.operation -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadConcern @@ -38,7 +37,6 @@ import com.mongodb.internal.session.SessionContext import org.bson.BsonBoolean import org.bson.BsonDocument import org.bson.BsonInt32 -import org.bson.BsonInt64 import org.bson.BsonInvalidOperationException import org.bson.BsonString import org.bson.BsonTimestamp @@ -54,10 +52,6 @@ import org.bson.types.ObjectId import spock.lang.IgnoreIf import static com.mongodb.ClusterFixture.OPERATION_CONTEXT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.connection.ServerType.STANDALONE @@ -77,7 +71,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { def 'should have the correct defaults'() { when: - DistinctOperation operation = new DistinctOperation(TIMEOUT_SETTINGS, getNamespace(), 'name', stringDecoder) + DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) then: operation.getFilter() == null @@ -89,7 +83,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { def filter = new BsonDocument('filter', new BsonInt32(1)) when: - DistinctOperation operation = new DistinctOperation(TIMEOUT_SETTINGS, getNamespace(), 'name', stringDecoder) + DistinctOperation operation = new 
DistinctOperation(getNamespace(), 'name', stringDecoder) .filter(filter) .collation(defaultCollation) @@ -104,7 +98,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { Document sam = new Document('name', 'Sam').append('age', 21) Document pete2 = new Document('name', 'Pete').append('age', 25) getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam, pete2) - DistinctOperation operation = new DistinctOperation(TIMEOUT_SETTINGS, getNamespace(), 'name', stringDecoder) + DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) when: def results = executeAndCollectBatchCursorResults(operation, async) @@ -122,7 +116,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { Document sam = new Document('name', 'Sam').append('age', 21) Document pete2 = new Document('name', 'Pete').append('age', 25) getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam, pete2) - def operation = new DistinctOperation(TIMEOUT_SETTINGS, getNamespace(), 'name', stringDecoder) + def operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) .filter(new BsonDocument('age', new BsonInt32(25))) when: @@ -153,7 +147,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { .append('numberOfJobs', sam.numberOfJobs) getCollectionHelper().insertDocuments(new Document('worker', peteDocument), new Document('worker', samDocument)) - DistinctOperation operation = new DistinctOperation(TIMEOUT_SETTINGS, getNamespace(), 'worker', new WorkerCodec()) + DistinctOperation operation = new DistinctOperation(getNamespace(), 'worker', new WorkerCodec()) when: def results = executeAndCollectBatchCursorResults(operation, async) @@ -172,7 +166,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { Document sam = new Document('name', 1) Document pete2 = new Document('name', new Document('earle', 'Jones')) getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam, pete2) - DistinctOperation operation = new DistinctOperation(TIMEOUT_SETTINGS, getNamespace(), 'name', stringDecoder) + DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) when: execute(operation, async) @@ -184,27 +178,9 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { async << [true, false] } - def 'should throw execution timeout exception from execute'() { - given: - def operation = new DistinctOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, getNamespace(), 'name', stringDecoder) - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - def 'should use the ReadBindings readPreference to set secondaryOk'() { when: - def operation = new DistinctOperation(TIMEOUT_SETTINGS, helper.namespace, 'name', helper.decoder) + def operation = new DistinctOperation(helper.namespace, 'name', helper.decoder) then: testOperationSecondaryOk(operation, [3, 4, 0], readPreference, async, helper.commandResult) @@ -215,14 +191,13 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { def 'should create the expected command'() { when: - def operation = new DistinctOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, helper.namespace, 'name', new BsonDocumentCodec()) + def operation = new DistinctOperation(helper.namespace, 'name', new BsonDocumentCodec()) .filter(new BsonDocument('a', 
BsonBoolean.TRUE)) .collation(defaultCollation) def expectedCommand = new BsonDocument('distinct', new BsonString(helper.namespace.getCollectionName())) .append('key', new BsonString('name')) .append('query', operation.getFilter()) - .append('maxTimeMS', new BsonInt64(100)) .append('collation', defaultCollation.asDocument()) then: @@ -237,7 +212,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { given: def document = Document.parse('{str: "foo"}') getCollectionHelper().insertDocuments(document) - def operation = new DistinctOperation(TIMEOUT_SETTINGS, namespace, 'str', stringDecoder) + def operation = new DistinctOperation(namespace, 'str', stringDecoder) .filter(BsonDocument.parse('{str: "FOO"}}')) .collation(caseInsensitiveCollation) @@ -267,7 +242,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { .append('key', new BsonString('str')) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new DistinctOperation(TIMEOUT_SETTINGS, getNamespace(), 'str', new StringCodec()) + def operation = new DistinctOperation(getNamespace(), 'str', new StringCodec()) when: operation.execute(binding) @@ -306,7 +281,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { .append('key', new BsonString('str')) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new DistinctOperation(TIMEOUT_SETTINGS, getNamespace(), 'str', new StringCodec()) + def operation = new DistinctOperation(getNamespace(), 'str', new StringCodec()) when: executeAsync(operation, binding) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy index aa0dae61628..67124fecf30 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy @@ -24,8 +24,6 @@ import org.bson.Document import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet @@ -39,7 +37,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat assert collectionNameExists(getCollectionName()) when: - new DropCollectionOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, getNamespace(), WriteConcern.ACKNOWLEDGED).execute(getBinding()) + new DropCollectionOperation(getNamespace(), WriteConcern.ACKNOWLEDGED).execute(getBinding()) then: !collectionNameExists(getCollectionName()) @@ -52,7 +50,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat assert collectionNameExists(getCollectionName()) when: - executeAsync(new DropCollectionOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, getNamespace(), WriteConcern.ACKNOWLEDGED)) + executeAsync(new DropCollectionOperation(getNamespace(), WriteConcern.ACKNOWLEDGED)) then: !collectionNameExists(getCollectionName()) @@ -63,7 +61,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat def namespace = new MongoNamespace(getDatabaseName(), 
'nonExistingCollection') when: - new DropCollectionOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, namespace, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + new DropCollectionOperation(namespace, WriteConcern.ACKNOWLEDGED).execute(getBinding()) then: !collectionNameExists('nonExistingCollection') @@ -75,7 +73,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat def namespace = new MongoNamespace(getDatabaseName(), 'nonExistingCollection') when: - executeAsync(new DropCollectionOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, namespace, WriteConcern.ACKNOWLEDGED)) + executeAsync(new DropCollectionOperation(namespace, WriteConcern.ACKNOWLEDGED)) then: !collectionNameExists('nonExistingCollection') @@ -86,7 +84,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentTo', 'createTheCollection')) assert collectionNameExists(getCollectionName()) - def operation = new DropCollectionOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, getNamespace(), new WriteConcern(5)) + def operation = new DropCollectionOperation(getNamespace(), new WriteConcern(5)) when: async ? executeAsync(operation) : operation.execute(getBinding()) @@ -101,7 +99,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat } def collectionNameExists(String collectionName) { - def cursor = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()).execute(getBinding()) + def cursor = new ListCollectionsOperation(databaseName, new DocumentCodec()).execute(getBinding()) if (!cursor.hasNext()) { return false } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy index d68d555a580..61648c1daec 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy @@ -25,8 +25,6 @@ import org.bson.Document import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding @@ -44,7 +42,7 @@ class DropDatabaseOperationSpecification extends OperationFunctionalSpecificatio assert databaseNameExists(databaseName) when: - execute(new DropDatabaseOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, databaseName, WriteConcern.ACKNOWLEDGED), async) + execute(new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED), async) then: !databaseNameExists(databaseName) @@ -59,7 +57,7 @@ class DropDatabaseOperationSpecification extends OperationFunctionalSpecificatio def dbName = 'nonExistingDatabase' when: - execute(new DropDatabaseOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, dbName, WriteConcern.ACKNOWLEDGED), async) + execute(new DropDatabaseOperation(dbName, WriteConcern.ACKNOWLEDGED), async) then: !databaseNameExists(dbName) @@ -75,7 +73,7 @@ class DropDatabaseOperationSpecification extends OperationFunctionalSpecificatio // On servers older than 4.0 that don't support this failpoint, use a crazy w value instead def w = serverVersionAtLeast(4, 0) ? 
2 : 5 - def operation = new DropDatabaseOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, databaseName, new WriteConcern(w)) + def operation = new DropDatabaseOperation(databaseName, new WriteConcern(w)) if (serverVersionAtLeast(4, 0)) { configureFailPoint(BsonDocument.parse('{ configureFailPoint: "failCommand", ' + 'mode : {times : 1}, ' + @@ -96,7 +94,7 @@ class DropDatabaseOperationSpecification extends OperationFunctionalSpecificatio } def databaseNameExists(String databaseName) { - new ListDatabasesOperation(TIMEOUT_SETTINGS, new DocumentCodec()).execute(getBinding()).next()*.name.contains(databaseName) + new ListDatabasesOperation(new DocumentCodec()).execute(getBinding()).next()*.name.contains(databaseName) } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy index 12aa7b3fb55..a051231af7e 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy @@ -17,7 +17,6 @@ package com.mongodb.internal.operation import com.mongodb.MongoException -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoWriteConcernException import com.mongodb.OperationFunctionalSpecification import com.mongodb.WriteConcern @@ -30,20 +29,15 @@ import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf import spock.lang.Unroll -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet -import static com.mongodb.ClusterFixture.isSharded import static com.mongodb.ClusterFixture.serverVersionLessThan class DropIndexOperationSpecification extends OperationFunctionalSpecification { def 'should not error when dropping non-existent index on non-existent collection'() { when: - execute(new DropIndexOperation(TIMEOUT_SETTINGS, getNamespace(), 'made_up_index_1', null), async) + execute(new DropIndexOperation(getNamespace(), 'made_up_index_1', null), async) then: getIndexes().size() == 0 @@ -57,7 +51,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) when: - execute(new DropIndexOperation(TIMEOUT_SETTINGS, getNamespace(), 'made_up_index_1', null), async) + execute(new DropIndexOperation(getNamespace(), 'made_up_index_1', null), async) then: thrown(MongoException) @@ -71,7 +65,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) when: - execute(new DropIndexOperation(TIMEOUT_SETTINGS, getNamespace(), 'theField_1', null), async) + execute(new DropIndexOperation(getNamespace(), 'theField_1', null), async) List indexes = getIndexes() then: @@ -88,7 +82,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { collectionHelper.createIndex(keys) when: - execute(new DropIndexOperation(TIMEOUT_SETTINGS, getNamespace(), keys, null), async) + execute(new 
DropIndexOperation(getNamespace(), keys, null), async) List indexes = getIndexes() then: @@ -106,35 +100,13 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { ].combinations() } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { - given: - def keys = new BsonDocument('theField', new BsonInt32(1)) - collectionHelper.createIndex(keys) - def operation = new DropIndexOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, getNamespace(), keys, null) - - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - def 'should drop existing index by key when using BsonInt64'() { given: def keys = new BsonDocument('theField', new BsonInt32(1)) collectionHelper.createIndex(keys) when: - execute(new DropIndexOperation(TIMEOUT_SETTINGS, getNamespace(), new BsonDocument('theField', new BsonInt64(1)), null), + execute(new DropIndexOperation(getNamespace(), new BsonDocument('theField', new BsonInt64(1)), null), async) List indexes = getIndexes() @@ -152,7 +124,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { collectionHelper.createIndex(new BsonDocument('theOtherField', new BsonInt32(1))) when: - execute(new DropIndexOperation(TIMEOUT_SETTINGS, getNamespace(), '*', null), async) + execute(new DropIndexOperation(getNamespace(), '*', null), async) List indexes = getIndexes() then: @@ -167,7 +139,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { def 'should throw on write concern error'() { given: collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) - def operation = new DropIndexOperation(TIMEOUT_SETTINGS, getNamespace(), 'theField_1', new WriteConcern(5)) + def operation = new DropIndexOperation(getNamespace(), 'theField_1', new WriteConcern(5)) when: execute(operation, async) @@ -183,7 +155,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { def getIndexes() { def indexes = [] - def cursor = new ListIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()).execute(getBinding()) + def cursor = new ListIndexesOperation(getNamespace(), new DocumentCodec()).execute(getBinding()) while (cursor.hasNext()) { indexes.addAll(cursor.next()) } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy index 951b058b8e4..ccc9614d1fb 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy @@ -34,8 +34,6 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.disableFailPoint import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint @@ -54,7 +52,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati def 'should have the correct defaults'() { when: - def operation = new 
FindAndDeleteOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, documentCodec) + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) then: operation.getNamespace() == getNamespace() @@ -73,7 +71,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati def projection = BsonDocument.parse('{ projection : 1}') when: - def operation = new FindAndDeleteOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, documentCodec) + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) .filter(filter) .sort(sort) .projection(projection) @@ -94,7 +92,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam) when: - def operation = new FindAndDeleteOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, documentCodec) + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -115,7 +113,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati getWorkerCollectionHelper().insertDocuments(new WorkerCodec(), pete, sam) when: - FindAndDeleteOperation operation = new FindAndDeleteOperation(TIMEOUT_SETTINGS, getNamespace(), + FindAndDeleteOperation operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec).filter(new BsonDocument('name', new BsonString('Pete'))) Worker returnedDocument = execute(operation, async) @@ -135,7 +133,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) Document pete = new Document('name', 'Pete').append('job', 'handyman') helper.insertDocuments(new DocumentCodec(), pete) - def operation = new FindAndDeleteOperation(TIMEOUT_SETTINGS, getNamespace(), new WriteConcern(5, 1), false, + def operation = new FindAndDeleteOperation(getNamespace(), new WriteConcern(5, 1), false, documentCodec).filter(new BsonDocument('name', new BsonString('Pete'))) when: @@ -167,7 +165,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}}}''') configureFailPoint(failPoint) - def operation = new FindAndDeleteOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec).filter(new BsonDocument('name', new BsonString('Pete'))) when: @@ -193,11 +191,10 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati def includeTxnNumber = retryWrites && writeConcern.isAcknowledged() && serverType != STANDALONE def includeWriteConcern = writeConcern.isAcknowledged() && !writeConcern.isServerDefault() def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) - def operation = new FindAndDeleteOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, getNamespace(), writeConcern as WriteConcern, + def operation = new FindAndDeleteOperation(getNamespace(), writeConcern as WriteConcern, retryWrites as boolean, documentCodec) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('remove', BsonBoolean.TRUE) - .append('maxTimeMS', 
new BsonInt64(100)) if (includeWriteConcern) { expectedCommand.put('writeConcern', writeConcern.asDocument()) @@ -248,7 +245,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam) when: - def operation = new FindAndDeleteOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, true, documentCodec) + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec) .filter(new BsonDocument('name', new BsonString('Pete'))) enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse('{times: 1}')) @@ -269,7 +266,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati def 'should retry if the connection initially fails'() { when: def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) - def operation = new FindAndDeleteOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, true, documentCodec) + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('remove', BsonBoolean.TRUE) .append('txnNumber', new BsonInt64(0)) @@ -283,7 +280,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati def 'should throw original error when retrying and failing'() { given: - def operation = new FindAndDeleteOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, true, documentCodec) + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec) def originalException = new MongoSocketException('Some failure', new ServerAddress()) when: @@ -311,7 +308,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati given: def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) - def operation = new FindAndDeleteOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, documentCodec) + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy index 6623f853294..4c334fa0ea0 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy @@ -40,8 +40,6 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.disableFailPoint import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint @@ -62,7 +60,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should have the correct defaults and passed values'() { when: def replacement = new BsonDocument('replace', new BsonInt32(1)) - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), 
ACKNOWLEDGED, false, documentCodec, + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement) then: @@ -84,7 +82,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def projection = new BsonDocument('projection', new BsonInt32(1)) when: - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, documentCodec, + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, new BsonDocument('replace', new BsonInt32(1))).filter(filter).sort(sort).projection(projection) .bypassDocumentValidation(true).upsert(true).returnOriginal(false) .collation(defaultCollation) @@ -109,7 +107,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat helper.insertDocuments(new DocumentCodec(), pete, sam) when: - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, jordan) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -120,7 +118,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat helper.find().get(0).getString('name') == 'Jordan' when: - operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, documentCodec, + operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, new BsonDocumentWrapper(pete, documentCodec)) .filter(new BsonDocument('name', new BsonString('Jordan'))) .returnOriginal(false) @@ -144,7 +142,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat helper.insertDocuments(new WorkerCodec(), pete, sam) when: - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, replacement).filter(new BsonDocument('name', new BsonString('Pete'))) Worker returnedDocument = execute(operation, async) @@ -154,7 +152,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat when: replacement = new BsonDocumentWrapper(pete, workerCodec) - operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, workerCodec, + operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, replacement) .filter(new BsonDocument('name', new BsonString('Jordan'))) .returnOriginal(false) @@ -170,7 +168,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should return null if query fails to match'() { when: BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, jordan) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -185,7 +183,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should throw an exception if replacement contains update operators'() { given: def replacement = new BsonDocumentWrapper(['$inc': 1] as Document, documentCodec) - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), 
ACKNOWLEDGED, false, + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement) when: @@ -210,7 +208,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat when: def replacement = new BsonDocument('level', new BsonInt32(9)) - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, namespace, ACKNOWLEDGED, false, + def operation = new FindAndReplaceOperation(namespace, ACKNOWLEDGED, false, documentCodec, replacement) execute(operation, async) @@ -249,7 +247,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') when: - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new FindAndReplaceOperation(getNamespace(), new WriteConcern(5, 1), false, documentCodec, jordan) .filter(new BsonDocument('name', new BsonString('Pete'))) execute(operation, async) @@ -263,7 +261,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat ex.writeResult.upsertedId == null when: - operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), new WriteConcern(5, 1), + operation = new FindAndReplaceOperation(getNamespace(), new WriteConcern(5, 1), false, documentCodec, jordan).filter(new BsonDocument('name', new BsonString('Bob'))) .upsert(true) execute(operation, async) @@ -295,7 +293,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat configureFailPoint(failPoint) BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, jordan).filter(new BsonDocument('name', new BsonString('Pete'))) when: @@ -322,11 +320,9 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def includeWriteConcern = writeConcern.isAcknowledged() && !writeConcern.isServerDefault() def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) def replacement = BsonDocument.parse('{ replacement: 1}') - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, getNamespace(), writeConcern, retryWrites, - documentCodec, replacement) + def operation = new FindAndReplaceOperation(getNamespace(), writeConcern, retryWrites, documentCodec, replacement) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('update', replacement) - .append('maxTimeMS', new BsonInt64(100)) if (includeWriteConcern) { expectedCommand.put('writeConcern', writeConcern.asDocument()) } @@ -381,7 +377,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat helper.insertDocuments(new DocumentCodec(), pete, sam) when: - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, true, + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, jordan) .filter(new BsonDocument('name', new BsonString('Pete'))) @@ -404,7 +400,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat when: def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) def replacement = BsonDocument.parse('{ replacement: 1}') - def 
operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, true, + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, replacement) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('update', replacement) @@ -421,7 +417,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should throw original error when retrying and failing'() { given: def replacement = BsonDocument.parse('{ replacement: 1}') - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, true, + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, replacement) def originalException = new MongoSocketException('Some failure', new ServerAddress()) @@ -451,7 +447,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) def replacement = BsonDocument.parse('{str: "bar"}') - def operation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy index b96216ed7ff..821eacbee6e 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy @@ -41,8 +41,6 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.disableFailPoint import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint @@ -64,7 +62,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should have the correct defaults and passed values'() { when: def update = new BsonDocument('update', new BsonInt32(1)) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) then: @@ -83,8 +81,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should have the correct defaults and passed values using update pipelines'() { when: def updatePipeline = new BsonArray(singletonList(new BsonDocument('update', new BsonInt32(1)))) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, - documentCodec, updatePipeline) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, updatePipeline) then: operation.getNamespace() == getNamespace() @@ -105,7 +102,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def projection = new BsonDocument('projection', new BsonInt32(1)) when: - 
def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, new BsonDocument('update', new BsonInt32(1))) .filter(filter) .sort(sort) @@ -132,7 +129,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def projection = new BsonDocument('projection', new BsonInt32(1)) when: - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, new BsonArray(singletonList(new BsonDocument('update', new BsonInt32(1))))) .filter(filter) .sort(sort) @@ -161,7 +158,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -173,7 +170,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) @@ -197,8 +194,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonArray(singletonList(new BsonDocument('$addFields', new BsonDocument('foo', new BsonInt32(1))))) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, - documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) Document returnedDocument = execute(operation, false) @@ -209,8 +205,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: update = new BsonArray(singletonList(new BsonDocument('$addFields', new BsonDocument('foo', new BsonInt32(1))))) - operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, - documentCodec, update) + operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) returnedDocument = execute(operation, false) @@ -230,7 +225,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) Worker returnedDocument = execute(operation, async) @@ -242,7 +237,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new 
BsonInt32(1))) - operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) @@ -266,8 +261,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonArray(singletonList(new BsonDocument('$project', new BsonDocument('name', new BsonInt32(1))))) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, - documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) Document returnedDocument = execute(operation, async) @@ -283,8 +277,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should return null if query fails to match'() { when: def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false - , documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -298,7 +291,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should throw an exception if update contains fields that are not update operators'() { given: def update = new BsonDocument('x', new BsonInt32(1)) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) when: @@ -316,8 +309,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should throw an exception if update pipeline contains operations that are not supported'() { when: def update = new BsonArray(singletonList(new BsonDocument('$foo', new BsonDocument('x', new BsonInt32(1))))) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, - documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) execute(operation, async) then: @@ -325,8 +317,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: update = singletonList(new BsonInt32(1)) - operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, - documentCodec, update) + operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) execute(operation, async) then: @@ -347,7 +338,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('level', new BsonInt32(-1))) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, namespace, ACKNOWLEDGED, false, + def operation = new FindAndUpdateOperation(namespace, ACKNOWLEDGED, false, documentCodec, update) execute(operation, async) @@ -383,7 +374,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) when: - def operation = new 
FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new FindAndUpdateOperation(getNamespace(), new WriteConcern(5, 1), false, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) execute(operation, async) @@ -397,7 +388,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati ex.writeResult.upsertedId == null when: - operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), new WriteConcern(5, 1), false, + operation = new FindAndUpdateOperation(getNamespace(), new WriteConcern(5, 1), false, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Bob'))) .upsert(true) @@ -427,7 +418,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati configureFailPoint(failPoint) def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) @@ -455,11 +446,9 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def includeWriteConcern = writeConcern.isAcknowledged() && !writeConcern.isServerDefault() def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) def update = BsonDocument.parse('{ update: 1}') - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, getNamespace(), writeConcern, retryWrites, - documentCodec, update) + def operation = new FindAndUpdateOperation(getNamespace(), writeConcern, retryWrites, documentCodec, update) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('update', update) - .append('maxTimeMS', new BsonInt64(100)) if (includeWriteConcern) { expectedCommand.put('writeConcern', writeConcern.asDocument()) } @@ -514,7 +503,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, true, + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) @@ -538,7 +527,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) def update = BsonDocument.parse('{ update: 1}') - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, true, + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, update) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('update', update) @@ -555,7 +544,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should throw original error when retrying and failing'() { given: def update = BsonDocument.parse('{ update: 1}') - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, true, + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, update) def 
originalException = new MongoSocketException('Some failure', new ServerAddress()) @@ -585,7 +574,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) def update = BsonDocument.parse('{ $set: {str: "bar"}}') - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) @@ -608,7 +597,7 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati getCollectionHelper().insertDocuments(documentOne, documentTwo) def update = BsonDocument.parse('{ $set: {"y.$[i].b": 2}}') def arrayFilters = [BsonDocument.parse('{"i.b": 3}')] - def operation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) .returnOriginal(false) .arrayFilters(arrayFilters) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy index 65395331dbb..8a1534898d7 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy @@ -17,7 +17,6 @@ package com.mongodb.internal.operation import com.mongodb.ClusterFixture -import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.MongoQueryException import com.mongodb.OperationFunctionalSpecification @@ -52,10 +51,6 @@ import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf import static com.mongodb.ClusterFixture.OPERATION_CONTEXT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.executeSync import static com.mongodb.ClusterFixture.getAsyncCluster @@ -77,7 +72,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def decoder = new DocumentCodec() when: - FindOperation operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), decoder) + FindOperation operation = new FindOperation(getNamespace(), decoder) then: operation.getNamespace() == getNamespace() @@ -101,7 +96,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def hint = new BsonString('a_1') when: - FindOperation operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + FindOperation operation = new FindOperation(getNamespace(), new DocumentCodec()) .filter(filter) .limit(20) .skip(30) @@ -131,7 +126,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { given: def document = new Document('_id', 1) getCollectionHelper().insertDocuments(new DocumentCodec(), document) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) when: def results = 
executeAndCollectBatchCursorResults(operation, async) @@ -157,7 +152,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { where: [async, operation] << [ [true, false], - [new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + [new FindOperation(getNamespace(), new DocumentCodec()) .filter(new BsonDocument('_id', new BsonInt32(1)))] ].combinations() } @@ -178,7 +173,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { where: [async, operation] << [ [true, false], - [new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + [new FindOperation(getNamespace(), new DocumentCodec()) .sort(new BsonDocument('_id', new BsonInt32(1)))] ].combinations() } @@ -187,7 +182,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', 5).append('y', 10), new Document('_id', 1).append('x', 10)) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) .projection(new BsonDocument('_id', new BsonInt32(0)).append('x', new BsonInt32(1))) when: @@ -206,7 +201,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { new Document('_id', 5)] getCollectionHelper().insertDocuments(new DocumentCodec(), documents) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) .sort(new BsonDocument('_id', new BsonInt32(1))) .skip(3) @@ -226,7 +221,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { new Document('_id', 5)] getCollectionHelper().insertDocuments(new DocumentCodec(), documents) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) .sort(new BsonDocument('_id', new BsonInt32(1))) .limit(limit) @@ -245,7 +240,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def documents = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3), new Document('_id', 4), new Document('_id', 5)] getCollectionHelper().insertDocuments(new DocumentCodec(), documents) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) .sort(new BsonDocument('_id', new BsonInt32(1))) .batchSize(batchSize) @@ -288,7 +283,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def 'should throw query exception'() { given: - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) .filter(new BsonDocument('x', new BsonDocument('$thisIsNotAnOperator', BsonBoolean.TRUE))) when: @@ -301,36 +296,13 @@ class FindOperationSpecification extends OperationFunctionalSpecification { async << [true, false] } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - [async, operation] << [ - [true, false], - [new 
FindOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, getNamespace(), new DocumentCodec())] - ].combinations() - } - def '$max should limit items returned'() { given: (1..100).each { collectionHelper.insertDocuments(new DocumentCodec(), new Document('x', 'y').append('count', it)) } collectionHelper.createIndex(new BsonDocument('count', new BsonInt32(1))) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) .max(new BsonDocument('count', new BsonInt32(11))) .hint(new BsonDocument('count', new BsonInt32(1))) @@ -350,7 +322,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { collectionHelper.insertDocuments(new DocumentCodec(), new Document('x', 'y').append('count', it)) } collectionHelper.createIndex(new BsonDocument('count', new BsonInt32(1))) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) .min(new BsonDocument('count', new BsonInt32(10))) .hint(new BsonDocument('count', new BsonInt32(1))) @@ -371,7 +343,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { } collectionHelper.createIndex(new BsonDocument('x', new BsonInt32(1))) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) .filter(new BsonDocument('x', new BsonInt32(7))) .returnKey(true) @@ -391,7 +363,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def index = new BsonDocument('a', new BsonInt32(1)) collectionHelper.createIndex(index) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) .hint((BsonValue) hint) .asExplainableOperation(null, new BsonDocumentCodec()) @@ -410,10 +382,10 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def 'should apply comment'() { given: def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile')) - new CommandReadOperation<>(TIMEOUT_SETTINGS, getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec()).execute(getBinding()) def expectedComment = 'this is a comment' - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) .comment(new BsonString(expectedComment)) when: @@ -430,7 +402,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { } cleanup: - new CommandReadOperation<>(TIMEOUT_SETTINGS, getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec()) .execute(getBinding()) profileCollectionHelper.drop() @@ -444,7 +416,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { String fieldName = serverVersionAtLeast(3, 2) ? 
'$recordId' : '$diskLoc' collectionHelper.insertDocuments(new BsonDocument()) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) .showRecordId(true) when: @@ -461,7 +433,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def 'should read from a secondary'() { given: collectionHelper.insertDocuments(new DocumentCodec(), new Document()) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) def syncBinding = new ClusterBinding(getCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, OPERATION_CONTEXT) def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, OPERATION_CONTEXT) @@ -482,7 +454,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def documents = [new Document('_id', 3), new Document('_id', 1), new Document('_id', 2), new Document('_id', 5), new Document('_id', 4)] collectionHelper.insertDocuments(new DocumentCodec(), documents) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) when: def hedgeOptions = isHedgeEnabled != null ? @@ -523,7 +495,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) when: operation.execute(binding) @@ -563,7 +535,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) when: executeAsync(operation, binding) @@ -604,7 +576,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())).append('allowDiskUse', BsonBoolean.TRUE) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()).allowDiskUse(true) + def operation = new FindOperation(getNamespace(), new DocumentCodec()).allowDiskUse(true) when: operation.execute(binding) @@ -644,7 +616,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())).append('allowDiskUse', BsonBoolean.TRUE) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()).allowDiskUse(true) + def operation = new FindOperation(getNamespace(), new DocumentCodec()).allowDiskUse(true) when: executeAsync(operation, binding) @@ -699,7 +671,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { // sanity check that the server accepts the miscallaneous flags 
def 'should pass miscallaneous flags through'() { given: - def operation = new FindOperation(TIMEOUT_SETTINGS, namespace, new BsonDocumentCodec()) + def operation = new FindOperation(namespace, new BsonDocumentCodec()) .noCursorTimeout(true) .partial(true) @@ -718,7 +690,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { given: def document = BsonDocument.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) - def operation = new FindOperation(TIMEOUT_SETTINGS, getNamespace(), new BsonDocumentCodec()) + def operation = new FindOperation(getNamespace(), new BsonDocumentCodec()) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy index b1aa7f53a7c..07a3fadc5fd 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy @@ -16,7 +16,7 @@ package com.mongodb.internal.operation -import com.mongodb.MongoExecutionTimeoutException + import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadPreference @@ -46,14 +46,8 @@ import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf import static com.mongodb.ClusterFixture.OPERATION_CONTEXT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding -import static com.mongodb.ClusterFixture.isSharded import static com.mongodb.ClusterFixture.serverVersionAtLeast import static com.mongodb.ClusterFixture.serverVersionLessThan @@ -63,7 +57,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should return empty set if database does not exist'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, madeUpDatabase, new DocumentCodec()) + def operation = new ListCollectionsOperation(madeUpDatabase, new DocumentCodec()) when: def cursor = operation.execute(getBinding()) @@ -78,7 +72,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should return empty cursor if database does not exist asynchronously'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, madeUpDatabase, new DocumentCodec()) + def operation = new ListCollectionsOperation(madeUpDatabase, new DocumentCodec()) when: def cursor = executeAsync(operation) @@ -94,7 +88,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should return collection names if a collection exists'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) def helper = getCollectionHelper() def helper2 = getCollectionHelper(new MongoNamespace(databaseName, 'collection2')) def codec = new DocumentCodec() @@ -115,7 +109,7 @@ class 
ListCollectionsOperationSpecification extends OperationFunctionalSpecifica @IgnoreIf({ serverVersionAtLeast(3, 0) }) def 'should throw if filtering on name with something other than a string'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) .filter(new BsonDocument('name', new BsonRegularExpression('^[^$]*$'))) when: @@ -127,7 +121,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter collection names if a name filter is specified'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) .filter(new BsonDocument('name', new BsonString('collection2'))) def helper = getCollectionHelper() def helper2 = getCollectionHelper(new MongoNamespace(databaseName, 'collection2')) @@ -147,7 +141,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter capped collections'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) .filter(new BsonDocument('options.capped', BsonBoolean.TRUE)) def helper = getCollectionHelper() getCollectionHelper().create('collection3', new CreateCollectionOptions().capped(true).sizeInBytes(1000)) @@ -167,7 +161,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica @IgnoreIf({ serverVersionLessThan(3, 4) || serverVersionAtLeast(4, 0) }) def 'should get all fields when nameOnly is not requested'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) getCollectionHelper().create('collection4', new CreateCollectionOptions()) when: @@ -181,7 +175,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica @IgnoreIf({ serverVersionLessThan(4, 0) }) def 'should only get collection names when nameOnly is requested'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) .nameOnly(true) getCollectionHelper().create('collection5', new CreateCollectionOptions()) @@ -196,7 +190,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica @IgnoreIf({ serverVersionLessThan(4, 0) }) def 'should only get collection names when nameOnly and authorizedCollections are requested'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) .nameOnly(true) .authorizedCollections(true) getCollectionHelper().create('collection6', new CreateCollectionOptions()) @@ -212,7 +206,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica @IgnoreIf({ serverVersionLessThan(3, 4) || serverVersionAtLeast(4, 0) }) def 'should only get all field names when nameOnly is requested on server versions that do not support nameOnly'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(databaseName, 
new DocumentCodec()) .nameOnly(true) getCollectionHelper().create('collection7', new CreateCollectionOptions()) @@ -227,7 +221,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica @IgnoreIf({ serverVersionLessThan(4, 0) }) def 'should get all fields when authorizedCollections is requested and nameOnly is not requested'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) .nameOnly(false) .authorizedCollections(true) getCollectionHelper().create('collection8', new CreateCollectionOptions()) @@ -242,7 +236,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should return collection names if a collection exists asynchronously'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) def helper = getCollectionHelper() def helper2 = getCollectionHelper(new MongoNamespace(databaseName, 'collection2')) def codec = new DocumentCodec() @@ -263,9 +257,9 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter indexes when calling hasNext before next'() { given: - new DropDatabaseOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) when: def cursor = operation.execute(getBinding()) @@ -279,9 +273,9 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter indexes without calling hasNext before next'() { given: - new DropDatabaseOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) when: def cursor = operation.execute(getBinding()) @@ -301,9 +295,9 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter indexes when calling hasNext before tryNext'() { given: - new DropDatabaseOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) when: def cursor = operation.execute(getBinding()) @@ -323,9 +317,9 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter indexes without calling hasNext before tryNext'() { given: - new DropDatabaseOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + new DropDatabaseOperation(databaseName, 
WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) when: def cursor = operation.execute(getBinding()) @@ -340,9 +334,9 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter indexes asynchronously'() { given: - new DropDatabaseOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) when: def cursor = executeAsync(operation) @@ -355,7 +349,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should use the set batchSize of collections'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) def codec = new DocumentCodec() getCollectionHelper().insertDocuments(codec, ['a': 1] as Document) getCollectionHelper(new MongoNamespace(databaseName, 'collection2')).insertDocuments(codec, ['a': 1] as Document) @@ -387,7 +381,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should use the set batchSize of collections asynchronously'() { given: - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) def codec = new DocumentCodec() getCollectionHelper().insertDocuments(codec, ['a': 1] as Document) getCollectionHelper(new MongoNamespace(databaseName, 'collection2')).insertDocuments(codec, ['a': 1] as Document) @@ -416,27 +410,6 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica cursor?.close() } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, databaseName, new DocumentCodec()) - - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - def 'should use the readPreference to set secondaryOk'() { given: def connection = Mock(Connection) @@ -450,7 +423,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica getReadPreference() >> readPreference getOperationContext() >> OPERATION_CONTEXT } - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, helper.dbName, helper.decoder) + def operation = new ListCollectionsOperation(helper.dbName, helper.decoder) when: '3.6.0' operation.execute(readBinding) @@ -477,7 +450,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica getReadPreference() >> readPreference getOperationContext() >> OPERATION_CONTEXT } - def operation = new ListCollectionsOperation(TIMEOUT_SETTINGS, helper.dbName, helper.decoder) + def operation = 
new ListCollectionsOperation(helper.dbName, helper.decoder) when: '3.6.0' operation.executeAsync(readBinding, Stub(SingleResultCallback)) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy index 37193c8fbf9..740f9073dcd 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy @@ -16,7 +16,7 @@ package com.mongodb.internal.operation -import com.mongodb.MongoExecutionTimeoutException + import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadPreference import com.mongodb.connection.ConnectionDescription @@ -32,14 +32,8 @@ import org.bson.BsonRegularExpression import org.bson.Document import org.bson.codecs.Decoder import org.bson.codecs.DocumentCodec -import spock.lang.IgnoreIf import static com.mongodb.ClusterFixture.OPERATION_CONTEXT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.isSharded class ListDatabasesOperationSpecification extends OperationFunctionalSpecification { def codec = new DocumentCodec() @@ -47,7 +41,7 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati def 'should return a list of database names'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1)) - def operation = new ListDatabasesOperation(TIMEOUT_SETTINGS, codec) + def operation = new ListDatabasesOperation(codec) when: def names = executeAndCollectBatchCursorResults(operation, async)*.get('name') @@ -74,27 +68,6 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati async << [true, false] } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - def operation = new ListDatabasesOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, codec) - - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - def 'should use the readPreference to set secondaryOk'() { given: def connection = Mock(Connection) @@ -108,7 +81,7 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati getReadPreference() >> readPreference getOperationContext() >> OPERATION_CONTEXT } - def operation = new ListDatabasesOperation(TIMEOUT_SETTINGS, helper.decoder) + def operation = new ListDatabasesOperation(helper.decoder) when: operation.execute(readBinding) @@ -133,7 +106,7 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati getReadPreference() >> readPreference getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } } - def operation = new ListDatabasesOperation(TIMEOUT_SETTINGS, helper.decoder) + def operation = new ListDatabasesOperation(helper.decoder) when: operation.executeAsync(readBinding, Stub(SingleResultCallback)) diff --git 
a/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy index 27e4bed3cd8..462bf367e50 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy @@ -16,7 +16,7 @@ package com.mongodb.internal.operation -import com.mongodb.MongoExecutionTimeoutException + import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadPreference @@ -41,22 +41,16 @@ import org.bson.BsonString import org.bson.Document import org.bson.codecs.Decoder import org.bson.codecs.DocumentCodec -import spock.lang.IgnoreIf import static com.mongodb.ClusterFixture.OPERATION_CONTEXT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding -import static com.mongodb.ClusterFixture.isSharded class ListIndexesOperationSpecification extends OperationFunctionalSpecification { def 'should return empty list for nonexistent collection'() { given: - def operation = new ListIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) when: def cursor = operation.execute(getBinding()) @@ -68,7 +62,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should return empty list for nonexistent collection asynchronously'() { given: - def operation = new ListIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) when: AsyncBatchCursor cursor = executeAsync(operation) @@ -82,7 +76,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should return default index on Collection that exists'() { given: - def operation = new ListIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) when: @@ -98,7 +92,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should return default index on Collection that exists asynchronously'() { given: - def operation = new ListIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) when: @@ -114,11 +108,11 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should return created indexes on Collection'() { given: - def operation = new ListIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) 
collectionHelper.createIndex(new BsonDocument('compound', new BsonInt32(1)).append('index', new BsonInt32(-1))) - new CreateIndexesOperation(TIMEOUT_SETTINGS, namespace, - [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique (true)], null).execute(getBinding()) + new CreateIndexesOperation(namespace, + [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)], null).execute(getBinding()) when: BatchCursor cursor = operation.execute(getBinding()) @@ -134,10 +128,10 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should return created indexes on Collection asynchronously'() { given: - def operation = new ListIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('compound', new BsonInt32(1)).append('index', new BsonInt32(-1))) - new CreateIndexesOperation(TIMEOUT_SETTINGS, namespace, + new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)], null).execute(getBinding()) when: @@ -154,7 +148,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should use the set batchSize of collections'() { given: - def operation = new ListIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()).batchSize(2) + def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()).batchSize(2) collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('collection2', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('collection3', new BsonInt32(1))) @@ -185,7 +179,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should use the set batchSize of collections asynchronously'() { given: - def operation = new ListIndexesOperation(TIMEOUT_SETTINGS, getNamespace(), new DocumentCodec()).batchSize(2) + def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()).batchSize(2) collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('collection2', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('collection3', new BsonInt32(1))) @@ -213,27 +207,6 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification cursor?.close() } - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception'() { - given: - def operation = new ListIndexesOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, getNamespace(), new DocumentCodec()) - collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1))) - - enableMaxTimeFailPoint() - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - def 'should use the readPreference to set secondaryOk'() { given: def connection = Mock(Connection) @@ -247,7 +220,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification getReadPreference() >> readPreference getOperationContext() >> OPERATION_CONTEXT } - def operation = new ListIndexesOperation(TIMEOUT_SETTINGS, helper.namespace, helper.decoder) + def operation = new ListIndexesOperation(helper.namespace, helper.decoder) when: '3.6.0' 
operation.execute(readBinding) @@ -272,7 +245,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification getReadPreference() >> readPreference getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } } - def operation = new ListIndexesOperation(TIMEOUT_SETTINGS, helper.namespace, helper.decoder) + def operation = new ListIndexesOperation(helper.namespace, helper.decoder) when: '3.6.0' operation.executeAsync(readBinding, Stub(SingleResultCallback)) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy index aad7133b93b..bba11b0bdeb 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy @@ -29,7 +29,6 @@ import org.bson.BsonBoolean import org.bson.BsonDocument import org.bson.BsonDouble import org.bson.BsonInt32 -import org.bson.BsonInt64 import org.bson.BsonJavaScript import org.bson.BsonString import org.bson.Document @@ -37,9 +36,6 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet import static com.mongodb.ClusterFixture.serverVersionAtLeast @@ -49,7 +45,7 @@ import static com.mongodb.client.model.Filters.gte class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpecification { def mapReduceInputNamespace = new MongoNamespace(getDatabaseName(), 'mapReduceInput') def mapReduceOutputNamespace = new MongoNamespace(getDatabaseName(), 'mapReduceOutput') - def mapReduceOperation = new MapReduceToCollectionOperation(TIMEOUT_SETTINGS, mapReduceInputNamespace, + def mapReduceOperation = new MapReduceToCollectionOperation(mapReduceInputNamespace, new BsonJavaScript('function(){ emit( this.name , 1 ); }'), new BsonJavaScript('function(key, values){ return values.length; }'), mapReduceOutputNamespace.getCollectionName(), null) @@ -66,8 +62,8 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe } def cleanup() { - new DropCollectionOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, mapReduceInputNamespace, WriteConcern.ACKNOWLEDGED).execute(getBinding()) - new DropCollectionOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, mapReduceOutputNamespace, WriteConcern.ACKNOWLEDGED) + new DropCollectionOperation(mapReduceInputNamespace, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + new DropCollectionOperation(mapReduceOutputNamespace, WriteConcern.ACKNOWLEDGED) .execute(getBinding()) } @@ -78,7 +74,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def out = 'outCollection' when: - def operation = new MapReduceToCollectionOperation(TIMEOUT_SETTINGS, getNamespace(), mapF, reduceF, out, null) + def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, null) then: operation.getMapFunction() == mapF @@ -112,7 +108,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def writeConcern = 
WriteConcern.MAJORITY when: - def operation = new MapReduceToCollectionOperation(TIMEOUT_SETTINGS, getNamespace(), mapF, reduceF, out, writeConcern) + def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, writeConcern) .action(action) .databaseName(dbName) .finalizeFunction(finalizeF) @@ -180,7 +176,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe getCollectionHelper().insertDocuments(new BsonDocument()) when: - def operation = new MapReduceToCollectionOperation(TIMEOUT_SETTINGS, mapReduceInputNamespace, + def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace, new BsonJavaScript('function(){ emit( "level" , 1 ); }'), new BsonJavaScript('function(key, values){ return values.length; }'), 'collectionOut', null) @@ -214,7 +210,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def 'should throw on write concern error'() { given: getCollectionHelper().insertDocuments(new BsonDocument()) - def operation = new MapReduceToCollectionOperation(TIMEOUT_SETTINGS, mapReduceInputNamespace, + def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace, new BsonJavaScript('function(){ emit( "level" , 1 ); }'), new BsonJavaScript('function(key, values){ return values.length; }'), 'collectionOut', new WriteConcern(5)) @@ -246,7 +242,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def dbName = 'dbName' when: - def operation = new MapReduceToCollectionOperation(TIMEOUT_SETTINGS, getNamespace(), mapF, reduceF, out, + def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, WriteConcern.MAJORITY) def expectedCommand = new BsonDocument('mapreduce', new BsonString(getCollectionName())) .append('map', mapF) @@ -262,7 +258,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe ReadPreference.primary(), false) when: - operation = new MapReduceToCollectionOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, getNamespace(), mapF, reduceF, out, + operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, WriteConcern.MAJORITY) .action(action) .databaseName(dbName) @@ -281,7 +277,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe .append('scope', scope) .append('verbose', BsonBoolean.TRUE) .append('limit', new BsonInt32(10)) - .append('maxTimeMS', new BsonInt64(100)) if (includeCollation) { operation.collation(defaultCollation) @@ -309,7 +304,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper(mapReduceInputNamespace).insertDocuments(document) - def operation = new MapReduceToCollectionOperation(TIMEOUT_SETTINGS, mapReduceInputNamespace, + def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace, new BsonJavaScript('function(){ emit( this._id, this.str ); }'), new BsonJavaScript('function(key, values){ return values; }'), 'collectionOut', null) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy index 6c0d300a35c..693d8ab3a68 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy +++ 
b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy @@ -37,7 +37,6 @@ import org.bson.BsonBoolean import org.bson.BsonDocument import org.bson.BsonDouble import org.bson.BsonInt32 -import org.bson.BsonInt64 import org.bson.BsonJavaScript import org.bson.BsonString import org.bson.BsonTimestamp @@ -47,8 +46,6 @@ import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf import static com.mongodb.ClusterFixture.OPERATION_CONTEXT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.connection.ServerType.STANDALONE @@ -57,8 +54,7 @@ import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSIO class MapReduceWithInlineResultsOperationSpecification extends OperationFunctionalSpecification { private final bsonDocumentCodec = new BsonDocumentCodec() - def mapReduceOperation = new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS, - getNamespace(), + def mapReduceOperation = new MapReduceWithInlineResultsOperation(getNamespace(), new BsonJavaScript('function(){ emit( this.name , 1 ); }'), new BsonJavaScript('function(key, values){ return values.length; }'), bsonDocumentCodec) @@ -78,7 +74,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction when: def mapF = new BsonJavaScript('function(){ }') def reduceF = new BsonJavaScript('function(key, values){ }') - def operation = new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS, helper.namespace, mapF, reduceF, + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, mapF, reduceF, bsonDocumentCodec) then: @@ -102,7 +98,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction def finalizeF = new BsonJavaScript('function(key, value){}') def mapF = new BsonJavaScript('function(){ }') def reduceF = new BsonJavaScript('function(key, values){ }') - def operation = new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS, helper.namespace, + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, mapF, reduceF, bsonDocumentCodec) .filter(filter) .finalizeFunction(finalizeF) @@ -142,7 +138,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction def 'should use the ReadBindings readPreference to set secondaryOk'() { when: - def operation = new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS, helper.namespace, + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) then: @@ -154,13 +150,12 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction def 'should create the expected command'() { when: - def operation = new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, helper.namespace, + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) def expectedCommand = new BsonDocument('mapreduce', new BsonString(helper.namespace.getCollectionName())) .append('map', operation.getMapFunction()) .append('reduce', operation.getReduceFunction()) .append('out', new BsonDocument('inline', new BsonInt32(1))) - .append('maxTimeMS', new 
BsonInt64(100)) then: testOperation(operation, serverVersion, expectedCommand, async, helper.commandResult) @@ -204,8 +199,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction given: def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) - def operation = new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS, - namespace, + def operation = new MapReduceWithInlineResultsOperation(namespace, new BsonJavaScript('function(){ emit( this.str, 1 ); }'), new BsonJavaScript('function(key, values){ return Array.sum(values); }'), bsonDocumentCodec) @@ -242,7 +236,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction }''') appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS, helper.namespace, + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) when: @@ -291,7 +285,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction }''') appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS, helper.namespace, + def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) when: diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy index 57e0cb5f0d7..9363f6a1812 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy @@ -48,7 +48,6 @@ import org.bson.types.ObjectId import spock.lang.IgnoreIf import util.spock.annotations.Slow -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.disableFailPoint import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint @@ -73,7 +72,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should throw IllegalArgumentException for empty list of requests'() { when: - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), [], true, ACKNOWLEDGED, false) + new MixedBulkWriteOperation(getNamespace(), [], true, ACKNOWLEDGED, false) then: thrown(IllegalArgumentException) @@ -81,7 +80,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should have the expected passed values'() { when: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), requests, ordered, writeConcern, retryWrites) + def operation = new MixedBulkWriteOperation(getNamespace(), requests, ordered, writeConcern, retryWrites) .bypassDocumentValidation(bypassValidation) then: @@ -101,7 +100,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when no document with the same id exists, should insert the document'() { given: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new 
MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], ordered, ACKNOWLEDGED, false) when: @@ -121,7 +120,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: def document = new BsonDocument('_id', new BsonInt32(1)) getCollectionHelper().insertDocuments(document) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), [new InsertRequest(document)], ordered, + def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(document)], ordered, ACKNOWLEDGED, false) when: @@ -137,7 +136,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'RawBsonDocument should not generate an _id'() { given: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(RawBsonDocument.parse('{_id: 1}'))], ordered, ACKNOWLEDGED, false) when: @@ -156,7 +155,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents match the query, a remove of one should remove one of them'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new DeleteRequest(new BsonDocument('x', BsonBoolean.TRUE)).multi(false)], ordered, ACKNOWLEDGED, false) @@ -175,7 +174,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true), new Document('x', false)) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new DeleteRequest(new BsonDocument('x', BsonBoolean.TRUE))], ordered, ACKNOWLEDGED, false) @@ -193,7 +192,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when multiple document match the query, update of one should update only one of them'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))), UPDATE).multi(false)], @@ -213,7 +212,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents match the query, update multi should update all of them'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))), UPDATE).multi(true)], ordered, ACKNOWLEDGED, false) @@ -233,7 +232,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: def id = new ObjectId() def query = new BsonDocument('_id', new BsonObjectId(id)) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new 
MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(query, new BsonDocument('$set', new BsonDocument('x', new BsonInt32(2))), UPDATE).upsert(true)], ordered, ACKNOWLEDGED, false) @@ -252,7 +251,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def id = new ObjectId() def query = new BsonDocument('_id', new BsonObjectId(id)) given: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(query, new BsonDocument('$set', new BsonDocument('x', new BsonInt32(2))), UPDATE).upsert(true).multi(true)], ordered, ACKNOWLEDGED, false) @@ -272,7 +271,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents matches the query, update one with upsert should update only one of them'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))), UPDATE).multi(false).upsert(true)], @@ -292,7 +291,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents match the query, update multi with upsert should update all of them'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))), UPDATE).upsert(true).multi(true)], @@ -312,7 +311,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when updating with an empty document, update should throw IllegalArgumentException'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument(), UPDATE)], true, ACKNOWLEDGED, false) @@ -329,7 +328,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when replacing with an empty document, update should not throw IllegalArgumentException'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument(), REPLACE)], true, ACKNOWLEDGED, false) @@ -346,7 +345,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when updating with an invalid document, update should throw IllegalArgumentException'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument('a', new BsonInt32(1)), UPDATE)], true, ACKNOWLEDGED, false) @@ -364,7 +363,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when replacing an invalid document, 
replace should throw IllegalArgumentException'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))), REPLACE)], true, ACKNOWLEDGED, false) @@ -383,7 +382,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ serverVersionLessThan(5, 0) }) def 'when inserting a document with a field starting with a dollar sign, insert should not throw'() { given: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument('$inc', new BsonDocument('x', new BsonInt32(1))))], true, ACKNOWLEDGED, false) @@ -400,7 +399,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when a document contains a key with an illegal character, replacing a document with it should throw IllegalArgumentException'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))), REPLACE) @@ -420,7 +419,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when no document matches the query, a replace with upsert should insert a document'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument('_id', new BsonObjectId(id)) .append('x', new BsonInt32(2)), @@ -441,7 +440,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when a custom _id is upserted it should be in the write result'() { given: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonInt32(0)), new BsonDocument('$set', new BsonDocument('a', new BsonInt32(0))), UPDATE) @@ -473,7 +472,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'unacknowledged upserts with custom _id should not error'() { given: def binding = async ? 
getAsyncSingleConnectionBinding() : getSingleConnectionBinding() - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonInt32(0)), new BsonDocument('$set', new BsonDocument('a', new BsonInt32(0))), UPDATE) @@ -505,7 +504,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE), new BsonDocument('y', new BsonInt32(1)).append('x', BsonBoolean.FALSE), REPLACE).upsert(true)], @@ -526,7 +525,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when a replacement document is 16MB, the document is still replaced'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1)) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(1)) .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])), @@ -547,7 +546,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when two update documents together exceed 16MB, the documents are still updated'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1), new Document('_id', 2)) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(1)) .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])), @@ -573,7 +572,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents together are just below the max message size, the documents are still inserted'() { given: def bsonBinary = new BsonBinary(new byte[16 * 1000 * 1000 - (getCollectionName().length() + 33)]) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [ new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary)), new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary)), @@ -594,7 +593,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents together are just above the max message size, the documents are still inserted'() { given: def bsonBinary = new BsonBinary(new byte[16 * 1000 * 1000 - (getCollectionName().length() + 32)]) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [ new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary)), new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary)), @@ -615,7 +614,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should handle multi-length runs of ordered insert, update, replace, and remove'() { given: 
getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), getTestWrites(), ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(getNamespace(), getTestWrites(), ordered, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -638,13 +637,13 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should handle multi-length runs of UNACKNOWLEDGED insert, update, replace, and remove'() { given: getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), getTestWrites(), ordered, UNACKNOWLEDGED, + def operation = new MixedBulkWriteOperation(getNamespace(), getTestWrites(), ordered, UNACKNOWLEDGED, false) def binding = async ? getAsyncSingleConnectionBinding() : getSingleConnectionBinding() when: def result = execute(operation, binding) - execute(new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + execute(new MixedBulkWriteOperation(namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(9)))], true, ACKNOWLEDGED, false,), binding) then: @@ -674,7 +673,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat (1..numberOfWrites).each { writes.add(new InsertRequest(new BsonDocument())) } - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), writes, ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(getNamespace(), writes, ordered, ACKNOWLEDGED, false) when: execute(operation, binding) @@ -697,7 +696,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat writeOperations.add(upsert) writeOperations.add(new DeleteRequest(new BsonDocument('key', new BsonInt32(it)))) } - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), writeOperations, ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(getNamespace(), writeOperations, ordered, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -712,7 +711,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'error details should have correct index on ordered write failure'() { given: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))), @@ -735,7 +734,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'error details should have correct index on unordered write failure'() { given: getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)), new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))), @@ -764,7 +763,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat for (int i = 0; i < 2000; i++) { inserts.add(new InsertRequest(new BsonDocument('_id', new BsonInt32(i)))) } - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), inserts, false, ACKNOWLEDGED, 
false) + def operation = new MixedBulkWriteOperation(getNamespace(), inserts, false, ACKNOWLEDGED, false) when: execute(operation, async) @@ -786,7 +785,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat for (int i = 0; i < 2000; i++) { inserts.add(new InsertRequest(new BsonDocument('_id', new BsonInt32(i)))) } - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), inserts, true, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(getNamespace(), inserts, true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -806,7 +805,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ !isDiscoverableReplicaSet() }) def 'should throw bulk write exception with a write concern error when wtimeout is exceeded'() { given: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], false, new WriteConcern(5, 1), false) when: @@ -825,7 +824,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when there is a duplicate key error and a write concern error, both should be reported'() { given: getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument('_id', new BsonInt32(7))), new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // duplicate key ], false, new WriteConcern(4, 1), false) @@ -848,7 +847,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should throw on write concern error on multiple failpoint'() { given: getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new DeleteRequest(new BsonDocument('_id', new BsonInt32(2))), // existing key new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // existing (duplicate) key ], true, ACKNOWLEDGED, true) @@ -880,7 +879,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should throw IllegalArgumentException when passed an empty bulk operation'() { when: - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), [], ordered, UNACKNOWLEDGED, false) + new MixedBulkWriteOperation(getNamespace(), [], ordered, UNACKNOWLEDGED, false) then: thrown(IllegalArgumentException) @@ -892,7 +891,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ serverVersionLessThan(3, 2) }) def 'should throw if bypassDocumentValidation is set and writeConcern is UNACKNOWLEDGED'() { given: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], true, UNACKNOWLEDGED, false) .bypassDocumentValidation(bypassDocumentValidation) @@ -909,7 +908,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ serverVersionLessThan(3, 4) }) def 'should throw if collation is set and write is UNACKNOWLEDGED'() { given: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new 
DeleteRequest(BsonDocument.parse('{ level: 9 }')).collation(defaultCollation)], true, UNACKNOWLEDGED, false) when: @@ -929,7 +928,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def collectionHelper = getCollectionHelper(namespace) collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions().validationOptions( new ValidationOptions().validator(gte('level', 10)))) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + def operation = new MixedBulkWriteOperation(namespace, [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], ordered, ACKNOWLEDGED, false) when: @@ -965,7 +964,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat new ValidationOptions().validator(gte('level', 10)))) collectionHelper.insertDocuments(BsonDocument.parse('{ x: true, level: 10}')) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + def operation = new MixedBulkWriteOperation(namespace, [new UpdateRequest(BsonDocument.parse('{x: true}'), BsonDocument.parse('{$inc: {level: -1}}'), UPDATE).multi(false)], ordered, ACKNOWLEDGED, false) @@ -993,7 +992,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', 1), new Document('y', 1), new Document('z', 1)) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, requests, false, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(namespace, requests, false, ACKNOWLEDGED, false) when: execute(operation, async) @@ -1024,7 +1023,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def requests = [new DeleteRequest(BsonDocument.parse('{str: "FOO"}}')).collation(caseInsensitiveCollation), new UpdateRequest(BsonDocument.parse('{str: "BAR"}}'), BsonDocument.parse('{str: "bar"}}'), REPLACE) .collation(caseInsensitiveCollation)] - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, requests, false, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(namespace, requests, false, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -1042,7 +1041,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def testWrites = getTestWrites() Collections.shuffle(testWrites) getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), testWrites, true, ACKNOWLEDGED, true) + def operation = new MixedBulkWriteOperation(getNamespace(), testWrites, true, ACKNOWLEDGED, true) when: if (serverVersionAtLeast(3, 6) && isDiscoverableReplicaSet()) { @@ -1085,7 +1084,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: def testWrites = getTestWrites() getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), testWrites, true, ACKNOWLEDGED, true) + def operation = new MixedBulkWriteOperation(getNamespace(), testWrites, true, ACKNOWLEDGED, true) when: enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse(failPoint)) @@ -1110,7 +1109,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: def testWrites = getTestWrites() getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), testWrites, 
true, UNACKNOWLEDGED, true) + def operation = new MixedBulkWriteOperation(getNamespace(), testWrites, true, UNACKNOWLEDGED, true) when: enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse(failPoint)) @@ -1134,7 +1133,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should retry if the connection initially fails'() { when: def cannedResult = BsonDocument.parse('{ok: 1.0, n: 1}') - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], true, ACKNOWLEDGED, true) def expectedCommand = new BsonDocument('insert', new BsonString(getNamespace().getCollectionName())) .append('ordered', BsonBoolean.TRUE) @@ -1149,7 +1148,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should throw original error when retrying and failing'() { given: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], true, ACKNOWLEDGED, true) def originalException = new MongoSocketException('Some failure', new ServerAddress()) @@ -1176,7 +1175,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ serverVersionLessThan(3, 6) }) def 'should not request retryable write for multi updates or deletes'() { given: - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, getNamespace(), writes, true, ACKNOWLEDGED, true) + def operation = new MixedBulkWriteOperation(getNamespace(), writes, true, ACKNOWLEDGED, true) when: executeWithSession(operation, async) @@ -1224,7 +1223,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat .multi(true) .arrayFilters([BsonDocument.parse('{"i.b": 1}')]), ] - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, requests, true, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(namespace, requests, true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -1246,7 +1245,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat new UpdateRequest(new BsonDocument(), BsonDocument.parse('{ $set: {"y.$[i].b": 2}}'), UPDATE) .arrayFilters([BsonDocument.parse('{"i.b": 3}')]) ] - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, requests, true, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(namespace, requests, true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -1265,7 +1264,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat new UpdateRequest(new BsonDocument(), BsonDocument.parse('{ $set: {"y.$[i].b": 2}}'), UPDATE) .arrayFilters([BsonDocument.parse('{"i.b": 3}')]) ] - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, requests, true, UNACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(namespace, requests, true, UNACKNOWLEDGED, false) when: execute(operation, async) @@ -1284,7 +1283,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat new UpdateRequest(new BsonDocument(), BsonDocument.parse('{ $set: {"y.$[i].b": 2}}'), UPDATE) .hint(BsonDocument.parse('{ _id: 1 }')) ] - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, requests, true, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(namespace, 
requests, true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -1303,7 +1302,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat new UpdateRequest(new BsonDocument(), BsonDocument.parse('{ $set: {"y.$[i].b": 2}}'), UPDATE) .hintString('_id') ] - def operation = new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, requests, true, UNACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(namespace, requests, true, UNACKNOWLEDGED, false) when: execute(operation, async) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy index 83cc3a13b24..043c6de48a3 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy @@ -25,8 +25,6 @@ import org.bson.Document import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet @@ -37,7 +35,7 @@ import static com.mongodb.ClusterFixture.serverVersionLessThan class RenameCollectionOperationSpecification extends OperationFunctionalSpecification { def cleanup() { - new DropCollectionOperation(TIMEOUT_SETTINGS_WITH_TIMEOUT, new MongoNamespace(getDatabaseName(), 'newCollection'), + new DropCollectionOperation(new MongoNamespace(getDatabaseName(), 'newCollection'), WriteConcern.ACKNOWLEDGED).execute(getBinding()) } @@ -45,7 +43,7 @@ class RenameCollectionOperationSpecification extends OperationFunctionalSpecific given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) assert collectionNameExists(getCollectionName()) - def operation = new RenameCollectionOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new RenameCollectionOperation(getNamespace(), new MongoNamespace(getDatabaseName(), 'newCollection'), null) when: @@ -63,7 +61,7 @@ class RenameCollectionOperationSpecification extends OperationFunctionalSpecific given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) assert collectionNameExists(getCollectionName()) - def operation = new RenameCollectionOperation(TIMEOUT_SETTINGS, getNamespace(), getNamespace(), null) + def operation = new RenameCollectionOperation(getNamespace(), getNamespace(), null) when: execute(operation, async) @@ -81,7 +79,7 @@ class RenameCollectionOperationSpecification extends OperationFunctionalSpecific given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) assert collectionNameExists(getCollectionName()) - def operation = new RenameCollectionOperation(TIMEOUT_SETTINGS, getNamespace(), + def operation = new RenameCollectionOperation(getNamespace(), new MongoNamespace(getDatabaseName(), 'newCollection'), new WriteConcern(5)) when: @@ -97,7 +95,7 @@ class RenameCollectionOperationSpecification extends OperationFunctionalSpecific } def collectionNameExists(String collectionName) { - def cursor = 
new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, new DocumentCodec()).execute(getBinding()) + def cursor = new ListCollectionsOperation(databaseName, new DocumentCodec()).execute(getBinding()) if (!cursor.hasNext()) { return false } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java index 70ed0ba2e2a..4c7970afad8 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java @@ -477,7 +477,7 @@ private Event getNextEvent(final Iterator eventsIterator, final } private static void executeAdminCommand(final BsonDocument command) { - new CommandReadOperation<>(TIMEOUT_SETTINGS, "admin", command, new BsonDocumentCodec()) + new CommandReadOperation<>("admin", command, new BsonDocumentCodec()) .execute(ClusterFixture.getBinding()); } diff --git a/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java b/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java index c38ef8072fe..431db0d6475 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java @@ -15,7 +15,6 @@ */ package com.mongodb.internal.mockito; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; @@ -38,7 +37,7 @@ final class InsufficientStubbingDetectorDemoTest { @BeforeEach void beforeEach() { - operation = new ListCollectionsOperation<>(TimeoutSettings.DEFAULT, "db", new BsonDocumentCodec()); + operation = new ListCollectionsOperation<>("db", new BsonDocumentCodec()); } @Test diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy index ce8569e5646..21ae1c4dfb9 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy @@ -26,7 +26,6 @@ import com.mongodb.internal.binding.WriteBinding import com.mongodb.internal.session.SessionContext import static com.mongodb.ClusterFixture.OPERATION_CONTEXT -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS class CommitTransactionOperationUnitSpecification extends OperationUnitSpecification { def 'should add UnknownTransactionCommitResult error label to MongoTimeoutException'() { @@ -39,7 +38,7 @@ class CommitTransactionOperationUnitSpecification extends OperationUnitSpecifica getWriteConnectionSource() >> { throw new MongoTimeoutException('Time out!') } getOperationContext() >> OPERATION_CONTEXT.withSessionContext(sessionContext) } - def operation = new CommitTransactionOperation(TIMEOUT_SETTINGS, WriteConcern.ACKNOWLEDGED) + def operation = new CommitTransactionOperation(WriteConcern.ACKNOWLEDGED) when: operation.execute(writeBinding) @@ -61,7 +60,7 @@ class CommitTransactionOperationUnitSpecification extends OperationUnitSpecifica } getOperationContext() >> 
OPERATION_CONTEXT.withSessionContext(sessionContext) } - def operation = new CommitTransactionOperation(TIMEOUT_SETTINGS, WriteConcern.ACKNOWLEDGED) + def operation = new CommitTransactionOperation(WriteConcern.ACKNOWLEDGED) def callback = new FutureResultCallback() when: diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy index f349a45d3cc..021b392593c 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy @@ -27,21 +27,20 @@ import org.bson.Document import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CursorType.TailableAwait class FindOperationUnitSpecification extends OperationUnitSpecification { def 'should find with correct command'() { when: - def operation = new FindOperation(TIMEOUT_SETTINGS, namespace, new BsonDocumentCodec()) + def operation = new FindOperation(namespace, new BsonDocumentCodec()) def expectedCommand = new BsonDocument('find', new BsonString(namespace.getCollectionName())) then: testOperation(operation, [3, 2, 0], expectedCommand, async, commandResult) // Overrides when: - operation = new FindOperation(TIMEOUT_SETTINGS, namespace, new BsonDocumentCodec()) + operation = new FindOperation(namespace, new BsonDocumentCodec()) .filter(new BsonDocument('a', BsonBoolean.TRUE)) .projection(new BsonDocument('x', new BsonInt32(1))) .skip(2) @@ -105,7 +104,7 @@ class FindOperationUnitSpecification extends OperationUnitSpecification { def 'should use the readPreference to set secondaryOk for commands'() { when: - def operation = new FindOperation(TIMEOUT_SETTINGS, namespace, new DocumentCodec()) + def operation = new FindOperation(namespace, new DocumentCodec()) then: testOperationSecondaryOk(operation, [3, 2, 0], readPreference, async, commandResult) diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java index c62f83e50f7..12a964db625 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java @@ -24,7 +24,6 @@ import com.mongodb.connection.ServerDescription; import com.mongodb.connection.ServerId; import com.mongodb.connection.ServerType; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.binding.ConnectionSource; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.internal.connection.Connection; @@ -58,7 +57,7 @@ final class ListCollectionsOperationTest { @BeforeEach void beforeEach() { MongoNamespace namespace = new MongoNamespace("db", "coll"); - operation = new ListCollectionsOperation<>(TimeoutSettings.DEFAULT, namespace.getDatabaseName(), new BsonDocumentCodec()); + operation = new ListCollectionsOperation<>(namespace.getDatabaseName(), new BsonDocumentCodec()); mocks = mocks(namespace); } diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy index b912b9bd26d..e372298c465 100644 --- 
a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy @@ -41,7 +41,7 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit -import static com.mongodb.ClusterFixture.createNewOperationContext +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT class OperationUnitSpecification extends Specification { @@ -96,7 +96,7 @@ class OperationUnitSpecification extends Specification { def testSyncOperation(operation, List serverVersion, result, Boolean checkCommand=true, BsonDocument expectedCommand=null, Boolean checkSecondaryOk=false, ReadPreference readPreference=ReadPreference.primary()) { - def operationContext = createNewOperationContext(operation.getTimeoutSettings()) + def operationContext = OPERATION_CONTEXT .withSessionContext(Stub(SessionContext) { hasActiveTransaction() >> false getReadConcern() >> ReadConcern.DEFAULT @@ -153,7 +153,7 @@ class OperationUnitSpecification extends Specification { Boolean checkCommand=true, BsonDocument expectedCommand=null, Boolean checkSecondaryOk=false, ReadPreference readPreference=ReadPreference.primary()) { - def operationContext = createNewOperationContext(operation.getTimeoutSettings()) + def operationContext = OPERATION_CONTEXT .withSessionContext(Stub(SessionContext) { hasActiveTransaction() >> false getReadConcern() >> ReadConcern.DEFAULT diff --git a/driver-legacy/src/main/com/mongodb/DB.java b/driver-legacy/src/main/com/mongodb/DB.java index 4ddebd787a4..7b47cfb8515 100644 --- a/driver-legacy/src/main/com/mongodb/DB.java +++ b/driver-legacy/src/main/com/mongodb/DB.java @@ -196,7 +196,7 @@ public DBCollection getCollection(final String name) { */ public void dropDatabase() { try { - getExecutor().execute(new DropDatabaseOperation(getTimeoutSettings(), getName(), getWriteConcern()), getReadConcern()); + getExecutor().execute(new DropDatabaseOperation(getName(), getWriteConcern()), getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); } @@ -224,7 +224,12 @@ public Set getCollectionNames() { mongo.getMongoClientOptions().getRetryReads(), DB.this.getTimeoutSettings()) { @Override public ReadOperation> asReadOperation() { - return new ListCollectionsOperation<>(super.getTimeoutSettings(), name, commandCodec).nameOnly(true); + return new ListCollectionsOperation<>(name, commandCodec).nameOnly(true); + } + + @Override + protected OperationExecutor getExecutor() { + return executor; } }.map(result -> (String) result.get("name")).into(new ArrayList<>()); Collections.sort(collectionNames); @@ -304,7 +309,7 @@ public DBCollection createView(final String viewName, final String viewOn, final try { notNull("options", options); DBCollection view = getCollection(viewName); - executor.execute(new CreateViewOperation(getTimeoutSettings(), name, viewName, viewOn, + executor.execute(new CreateViewOperation(name, viewName, viewOn, view.preparePipeline(pipeline), writeConcern) .collation(options.getCollation()), getReadConcern()); return view; @@ -381,7 +386,7 @@ private CreateCollectionOperation getCreateCollectionOperation(final String coll validationAction = ValidationAction.fromString((String) options.get("validationAction")); } Collation collation = DBObjectCollationHelper.createCollationFromOptions(options); - return new CreateCollectionOperation(getTimeoutSettings(), getName(), collectionName, + return new CreateCollectionOperation(getName(), collectionName, 
getWriteConcern()) .capped(capped) .collation(collation) @@ -516,8 +521,8 @@ public String toString() { CommandResult executeCommand(final BsonDocument commandDocument, final ReadPreference readPreference) { return new CommandResult(executor.execute( - new CommandReadOperation<>(getTimeoutSettings(), getName(), commandDocument, - new BsonDocumentCodec()), readPreference, getReadConcern()), getDefaultDBObjectCodec()); + new CommandReadOperation<>(getName(), commandDocument, + new BsonDocumentCodec()), readPreference, getReadConcern(), null), getDefaultDBObjectCodec()); } OperationExecutor getExecutor() { diff --git a/driver-legacy/src/main/com/mongodb/DBCollection.java b/driver-legacy/src/main/com/mongodb/DBCollection.java index 8a02c4ec8a9..54eb354a877 100644 --- a/driver-legacy/src/main/com/mongodb/DBCollection.java +++ b/driver-legacy/src/main/com/mongodb/DBCollection.java @@ -85,6 +85,7 @@ import static com.mongodb.MongoNamespace.checkCollectionNameValidity; import static com.mongodb.ReadPreference.primary; import static com.mongodb.ReadPreference.primaryPreferred; +import static com.mongodb.TimeoutSettingsHelper.createTimeoutSettings; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.Locks.withLock; import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE; @@ -346,7 +347,7 @@ private Encoder toEncoder(@Nullable final DBEncoder dbEncoder) { private WriteResult insert(final List insertRequestList, final WriteConcern writeConcern, final boolean continueOnError, @Nullable final Boolean bypassDocumentValidation) { - return executeWriteOperation(createBulkWriteOperationForInsert(getTimeoutSettings(), getNamespace(), + return executeWriteOperation(createBulkWriteOperationForInsert(getNamespace(), !continueOnError, writeConcern, retryWrites, insertRequestList).bypassDocumentValidation(bypassDocumentValidation)); } @@ -430,7 +431,7 @@ private WriteResult replaceOrInsert(final DBObject obj, final Object id, final W UpdateRequest replaceRequest = new UpdateRequest(wrap(filter), wrap(obj, objectCodec), Type.REPLACE).upsert(true); - return executeWriteOperation(createBulkWriteOperationForReplace(getTimeoutSettings(), getNamespace(), false, + return executeWriteOperation(createBulkWriteOperationForReplace(getNamespace(), false, writeConcern, retryWrites, singletonList(replaceRequest))); } @@ -583,9 +584,9 @@ public WriteResult update(final DBObject query, final DBObject update, final DBC .collation(options.getCollation()) .arrayFilters(wrapAllowNull(options.getArrayFilters(), options.getEncoder())); LegacyMixedBulkWriteOperation operation = (updateType == UPDATE - ? createBulkWriteOperationForUpdate(getTimeoutSettings(), getNamespace(), true, writeConcern, retryWrites, + ? createBulkWriteOperationForUpdate(getNamespace(), true, writeConcern, retryWrites, singletonList(updateRequest)) - : createBulkWriteOperationForReplace(getTimeoutSettings(), getNamespace(), true, writeConcern, retryWrites, + : createBulkWriteOperationForReplace(getNamespace(), true, writeConcern, retryWrites, singletonList(updateRequest))) .bypassDocumentValidation(options.getBypassDocumentValidation()); return executeWriteOperation(operation); @@ -658,7 +659,7 @@ public WriteResult remove(final DBObject query, final DBCollectionRemoveOptions WriteConcern optionsWriteConcern = options.getWriteConcern(); WriteConcern writeConcern = optionsWriteConcern != null ? 
optionsWriteConcern : getWriteConcern(); DeleteRequest deleteRequest = new DeleteRequest(wrap(query, options.getEncoder())).collation(options.getCollation()); - return executeWriteOperation(createBulkWriteOperationForDelete(getTimeoutSettings(), getNamespace(), false, + return executeWriteOperation(createBulkWriteOperationForDelete(getNamespace(), false, writeConcern, retryWrites, singletonList(deleteRequest))); } @@ -917,7 +918,7 @@ public long getCount(@Nullable final DBObject query) { public long getCount(@Nullable final DBObject query, final DBCollectionCountOptions options) { notNull("countOptions", options); CountOperation operation = new CountOperation( - getTimeoutSettings(options.getMaxTime(MILLISECONDS)), getNamespace()) + getNamespace()) .skip(options.getSkip()) .limit(options.getLimit()) .collation(options.getCollation()) @@ -936,8 +937,9 @@ public long getCount(@Nullable final DBObject query, final DBCollectionCountOpti } ReadPreference optionsReadPreference = options.getReadPreference(); ReadConcern optionsReadConcern = options.getReadConcern(); - return executor.execute(operation, optionsReadPreference != null ? optionsReadPreference : getReadPreference(), - optionsReadConcern != null ? optionsReadConcern : getReadConcern()); + return getExecutor(createTimeoutSettings(getTimeoutSettings(), options)) + .execute(operation, optionsReadPreference != null ? optionsReadPreference : getReadPreference(), + optionsReadConcern != null ? optionsReadConcern : getReadConcern(), null); } /** @@ -963,7 +965,7 @@ public DBCollection rename(final String newName) { */ public DBCollection rename(final String newName, final boolean dropTarget) { try { - executor.execute(new RenameCollectionOperation(getTimeoutSettings(), getNamespace(), + executor.execute(new RenameCollectionOperation(getNamespace(), new MongoNamespace(getNamespace().getDatabaseName(), newName), getWriteConcern()) .dropTarget(dropTarget), getReadConcern()); return getDB().getCollection(newName); @@ -1037,11 +1039,17 @@ public List distinct(final String fieldName, final DBCollectionDistinctOptions o retryReads, DBCollection.this.getTimeoutSettings()) { @Override public ReadOperation> asReadOperation() { - return new DistinctOperation<>(super.getTimeoutSettings(), getNamespace(), fieldName, new BsonValueCodec()) + return new DistinctOperation<>(getNamespace(), fieldName, new BsonValueCodec()) .filter(wrapAllowNull(options.getFilter())) .collation(options.getCollation()) .retryReads(retryReads); } + + @Override + protected OperationExecutor getExecutor() { + return executor; + } + }.map(bsonValue -> { if (bsonValue == null) { return null; @@ -1120,7 +1128,7 @@ public MapReduceOutput mapReduce(final MapReduceCommand command) { if (command.getOutputType() == MapReduceCommand.OutputType.INLINE) { MapReduceWithInlineResultsOperation operation = new MapReduceWithInlineResultsOperation<>( - getTimeoutSettings(command.getMaxTime(MILLISECONDS)), getNamespace(), new BsonJavaScript(command.getMap()), + getNamespace(), new BsonJavaScript(command.getMap()), new BsonJavaScript(command.getReduce()), getDefaultDBObjectCodec()) .filter(wrapAllowNull(command.getQuery())) .limit(command.getLimit()) @@ -1135,7 +1143,9 @@ public MapReduceOutput mapReduce(final MapReduceCommand command) { if (command.getFinalize() != null) { operation.finalizeFunction(new BsonJavaScript(command.getFinalize())); } - MapReduceBatchCursor executionResult = executor.execute(operation, readPreference, getReadConcern()); + MapReduceBatchCursor executionResult = + 
getExecutor(createTimeoutSettings(getTimeoutSettings(), command)) + .execute(operation, readPreference, getReadConcern(), null); return new MapReduceOutput(command.toDBObject(), executionResult); } else { String action; @@ -1155,7 +1165,6 @@ public MapReduceOutput mapReduce(final MapReduceCommand command) { MapReduceToCollectionOperation operation = new MapReduceToCollectionOperation( - getTimeoutSettings(command.getMaxTime(MILLISECONDS)), getNamespace(), new BsonJavaScript(command.getMap()), new BsonJavaScript(command.getReduce()), command.getOutputTarget(), getWriteConcern()) .filter(wrapAllowNull(command.getQuery())) @@ -1227,13 +1236,13 @@ public Cursor aggregate(final List pipeline, final Aggregati if (outCollection != null) { AggregateToCollectionOperation operation = new AggregateToCollectionOperation( - getTimeoutSettings(options.getMaxTime(MILLISECONDS)), getNamespace(), stages, getReadConcern(), getWriteConcern()) .allowDiskUse(options.getAllowDiskUse()) .bypassDocumentValidation(options.getBypassDocumentValidation()) .collation(options.getCollation()); try { - executor.execute(operation, getReadPreference(), getReadConcern()); + getExecutor(createTimeoutSettings(getTimeoutSettings(), options)) + .execute(operation, getReadPreference(), getReadConcern(), null); result = new DBCursor(database.getCollection(outCollection.asString().getValue()), new BasicDBObject(), new DBCollectionFindOptions().readPreference(primary()).collation(options.getCollation())); } catch (MongoWriteConcernException e) { @@ -1241,13 +1250,15 @@ public Cursor aggregate(final List pipeline, final Aggregati } } else { AggregateOperation operation = new AggregateOperation<>( - getTimeoutSettings(options.getMaxTime(MILLISECONDS)), getNamespace(), stages, + getNamespace(), stages, getDefaultDBObjectCodec()) .allowDiskUse(options.getAllowDiskUse()) .batchSize(options.getBatchSize()) .collation(options.getCollation()) .retryReads(retryReads); - BatchCursor cursor1 = executor.execute(operation, readPreference, getReadConcern()); + BatchCursor cursor1 = + getExecutor(createTimeoutSettings(getTimeoutSettings(), options)) + .execute(operation, readPreference, getReadConcern(), null); result = new MongoCursorAdapter(new MongoBatchCursorAdapter<>(cursor1)); } return result; @@ -1265,13 +1276,13 @@ public Cursor aggregate(final List pipeline, final Aggregati */ public CommandResult explainAggregate(final List pipeline, final AggregationOptions options) { AggregateOperation operation = new AggregateOperation<>( - getTimeoutSettings(options.getMaxTime(MILLISECONDS)), getNamespace(), + getNamespace(), preparePipeline(pipeline), new BsonDocumentCodec()) .allowDiskUse(options.getAllowDiskUse()) .collation(options.getCollation()) .retryReads(retryReads); - return new CommandResult(executor.execute(operation.asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()), - primaryPreferred(), getReadConcern()), getDefaultDBObjectCodec()); + return new CommandResult(executor.execute( + operation.asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()), primaryPreferred(), getReadConcern(), null), getDefaultDBObjectCodec()); } List preparePipeline(final List pipeline) { @@ -1654,9 +1665,8 @@ public DBObject findAndModify(final DBObject query, final DBCollectionFindAndMod WriteConcern optionsWriteConcern = options.getWriteConcern(); WriteConcern writeConcern = optionsWriteConcern != null ? 
optionsWriteConcern : getWriteConcern(); WriteOperation operation; - TimeoutSettings timeoutSettings = getTimeoutSettings(options.getMaxTime(MILLISECONDS)); if (options.isRemove()) { - operation = new FindAndDeleteOperation<>(timeoutSettings, getNamespace(), writeConcern, retryWrites, objectCodec) + operation = new FindAndDeleteOperation<>(getNamespace(), writeConcern, retryWrites, objectCodec) .filter(wrapAllowNull(query)) .projection(wrapAllowNull(options.getProjection())) .sort(wrapAllowNull(options.getSort())) @@ -1667,7 +1677,7 @@ public DBObject findAndModify(final DBObject query, final DBCollectionFindAndMod throw new IllegalArgumentException("update can not be null unless it's a remove"); } if (!update.keySet().isEmpty() && update.keySet().iterator().next().charAt(0) == '$') { - operation = new FindAndUpdateOperation<>(timeoutSettings, getNamespace(), writeConcern, retryWrites, + operation = new FindAndUpdateOperation<>(getNamespace(), writeConcern, retryWrites, objectCodec, wrap(update)) .filter(wrap(query)) .projection(wrapAllowNull(options.getProjection())) @@ -1678,7 +1688,7 @@ objectCodec, wrap(update)) .collation(options.getCollation()) .arrayFilters(wrapAllowNull(options.getArrayFilters(), (Encoder) null)); } else { - operation = new FindAndReplaceOperation<>(timeoutSettings, getNamespace(), writeConcern, retryWrites, + operation = new FindAndReplaceOperation<>(getNamespace(), writeConcern, retryWrites, objectCodec, wrap(update)) .filter(wrap(query)) .projection(wrapAllowNull(options.getProjection())) @@ -1691,7 +1701,7 @@ objectCodec, wrap(update)) } try { - return executor.execute(operation, getReadConcern()); + return getExecutor(createTimeoutSettings(getTimeoutSettings(), options)).execute(operation, getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); } @@ -1791,7 +1801,7 @@ public ReadConcern getReadConcern() { */ public void drop() { try { - executor.execute(new DropCollectionOperation(getTimeoutSettings(), getNamespace(), + executor.execute(new DropCollectionOperation(getNamespace(), getWriteConcern()), getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); @@ -1864,9 +1874,14 @@ public List getIndexInfo() { DBCollection.this.getTimeoutSettings()) { @Override public ReadOperation> asReadOperation() { - return new ListIndexesOperation<>(super.getTimeoutSettings(), getNamespace(), getDefaultDBObjectCodec()) + return new ListIndexesOperation<>(getNamespace(), getDefaultDBObjectCodec()) .retryReads(retryReads); } + + @Override + public OperationExecutor getExecutor() { + return executor; + } }.into(new ArrayList<>()); } @@ -1880,7 +1895,7 @@ public ReadOperation> asReadOperation() { */ public void dropIndex(final DBObject index) { try { - executor.execute(new DropIndexOperation(getTimeoutSettings(), getNamespace(), wrap(index), + executor.execute(new DropIndexOperation(getNamespace(), wrap(index), getWriteConcern()), getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); @@ -1896,7 +1911,7 @@ public void dropIndex(final DBObject index) { */ public void dropIndex(final String indexName) { try { - executor.execute(new DropIndexOperation(getTimeoutSettings(), getNamespace(), indexName, + executor.execute(new DropIndexOperation(getNamespace(), indexName, getWriteConcern()), getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); @@ -2011,7 +2026,7 @@ BulkWriteResult executeBulkWriteOperation(final boolean 
ordered, final Boolean b final List writeRequests, final WriteConcern writeConcern) { try { - return translateBulkWriteResult(executor.execute(new MixedBulkWriteOperation(getTimeoutSettings(), + return translateBulkWriteResult(executor.execute(new MixedBulkWriteOperation( getNamespace(), translateWriteRequestsToNew(writeRequests), ordered, writeConcern, false) .bypassDocumentValidation(bypassDocumentValidation), getReadConcern()), getObjectCodec()); } catch (MongoBulkWriteException e) { @@ -2124,7 +2139,7 @@ private CreateIndexesOperation createIndexOperation(final DBObject key, final DB if (options.containsField("collation")) { request.collation(DBObjectCollationHelper.createCollationFromOptions(options)); } - return new CreateIndexesOperation(getTimeoutSettings(), getNamespace(), singletonList(request), writeConcern); + return new CreateIndexesOperation(getNamespace(), singletonList(request), writeConcern); } Codec getObjectCodec() { @@ -2185,18 +2200,10 @@ BsonDocument wrap(final DBObject document, @Nullable final Encoder enc } } - private TimeoutSettings getTimeoutSettings(){ + TimeoutSettings getTimeoutSettings(){ return database.getTimeoutSettings(); } - private TimeoutSettings getTimeoutSettings(final long maxTimeMS){ - return getTimeoutSettings().withMaxTimeMS(maxTimeMS); - } - - TimeoutSettings getTimeoutSettings(final long maxTimeMS, final long maxAwaitTimeMS){ - return getTimeoutSettings().withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS); - } - static WriteConcernException createWriteConcernException(final MongoWriteConcernException e) { return new WriteConcernException(new BsonDocument("code", new BsonInt32(e.getWriteConcernError().getCode())) .append("errmsg", new BsonString(e.getWriteConcernError().getMessage())), @@ -2204,4 +2211,8 @@ static WriteConcernException createWriteConcernException(final MongoWriteConcern e.getWriteResult()); } + private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); + } + } diff --git a/driver-legacy/src/main/com/mongodb/DBCursor.java b/driver-legacy/src/main/com/mongodb/DBCursor.java index 8f17afe92fb..9b91bad5984 100644 --- a/driver-legacy/src/main/com/mongodb/DBCursor.java +++ b/driver-legacy/src/main/com/mongodb/DBCursor.java @@ -36,6 +36,7 @@ import java.util.concurrent.TimeUnit; import static com.mongodb.MongoClient.getDefaultCodecRegistry; +import static com.mongodb.TimeoutSettingsHelper.createTimeoutSettings; import static com.mongodb.assertions.Assertions.notNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -370,9 +371,9 @@ public DBCursor maxTime(final long maxTime, final TimeUnit timeUnit) { * @mongodb.server.release 3.0 */ public DBObject explain() { - return executor.execute(getQueryOperation(collection.getObjectCodec()) - .asExplainableOperation(null, getDefaultCodecRegistry().get(DBObject.class)), - getReadPreference(), getReadConcern()); + return executor.execute( + getQueryOperation(collection.getObjectCodec()) + .asExplainableOperation(null, getDefaultCodecRegistry().get(DBObject.class)), getReadPreference(), getReadConcern(), null); } /** @@ -414,7 +415,6 @@ public DBCursor partial(final boolean partial) { private FindOperation getQueryOperation(final Decoder decoder) { return new FindOperation<>( - collection.getTimeoutSettings(findOptions.getMaxTime(MILLISECONDS), findOptions.getMaxAwaitTime(MILLISECONDS)), collection.getNamespace(), decoder) .filter(collection.wrapAllowNull(filter)) .batchSize(findOptions.getBatchSize()) @@ 
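[Reviewer note, not part of the patch] The recurring change in DBCollection above is that a per-call maxTime no longer rides inside each operation object; it is folded into TimeoutSettings and used to derive a one-off executor via executor.withTimeoutSettings(...). A minimal standalone sketch of that shape, using simplified stand-in types rather than the driver's real internals:

    import java.util.function.LongSupplier;

    final class PerCallExecutorSketch {
        // stand-in for com.mongodb.internal.TimeoutSettings
        record Timeouts(long maxTimeMS) {
            Timeouts withMaxTimeMS(final long newMaxTimeMS) {
                return new Timeouts(newMaxTimeMS);
            }
        }

        // stand-in for OperationExecutor: withTimeoutSettings returns a copy bound to the new timeouts
        record Executor(Timeouts timeouts) {
            Executor withTimeoutSettings(final Timeouts newTimeouts) {
                return new Executor(newTimeouts);
            }
            long execute(final LongSupplier operation) {
                // the real executor would enforce timeouts() around the operation here
                return operation.getAsLong();
            }
        }

        public static void main(final String[] args) {
            Executor defaultExecutor = new Executor(new Timeouts(0));
            long optionsMaxTimeMS = 5_000; // e.g. what DBCollectionCountOptions.getMaxTime(MILLISECONDS) would return
            Executor perCall = defaultExecutor.withTimeoutSettings(
                    defaultExecutor.timeouts().withMaxTimeMS(optionsMaxTimeMS));
            System.out.println(perCall.execute(() -> 42L)); // the operation itself carries no timeout state
        }
    }

The new private getExecutor(TimeoutSettings) helper and the TimeoutSettingsHelper introduced below encapsulate exactly this derivation.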
-786,7 +786,10 @@ public String toString() { } private void initializeCursor(final FindOperation operation) { - cursor = new MongoBatchCursorAdapter<>(executor.execute(operation, getReadPreference(), getReadConcern())); + cursor = + new MongoBatchCursorAdapter<>(executor + .withTimeoutSettings(createTimeoutSettings(collection.getTimeoutSettings(), findOptions)) + .execute(operation, getReadPreference(), getReadConcern(), null)); ServerCursor serverCursor = cursor.getServerCursor(); if (isCursorFinalizerEnabled() && serverCursor != null) { optionalCleaner = DBCursorCleaner.create(collection.getDB().getMongoClient(), collection.getNamespace(), diff --git a/driver-legacy/src/main/com/mongodb/LegacyMixedBulkWriteOperation.java b/driver-legacy/src/main/com/mongodb/LegacyMixedBulkWriteOperation.java index a2c7f6180cb..4d8eb22cb7a 100644 --- a/driver-legacy/src/main/com/mongodb/LegacyMixedBulkWriteOperation.java +++ b/driver-legacy/src/main/com/mongodb/LegacyMixedBulkWriteOperation.java @@ -19,7 +19,6 @@ import com.mongodb.bulk.BulkWriteError; import com.mongodb.bulk.BulkWriteResult; import com.mongodb.bulk.WriteConcernError; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.binding.WriteBinding; import com.mongodb.internal.bulk.DeleteRequest; import com.mongodb.internal.bulk.InsertRequest; @@ -48,7 +47,6 @@ * Operation for bulk writes for the legacy API. */ final class LegacyMixedBulkWriteOperation implements WriteOperation { - private final TimeoutSettings timeoutSettings; private final WriteConcern writeConcern; private final MongoNamespace namespace; private final List writeRequests; @@ -57,41 +55,31 @@ final class LegacyMixedBulkWriteOperation implements WriteOperation insertRequests) { - return new LegacyMixedBulkWriteOperation(timeoutSettings, namespace, ordered, writeConcern, retryWrites, insertRequests, - INSERT); + static LegacyMixedBulkWriteOperation createBulkWriteOperationForInsert(final MongoNamespace namespace, final boolean ordered, + final WriteConcern writeConcern, final boolean retryWrites, final List insertRequests) { + return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, insertRequests, INSERT); } - static LegacyMixedBulkWriteOperation createBulkWriteOperationForUpdate(final TimeoutSettings timeoutSettings, - final MongoNamespace namespace, final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites, - final List updateRequests) { + static LegacyMixedBulkWriteOperation createBulkWriteOperationForUpdate(final MongoNamespace namespace, final boolean ordered, + final WriteConcern writeConcern, final boolean retryWrites, final List updateRequests) { assertTrue(updateRequests.stream().allMatch(updateRequest -> updateRequest.getType() == UPDATE)); - return new LegacyMixedBulkWriteOperation(timeoutSettings, namespace, ordered, writeConcern, retryWrites, updateRequests, - UPDATE); + return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, updateRequests, UPDATE); } - static LegacyMixedBulkWriteOperation createBulkWriteOperationForReplace(final TimeoutSettings timeoutSettings, - final MongoNamespace namespace, final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites, - final List replaceRequests) { + static LegacyMixedBulkWriteOperation createBulkWriteOperationForReplace(final MongoNamespace namespace, final boolean ordered, + final WriteConcern writeConcern, final boolean retryWrites, final List replaceRequests) { 
assertTrue(replaceRequests.stream().allMatch(updateRequest -> updateRequest.getType() == REPLACE)); - return new LegacyMixedBulkWriteOperation(timeoutSettings, namespace, ordered, writeConcern, retryWrites, replaceRequests, - REPLACE); + return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, replaceRequests, REPLACE); } - static LegacyMixedBulkWriteOperation createBulkWriteOperationForDelete(final TimeoutSettings timeoutSettings, - final MongoNamespace namespace, final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites, - final List deleteRequests) { - return new LegacyMixedBulkWriteOperation(timeoutSettings, namespace, ordered, writeConcern, retryWrites, deleteRequests, - DELETE); + static LegacyMixedBulkWriteOperation createBulkWriteOperationForDelete(final MongoNamespace namespace, final boolean ordered, + final WriteConcern writeConcern, final boolean retryWrites, final List deleteRequests) { + return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, deleteRequests, DELETE); } - private LegacyMixedBulkWriteOperation(final TimeoutSettings timeoutSettings, final MongoNamespace namespace, - final boolean ordered, final WriteConcern writeConcern, + private LegacyMixedBulkWriteOperation(final MongoNamespace namespace, final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites, final List writeRequests, final WriteRequest.Type type) { isTrueArgument("writeRequests not empty", !writeRequests.isEmpty()); - this.timeoutSettings = notNull("timeoutSettings", timeoutSettings); this.writeRequests = notNull("writeRequests", writeRequests); this.type = type; this.ordered = ordered; @@ -109,16 +97,11 @@ LegacyMixedBulkWriteOperation bypassDocumentValidation(@Nullable final Boolean b return this; } - @Override - public TimeoutSettings getTimeoutSettings() { - return timeoutSettings; - } - @Override public WriteConcernResult execute(final WriteBinding binding) { try { - BulkWriteResult result = new MixedBulkWriteOperation(timeoutSettings, namespace, writeRequests, - ordered, writeConcern, retryWrites).bypassDocumentValidation(bypassDocumentValidation).execute(binding); + BulkWriteResult result = new MixedBulkWriteOperation(namespace, writeRequests, ordered, writeConcern, retryWrites) + .bypassDocumentValidation(bypassDocumentValidation).execute(binding); if (result.wasAcknowledged()) { return translateBulkWriteResult(result); } else { diff --git a/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java b/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java new file mode 100644 index 00000000000..e47dd7bd32b --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.client.model.DBCollectionCountOptions; +import com.mongodb.client.model.DBCollectionFindAndModifyOptions; +import com.mongodb.client.model.DBCollectionFindOptions; +import com.mongodb.internal.TimeoutSettings; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +final class TimeoutSettingsHelper { + + private TimeoutSettingsHelper() { + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final long maxTimeMS) { + return timeoutSettings.withMaxTimeMS(maxTimeMS); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final long maxTimeMS, final long maxAwaitTimeMS) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final AggregationOptions options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionCountOptions options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionFindOptions options) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS)); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionFindAndModifyOptions options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final MapReduceCommand options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + +} diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy index 028066b02a9..98cb8282c17 100644 --- a/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy +++ b/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy @@ -61,7 +61,6 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit import static Fixture.getMongoClient -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForDelete import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForUpdate @@ -272,7 +271,7 @@ class DBCollectionSpecification extends Specification { collection.find().iterator().hasNext() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) .filter(new BsonDocument()) .retryReads(true)) @@ -282,7 +281,7 @@ class DBCollectionSpecification extends Specification { collection.find().iterator().hasNext() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) .filter(new BsonDocument()) 
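[Reviewer note, illustrative only] TimeoutSettingsHelper centralises the options-to-TimeoutSettings mapping that the removed DBCollection.getTimeoutSettings(maxTimeMS[, maxAwaitTimeMS]) overloads previously did inline at each call site. The caller-side flow, end to end, looks roughly like this sketch (simplified stand-in types, not the real driver classes):

    import java.util.concurrent.TimeUnit;
    import static java.util.concurrent.TimeUnit.MILLISECONDS;

    final class TimeoutMappingSketch {
        record Timeouts(long maxTimeMS, long maxAwaitTimeMS) {
            Timeouts withMaxTimeAndMaxAwaitTimeMS(final long maxTime, final long maxAwaitTime) {
                return new Timeouts(maxTime, maxAwaitTime);
            }
        }

        // stand-in for DBCollectionFindOptions: exposes maxTime/maxAwaitTime in a caller-chosen unit
        record FindOptions(long maxTime, long maxAwaitTime, TimeUnit unit) {
            long getMaxTime(final TimeUnit target) { return target.convert(maxTime, unit); }
            long getMaxAwaitTime(final TimeUnit target) { return target.convert(maxAwaitTime, unit); }
        }

        // analogue of TimeoutSettingsHelper.createTimeoutSettings(TimeoutSettings, DBCollectionFindOptions)
        static Timeouts createTimeoutSettings(final Timeouts base, final FindOptions options) {
            return base.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS));
        }

        public static void main(final String[] args) {
            Timeouts collectionDefaults = new Timeouts(0, 0);
            FindOptions perQuery = new FindOptions(2, 1, TimeUnit.SECONDS);
            // DBCursor.initializeCursor then feeds the result into executor.withTimeoutSettings(...)
            System.out.println(createTimeoutSettings(collectionDefaults, perQuery));
        }
    }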
.retryReads(true)) @@ -292,7 +291,7 @@ class DBCollectionSpecification extends Specification { collection.find(new BasicDBObject(), new DBCollectionFindOptions().collation(collation)).iterator().hasNext() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) .filter(new BsonDocument()) .collation(collation) @@ -315,7 +314,7 @@ class DBCollectionSpecification extends Specification { collection.findOne() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) .filter(new BsonDocument()) .limit(-1) @@ -326,7 +325,7 @@ class DBCollectionSpecification extends Specification { collection.findOne() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) .filter(new BsonDocument()) .limit(-1) @@ -337,7 +336,7 @@ class DBCollectionSpecification extends Specification { collection.findOne(new BasicDBObject(), new DBCollectionFindOptions().collation(collation)) then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) .filter(new BsonDocument()) .limit(-1) @@ -358,7 +357,7 @@ class DBCollectionSpecification extends Specification { collection.findAndRemove(query) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndDeleteOperation(TIMEOUT_SETTINGS, collection. + expect executor.getWriteOperation(), isTheSameAs(new FindAndDeleteOperation(collection. getNamespace(), WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec()).filter(new BsonDocument())) } @@ -378,8 +377,8 @@ class DBCollectionSpecification extends Specification { collection.findAndModify(query, update) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation(TIMEOUT_SETTINGS, - collection.getNamespace(), WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonUpdate) + expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation(collection.getNamespace(), + WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonUpdate) .filter(new BsonDocument())) when: // With options @@ -387,8 +386,8 @@ class DBCollectionSpecification extends Specification { .arrayFilters(dbObjectArrayFilters).writeConcern(WriteConcern.W3)) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation(TIMEOUT_SETTINGS, - collection.getNamespace(), WriteConcern.W3, retryWrites, collection.getObjectCodec(), bsonUpdate) + expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation(collection.getNamespace(), WriteConcern.W3, + retryWrites, collection.getObjectCodec(), bsonUpdate) .filter(new BsonDocument()) .collation(collation) .arrayFilters(bsonDocumentWrapperArrayFilters)) @@ -415,7 +414,7 @@ class DBCollectionSpecification extends Specification { collection.findAndModify(query, replace) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(TIMEOUT_SETTINGS, collection. 
+ expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(collection. getNamespace(), WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonReplace) .filter(new BsonDocument())) @@ -424,8 +423,8 @@ class DBCollectionSpecification extends Specification { .writeConcern(WriteConcern.W3)) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(TIMEOUT_SETTINGS, - collection.getNamespace(), WriteConcern.W3, retryWrites, collection.getObjectCodec(), bsonReplace) + expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(collection.getNamespace(), WriteConcern.W3, + retryWrites, collection.getObjectCodec(), bsonReplace) .filter(new BsonDocument()) .collation(collation)) } @@ -440,7 +439,7 @@ class DBCollectionSpecification extends Specification { collection.count() then: - expect executor.getReadOperation(), isTheSameAs(new CountOperation(TIMEOUT_SETTINGS, collection.getNamespace()) + expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) .filter(new BsonDocument()).retryReads(true)) when: // Inherits from DB @@ -449,7 +448,7 @@ class DBCollectionSpecification extends Specification { executor.getReadConcern() == ReadConcern.MAJORITY then: - expect executor.getReadOperation(), isTheSameAs(new CountOperation(TIMEOUT_SETTINGS, collection.getNamespace()) + expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) .filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY @@ -458,7 +457,7 @@ class DBCollectionSpecification extends Specification { collection.count(new BasicDBObject(), new DBCollectionCountOptions().collation(collation)) then: - expect executor.getReadOperation(), isTheSameAs(new CountOperation(TIMEOUT_SETTINGS, collection.getNamespace()) + expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) .filter(new BsonDocument()).retryReads(true) .collation(collation)) executor.getReadConcern() == ReadConcern.LOCAL @@ -485,7 +484,7 @@ class DBCollectionSpecification extends Specification { then: distinctFieldValues == [1, 2] - expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(TIMEOUT_SETTINGS, collection.getNamespace(), 'field1', + expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec()).filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.DEFAULT @@ -494,7 +493,7 @@ class DBCollectionSpecification extends Specification { collection.distinct('field1') then: - expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(TIMEOUT_SETTINGS, collection.getNamespace(), 'field1', + expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec()) .filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY @@ -504,7 +503,7 @@ class DBCollectionSpecification extends Specification { collection.distinct('field1', new DBCollectionDistinctOptions().collation(collation)) then: - expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(TIMEOUT_SETTINGS, collection.getNamespace(), 'field1', + expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec()).collation(collation).retryReads(true)) executor.getReadConcern() == ReadConcern.LOCAL } @@ -524,7 +523,7 @@ class 
DBCollectionSpecification extends Specification { then: expect executor.getReadOperation(), isTheSameAs( - new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS, collection.getNamespace(), new BsonJavaScript('map'), + new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec()) .verbose(true) .filter(new BsonDocument())) @@ -536,7 +535,7 @@ class DBCollectionSpecification extends Specification { then: expect executor.getReadOperation(), isTheSameAs( - new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS, collection.getNamespace(), new BsonJavaScript('map'), + new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec()) .verbose(true) .filter(new BsonDocument())) @@ -551,7 +550,7 @@ class DBCollectionSpecification extends Specification { then: expect executor.getReadOperation(), isTheSameAs( - new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS, collection.getNamespace(), new BsonJavaScript('map'), + new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec()) .verbose(true) .filter(new BsonDocument()) @@ -571,7 +570,7 @@ class DBCollectionSpecification extends Specification { then: expect executor.getWriteOperation(), isTheSameAs( - new MapReduceToCollectionOperation(TIMEOUT_SETTINGS, collection.getNamespace(), new BsonJavaScript('map'), + new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern()) .verbose(true) .filter(new BsonDocument()) @@ -582,7 +581,7 @@ class DBCollectionSpecification extends Specification { then: expect executor.getWriteOperation(), isTheSameAs( - new MapReduceToCollectionOperation(TIMEOUT_SETTINGS, collection.getNamespace(), new BsonJavaScript('map'), + new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern()) .verbose(true) .filter(new BsonDocument()) @@ -596,7 +595,7 @@ class DBCollectionSpecification extends Specification { then: expect executor.getWriteOperation(), isTheSameAs( - new MapReduceToCollectionOperation(TIMEOUT_SETTINGS, collection.getNamespace(), new BsonJavaScript('map'), + new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern()) .verbose(true) .filter(new BsonDocument()) @@ -620,7 +619,7 @@ class DBCollectionSpecification extends Specification { collection.aggregate(pipeline, AggregationOptions.builder().build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true)) executor.getReadConcern() == ReadConcern.DEFAULT @@ -629,7 +628,7 @@ class DBCollectionSpecification extends Specification { collection.aggregate(pipeline, AggregationOptions.builder().build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, 
collection.getDefaultDBObjectCodec()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY @@ -638,7 +637,7 @@ class DBCollectionSpecification extends Specification { collection.aggregate(pipeline, AggregationOptions.builder().collation(collation).build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, collection.getDefaultDBObjectCodec()).collation(collation).retryReads(true)) executor.getReadConcern() == ReadConcern.LOCAL } @@ -655,21 +654,21 @@ class DBCollectionSpecification extends Specification { collection.aggregate(pipeline, AggregationOptions.builder().build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(collection.getNamespace(), bsonPipeline, collection.getReadConcern(), collection.getWriteConcern())) when: // Inherits from DB collection.aggregate(pipeline, AggregationOptions.builder().build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(collection.getNamespace(), bsonPipeline, collection.getReadConcern(), collection.getWriteConcern())) when: collection.aggregate(pipeline, AggregationOptions.builder().collation(collation).build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(collection.getNamespace(), bsonPipeline, collection.getReadConcern(), collection.getWriteConcern()).collation(collation)) } @@ -687,7 +686,7 @@ class DBCollectionSpecification extends Specification { collection.explainAggregate(pipeline, options) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec())) @@ -696,7 +695,7 @@ class DBCollectionSpecification extends Specification { collection.explainAggregate(pipeline, options) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec())) @@ -705,7 +704,7 @@ class DBCollectionSpecification extends Specification { collection.explainAggregate(pipeline, options) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec())) } @@ -726,7 +725,7 @@ class 
DBCollectionSpecification extends Specification { collection.update(BasicDBObject.parse(query), BasicDBObject.parse(update)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true, WriteConcern.ACKNOWLEDGED, retryWrites, asList(updateRequest))) when: // Inherits from DB @@ -735,7 +734,7 @@ class DBCollectionSpecification extends Specification { then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true, WriteConcern.W3, retryWrites, asList(updateRequest))) when: @@ -745,7 +744,7 @@ class DBCollectionSpecification extends Specification { new DBCollectionUpdateOptions().collation(collation).arrayFilters(dbObjectArrayFilters)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true, WriteConcern.W1, retryWrites, asList(updateRequest.arrayFilters(bsonDocumentWrapperArrayFilters)))) where: @@ -768,7 +767,7 @@ class DBCollectionSpecification extends Specification { collection.remove(BasicDBObject.parse(query)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false, WriteConcern.ACKNOWLEDGED, retryWrites, asList(deleteRequest))) when: // Inherits from DB @@ -776,7 +775,7 @@ class DBCollectionSpecification extends Specification { collection.remove(BasicDBObject.parse(query)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false, WriteConcern.W3, retryWrites, asList(deleteRequest))) when: @@ -785,7 +784,7 @@ class DBCollectionSpecification extends Specification { collection.remove(BasicDBObject.parse(query), new DBCollectionRemoveOptions().collation(collation)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false, WriteConcern.W1, retryWrites, asList(deleteRequest))) } @@ -817,7 +816,7 @@ class DBCollectionSpecification extends Specification { bulk().execute() then: - expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered, WriteConcern.ACKNOWLEDGED, false)) @@ -826,7 +825,7 @@ class DBCollectionSpecification extends Specification { bulk().execute() then: - expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered, WriteConcern.W3, false)) when: @@ 
-834,7 +833,7 @@ class DBCollectionSpecification extends Specification { bulk().execute() then: - expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered, WriteConcern.W1, false)) where: diff --git a/driver-legacy/src/test/functional/com/mongodb/DBTest.java b/driver-legacy/src/test/functional/com/mongodb/DBTest.java index 7849906faac..8b2f8f59d90 100644 --- a/driver-legacy/src/test/functional/com/mongodb/DBTest.java +++ b/driver-legacy/src/test/functional/com/mongodb/DBTest.java @@ -31,7 +31,6 @@ import java.util.Locale; import java.util.UUID; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint; import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint; import static com.mongodb.ClusterFixture.getBinding; @@ -346,7 +345,7 @@ public void shouldApplyUuidRepresentationToCommandEncodingAndDecoding() { } BsonDocument getCollectionInfo(final String collectionName) { - return new ListCollectionsOperation<>(TIMEOUT_SETTINGS, getDefaultDatabaseName(), new BsonDocumentCodec()) + return new ListCollectionsOperation<>(getDefaultDatabaseName(), new BsonDocumentCodec()) .filter(new BsonDocument("name", new BsonString(collectionName))).execute(getBinding()).next().get(0); } diff --git a/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy index 078abccf758..85fb3ad867e 100644 --- a/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy +++ b/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy @@ -28,7 +28,6 @@ import org.bson.codecs.DocumentCodec import org.bson.types.ObjectId import spock.lang.IgnoreIf -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.getSingleConnectionBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet @@ -47,7 +46,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should throw IllegalArgumentException for empty list of requests'() { when: - createBulkWriteOperationForInsert(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, true, []) + createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, true, []) then: thrown(IllegalArgumentException) @@ -57,7 +56,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec given: def inserts = [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new InsertRequest(new BsonDocument('_id', new BsonInt32(2)))] - def operation = createBulkWriteOperationForInsert(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, false, inserts) + def operation = createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, inserts) when: def result = execute(operation) @@ -74,7 +73,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should insert a single document'() { given: def insert = new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) - def operation = createBulkWriteOperationForInsert(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, false, asList(insert)) + def operation = 
createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, asList(insert)) when: execute(operation) @@ -86,7 +85,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should execute unacknowledged write'() { given: def binding = getSingleConnectionBinding() - def operation = createBulkWriteOperationForInsert(TIMEOUT_SETTINGS, getNamespace(), true, UNACKNOWLEDGED, false, + def operation = createBulkWriteOperationForInsert(getNamespace(), true, UNACKNOWLEDGED, false, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new InsertRequest(new BsonDocument('_id', new BsonInt32(2)))]) @@ -108,7 +107,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new InsertRequest(new BsonDocument('_id', new BsonInt32(2))), ] - def operation = createBulkWriteOperationForInsert(TIMEOUT_SETTINGS, getNamespace(), false, ACKNOWLEDGED, false, documents) + def operation = createBulkWriteOperationForInsert(getNamespace(), false, ACKNOWLEDGED, false, documents) when: execute(operation) @@ -125,7 +124,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new InsertRequest(new BsonDocument('_id', new BsonInt32(2))), ] - def operation = createBulkWriteOperationForInsert(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, false, documents) + def operation = createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, documents) when: execute(operation) @@ -139,7 +138,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should support retryable writes'() { given: def insert = new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) - def operation = createBulkWriteOperationForInsert(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, true, asList(insert)) + def operation = createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, true, asList(insert)) when: executeWithSession(operation, false) @@ -151,7 +150,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should remove a document'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1)) - def operation = createBulkWriteOperationForDelete(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, false, + def operation = createBulkWriteOperationForDelete(getNamespace(), true, ACKNOWLEDGED, false, [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1)))]) when: @@ -168,7 +167,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should return correct result for replace'() { given: def replacement = new UpdateRequest(new BsonDocument(), new BsonDocument('_id', new BsonInt32(1)), REPLACE) - def operation = createBulkWriteOperationForReplace(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, + def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement)) when: @@ -184,12 +183,12 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should replace a single document'() { given: def insert = new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) - createBulkWriteOperationForInsert(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, false, asList(insert)) + createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, 
asList(insert)) .execute(getBinding()) def replacement = new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(1)).append('x', new BsonInt32(1)), REPLACE) - def operation = createBulkWriteOperationForReplace(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, + def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement)) when: @@ -209,7 +208,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def replacement = new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(1)).append('x', new BsonInt32(1)), REPLACE) .upsert(true) - def operation = createBulkWriteOperationForReplace(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, + def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement)) when: @@ -221,7 +220,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should update nothing if no documents match'() { given: - def operation = createBulkWriteOperationForUpdate(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, + def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(false))) @@ -241,7 +240,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', 1), new Document('x', 1)) - def operation = createBulkWriteOperationForUpdate(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, false, + def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(false))) @@ -261,7 +260,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', 1), new Document('x', 1)) - def operation = createBulkWriteOperationForUpdate(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, false, + def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(true))) @@ -278,7 +277,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'when upsert is true should insert a document if there are no matching documents'() { given: - def operation = createBulkWriteOperationForUpdate(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, false, + def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, asList(new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).upsert(true))) @@ -296,7 +295,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should return correct result for upsert'() { given: def id = new ObjectId() - def operation = createBulkWriteOperationForUpdate(TIMEOUT_SETTINGS, getNamespace(), true, ACKNOWLEDGED, false, + def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, asList(new 
UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))), UPDATE).upsert(true))) diff --git a/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy index c87252c7bc1..59dceb6478a 100644 --- a/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy +++ b/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy @@ -28,10 +28,7 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static Fixture.getMongoClient -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static spock.util.matcher.HamcrestSupport.expect @@ -125,7 +122,7 @@ class DBCursorSpecification extends Specification { cursor.toArray() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) .filter(new BsonDocument()) .projection(new BsonDocument()) @@ -145,7 +142,7 @@ class DBCursorSpecification extends Specification { then: expect executor.getReadOperation(), isTheSameAs( - new FindOperation(TIMEOUT_SETTINGS, collection.getNamespace(), collection.getObjectCodec()) + new FindOperation(collection.getNamespace(), collection.getObjectCodec()) .limit(-1) .filter(new BsonDocument()) .projection(new BsonDocument()) @@ -184,7 +181,7 @@ class DBCursorSpecification extends Specification { then: expect executor.getReadOperation(), isTheSameAs( - new FindOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, collection.getNamespace(), collection.getObjectCodec()) + new FindOperation(collection.getNamespace(), collection.getObjectCodec()) .batchSize(1) .collation(collation) .cursorType(cursorType) @@ -249,8 +246,7 @@ class DBCursorSpecification extends Specification { cursor.toArray() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME, - collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) .batchSize(1) .collation(collation) .cursorType(cursorType) @@ -284,7 +280,7 @@ class DBCursorSpecification extends Specification { then: result == 42 - expect executor.getReadOperation(), isTheSameAs(new CountOperation(TIMEOUT_SETTINGS, collection.getNamespace()) + expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) .filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY } @@ -300,7 +296,7 @@ class DBCursorSpecification extends Specification { then: result == 42 - expect executor.getReadOperation(), isTheSameAs(new CountOperation(TIMEOUT_SETTINGS, collection.getNamespace()) + expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) .filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY } diff --git a/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy index 5ed1565eb9c..5f0c81f28cc 100644 --- 
a/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy +++ b/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy @@ -88,7 +88,7 @@ class DBSpecification extends Specification { then: def operation = executor.getWriteOperation() as CreateCollectionOperation - expect operation, isTheSameAs(new CreateCollectionOperation(TIMEOUT_SETTINGS, 'test', 'ctest', db.getWriteConcern())) + expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern())) executor.getReadConcern() == ReadConcern.MAJORITY when: @@ -108,7 +108,7 @@ class DBSpecification extends Specification { operation = executor.getWriteOperation() as CreateCollectionOperation then: - expect operation, isTheSameAs(new CreateCollectionOperation(TIMEOUT_SETTINGS, 'test', 'ctest', db.getWriteConcern()) + expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern()) .sizeInBytes(100000) .maxDocuments(2000) .capped(true) @@ -136,7 +136,7 @@ class DBSpecification extends Specification { operation = executor.getWriteOperation() as CreateCollectionOperation then: - expect operation, isTheSameAs(new CreateCollectionOperation(TIMEOUT_SETTINGS, 'test', 'ctest', db.getWriteConcern()) + expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern()) .collation(collation)) executor.getReadConcern() == ReadConcern.MAJORITY } @@ -166,7 +166,7 @@ class DBSpecification extends Specification { then: def operation = executor.getWriteOperation() as CreateViewOperation - expect operation, isTheSameAs(new CreateViewOperation(TIMEOUT_SETTINGS, databaseName, viewName, viewOn, + expect operation, isTheSameAs(new CreateViewOperation(databaseName, viewName, viewOn, [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern)) executor.getReadConcern() == ReadConcern.MAJORITY @@ -175,7 +175,7 @@ class DBSpecification extends Specification { operation = executor.getWriteOperation() as CreateViewOperation then: - expect operation, isTheSameAs(new CreateViewOperation(TIMEOUT_SETTINGS, databaseName, viewName, viewOn, + expect operation, isTheSameAs(new CreateViewOperation(databaseName, viewName, viewOn, [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern).collation(collation)) executor.getReadConcern() == ReadConcern.MAJORITY } @@ -196,7 +196,7 @@ class DBSpecification extends Specification { def operation = executor.getReadOperation() as ListCollectionsOperation then: - expect operation, isTheSameAs(new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, + expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, new DBObjectCodec(getDefaultCodecRegistry())) .nameOnly(true)) @@ -205,7 +205,7 @@ class DBSpecification extends Specification { operation = executor.getReadOperation() as ListCollectionsOperation then: - expect operation, isTheSameAs(new ListCollectionsOperation(TIMEOUT_SETTINGS, databaseName, + expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, new DBObjectCodec(getDefaultCodecRegistry())) .nameOnly(true)) } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java index 71b7dfe9ecd..73ae8a05a6f 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java +++ 
b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java @@ -20,10 +20,12 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.client.model.AggregationLevel; import com.mongodb.internal.client.model.FindOptions; import com.mongodb.internal.operation.AsyncExplainableReadOperation; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.AggregatePublisher; @@ -37,6 +39,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; @@ -144,7 +147,9 @@ public Publisher toCollection() { if (lastPipelineStage == null || !lastPipelineStage.containsKey("$out") && !lastPipelineStage.containsKey("$merge")) { throw new IllegalStateException("The last stage of the aggregation pipeline must be $out or $merge"); } - return getMongoOperationPublisher().createReadOperationMono(this::getAggregateToCollectionOperation, getClientSession()); + return getMongoOperationPublisher().createReadOperationMono( + (asyncOperations) -> asyncOperations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS), + this::getAggregateToCollectionOperation, getClientSession()); } @Override @@ -169,10 +174,10 @@ public Publisher explain(final Class explainResultClass, final Explain private Publisher publishExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { notNull("explainDocumentClass", explainResultClass); - return getMongoOperationPublisher().createReadOperationMono(() -> - asAggregateOperation(1).asAsyncExplainableOperation(verbosity, - getCodecRegistry().get(explainResultClass)), - getClientSession()); + return getMongoOperationPublisher().createReadOperationMono( + AsyncOperations::getTimeoutSettings, + () -> asAggregateOperation(1).asAsyncExplainableOperation(verbosity, + getCodecRegistry().get(explainResultClass)), getClientSession()); } @Override @@ -193,14 +198,19 @@ AsyncReadOperation> asAsyncReadOperation(final int initialBa } } + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS)); + } + private AsyncExplainableReadOperation> asAggregateOperation(final int initialBatchSize) { return getOperations() - .aggregate(pipeline, getDocumentClass(), maxTimeMS, maxAwaitTimeMS, getTimeoutMode(), + .aggregate(pipeline, getDocumentClass(), getTimeoutMode(), initialBatchSize, collation, hint, hintString, comment, variables, allowDiskUse, aggregationLevel); } private AsyncReadOperation getAggregateToCollectionOperation() { - return getOperations().aggregateToCollection(pipeline, maxTimeMS, getTimeoutMode(), allowDiskUse, bypassDocumentValidation, + return getOperations().aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse, bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel); } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java index 61c550e76e3..a45e691b72e 100644 --- 
a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java @@ -19,6 +19,7 @@ import com.mongodb.MongoNamespace; import com.mongodb.ReadPreference; import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.operation.AsyncOperations; @@ -30,6 +31,7 @@ import org.reactivestreams.Subscriber; import reactor.core.publisher.Mono; +import java.util.function.Function; import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.assertNotNull; @@ -54,6 +56,7 @@ public abstract class BatchCursorPublisher implements Publisher { } abstract AsyncReadOperation> asAsyncReadOperation(int initialBatchSize); + abstract Function, TimeoutSettings> getTimeoutSettings(); AsyncReadOperation> asAsyncFirstReadOperation() { return asAsyncReadOperation(1); @@ -104,6 +107,9 @@ public Publisher batchSize(final int batchSize) { } public Publisher timeoutMode(final TimeoutMode timeoutMode) { + if (mongoOperationPublisher.getTimeoutSettings().getTimeoutMS() == null) { + throw new IllegalArgumentException("TimeoutMode requires timeoutMS to be set."); + } this.timeoutMode = timeoutMode; return this; } @@ -142,7 +148,7 @@ public Mono> batchCursor(final int initialBatchSize) { } Mono> batchCursor(final Supplier>> supplier) { - return mongoOperationPublisher.createReadOperationMono(supplier, clientSession).map(BatchCursor::new); + return mongoOperationPublisher.createReadOperationMono(getTimeoutSettings(), supplier, clientSession).map(BatchCursor::new); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java index 06c1857287a..ab7ade7d354 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java @@ -20,8 +20,10 @@ import com.mongodb.client.model.changestream.ChangeStreamDocument; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ChangeStreamPublisher; @@ -36,6 +38,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -140,6 +143,11 @@ public Publisher withDocumentClass(final Class AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { return createChangeStreamOperation(getMongoOperationPublisher().getCodecRegistry().get(clazz), initialBatchSize); } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(0, maxAwaitTimeMS)); + } }; } @@ -166,8 +174,14 @@ 
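[Reviewer note, illustrative sketch with simplified names] On the reactive side, each BatchCursorPublisher subclass now supplies a Function<AsyncOperations<?>, TimeoutSettings> describing how its own maxTime/maxAwaitTime map onto per-operation timeouts, and batchCursor(...) threads that function into createReadOperationMono; timeoutMode(...) additionally rejects callers that have not configured timeoutMS. The overall shape is roughly:

    import java.util.function.Function;
    import java.util.function.Supplier;

    final class PublisherTimeoutsSketch {
        record Timeouts(long maxTimeMS, long maxAwaitTimeMS) { }

        // stand-in for AsyncOperations: merges per-publisher values into per-operation timeouts
        record Operations(Timeouts defaults) {
            Timeouts createTimeoutSettings(final long maxTimeMS, final long maxAwaitTimeMS) {
                return new Timeouts(maxTimeMS, maxAwaitTimeMS);
            }
        }

        // stand-in for BatchCursorPublisher: subclasses declare their timeout mapping once
        abstract static class CursorPublisher<T> {
            abstract Function<Operations, Timeouts> timeoutSettings();

            T fetch(final Operations operations, final Supplier<T> readOperation) {
                Timeouts perOperation = timeoutSettings().apply(operations); // would select the executor for this read
                System.out.println("executing with " + perOperation);
                return readOperation.get();
            }
        }

        public static void main(final String[] args) {
            long maxAwaitTimeMS = 1_000;
            CursorPublisher<String> changeStreamLike = new CursorPublisher<>() {
                @Override
                Function<Operations, Timeouts> timeoutSettings() {
                    // mirrors ChangeStreamPublisherImpl: no maxTime, only maxAwaitTime
                    return operations -> operations.createTimeoutSettings(0, maxAwaitTimeMS);
                }
            };
            changeStreamLike.fetch(new Operations(new Timeouts(0, 0)), () -> "next change event");
        }
    }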
AsyncReadOperation>> asAsyncReadOperati return createChangeStreamOperation(codec, initialBatchSize); } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(0, maxAwaitTimeMS)); + } + private AsyncReadOperation> createChangeStreamOperation(final Codec codec, final int initialBatchSize) { return getOperations().changeStream(fullDocument, fullDocumentBeforeChange, pipeline, codec, changeStreamLevel, initialBatchSize, - collation, comment, maxAwaitTimeMS, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); + collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java index c77566fba4b..7ecb64a1542 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java @@ -40,7 +40,6 @@ import static com.mongodb.assertions.Assertions.assertTrue; import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.assertions.Assertions.notNull; -import static java.util.concurrent.TimeUnit.MILLISECONDS; final class ClientSessionPublisherImpl extends BaseClientSessionImpl implements ClientSession { @@ -144,11 +143,11 @@ public Publisher commitTransaction() { boolean alreadyCommitted = commitInProgress || transactionState == TransactionState.COMMITTED; commitInProgress = true; - Long maxCommitTime = transactionOptions.getMaxCommitTime(MILLISECONDS); + // TODO (CSOT) - JAVA-4067 + // Long maxCommitTime = transactionOptions.getMaxCommitTime(MILLISECONDS); return executor.execute( new CommitTransactionOperation( // TODO (CSOT) - JAVA-4067 - mongoClient.getTimeoutSettings().withMaxCommitMS(maxCommitTime == null ? 0 : maxCommitTime), assertNotNull(transactionOptions.getWriteConcern()), alreadyCommitted) .recoveryToken(getRecoveryToken()), readConcern, this) @@ -179,11 +178,11 @@ public Publisher abortTransaction() { if (readConcern == null) { throw new MongoInternalException("Invariant violated. Transaction options read concern can not be null"); } - Long maxCommitTime = transactionOptions.getMaxCommitTime(MILLISECONDS); + // TODO (CSOT) - JAVA-4067 + // Long maxCommitTime = transactionOptions.getMaxCommitTime(MILLISECONDS); return executor.execute( new AbortTransactionOperation( // TODO (CSOT) - JAVA-4067 - mongoClient.getTimeoutSettings().withMaxCommitMS(maxCommitTime == null ? 
0 : maxCommitTime), assertNotNull(transactionOptions.getWriteConcern())) .recoveryToken(getRecoveryToken()), readConcern, this) diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java index 37d062d6f7d..4c33bf11b07 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java @@ -18,7 +18,9 @@ import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; @@ -28,6 +30,7 @@ import org.bson.conversions.Bson; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; @@ -95,6 +98,11 @@ public DistinctPublisher timeoutMode(final TimeoutMode timeoutMode) { @Override AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { // initialBatchSize is ignored for distinct operations. - return getOperations().distinct(fieldName, filter, getDocumentClass(), maxTimeMS, collation, comment); + return getOperations().distinct(fieldName, filter, getDocumentClass(), collation, comment); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java index 2cd3a6b8a85..069954d15a1 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java @@ -20,9 +20,11 @@ import com.mongodb.ExplainVerbosity; import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.client.model.FindOptions; import com.mongodb.internal.operation.AsyncExplainableReadOperation; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; @@ -33,6 +35,7 @@ import org.reactivestreams.Publisher; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; @@ -213,10 +216,10 @@ public Publisher explain(final Class explainResultClass, final Explain private Publisher publishExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { notNull("explainDocumentClass", explainResultClass); - return getMongoOperationPublisher().createReadOperationMono(() -> - asAsyncReadOperation(0).asAsyncExplainableOperation(verbosity, - getCodecRegistry().get(explainResultClass)), - getClientSession()); + return getMongoOperationPublisher().createReadOperationMono( + getTimeoutSettings(), + 
() -> asAsyncReadOperation(0) + .asAsyncExplainableOperation(verbosity, getCodecRegistry().get(explainResultClass)), getClientSession()); } @Override @@ -224,6 +227,11 @@ AsyncExplainableReadOperation> asAsyncReadOperation(final in return getOperations().find(filter, getDocumentClass(), findOptions.withBatchSize(initialBatchSize)); } + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(findOptions)); + } + @Override AsyncReadOperation> asAsyncFirstReadOperation() { return getOperations().findFirst(filter, getDocumentClass(), findOptions); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java index e07dd09db53..057a8067ad3 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java @@ -18,7 +18,9 @@ import com.mongodb.ReadConcern; import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; @@ -29,6 +31,7 @@ import org.bson.conversions.Bson; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -95,6 +98,11 @@ void authorizedCollections(final boolean authorizedCollections) { AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { return getOperations().listCollections(getNamespace().getDatabaseName(), getDocumentClass(), filter, collectionNamesOnly, - authorizedCollections, initialBatchSize, maxTimeMS, comment, getTimeoutMode()); + authorizedCollections, initialBatchSize, comment, getTimeoutMode()); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java index 6e007eb43c5..b897a8bf9df 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java @@ -17,7 +17,9 @@ package com.mongodb.reactivestreams.client.internal; import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; @@ -27,6 +29,7 @@ import org.bson.conversions.Bson; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -89,8 +92,13 @@ public 
ListDatabasesPublisher timeoutMode(final TimeoutMode timeoutMode) { return this; } + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); + } + AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { // initialBatchSize is ignored for distinct operations. - return getOperations().listDatabases(getDocumentClass(), filter, nameOnly, maxTimeMS, authorizedDatabasesOnly, comment); + return getOperations().listDatabases(getDocumentClass(), filter, nameOnly, authorizedDatabasesOnly, comment); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java index b6ce20c49cb..79e5ce2a14a 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java @@ -17,7 +17,9 @@ package com.mongodb.reactivestreams.client.internal; import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; @@ -26,6 +28,7 @@ import org.bson.BsonValue; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -71,6 +74,11 @@ public ListIndexesPublisher timeoutMode(final TimeoutMode timeoutMode) { } AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { - return getOperations().listIndexes(getDocumentClass(), initialBatchSize, maxTimeMS, comment, getTimeoutMode()); + return getOperations().listIndexes(getDocumentClass(), initialBatchSize, comment, getTimeoutMode()); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java index 11ee1f48846..035d7d3bbec 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java @@ -19,8 +19,10 @@ import com.mongodb.ExplainVerbosity; import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.operation.AsyncExplainableReadOperation; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher; @@ -30,6 +32,7 @@ import org.reactivestreams.Publisher; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.assertions.Assertions.notNull; @@ -124,8 +127,9 @@ public Publisher 
explain(final Class explainResultClass, final Explain } private Publisher publishExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { - return getMongoOperationPublisher().createReadOperationMono(() -> - asAggregateOperation(1).asAsyncExplainableOperation(verbosity, + return getMongoOperationPublisher().createReadOperationMono( + (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)), + () -> asAggregateOperation(1).asAsyncExplainableOperation(verbosity, getCodecRegistry().get(explainResultClass)), getClientSession()); } @@ -134,9 +138,12 @@ AsyncReadOperation> asAsyncReadOperation(final int initialBa return asAggregateOperation(initialBatchSize); } + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); + } + private AsyncExplainableReadOperation> asAggregateOperation(final int initialBatchSize) { - return getOperations().listSearchIndexes(getDocumentClass(), maxTimeMS, indexName, initialBatchSize, collation, - comment, - allowDiskUse); + return getOperations().listSearchIndexes(getDocumentClass(), indexName, initialBatchSize, collation, comment, allowDiskUse); } } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java index 5712053831c..f8371c8afb6 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java @@ -26,6 +26,7 @@ import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.client.model.FindOptions; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.internal.operation.AsyncWriteOperation; import com.mongodb.internal.operation.MapReduceAsyncBatchCursor; @@ -37,6 +38,7 @@ import org.reactivestreams.Publisher; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static com.mongodb.ReadPreference.primary; import static com.mongodb.assertions.Assertions.notNull; @@ -164,7 +166,10 @@ public Publisher toCollection() { if (inline) { throw new IllegalStateException("The options must specify a non-inline result"); } - return getMongoOperationPublisher().createWriteOperationMono(this::createMapReduceToCollectionOperation, getClientSession()); + return getMongoOperationPublisher().createWriteOperationMono( + (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)), + this::createMapReduceToCollectionOperation, + getClientSession()); } @Override @@ -182,6 +187,11 @@ ReadPreference getReadPreference() { } } + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)); + } + @Override AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { if (inline) { @@ -195,15 +205,13 @@ AsyncReadOperation> asAsyncReadOperation(final int initialBa private WrappedMapReduceReadOperation createMapReduceInlineOperation() { return new WrappedMapReduceReadOperation<>(getOperations().mapReduce(mapFunction, reduceFunction, finalizeFunction, - getDocumentClass(), filter, limit, maxTimeMS, jsMode, scope, - sort, verbose, collation)); + getDocumentClass(), 
filter, limit, jsMode, scope, sort, verbose, collation)); } private WrappedMapReduceWriteOperation createMapReduceToCollectionOperation() { - return new WrappedMapReduceWriteOperation(getOperations().mapReduceToCollection(databaseName, collectionName, mapFunction, - reduceFunction, finalizeFunction, filter, limit, - maxTimeMS, jsMode, scope, sort, verbose, action, - bypassDocumentValidation, collation)); + return new WrappedMapReduceWriteOperation( + getOperations().mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, + limit, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation)); } private AsyncReadOperation> createFindOperation(final int initialBatchSize) { @@ -224,11 +232,6 @@ AsyncReadOperation> getOperation() { return operation; } - @Override - public TimeoutSettings getTimeoutSettings() { - return operation.getTimeoutSettings(); - } - @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { operation.executeAsync(binding, callback::onResult); @@ -246,11 +249,6 @@ AsyncWriteOperation getOperation() { return operation; } - @Override - public TimeoutSettings getTimeoutSettings() { - return operation.getTimeoutSettings(); - } - @Override public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { operation.executeAsync(binding, (result, t) -> callback.onResult(null, t)); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java index f1244e2f07c..ab1fec7c0d5 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java @@ -90,7 +90,7 @@ private MongoClientImpl(final MongoClientSettings settings, final MongoDriverInf AutoEncryptionSettings autoEncryptSettings = settings.getAutoEncryptionSettings(); this.crypt = autoEncryptSettings != null ? 
Crypts.createCrypt(this, autoEncryptSettings) : null; if (executor == null) { - this.executor = new OperationExecutorImpl(this, clientSessionHelper); + this.executor = new OperationExecutorImpl(this, clientSessionHelper, timeoutSettings); } else { this.executor = executor; } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java index 30b91097ecf..f73d1e8c07b 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java @@ -238,18 +238,22 @@ MongoOperationPublisher withTimeout(final long timeout, final TimeUnit timeUn } Publisher dropDatabase(@Nullable final ClientSession clientSession) { - return createWriteOperationMono(operations::dropDatabase, clientSession); + return createWriteOperationMono(operations::getTimeoutSettings, operations::dropDatabase, clientSession); } Publisher createCollection( @Nullable final ClientSession clientSession, final String collectionName, final CreateCollectionOptions options) { - return createWriteOperationMono(() -> operations.createCollection(collectionName, options, autoEncryptionSettings), clientSession); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.createCollection(collectionName, options, autoEncryptionSettings), clientSession); } Publisher createView( @Nullable final ClientSession clientSession, final String viewName, final String viewOn, final List pipeline, final CreateViewOptions options) { - return createWriteOperationMono(() -> operations.createView(viewName, viewOn, pipeline, options), clientSession); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.createView(viewName, viewOn, pipeline, options), clientSession); } public Publisher runCommand( @@ -259,24 +263,30 @@ public Publisher runCommand( return Mono.error(new MongoClientException("Read preference in a transaction must be primary")); } return createReadOperationMono( + operations::getTimeoutSettings, () -> operations.commandRead(command, clazz), clientSession, notNull("readPreference", readPreference)); } Publisher estimatedDocumentCount(final EstimatedDocumentCountOptions options) { - return createReadOperationMono(() -> operations.estimatedDocumentCount(notNull("options", options)), null); + return createReadOperationMono( + (asyncOperations -> asyncOperations.createTimeoutSettings(options)), + () -> operations.estimatedDocumentCount(notNull("options", options)), null); } Publisher countDocuments(@Nullable final ClientSession clientSession, final Bson filter, final CountOptions options) { - return createReadOperationMono(() -> operations.countDocuments(notNull("filter", filter), notNull("options", options) + return createReadOperationMono( + (asyncOperations -> asyncOperations.createTimeoutSettings(options)), + () -> operations.countDocuments(notNull("filter", filter), notNull("options", options) ), clientSession); } Publisher bulkWrite( @Nullable final ClientSession clientSession, final List> requests, final BulkWriteOptions options) { - return createWriteOperationMono(() -> operations.bulkWrite(notNull("requests", requests), notNull("options", options)), - clientSession); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> 
operations.bulkWrite(notNull("requests", requests), notNull("options", options)), clientSession); } Publisher insertOne(@Nullable final ClientSession clientSession, final T document, final InsertOneOptions options) { @@ -289,8 +299,9 @@ Publisher insertOne(@Nullable final ClientSession clientSession Publisher insertMany( @Nullable final ClientSession clientSession, final List documents, final InsertManyOptions options) { - return createWriteOperationMono(() -> operations.insertMany(notNull("documents", documents), notNull("options", options)), - clientSession) + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.insertMany(notNull("documents", documents), notNull("options", options)), clientSession) .map(INSERT_MANY_RESULT_MAPPER); } @@ -357,15 +368,17 @@ Publisher updateMany( } Publisher findOneAndDelete(@Nullable final ClientSession clientSession, final Bson filter, final FindOneAndDeleteOptions options) { - return createWriteOperationMono(() -> operations.findOneAndDelete(notNull("filter", filter), - notNull("options", options)), - clientSession); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.findOneAndDelete(notNull("filter", filter), notNull("options", options)), clientSession); } Publisher findOneAndReplace( @Nullable final ClientSession clientSession, final Bson filter, final T replacement, final FindOneAndReplaceOptions options) { - return createWriteOperationMono(() -> operations.findOneAndReplace(notNull("filter", filter), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.findOneAndReplace(notNull("filter", filter), notNull("replacement", replacement), notNull("options", options)), clientSession); @@ -374,7 +387,9 @@ Publisher findOneAndReplace( Publisher findOneAndUpdate( @Nullable final ClientSession clientSession, final Bson filter, final Bson update, final FindOneAndUpdateOptions options) { - return createWriteOperationMono(() -> operations.findOneAndUpdate(notNull("filter", filter), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.findOneAndUpdate(notNull("filter", filter), notNull("update", update), notNull("options", options)), clientSession); @@ -383,14 +398,18 @@ Publisher findOneAndUpdate( Publisher findOneAndUpdate( @Nullable final ClientSession clientSession, final Bson filter, final List update, final FindOneAndUpdateOptions options) { - return createWriteOperationMono(() -> operations.findOneAndUpdate(notNull("filter", filter), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.findOneAndUpdate(notNull("filter", filter), notNull("update", update), notNull("options", options)), clientSession); } Publisher dropCollection(@Nullable final ClientSession clientSession, final DropCollectionOptions dropCollectionOptions) { - return createWriteOperationMono(() -> operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), clientSession); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), clientSession); } Publisher createIndex(@Nullable final ClientSession clientSession, final Bson key, final IndexOptions options) { @@ -401,8 +420,9 @@ Publisher createIndex(@Nullable final ClientSession clientSession, final Publisher createIndexes( @Nullable final ClientSession clientSession, final List indexes, final CreateIndexOptions options) { - return createWriteOperationMono(() -> 
operations.createIndexes(notNull("indexes", indexes), - notNull("options", options)), clientSession) + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.createIndexes(notNull("indexes", indexes), notNull("options", options)), clientSession) .thenMany(Flux.fromIterable(IndexHelper.getIndexNames(indexes, getCodecRegistry()))); } @@ -414,27 +434,37 @@ Publisher createSearchIndex(@Nullable final String indexName, final Bson } Publisher createSearchIndexes(final List indexes) { - return createWriteOperationMono(() -> operations.createSearchIndexes(indexes), null) + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.createSearchIndexes(indexes), null) .thenMany(Flux.fromIterable(IndexHelper.getSearchIndexNames(indexes))); } public Publisher updateSearchIndex(final String name, final Bson definition) { - return createWriteOperationMono(() -> operations.updateSearchIndex(name, definition), null); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.updateSearchIndex(name, definition), null); } public Publisher dropSearchIndex(final String indexName) { - return createWriteOperationMono(() -> operations.dropSearchIndex(indexName), null); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.dropSearchIndex(indexName), null); } Publisher dropIndex(@Nullable final ClientSession clientSession, final String indexName, final DropIndexOptions options) { - return createWriteOperationMono(() -> operations.dropIndex(notNull("indexName", indexName), notNull("options", options)), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.dropIndex(notNull("indexName", indexName), notNull("options", options)), clientSession); } Publisher dropIndex(@Nullable final ClientSession clientSession, final Bson keys, final DropIndexOptions options) { - return createWriteOperationMono(() -> operations.dropIndex(notNull("keys", keys), notNull("options", options)), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.dropIndex(notNull("keys", keys), notNull("options", options)), clientSession); } @@ -445,35 +475,45 @@ Publisher dropIndexes(@Nullable final ClientSession clientSession, final D Publisher renameCollection( @Nullable final ClientSession clientSession, final MongoNamespace newCollectionNamespace, final RenameCollectionOptions options) { - return createWriteOperationMono(() -> operations.renameCollection(notNull("newCollectionNamespace", newCollectionNamespace), + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.renameCollection(notNull("newCollectionNamespace", newCollectionNamespace), notNull("options", options)), clientSession); } - Mono createReadOperationMono( - final Supplier> operation, - @Nullable final ClientSession clientSession) { - return createReadOperationMono(operation, clientSession, getReadPreference()); + + Mono createReadOperationMono(final Function, TimeoutSettings> timeoutSettingsFunction, + final Supplier> operation, @Nullable final ClientSession clientSession) { + return createReadOperationMono(() -> timeoutSettingsFunction.apply(operations), operation, clientSession, getReadPreference()); } - Mono createReadOperationMono( - final Supplier> operation, - @Nullable final ClientSession clientSession, + + Mono createReadOperationMono(final Supplier timeoutSettingsSupplier, + final Supplier> operationSupplier, @Nullable final ClientSession clientSession, 
final ReadPreference readPreference) { - AsyncReadOperation readOperation = operation.get(); - return executor.execute(readOperation, readPreference, getReadConcern(), clientSession); + AsyncReadOperation readOperation = operationSupplier.get(); + return getExecutor(timeoutSettingsSupplier.get()) + .execute(readOperation, readPreference, getReadConcern(), clientSession); } - Mono createWriteOperationMono(final Supplier> operation, @Nullable final ClientSession clientSession) { - AsyncWriteOperation writeOperation = operation.get(); - return executor.execute(writeOperation, getReadConcern(), clientSession); + Mono createWriteOperationMono(final Function, TimeoutSettings> timeoutSettingsFunction, + final Supplier> operationSupplier, @Nullable final ClientSession clientSession) { + return createWriteOperationMono(() -> timeoutSettingsFunction.apply(operations), operationSupplier, clientSession); + } + + Mono createWriteOperationMono(final Supplier timeoutSettingsSupplier, + final Supplier> operationSupplier, @Nullable final ClientSession clientSession) { + AsyncWriteOperation writeOperation = operationSupplier.get(); + return getExecutor(timeoutSettingsSupplier.get()) + .execute(writeOperation, getReadConcern(), clientSession); } private Mono createSingleWriteRequestMono( final Supplier> operation, @Nullable final ClientSession clientSession, final WriteRequest.Type type) { - return createWriteOperationMono(operation, clientSession) + return createWriteOperationMono(operations::getTimeoutSettings, operation, clientSession) .onErrorMap(MongoBulkWriteException.class, e -> { MongoException exception; WriteConcernError writeConcernError = e.getWriteConcernError(); @@ -504,6 +544,10 @@ private Mono createSingleWriteRequestMono( }); } + private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); + } + private static final Function INSERT_ONE_RESULT_MAPPER = result -> { if (result.wasAcknowledged()) { BsonValue insertedId = result.getInserts().isEmpty() ? null : result.getInserts().get(0).getId(); @@ -548,6 +592,3 @@ public static SingleResultCallback sinkToCallback(final MonoSink sink) }; } } - - - diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java index 371168bedd8..6808c1b575a 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java @@ -18,6 +18,7 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.internal.operation.AsyncWriteOperation; import com.mongodb.lang.Nullable; @@ -52,4 +53,14 @@ Mono execute(AsyncReadOperation operation, ReadPreference readPreferen * @param the operations result type. 
*/ Mono execute(AsyncWriteOperation operation, ReadConcern readConcern, @Nullable ClientSession session); + + /** + * Create a new OperationExecutor that uses the given timeout settings + * + * @param timeoutSettings the TimeoutSettings to use for the operations + * @return a new operation executor with the given timeout settings + * @since CSOT + */ + OperationExecutor withTimeoutSettings(TimeoutSettings timeoutSettings); + } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java index 2c09de24ad6..b0446c6aa03 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java @@ -43,6 +43,8 @@ import org.reactivestreams.Subscriber; import reactor.core.publisher.Mono; +import java.util.Objects; + import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL; import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; import static com.mongodb.ReadPreference.primary; @@ -58,17 +60,32 @@ public class OperationExecutorImpl implements OperationExecutor { private final ClientSessionHelper clientSessionHelper; private final ReactiveContextProvider contextProvider; - OperationExecutorImpl(final MongoClientImpl mongoClient, final ClientSessionHelper clientSessionHelper) { + private final TimeoutSettings timeoutSettings; + + OperationExecutorImpl(final MongoClientImpl mongoClient, final ClientSessionHelper clientSessionHelper, + final TimeoutSettings timeoutSettings) { + this(mongoClient, clientSessionHelper, timeoutSettings, getReactiveContextProvider(mongoClient)); + } + + OperationExecutorImpl(final MongoClientImpl mongoClient, final ClientSessionHelper clientSessionHelper, + final TimeoutSettings timeoutSettings, @Nullable final ReactiveContextProvider contextProvider) { this.mongoClient = mongoClient; this.clientSessionHelper = clientSessionHelper; + this.timeoutSettings = timeoutSettings; + this.contextProvider = contextProvider; + } + + @Nullable + private static ReactiveContextProvider getReactiveContextProvider(final MongoClientImpl mongoClient) { ContextProvider contextProvider = mongoClient.getSettings().getContextProvider(); if (contextProvider != null && !(contextProvider instanceof ReactiveContextProvider)) { throw new IllegalArgumentException("The contextProvider must be an instance of " + ReactiveContextProvider.class.getName() + " when using the Reactive Streams driver"); } - this.contextProvider = (ReactiveContextProvider) contextProvider; + return (ReactiveContextProvider) contextProvider; } + @Override public Mono execute(final AsyncReadOperation operation, final ReadPreference readPreference, final ReadConcern readConcern, @Nullable final ClientSession session) { @@ -82,10 +99,10 @@ public Mono execute(final AsyncReadOperation operation, final ReadPref return Mono.from(subscriber -> clientSessionHelper.withClientSession(session, this) - .map(clientSession -> getReadWriteBinding(operation.getTimeoutSettings(), getContext(subscriber), + .map(clientSession -> getReadWriteBinding(getContext(subscriber), readPreference, readConcern, clientSession, session == null && clientSession != null)) .switchIfEmpty(Mono.fromCallable(() -> -
getReadWriteBinding(getContext(subscriber), readPreference, readConcern, session, false))) .flatMap(binding -> { if (session != null && session.hasActiveTransaction() && !binding.getReadPreference().equals(primary())) { @@ -119,10 +136,10 @@ public Mono execute(final AsyncWriteOperation operation, final ReadCon return Mono.from(subscriber -> clientSessionHelper.withClientSession(session, this) - .map(clientSession -> getReadWriteBinding(operation.getTimeoutSettings(), getContext(subscriber), + .map(clientSession -> getReadWriteBinding(getContext(subscriber), primary(), readConcern, clientSession, session == null && clientSession != null)) .switchIfEmpty(Mono.fromCallable(() -> - getReadWriteBinding(operation.getTimeoutSettings(), getContext(subscriber), primary(), + getReadWriteBinding(getContext(subscriber), primary(), readConcern, session, false))) .flatMap(binding -> Mono.create(sink -> operation.executeAsync(binding, (result, t) -> { @@ -139,6 +156,14 @@ public Mono execute(final AsyncWriteOperation operation, final ReadCon ); } + @Override + public OperationExecutor withTimeoutSettings(final TimeoutSettings newTimeoutSettings) { + if (Objects.equals(timeoutSettings, newTimeoutSettings)) { + return this; + } + return new OperationExecutorImpl(mongoClient, clientSessionHelper, newTimeoutSettings, contextProvider); + } + private RequestContext getContext(final Subscriber subscriber) { RequestContext context = null; if (contextProvider != null) { @@ -164,7 +189,7 @@ private void unpinServerAddressOnTransientTransactionError(@Nullable final Clien } } - private AsyncReadWriteBinding getReadWriteBinding(final TimeoutSettings timeoutSettings, final RequestContext requestContext, + private AsyncReadWriteBinding getReadWriteBinding(final RequestContext requestContext, final ReadPreference readPreference, final ReadConcern readConcern, @Nullable final ClientSession session, final boolean ownsSession) { notNull("readPreference", readPreference); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidReadOperationThenCursorReadOperation.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidReadOperationThenCursorReadOperation.java index e6d94ea0acf..17a54c345a5 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidReadOperationThenCursorReadOperation.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidReadOperationThenCursorReadOperation.java @@ -16,7 +16,6 @@ package com.mongodb.reactivestreams.client.internal; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -40,11 +39,6 @@ public AsyncReadOperation> getCursorReadOperation() { return cursorReadOperation; } - @Override - public TimeoutSettings getTimeoutSettings() { - return readOperation.getTimeoutSettings(); - } - @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { readOperation.executeAsync(binding, (result, t) -> { diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidWriteOperationThenCursorReadOperation.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidWriteOperationThenCursorReadOperation.java index f3ad8131d2c..bde5811a713 100644 --- 
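Note: taken together, the MongoOperationPublisher and OperationExecutorImpl hunks above replace the removed per-operation getTimeoutSettings() lookup with a copy-on-write executor: the publisher resolves the per-call TimeoutSettings (cursor publishers compute them from their own options, most other helpers simply pass operations::getTimeoutSettings) and asks the executor for an instance carrying them. A rough sketch of the write path, with generics reconstructed by assumption:

    // Sketch only; mirrors the new createWriteOperationMono/getExecutor pair above.
    <T> Mono<T> createWriteOperationMono(Supplier<TimeoutSettings> timeoutSettingsSupplier,
                                         Supplier<AsyncWriteOperation<T>> operationSupplier,
                                         @Nullable ClientSession clientSession) {
        AsyncWriteOperation<T> writeOperation = operationSupplier.get();
        // withTimeoutSettings is copy-on-write: equal settings return the same executor,
        // otherwise a new OperationExecutorImpl wraps the new settings.
        return executor.withTimeoutSettings(timeoutSettingsSupplier.get())
                .execute(writeOperation, getReadConcern(), clientSession);
    }

Because the executor now carries the settings, the binding helpers drop their TimeoutSettings parameter, which is also why the Void*OperationThenCursorReadOperation wrappers simply delete their getTimeoutSettings() overrides.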
a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidWriteOperationThenCursorReadOperation.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidWriteOperationThenCursorReadOperation.java @@ -16,7 +16,6 @@ package com.mongodb.reactivestreams.client.internal; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -34,11 +33,6 @@ class VoidWriteOperationThenCursorReadOperation implements AsyncReadOperation this.cursorReadOperation = cursorReadOperation; } - @Override - public TimeoutSettings getTimeoutSettings() { - return writeOperation.getTimeoutSettings(); - } - @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { writeOperation.executeAsync((AsyncWriteBinding) binding, (result, t) -> { diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java index 8e7b1af1bc9..102b96e424f 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java @@ -18,8 +18,10 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.operation.AsyncOperations; import com.mongodb.internal.operation.AsyncReadOperation; import org.bson.Document; import org.junit.jupiter.api.Test; @@ -36,6 +38,7 @@ import java.util.Queue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.stream.IntStream; import static com.mongodb.reactivestreams.client.internal.TestHelper.OPERATION_EXECUTOR; @@ -169,6 +172,11 @@ BatchCursorPublisher createVerifiableBatchCursor(final List AsyncReadOperation> asAsyncReadOperation(final int initialBatchSize) { return readOperation; } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (AsyncOperations::getTimeoutSettings); + } }; OperationExecutor executor = OPERATION_EXECUTOR; diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java index be45e90f8bb..143499c841a 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java @@ -38,9 +38,6 @@ import java.util.List; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.lang.String.format; import static java.util.Arrays.asList; @@ -62,7 +59,7 @@ void 
shouldBuildTheExpectedOperation() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateOperation expectedOperation = new AggregateOperation<>(TIMEOUT_SETTINGS, NAMESPACE, pipeline, + AggregateOperation expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline, getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -84,7 +81,7 @@ void shouldBuildTheExpectedOperation() { .maxAwaitTime(1001, MILLISECONDS) .maxTime(101, MILLISECONDS); - expectedOperation = new AggregateOperation<>(TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME, NAMESPACE, pipeline, + expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline, getDefaultCodecRegistry().get(Document.class)) .retryReads(true) .allowDiskUse(true) @@ -107,7 +104,7 @@ void shouldBuildTheExpectedOperationForHintString() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateOperation expectedOperation = new AggregateOperation<>(TIMEOUT_SETTINGS, NAMESPACE, pipeline, + AggregateOperation expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline, getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -131,7 +128,7 @@ void shouldBuildTheExpectedOperationForHintPlusHintString() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateOperation expectedOperation = new AggregateOperation<>(TIMEOUT_SETTINGS, NAMESPACE, pipeline, + AggregateOperation expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline, getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -160,7 +157,7 @@ void shouldBuildTheExpectedOperationsForDollarOut() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, pipeline, + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); // default input should be as expected @@ -181,7 +178,7 @@ void shouldBuildTheExpectedOperationsForDollarOut() { .maxAwaitTime(1001, MILLISECONDS) // Ignored on $out .maxTime(100, MILLISECONDS); - expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, NAMESPACE, pipeline, + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED) .allowDiskUse(true) .bypassDocumentValidation(true) @@ -195,7 +192,7 @@ void shouldBuildTheExpectedOperationsForDollarOut() { assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation()); FindOperation expectedFindOperation = - new FindOperation<>(TIMEOUT_SETTINGS, collectionNamespace, getDefaultCodecRegistry().get(Document.class)) + new FindOperation<>(collectionNamespace, getDefaultCodecRegistry().get(Document.class)) .batchSize(100) .collation(COLLATION) .filter(new BsonDocument()) @@ -207,7 +204,7 @@ void shouldBuildTheExpectedOperationsForDollarOut() { // Should handle database level aggregations publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, 
AggregationLevel.DATABASE); - expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, pipeline, ReadConcern.DEFAULT, + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); Flux.from(publisher).blockFirst(); @@ -218,7 +215,7 @@ void shouldBuildTheExpectedOperationsForDollarOut() { // Should handle toCollection publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, pipeline, ReadConcern.DEFAULT, + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); // default input should be as expected @@ -238,7 +235,7 @@ void shouldBuildTheExpectedOperationsForDollarOutWithHintString() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, pipeline, + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); publisher @@ -265,7 +262,7 @@ void shouldBuildTheExpectedOperationsForDollarOutWithHintPlusHintString() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, pipeline, + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); publisher @@ -298,7 +295,7 @@ void shouldBuildTheExpectedOperationsForDollarOutAsDocument() { new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION) .toCollection(); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, pipeline, + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); Flux.from(toCollectionPublisher).blockFirst(); @@ -319,7 +316,7 @@ void shouldBuildTheExpectedOperationsForDollarOutAsDocument() { AggregationLevel.COLLECTION) .toCollection(); - expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, pipelineWithNamespace, ReadConcern.DEFAULT, + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipelineWithNamespace, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); Flux.from(toCollectionPublisher).blockFirst(); @@ -339,7 +336,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, pipeline, + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); // default input should be as expected @@ -360,7 +357,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { .maxAwaitTime(1001, 
MILLISECONDS) // Ignored on $out .maxTime(100, MILLISECONDS); - expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, NAMESPACE, pipeline, + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED) .allowDiskUse(true) .bypassDocumentValidation(true) @@ -374,7 +371,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation()); FindOperation expectedFindOperation = - new FindOperation<>(TIMEOUT_SETTINGS, collectionNamespace, getDefaultCodecRegistry().get(Document.class)) + new FindOperation<>(collectionNamespace, getDefaultCodecRegistry().get(Document.class)) .batchSize(100) .collation(COLLATION) .filter(new BsonDocument()) @@ -386,7 +383,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { // Should handle database level aggregations publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.DATABASE); - expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, pipeline, ReadConcern.DEFAULT, + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); Flux.from(publisher).blockFirst(); @@ -397,7 +394,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { // Should handle toCollection publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, pipeline, ReadConcern.DEFAULT, + expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); // default input should be as expected @@ -417,7 +414,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeString() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, pipeline, + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); // default input should be as expected @@ -428,7 +425,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeString() { assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation()); FindOperation expectedFindOperation = - new FindOperation<>(TIMEOUT_SETTINGS, collectionNamespace, getDefaultCodecRegistry().get(Document.class)) + new FindOperation<>(collectionNamespace, getDefaultCodecRegistry().get(Document.class)) .filter(new BsonDocument()) .batchSize(Integer.MAX_VALUE) .retryReads(true); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java index 541c751473d..7c2ab637c27 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java @@ -37,8 +37,6 @@ import java.util.List; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; -import static 
com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; @@ -59,7 +57,7 @@ void shouldBuildTheExpectedOperation() { Document.class, pipeline, ChangeStreamLevel.COLLECTION); ChangeStreamOperation> expectedOperation = - new ChangeStreamOperation<>(TIMEOUT_SETTINGS, NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, + new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, codec) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -78,7 +76,7 @@ void shouldBuildTheExpectedOperation() { .maxAwaitTime(101, MILLISECONDS) .fullDocument(FullDocument.UPDATE_LOOKUP); - expectedOperation = new ChangeStreamOperation<>(TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME, NAMESPACE, FullDocument.UPDATE_LOOKUP, + expectedOperation = new ChangeStreamOperation<>(NAMESPACE, FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, codec).retryReads(true); @@ -106,7 +104,7 @@ void shouldBuildTheExpectedOperationWhenSettingDocumentClass() { .withDocumentClass(BsonDocument.class); ChangeStreamOperation expectedOperation = - new ChangeStreamOperation<>(TIMEOUT_SETTINGS, NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, + new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, getDefaultCodecRegistry().get(BsonDocument.class)) .batchSize(batchSize) .comment(new BsonInt32(1)) diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/DistinctPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/DistinctPublisherImplTest.java index 58750ee7ef8..f9de792574c 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/DistinctPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/DistinctPublisherImplTest.java @@ -28,7 +28,6 @@ import org.reactivestreams.Publisher; import reactor.core.publisher.Flux; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -44,7 +43,7 @@ void shouldBuildTheExpectedOperation() { DistinctPublisher publisher = new DistinctPublisherImpl<>(null, createMongoOperationPublisher(executor), fieldName, new Document()); - DistinctOperation expectedOperation = new DistinctOperation<>(TIMEOUT_SETTINGS, NAMESPACE, fieldName, + DistinctOperation expectedOperation = new DistinctOperation<>(NAMESPACE, fieldName, getDefaultCodecRegistry().get(Document.class)) .retryReads(true).filter(new BsonDocument()); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java index 846b67a82d6..8217750368d 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java @@ -31,8 +31,6 @@ import org.junit.jupiter.api.Test; import reactor.core.publisher.Flux; -import static 
com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -52,7 +50,7 @@ void shouldBuildTheExpectedOperation() { TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor())); FindPublisher publisher = new FindPublisherImpl<>(null, createMongoOperationPublisher(executor), new Document()); - FindOperation expectedOperation = new FindOperation<>(TIMEOUT_SETTINGS, NAMESPACE, + FindOperation expectedOperation = new FindOperation<>(NAMESPACE, getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true) @@ -86,8 +84,8 @@ void shouldBuildTheExpectedOperation() { .showRecordId(false) .allowDiskUse(false); - expectedOperation = new FindOperation<>(TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME, NAMESPACE, - getDefaultCodecRegistry().get(Document.class)) + expectedOperation = new FindOperation<>(NAMESPACE, + getDefaultCodecRegistry().get(Document.class)) .retryReads(true) .filter(new BsonDocument()) .allowDiskUse(false) diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java index 22f2cb6467e..6613723b49d 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java @@ -17,7 +17,6 @@ package com.mongodb.reactivestreams.client.internal; import com.mongodb.ReadPreference; -import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.ListCollectionsOperation; import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher; import org.bson.BsonDocument; @@ -46,8 +45,8 @@ void shouldBuildTheExpectedOperation() { .withDocumentClass(Document.class), true)) .authorizedCollections(true); - ListCollectionsOperation expectedOperation = new ListCollectionsOperation<>(TimeoutSettings.DEFAULT, DATABASE_NAME, - getDefaultCodecRegistry().get(Document.class)) + ListCollectionsOperation expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME, + getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .nameOnly(true) .authorizedCollections(true) @@ -65,8 +64,8 @@ void shouldBuildTheExpectedOperation() { .maxTime(10, SECONDS) .batchSize(100); - expectedOperation = new ListCollectionsOperation<>(TimeoutSettings.DEFAULT.withMaxTimeMS(10_000), DATABASE_NAME, - getDefaultCodecRegistry().get(Document.class)) + expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME, + getDefaultCodecRegistry().get(Document.class)) .nameOnly(true) .authorizedCollections(true) .retryReads(true) diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java index 5f3e4110f41..a632edbae82 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java +++ 
b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java @@ -26,8 +26,6 @@ import org.junit.jupiter.api.Test; import reactor.core.publisher.Flux; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -44,7 +42,7 @@ void shouldBuildTheExpectedOperation() { ListCollectionsPublisher publisher = new ListCollectionsPublisherImpl<>(null, createMongoOperationPublisher(executor) .withDocumentClass(String.class), true); - ListCollectionsOperation expectedOperation = new ListCollectionsOperation<>(TIMEOUT_SETTINGS, DATABASE_NAME, + ListCollectionsOperation expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME, getDefaultCodecRegistry().get(String.class)) .batchSize(Integer.MAX_VALUE) .nameOnly(true).retryReads(true); @@ -61,8 +59,8 @@ void shouldBuildTheExpectedOperation() { .maxTime(100, MILLISECONDS) .batchSize(100); - expectedOperation = new ListCollectionsOperation<>(TIMEOUT_SETTINGS_WITH_MAX_TIME, DATABASE_NAME, - getDefaultCodecRegistry().get(String.class)) + expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME, + getDefaultCodecRegistry().get(String.class)) .nameOnly(true) .retryReads(true) .filter(new BsonDocument("filter", new BsonInt32(1))) diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java index c5978b2b692..c19a56f14cc 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java @@ -26,8 +26,6 @@ import org.junit.jupiter.api.Test; import reactor.core.publisher.Flux; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -43,7 +41,7 @@ void shouldBuildTheExpectedOperation() { TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor())); ListDatabasesPublisher publisher = new ListDatabasesPublisherImpl<>(null, createMongoOperationPublisher(executor)); - ListDatabasesOperation expectedOperation = new ListDatabasesOperation<>(TIMEOUT_SETTINGS, + ListDatabasesOperation expectedOperation = new ListDatabasesOperation<>( getDefaultCodecRegistry().get(Document.class)) .retryReads(true); @@ -60,7 +58,7 @@ void shouldBuildTheExpectedOperation() { .maxTime(100, MILLISECONDS) .batchSize(100); - expectedOperation = new ListDatabasesOperation<>(TIMEOUT_SETTINGS_WITH_MAX_TIME, + expectedOperation = new ListDatabasesOperation<>( getDefaultCodecRegistry().get(Document.class)) .retryReads(true) .authorizedDatabasesOnly(true) diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java 
index 671f26743f0..5ae221b8a02 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java @@ -25,8 +25,6 @@ import org.junit.jupiter.api.Test; import reactor.core.publisher.Flux; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -45,7 +43,7 @@ void shouldBuildTheExpectedOperation() { ListIndexesPublisher publisher = new ListIndexesPublisherImpl<>(null, createMongoOperationPublisher(executor)); ListIndexesOperation expectedOperation = - new ListIndexesOperation<>(TIMEOUT_SETTINGS, NAMESPACE, getDefaultCodecRegistry().get(Document.class)) + new ListIndexesOperation<>(NAMESPACE, getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -60,7 +58,7 @@ void shouldBuildTheExpectedOperation() { .maxTime(100, MILLISECONDS); expectedOperation = - new ListIndexesOperation<>(TIMEOUT_SETTINGS_WITH_MAX_TIME, NAMESPACE, getDefaultCodecRegistry().get(Document.class)) + new ListIndexesOperation<>(NAMESPACE, getDefaultCodecRegistry().get(Document.class)) .batchSize(100) .retryReads(true); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java index 0aeb069c12f..c112395a818 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java @@ -34,8 +34,6 @@ import org.reactivestreams.Publisher; import reactor.core.publisher.Flux; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -60,7 +58,7 @@ void shouldBuildTheExpectedMapReduceWithInlineResultsOperation() { new MapReducePublisherImpl<>(null, createMongoOperationPublisher(executor), MAP_FUNCTION, REDUCE_FUNCTION); MapReduceWithInlineResultsOperation expectedOperation = new MapReduceWithInlineResultsOperation<>( - TIMEOUT_SETTINGS, NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), + NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), getDefaultCodecRegistry().get(Document.class)).verbose(true); // default input should be as expected @@ -86,7 +84,7 @@ TIMEOUT_SETTINGS, NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScrip .verbose(false); expectedOperation = new MapReduceWithInlineResultsOperation<>( - TIMEOUT_SETTINGS_WITH_MAX_TIME, NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), + NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), getDefaultCodecRegistry().get(Document.class)) .verbose(true) .collation(COLLATION) @@ -115,7 +113,7 @@ void shouldBuildTheExpectedMapReduceToCollectionOperation() { new 
MapReducePublisherImpl<>(null, createMongoOperationPublisher(executor), MAP_FUNCTION, REDUCE_FUNCTION) .collectionName(NAMESPACE.getCollectionName()); - MapReduceToCollectionOperation expectedOperation = new MapReduceToCollectionOperation(TIMEOUT_SETTINGS, NAMESPACE, + MapReduceToCollectionOperation expectedOperation = new MapReduceToCollectionOperation(NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), NAMESPACE.getCollectionName(), WriteConcern.ACKNOWLEDGED).verbose(true); @@ -136,8 +134,8 @@ void shouldBuildTheExpectedMapReduceToCollectionOperation() { .sort(Sorts.ascending("sort")) .verbose(false); - expectedOperation = new MapReduceToCollectionOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, NAMESPACE, new BsonJavaScript(MAP_FUNCTION), - new BsonJavaScript(REDUCE_FUNCTION), NAMESPACE.getCollectionName(), WriteConcern.ACKNOWLEDGED) + expectedOperation = new MapReduceToCollectionOperation(NAMESPACE, new BsonJavaScript(MAP_FUNCTION), + new BsonJavaScript(REDUCE_FUNCTION), NAMESPACE.getCollectionName(), WriteConcern.ACKNOWLEDGED) .verbose(true) .collation(COLLATION) .bypassDocumentValidation(true) diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java index 8a28c8d5f68..fbb64237f9c 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java @@ -32,17 +32,27 @@ import org.bson.codecs.configuration.CodecRegistries; import org.bson.codecs.configuration.CodecRegistry; import org.junit.jupiter.api.Test; +import org.mockito.Mockito; import java.util.concurrent.TimeUnit; import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; public class MongoOperationPublisherTest { - private static final OperationExecutor OPERATION_EXECUTOR = mock(OperationExecutor.class); + + private static final OperationExecutor OPERATION_EXECUTOR; + + static { + OPERATION_EXECUTOR = mock(OperationExecutor.class); + Mockito.lenient().doAnswer(invocation -> OPERATION_EXECUTOR) + .when(OPERATION_EXECUTOR) + .withTimeoutSettings(any()); + } private static final MongoNamespace MONGO_NAMESPACE = new MongoNamespace("a.b"); private static final MongoOperationPublisher DEFAULT_MOP = new MongoOperationPublisher<>( diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java index 8f953c0e400..5ca7c91cd2e 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java @@ -82,6 +82,9 @@ public class TestHelper { static { OperationExecutor executor = mock(OperationExecutor.class); + Mockito.lenient().doAnswer(invocation -> executor) + .when(executor).withTimeoutSettings(any()); + Mockito.lenient().doAnswer(invocation -> Mono.empty()) .when(executor) .execute(any(), any(), any()); diff --git 
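
The MongoOperationPublisherTest and TestHelper changes above both stub the new withTimeoutSettings call so the mocked executor keeps answering with itself. A minimal, self-contained sketch of that stubbing, assuming Mockito on the test classpath; the helper class name and the executor import are illustrative stand-ins matching the reactive test fixtures above:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.mock;

import com.mongodb.reactivestreams.client.internal.OperationExecutor;

final class StubbedExecutors {

    private StubbedExecutors() {
    }

    // Mirrors the fixtures above: withTimeoutSettings(...) answers with the mock itself, so
    // code that derives a per-operation executor keeps talking to the same stub.
    static OperationExecutor stubbedExecutor() {
        OperationExecutor executor = mock(OperationExecutor.class);
        lenient().doAnswer(invocation -> executor)
                .when(executor)
                .withTimeoutSettings(any());
        return executor;
    }
}
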
a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java index 99c9642f8d6..e176eec8be6 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java @@ -18,6 +18,7 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.internal.operation.AsyncWriteOperation; import com.mongodb.lang.Nullable; @@ -59,6 +60,11 @@ public Mono execute(final AsyncWriteOperation operation, final ReadCon return createMono(); } + @Override + public OperationExecutor withTimeoutSettings(final TimeoutSettings timeoutSettings) { + return this; + } + Mono createMono() { return Mono.create(sink -> { Object response = responses.remove(0); diff --git a/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java index 9f0bfd86f41..ad052738948 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java @@ -98,8 +98,9 @@ public void toCollection() { throw new IllegalStateException("The last stage of the aggregation pipeline must be $out or $merge"); } - getExecutor().execute(operations.aggregateToCollection(pipeline, maxTimeMS, getTimeoutMode(), allowDiskUse, - bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel), + getExecutor().execute( + operations.aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse, + bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel), getReadPreference(), getReadConcern(), getClientSession()); } @@ -122,7 +123,7 @@ public AggregateIterable timeoutMode(final TimeoutMode timeoutMode) { } @Override - @SuppressWarnings("deprecation") + @Deprecated public AggregateIterable maxTime(final long maxTime, final TimeUnit timeUnit) { notNull("timeUnit", timeUnit); this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); @@ -200,8 +201,9 @@ public E explain(final Class explainResultClass, final ExplainVerbosity v private E executeExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { notNull("explainDocumentClass", explainResultClass); - return getExecutor().execute(asAggregateOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), - getReadPreference(), getReadConcern(), getClientSession()); + return getExecutor().execute( + asAggregateOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(), + getReadConcern(), getClientSession()); } @Override @@ -209,8 +211,9 @@ public ReadOperation> asReadOperation() { MongoNamespace outNamespace = getOutNamespace(); if (outNamespace != null) { validateTimeoutMode(); - getExecutor().execute(operations.aggregateToCollection(pipeline, maxTimeMS, getTimeoutMode(), allowDiskUse, - bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel), + getExecutor().execute( + operations.aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse, + bypassDocumentValidation, 
collation, hint, hintString, comment, variables, aggregationLevel), getReadPreference(), getReadConcern(), getClientSession()); FindOptions findOptions = new FindOptions().collation(collation); @@ -224,9 +227,13 @@ public ReadOperation> asReadOperation() { } } + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS)); + } + private ExplainableReadOperation> asAggregateOperation() { - return operations.aggregate(pipeline, resultClass, maxTimeMS, maxAwaitTimeMS, getTimeoutMode(), getBatchSize(), collation, - hint, hintString, comment, variables, allowDiskUse, aggregationLevel); + return operations.aggregate(pipeline, resultClass, getTimeoutMode(), getBatchSize(), collation, hint, hintString, comment, + variables, allowDiskUse, aggregationLevel); } @Nullable diff --git a/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java index 283fb12834b..3a24da53984 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java @@ -24,7 +24,6 @@ import com.mongodb.client.MongoChangeStreamCursor; import com.mongodb.client.MongoCursor; import com.mongodb.client.MongoIterable; -import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; import com.mongodb.client.model.changestream.ChangeStreamDocument; import com.mongodb.client.model.changestream.FullDocument; @@ -89,7 +88,6 @@ public ChangeStreamIterableImpl(@Nullable final ClientSession clientSession, fin this.codec = ChangeStreamDocument.createCodec(notNull("resultClass", resultClass), codecRegistry); this.changeStreamLevel = notNull("changeStreamLevel", changeStreamLevel); this.operations = new SyncOperations<>(namespace, resultClass, readPreference, codecRegistry, retryReads, timeoutSettings); - super.timeoutMode(TimeoutMode.ITERATION); } @Override @@ -147,6 +145,12 @@ public MongoChangeStreamCursor cursor() { public ReadOperation> asReadOperation() { throw new UnsupportedOperationException(); } + + @Override + + protected OperationExecutor getExecutor() { + return ChangeStreamIterableImpl.this.getExecutor(); + } }; } @@ -207,9 +211,14 @@ public ReadOperation>> asReadOperation throw new UnsupportedOperationException(); } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(0, maxAwaitTimeMS)); + } + private ReadOperation> createChangeStreamOperation() { return operations.changeStream(fullDocument, fullDocumentBeforeChange, pipeline, new RawBsonDocumentCodec(), changeStreamLevel, - getBatchSize(), collation, comment, maxAwaitTimeMS, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); + getBatchSize(), collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); } private BatchCursor execute() { diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java index bb6b8064f7d..d0e88984109 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java @@ -39,7 +39,6 @@ import static com.mongodb.assertions.Assertions.assertTrue; import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.assertions.Assertions.notNull; -import static 
java.util.concurrent.TimeUnit.MILLISECONDS; final class ClientSessionImpl extends BaseClientSessionImpl implements ClientSession { @@ -145,11 +144,11 @@ public void commitTransaction() { throw new MongoInternalException("Invariant violated. Transaction options read concern can not be null"); } commitInProgress = true; - Long maxCommitTime = transactionOptions.getMaxCommitTime(MILLISECONDS); + // TODO (CSOT) - JAVA-4067 + // Long maxCommitTime = transactionOptions.getMaxCommitTime(MILLISECONDS); delegate.getOperationExecutor().execute( new CommitTransactionOperation( // TODO (CSOT) - JAVA-4067 - delegate.getTimeoutSettings().withMaxCommitMS(maxCommitTime == null ? 0 : maxCommitTime), assertNotNull(transactionOptions.getWriteConcern()), transactionState == TransactionState.COMMITTED) .recoveryToken(getRecoveryToken()), @@ -181,10 +180,10 @@ public void abortTransaction() { if (readConcern == null) { throw new MongoInternalException("Invariant violated. Transaction options read concern can not be null"); } - Long maxCommitTime = transactionOptions.getMaxCommitTime(MILLISECONDS); + // TODO (CSOT) - JAVA-4067 + // Long maxCommitTime = transactionOptions.getMaxCommitTime(MILLISECONDS); delegate.getOperationExecutor().execute(new AbortTransactionOperation( // TODO (CSOT) - JAVA-4067 - delegate.getTimeoutSettings().withMaxCommitMS(maxCommitTime == null ? 0 : maxCommitTime), assertNotNull(transactionOptions.getWriteConcern())) .recoveryToken(getRecoveryToken()), readConcern, this); } diff --git a/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java index 17bfac03381..bf7dd5c8a49 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java @@ -105,6 +105,11 @@ public DistinctIterable comment(@Nullable final BsonValue comment) { @Override public ReadOperation> asReadOperation() { - return operations.distinct(fieldName, filter, resultClass, maxTimeMS, collation, comment); + return operations.distinct(fieldName, filter, resultClass, collation, comment); + } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java index f9446a6f6e3..2d6fb54b333 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java @@ -208,8 +208,8 @@ public FindIterable allowDiskUse(@Nullable final Boolean allowDiskUse) @Nullable @Override public TResult first() { - try (BatchCursor batchCursor = getExecutor().execute(operations.findFirst(filter, resultClass, findOptions), - getReadPreference(), getReadConcern(), getClientSession())) { + try (BatchCursor batchCursor = getExecutor().execute( + operations.findFirst(filter, resultClass, findOptions), getReadPreference(), getReadConcern(), getClientSession())) { return batchCursor.hasNext() ? 
batchCursor.next().iterator().next() : null; } } @@ -234,10 +234,15 @@ public E explain(final Class explainResultClass, final ExplainVerbosity v return executeExplain(explainResultClass, notNull("verbosity", verbosity)); } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(findOptions)); + } + private E executeExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { notNull("explainDocumentClass", explainResultClass); - return getExecutor().execute(asReadOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), - getReadPreference(), getReadConcern(), getClientSession()); + return getExecutor().execute( + asReadOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(), getReadConcern(), getClientSession()); } public ExplainableReadOperation> asReadOperation() { diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java index e9a0b79f39c..7d617947077 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java @@ -106,6 +106,11 @@ ListCollectionsIterableImpl authorizedCollections(final boolean authori @Override public ReadOperation> asReadOperation() { return operations.listCollections(databaseName, resultClass, filter, collectionNamesOnly, authorizedCollections, - getBatchSize(), maxTimeMS, comment, getTimeoutMode()); + getBatchSize(), comment, getTimeoutMode()); + } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java index b7cccd6ed5d..83bc08b3dd1 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java @@ -109,6 +109,11 @@ public ListDatabasesIterable comment(@Nullable final BsonValue comment) @Override public ReadOperation> asReadOperation() { - return operations.listDatabases(resultClass, filter, nameOnly, maxTimeMS, authorizedDatabasesOnly, comment); + return operations.listDatabases(resultClass, filter, nameOnly, authorizedDatabasesOnly, comment); + } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java index 618f55b2a1b..19be1bdc8ed 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java @@ -84,6 +84,10 @@ public ListIndexesIterable comment(@Nullable final BsonValue comment) { @Override public ReadOperation> asReadOperation() { - return operations.listIndexes(resultClass, getBatchSize(), maxTimeMS, comment, getTimeoutMode()); + return operations.listIndexes(resultClass, getBatchSize(), comment, getTimeoutMode()); + } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); } } diff --git 
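
The sync iterable implementations above (DistinctIterableImpl, ListCollectionsIterableImpl, ListDatabasesIterableImpl, ListIndexesIterableImpl) all migrate maxTimeMS the same way: the value is dropped from the operation factory call, and each class overrides getExecutor() to fold its max time into per-operation timeout settings. A condensed, self-contained sketch of that shape, using simplified stand-in types rather than the driver's actual classes:

// Stand-in for com.mongodb.internal.TimeoutSettings, for illustration only.
final class TimeoutSettingsSketch {
    final long maxTimeMS;

    TimeoutSettingsSketch(final long maxTimeMS) {
        this.maxTimeMS = maxTimeMS;
    }
}

// Stand-in executor: returns itself when the settings are unchanged, otherwise a copy bound
// to the new settings, the same shape as DelegateOperationExecutor.withTimeoutSettings below.
final class ExecutorSketch {
    private final TimeoutSettingsSketch timeoutSettings;

    ExecutorSketch(final TimeoutSettingsSketch timeoutSettings) {
        this.timeoutSettings = timeoutSettings;
    }

    ExecutorSketch withTimeoutSettings(final TimeoutSettingsSketch newTimeoutSettings) {
        return java.util.Objects.equals(timeoutSettings, newTimeoutSettings)
                ? this : new ExecutorSketch(newTimeoutSettings);
    }
}

// Stand-in iterable: maxTimeMS no longer travels with the operation; it is applied by
// deriving an executor scoped to this operation, as in the getExecutor() overrides above.
abstract class IterableSketch {
    private final ExecutorSketch clientExecutor;
    private long maxTimeMS;

    IterableSketch(final ExecutorSketch clientExecutor) {
        this.clientExecutor = clientExecutor;
    }

    public IterableSketch maxTime(final long maxTimeMS) {
        this.maxTimeMS = maxTimeMS;
        return this;
    }

    protected ExecutorSketch getExecutor() {
        return clientExecutor.withTimeoutSettings(new TimeoutSettingsSketch(maxTimeMS));
    }
}
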
a/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java index 912d701576e..c67106d357d 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java @@ -142,12 +142,18 @@ public E explain(final Class explainResultClass, final ExplainVerbosity v } private E executeExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { - return getExecutor().execute(asAggregateOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), - getReadPreference(), getReadConcern(), getClientSession()); + return getExecutor().execute(asAggregateOperation() + .asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(), getReadConcern(), getClientSession()); } private ExplainableReadOperation> asAggregateOperation() { - return operations.listSearchIndexes(resultClass, maxTimeMS, indexName, getBatchSize(), collation, comment, + return operations.listSearchIndexes(resultClass, indexName, getBatchSize(), collation, comment, allowDiskUse); } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); + } + } diff --git a/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java index 76ef2a42495..8a0107aafeb 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java @@ -188,11 +188,16 @@ ReadPreference getReadPreference() { } } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); + } + @Override public ReadOperation> asReadOperation() { if (inline) { ReadOperation> operation = operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, - resultClass, filter, limit, maxTimeMS, jsMode, scope, sort, verbose, collation); + resultClass, filter, limit, jsMode, scope, sort, verbose, collation); return new WrappedMapReduceReadOperation<>(operation); } else { getExecutor().execute(createMapReduceToCollectionOperation(), getReadConcern(), getClientSession()); @@ -211,7 +216,7 @@ public ReadOperation> asReadOperation() { private WriteOperation createMapReduceToCollectionOperation() { return operations.mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, - limit, maxTimeMS, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation + limit, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation ); } @@ -227,11 +232,6 @@ ReadOperation> getOperation() { this.operation = operation; } - @Override - public TimeoutSettings getTimeoutSettings() { - return operation.getTimeoutSettings(); - } - @Override public BatchCursor execute(final ReadBinding binding) { return operation.execute(binding); diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClientDelegate.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClientDelegate.java index 9ee358867ed..7f64f96f86e 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoClientDelegate.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoClientDelegate.java @@ -48,6 +48,7 @@ import com.mongodb.lang.Nullable; 
import org.bson.codecs.configuration.CodecRegistry; +import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL; @@ -80,7 +81,7 @@ final class MongoClientDelegate { this.contextProvider = contextProvider; this.serverSessionPool = new ServerSessionPool(cluster, timeoutSettings, serverApi); this.originator = originator; - this.operationExecutor = operationExecutor == null ? new DelegateOperationExecutor() : operationExecutor; + this.operationExecutor = operationExecutor == null ? new DelegateOperationExecutor(timeoutSettings) : operationExecutor; this.crypt = crypt; this.serverApi = serverApi; this.timeoutSettings = timeoutSettings; @@ -137,6 +138,12 @@ public TimeoutSettings getTimeoutSettings() { } private class DelegateOperationExecutor implements OperationExecutor { + private final TimeoutSettings timeoutSettings; + + DelegateOperationExecutor(final TimeoutSettings timeoutSettings) { + this.timeoutSettings = timeoutSettings; + } + @Override public T execute(final ReadOperation operation, final ReadPreference readPreference, final ReadConcern readConcern) { return execute(operation, readPreference, readConcern, null); @@ -155,7 +162,7 @@ public T execute(final ReadOperation operation, final ReadPreference read } ClientSession actualClientSession = getClientSession(session); - ReadBinding binding = getReadBinding(operation.getTimeoutSettings(), readPreference, readConcern, actualClientSession, session == null); + ReadBinding binding = getReadBinding(readPreference, readConcern, actualClientSession, session == null); try { if (actualClientSession.hasActiveTransaction() && !binding.getReadPreference().equals(primary())) { @@ -172,13 +179,14 @@ public T execute(final ReadOperation operation, final ReadPreference read } @Override - public T execute(final WriteOperation operation, final ReadConcern readConcern, @Nullable final ClientSession session) { + public T execute(final WriteOperation operation, final ReadConcern readConcern, + @Nullable final ClientSession session) { if (session != null) { session.notifyOperationInitiated(operation); } ClientSession actualClientSession = getClientSession(session); - WriteBinding binding = getWriteBinding(operation.getTimeoutSettings(), readConcern, actualClientSession, session == null); + WriteBinding binding = getWriteBinding(readConcern, actualClientSession, session == null); try { return operation.execute(binding); @@ -191,21 +199,28 @@ public T execute(final WriteOperation operation, final ReadConcern readCo } } - WriteBinding getWriteBinding(final TimeoutSettings timeoutSettings, final ReadConcern readConcern, final ClientSession session, - final boolean ownsSession) { - return getReadWriteBinding(timeoutSettings, primary(), readConcern, session, ownsSession); + @Override + public OperationExecutor withTimeoutSettings(final TimeoutSettings newTimeoutSettings) { + if (Objects.equals(timeoutSettings, newTimeoutSettings)) { + return this; + } + return new DelegateOperationExecutor(newTimeoutSettings); } - ReadBinding getReadBinding(final TimeoutSettings timeoutSettings, final ReadPreference readPreference, - final ReadConcern readConcern, final ClientSession session, final boolean ownsSession) { - return getReadWriteBinding(timeoutSettings, readPreference, readConcern, session, ownsSession); + WriteBinding getWriteBinding(final ReadConcern readConcern, final ClientSession session, final boolean ownsSession) { + return getReadWriteBinding(primary(), readConcern, 
session, ownsSession); } - ReadWriteBinding getReadWriteBinding(final TimeoutSettings timeoutSettings, final ReadPreference readPreference, + ReadBinding getReadBinding(final ReadPreference readPreference, final ReadConcern readConcern, final ClientSession session, + final boolean ownsSession) { + return getReadWriteBinding(readPreference, readConcern, session, ownsSession); + } + + ReadWriteBinding getReadWriteBinding(final ReadPreference readPreference, final ReadConcern readConcern, final ClientSession session, final boolean ownsSession) { ClusterAwareReadWriteBinding readWriteBinding = new ClusterBinding(cluster, - getReadPreferenceForBinding(readPreference, session), readConcern, getOperationContext(timeoutSettings, readConcern)); + getReadPreferenceForBinding(readPreference, session), readConcern, getOperationContext(readConcern)); if (crypt != null) { readWriteBinding = new CryptBinding(readWriteBinding, crypt); @@ -214,11 +229,11 @@ ReadWriteBinding getReadWriteBinding(final TimeoutSettings timeoutSettings, fina return new ClientSessionBinding(session, ownsSession, readWriteBinding); } - private OperationContext getOperationContext(final TimeoutSettings timeoutSettings, final ReadConcern readConcern) { + private OperationContext getOperationContext(final ReadConcern readConcern) { return new OperationContext( getRequestContext(), new ReadConcernAwareNoOpSessionContext(readConcern), - new TimeoutContext(timeoutSettings), + new TimeoutContext(getTimeoutSettings()), serverApi); } @@ -230,6 +245,9 @@ private RequestContext getRequestContext() { return context == null ? IgnorableRequestContext.INSTANCE : context; } + private TimeoutSettings getTimeoutSettings() { + return timeoutSettings; + } private void labelException(final ClientSession session, final MongoException e) { if (session.hasActiveTransaction() && (e instanceof MongoSocketException || e instanceof MongoTimeoutException diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java index d64bd09c81a..ce7f517545d 100755 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java @@ -64,7 +64,6 @@ import com.mongodb.internal.client.model.AggregationLevel; import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; import com.mongodb.internal.operation.IndexHelper; -import com.mongodb.internal.operation.RenameCollectionOperation; import com.mongodb.internal.operation.SyncOperations; import com.mongodb.internal.operation.WriteOperation; import com.mongodb.lang.Nullable; @@ -243,11 +242,13 @@ public long estimatedDocumentCount() { @Override public long estimatedDocumentCount(final EstimatedDocumentCountOptions options) { - return executor.execute(operations.estimatedDocumentCount(options), readPreference, readConcern, null); + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.estimatedDocumentCount(options), readPreference, readConcern, null); } private long executeCount(@Nullable final ClientSession clientSession, final Bson filter, final CountOptions options) { - return executor.execute(operations.countDocuments(filter, options), readPreference, readConcern, clientSession); + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.countDocuments(filter, options), readPreference, readConcern, clientSession); } @Override @@ -470,7 +471,8 @@ private 
BulkWriteResult executeBulkWrite(@Nullable final ClientSession clientSes final List> requests, final BulkWriteOptions options) { notNull("requests", requests); - return executor.execute(operations.bulkWrite(requests, options), readConcern, clientSession); + return getExecutor(timeoutSettings) + .execute(operations.bulkWrite(requests, options), readConcern, clientSession); } @Override @@ -525,8 +527,10 @@ public InsertManyResult insertMany(final ClientSession clientSession, final List } private InsertManyResult executeInsertMany(@Nullable final ClientSession clientSession, final List documents, - final InsertManyOptions options) { - return toInsertManyResult(executor.execute(operations.insertMany(documents, options), readConcern, clientSession)); + final InsertManyOptions options) { + return toInsertManyResult( + getExecutor(timeoutSettings).execute(operations.insertMany(documents, options), readConcern, clientSession) + ); } @Override @@ -717,7 +721,8 @@ public TDocument findOneAndDelete(final ClientSession clientSession, final Bson @Nullable private TDocument executeFindOneAndDelete(@Nullable final ClientSession clientSession, final Bson filter, final FindOneAndDeleteOptions options) { - return executor.execute(operations.findOneAndDelete(filter, options), readConcern, clientSession); + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.findOneAndDelete(filter, options), readConcern, clientSession); } @Override @@ -749,7 +754,8 @@ public TDocument findOneAndReplace(final ClientSession clientSession, final Bson @Nullable private TDocument executeFindOneAndReplace(@Nullable final ClientSession clientSession, final Bson filter, final TDocument replacement, final FindOneAndReplaceOptions options) { - return executor.execute(operations.findOneAndReplace(filter, replacement, options), readConcern, clientSession); + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.findOneAndReplace(filter, replacement, options), readConcern, clientSession); } @Override @@ -781,7 +787,8 @@ public TDocument findOneAndUpdate(final ClientSession clientSession, final Bson @Nullable private TDocument executeFindOneAndUpdate(@Nullable final ClientSession clientSession, final Bson filter, final Bson update, final FindOneAndUpdateOptions options) { - return executor.execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession); + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession); } @Override @@ -813,7 +820,8 @@ public TDocument findOneAndUpdate(final ClientSession clientSession, final Bson @Nullable private TDocument executeFindOneAndUpdate(@Nullable final ClientSession clientSession, final Bson filter, final List update, final FindOneAndUpdateOptions options) { - return executor.execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession); + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession); } @Override @@ -864,14 +872,14 @@ public void updateSearchIndex(final String indexName, final Bson definition) { notNull("indexName", indexName); notNull("definition", definition); - executor.execute(operations.updateSearchIndex(indexName, definition), readConcern, null); + getExecutor(timeoutSettings).execute(operations.updateSearchIndex(indexName, definition), readConcern, null); } @Override public 
void dropSearchIndex(final String indexName) { notNull("indexName", indexName); - executor.execute(operations.dropSearchIndex(indexName), readConcern, null); + getExecutor(timeoutSettings).execute(operations.dropSearchIndex(indexName), readConcern, null); } @Override @@ -886,7 +894,8 @@ public ListSearchIndexesIterable listSearchIndexes(final Clas } private void executeDrop(@Nullable final ClientSession clientSession, final DropCollectionOptions dropCollectionOptions) { - executor.execute(operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), readConcern, clientSession); + getExecutor(timeoutSettings) + .execute(operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), readConcern, clientSession); } @Override @@ -933,12 +942,13 @@ public List createIndexes(final ClientSession clientSession, final List< private List executeCreateIndexes(@Nullable final ClientSession clientSession, final List indexes, final CreateIndexOptions createIndexOptions) { - executor.execute(operations.createIndexes(indexes, createIndexOptions), readConcern, clientSession); + getExecutor(operations.createTimeoutSettings(createIndexOptions)) + .execute(operations.createIndexes(indexes, createIndexOptions), readConcern, clientSession); return IndexHelper.getIndexNames(indexes, codecRegistry); } private List executeCreateSearchIndexes(final List searchIndexModels) { - executor.execute(operations.createSearchIndexes(searchIndexModels), readConcern, null); + getExecutor(timeoutSettings).execute(operations.createSearchIndexes(searchIndexModels), readConcern, null); return IndexHelper.getSearchIndexNames(searchIndexModels); } @@ -1038,13 +1048,16 @@ public void dropIndexes(final ClientSession clientSession, final DropIndexOption } private void executeDropIndex(@Nullable final ClientSession clientSession, final String indexName, - final DropIndexOptions dropIndexOptions) { - notNull("dropIndexOptions", dropIndexOptions); - executor.execute(operations.dropIndex(indexName, dropIndexOptions), readConcern, clientSession); + final DropIndexOptions options) { + notNull("options", options); + getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.dropIndex(indexName, options), readConcern, clientSession); } - private void executeDropIndex(@Nullable final ClientSession clientSession, final Bson keys, final DropIndexOptions dropIndexOptions) { - executor.execute(operations.dropIndex(keys, dropIndexOptions), readConcern, clientSession); + private void executeDropIndex(@Nullable final ClientSession clientSession, final Bson keys, final DropIndexOptions options) { + notNull("options", options); + getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.dropIndex(keys, options), readConcern, clientSession); } @Override @@ -1071,10 +1084,8 @@ public void renameCollection(final ClientSession clientSession, final MongoNames private void executeRenameCollection(@Nullable final ClientSession clientSession, final MongoNamespace newCollectionNamespace, final RenameCollectionOptions renameCollectionOptions) { - executor.execute(new RenameCollectionOperation(timeoutSettings, getNamespace(), - newCollectionNamespace, writeConcern) - .dropTarget(renameCollectionOptions.isDropTarget()), - readConcern, clientSession); + getExecutor(timeoutSettings) + .execute(operations.renameCollection(newCollectionNamespace, renameCollectionOptions), readConcern, clientSession); } private DeleteResult executeDelete(@Nullable final ClientSession clientSession, final Bson filter, final 
DeleteOptions deleteOptions, @@ -1106,7 +1117,8 @@ private BulkWriteResult executeSingleWriteRequest(@Nullable final ClientSession final WriteOperation writeOperation, final WriteRequest.Type type) { try { - return executor.execute(writeOperation, readConcern, clientSession); + return getExecutor(timeoutSettings) + .execute(writeOperation, readConcern, clientSession); } catch (MongoBulkWriteException e) { if (e.getWriteErrors().isEmpty()) { throw new MongoWriteConcernException(e.getWriteConcernError(), @@ -1163,4 +1175,8 @@ private UpdateResult toUpdateResult(final com.mongodb.bulk.BulkWriteResult resul } } + private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); + } + } diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java index 795c88fcbb5..80b91796ca0 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java @@ -216,8 +216,7 @@ private TResult executeCommand(@Nullable final ClientSession clientSes if (clientSession != null && clientSession.hasActiveTransaction() && !readPreference.equals(ReadPreference.primary())) { throw new MongoClientException("Read preference in a transaction must be primary"); } - return executor.execute(operations.commandRead(command, resultClass), - readPreference, readConcern, clientSession); + return getExecutor().execute(operations.commandRead(command, resultClass), readPreference, readConcern, clientSession); } @Override @@ -232,7 +231,7 @@ public void drop(final ClientSession clientSession) { } private void executeDrop(@Nullable final ClientSession clientSession) { - executor.execute(operations.dropDatabase(), readConcern, clientSession); + getExecutor().execute(operations.dropDatabase(), readConcern, clientSession); } @Override @@ -302,8 +301,8 @@ public void createCollection(final ClientSession clientSession, final String col private void executeCreateCollection(@Nullable final ClientSession clientSession, final String collectionName, final CreateCollectionOptions createCollectionOptions) { - executor.execute(operations.createCollection(collectionName, createCollectionOptions, autoEncryptionSettings), readConcern, - clientSession); + getExecutor().execute(operations.createCollection(collectionName, createCollectionOptions, autoEncryptionSettings), + readConcern, clientSession); } @Override @@ -411,6 +410,10 @@ private ChangeStreamIterable createChangeStreamIterable(@Null private void executeCreateView(@Nullable final ClientSession clientSession, final String viewName, final String viewOn, final List pipeline, final CreateViewOptions createViewOptions) { notNull("createViewOptions", createViewOptions); - executor.execute(operations.createView(viewName, viewOn, pipeline, createViewOptions), readConcern, clientSession); + getExecutor().execute(operations.createView(viewName, viewOn, pipeline, createViewOptions), readConcern, clientSession); + } + + private OperationExecutor getExecutor() { + return executor.withTimeoutSettings(timeoutSettings); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java index 6bb3bd995e6..0e1a46a8dd7 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java +++ 
b/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java @@ -63,8 +63,10 @@ ClientSession getClientSession() { return clientSession; } - OperationExecutor getExecutor() { - return executor; + protected abstract OperationExecutor getExecutor(); + + OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); } ReadPreference getReadPreference() { @@ -100,6 +102,9 @@ public TimeoutMode getTimeoutMode() { } public MongoIterable timeoutMode(final TimeoutMode timeoutMode) { + if (timeoutSettings.getTimeoutMS() == null) { + throw new IllegalArgumentException("TimeoutMode requires timeoutMS to be set."); + } this.timeoutMode = timeoutMode; return this; } @@ -146,6 +151,6 @@ public > A into(final A target) { } private BatchCursor execute() { - return executor.execute(asReadOperation(), readPreference, readConcern, clientSession); + return getExecutor().execute(asReadOperation(), readPreference, readConcern, clientSession); } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java b/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java index 3786dc1ad6f..e812d021d37 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java +++ b/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java @@ -19,6 +19,7 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.client.ClientSession; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.ReadOperation; import com.mongodb.internal.operation.WriteOperation; import com.mongodb.lang.Nullable; @@ -33,10 +34,10 @@ public interface OperationExecutor { /** * Execute the read operation with the given read preference. * - * @param the operations result type. - * @param operation the read operation. + * @param the operations result type. + * @param operation the read operation. * @param readPreference the read preference. - * @param readConcern the read concern + * @param readConcern the read concern * @return the result of executing the operation. */ T execute(ReadOperation operation, ReadPreference readPreference, ReadConcern readConcern); @@ -44,9 +45,9 @@ public interface OperationExecutor { /** * Execute the write operation. * - * @param operation the write operation. + * @param the operations result type. + * @param operation the write operation. * @param readConcern the read concern - * @param the operations result type. * @return the result of executing the operation. */ T execute(WriteOperation operation, ReadConcern readConcern); @@ -54,11 +55,11 @@ public interface OperationExecutor { /** * Execute the read operation with the given read preference. * - * @param the operations result type. - * @param operation the read operation. + * @param the operations result type. + * @param operation the read operation. * @param readPreference the read preference. - * @param readConcern the read concern - * @param session the session to associate this operation with + * @param readConcern the read concern + * @param session the session to associate this operation with * @return the result of executing the operation. */ T execute(ReadOperation operation, ReadPreference readPreference, ReadConcern readConcern, @Nullable ClientSession session); @@ -66,11 +67,20 @@ public interface OperationExecutor { /** * Execute the write operation. * - * @param operation the write operation. + * @param the operations result type. 
+ * @param operation the write operation. * @param readConcern the read concern - * @param session the session to associate this operation with - * @param the operations result type. + * @param session the session to associate this operation with * @return the result of executing the operation. */ T execute(WriteOperation operation, ReadConcern readConcern, @Nullable ClientSession session); + + /** + * Create a new OperationExecutor with a specific TimeoutContext + * + * @param timeoutSettings the TimeoutContext to use for the operations + * @return the new operation executor with the set timeout context + * @since CSOT + */ + OperationExecutor withTimeoutSettings(TimeoutSettings timeoutSettings); } diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractMainTransactionsTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractMainTransactionsTest.java index 4179a9962d3..030677b9f79 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractMainTransactionsTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractMainTransactionsTest.java @@ -55,6 +55,13 @@ public AbstractMainTransactionsTest(final String filename, final String descript || description.equals("distinct")) && isSharded() && serverVersionLessThan(4, 4)); + + // TODO (CSOT) - JAVA-4067 + assumeFalse(description.equals("add UnknownTransactionCommitResult label to MaxTimeMSExpired")); + assumeFalse(description.equals("add UnknownTransactionCommitResult label to writeConcernError MaxTimeMSExpired")); + assumeFalse(description.equals("defaultTransactionOptions override client options")); + assumeFalse(description.equals("startTransaction options override defaults")); + assumeFalse(description.equals("transaction options inherited from defaultTransactionOptions")); } @Parameterized.Parameters(name = "{0}: {1}") diff --git a/driver-sync/src/test/functional/com/mongodb/client/WithTransactionHelperTransactionsTest.java b/driver-sync/src/test/functional/com/mongodb/client/WithTransactionHelperTransactionsTest.java index 4a3a2973c9b..226e0f06b1d 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/WithTransactionHelperTransactionsTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/WithTransactionHelperTransactionsTest.java @@ -45,6 +45,9 @@ public WithTransactionHelperTransactionsTest(final String filename, final String final BsonDocument definition, final boolean skipTest) { super(filename, description, databaseName, collectionName, data, definition, skipTest, true); assumeFalse(isServerlessTest()); + + // TODO (CSOT) - JAVA-4067 / JAVA-4066 + assumeFalse(description.equals("commit is not retried after MaxTimeMSExpired error")); } @Override diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java index f611e3d704a..33e22167163 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java @@ -242,29 +242,30 @@ OperationResult executeListCollections(final BsonDocument operation) { ListCollectionsIterable iterable = session == null ? 
database.listCollections(BsonDocument.class) : database.listCollections(session, BsonDocument.class); - for (Map.Entry cur : arguments.entrySet()) { - switch (cur.getKey()) { - case "session": - break; - case "filter": - iterable.filter(cur.getValue().asDocument()); - break; - case "batchSize": - iterable.batchSize(cur.getValue().asNumber().intValue()); - break; - case "timeoutMode": - setTimeoutMode(iterable, cur); - break; - case "maxTimeMS": - iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); - break; - default: - throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + return resultOf(() -> { + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + break; + case "filter": + iterable.filter(cur.getValue().asDocument()); + break; + case "batchSize": + iterable.batchSize(cur.getValue().asNumber().intValue()); + break; + case "timeoutMode": + setTimeoutMode(iterable, cur); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } } - } - return resultOf(() -> - new BsonArray(iterable.into(new ArrayList<>()))); + return new BsonArray(iterable.into(new ArrayList<>())); + }); } OperationResult executeListCollectionNames(final BsonDocument operation) { @@ -298,17 +299,17 @@ OperationResult executeListCollectionNames(final BsonDocument operation) { } OperationResult executeListIndexes(final BsonDocument operation) { - ListIndexesIterable iterable = createListIndexesIterable(operation); - - return resultOf(() -> - new BsonArray(iterable.into(new ArrayList<>()))); + return resultOf(() -> { + ListIndexesIterable iterable = createListIndexesIterable(operation); + return new BsonArray(iterable.into(new ArrayList<>())); + }); } OperationResult executeListIndexNames(final BsonDocument operation) { - ListIndexesIterable iterable = createListIndexesIterable(operation); - - return resultOf(() -> - new BsonArray(iterable.into(new ArrayList<>()).stream().map(document -> document.getString("name")).collect(toList()))); + return resultOf(() -> { + ListIndexesIterable iterable = createListIndexesIterable(operation); + return new BsonArray(iterable.into(new ArrayList<>()).stream().map(document -> document.getString("name")).collect(toList())); + }); } private ListIndexesIterable createListIndexesIterable(final BsonDocument operation) { @@ -339,19 +340,19 @@ private ListIndexesIterable createListIndexesIterable(final BsonDo } OperationResult executeFind(final BsonDocument operation) { - FindIterable iterable = createFindIterable(operation); - return resultOf(() -> - new BsonArray(iterable.into(new ArrayList<>()))); + return resultOf(() -> { + FindIterable iterable = createFindIterable(operation); + return new BsonArray(iterable.into(new ArrayList<>())); + }); } OperationResult executeFindOne(final BsonDocument operation) { - FindIterable iterable = createFindIterable(operation); - return resultOf(iterable::first); + return resultOf(() -> createFindIterable(operation).first()); } OperationResult createFindCursor(final BsonDocument operation) { - FindIterable iterable = createFindIterable(operation); return resultOf(() -> { + FindIterable iterable = createFindIterable(operation); entities.addCursor(operation.getString("saveResultAsEntity", new BsonString(createRandomEntityId())).getValue(), iterable.cursor()); return null; @@ -647,40 +648,40 @@ OperationResult 
executeAggregate(final BsonDocument operation) { } else { throw new UnsupportedOperationException("Unsupported entity type with name: " + entityName); } - for (Map.Entry cur : arguments.entrySet()) { - switch (cur.getKey()) { - case "pipeline": - case "session": - break; - case "batchSize": - iterable.batchSize(cur.getValue().asNumber().intValue()); - break; - case "allowDiskUse": - iterable.allowDiskUse(cur.getValue().asBoolean().getValue()); - break; - case "let": - iterable.let(cur.getValue().asDocument()); - break; - case "comment": - iterable.comment(cur.getValue()); - break; - case "timeoutMode": - setTimeoutMode(iterable, cur); - break; - case "maxTimeMS": - iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); - break; - case "maxAwaitTimeMS": - iterable.maxAwaitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); - break; - default: - throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); - } - } - String lastStageName = pipeline.isEmpty() ? null : pipeline.get(pipeline.size() - 1).getFirstKey(); - boolean useToCollection = Objects.equals(lastStageName, "$out") || Objects.equals(lastStageName, "$merge"); return resultOf(() -> { + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "pipeline": + case "session": + break; + case "batchSize": + iterable.batchSize(cur.getValue().asNumber().intValue()); + break; + case "allowDiskUse": + iterable.allowDiskUse(cur.getValue().asBoolean().getValue()); + break; + case "let": + iterable.let(cur.getValue().asDocument()); + break; + case "comment": + iterable.comment(cur.getValue()); + break; + case "timeoutMode": + setTimeoutMode(iterable, cur); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + case "maxAwaitTimeMS": + iterable.maxAwaitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + String lastStageName = pipeline.isEmpty() ? 
null : pipeline.get(pipeline.size() - 1).getFirstKey(); + boolean useToCollection = Objects.equals(lastStageName, "$out") || Objects.equals(lastStageName, "$merge"); if (!pipeline.isEmpty() && useToCollection) { iterable.toCollection(); return null; @@ -1834,8 +1835,14 @@ private static void invokeTimeoutMode(final MongoIterable iterable timeoutModeMethod.invoke(iterable, timeoutMode); } catch (NoSuchMethodException e) { throw new UnsupportedOperationException("Unsupported timeoutMode method for class: " + iterable.getClass(), e); - } catch (InvocationTargetException | IllegalAccessException e) { + } catch (IllegalAccessException e) { throw new UnsupportedOperationException("Unable to set timeoutMode method for class: " + iterable.getClass(), e); + } catch (InvocationTargetException e) { + Throwable targetException = e.getTargetException(); + if (targetException instanceof IllegalArgumentException) { + throw (IllegalArgumentException) targetException; + } + throw new UnsupportedOperationException("Unable to set timeoutMode method for class: " + iterable.getClass(), targetException); } } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java index 7dec3768a70..6827ac5e39b 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java @@ -304,6 +304,7 @@ private void assertOutcome(final UnifiedTestContext context) { private void assertOperation(final UnifiedTestContext context, final BsonDocument operation, final int operationIndex) { OperationResult result = executeOperation(context, operation, operationIndex); context.getAssertionContext().push(ContextElement.ofCompletedOperation(operation, result, operationIndex)); + if (!operation.getBoolean("ignoreResultAndError", BsonBoolean.FALSE).getValue()) { if (operation.containsKey("expectResult")) { assertNull(context.getAssertionContext().getMessage("The operation expects a result but an exception occurred"), diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy index ac3fb98281c..4ad2f49c07f 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy @@ -597,8 +597,8 @@ class GridFSBucketSpecification extends Specification { then: executor.getReadPreference() == primary() - expect executor.getReadOperation(), isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, - new MongoNamespace('test.fs.files'), decoder).filter(new BsonDocument())) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(new MongoNamespace('test.fs.files'), decoder) + .filter(new BsonDocument())) when: def filter = new BsonDocument('filename', new BsonString('filename')) @@ -608,7 +608,7 @@ class GridFSBucketSpecification extends Specification { then: executor.getReadPreference() == secondary() expect executor.getReadOperation(), isTheSameAs( - new FindOperation(TIMEOUT_SETTINGS, new MongoNamespace('test.fs.files'), decoder).filter(filter)) + new FindOperation(new MongoNamespace('test.fs.files'), decoder).filter(filter)) } def 'should throw an exception if file not found when opening by name'() { diff --git 
a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy index 76c2e91e9b6..632e59a16d0 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy @@ -38,7 +38,6 @@ import spock.lang.Specification import java.util.function.Consumer -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary @@ -68,7 +67,7 @@ class GridFSFindIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, namespace, gridFSFileCodec) + expect operation, isTheSameAs(new FindOperation(namespace, gridFSFileCodec) .filter(new BsonDocument()).retryReads(true)) readPreference == secondary() @@ -86,7 +85,7 @@ class GridFSFindIterableSpecification extends Specification { operation = executor.getReadOperation() as FindOperation then: 'should use the overrides' - expect operation, isTheSameAs(new FindOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, gridFSFileCodec) + expect operation, isTheSameAs(new FindOperation(namespace, gridFSFileCodec) .filter(new BsonDocument('filter', new BsonInt32(2))) .sort(new BsonDocument('sort', new BsonInt32(2))) .batchSize(99) @@ -112,7 +111,7 @@ class GridFSFindIterableSpecification extends Specification { def operation = executor.getReadOperation() as FindOperation then: - expect operation, isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, namespace, gridFSFileCodec) + expect operation, isTheSameAs(new FindOperation(namespace, gridFSFileCodec) .filter(new BsonDocument('filter', new BsonInt32(1))) .sort(new BsonDocument('sort', new BsonInt32(1))) .cursorType(CursorType.NonTailable) diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy index bc0fdcc5ecf..733ee4c57df 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy @@ -41,8 +41,6 @@ import spock.lang.Specification import java.util.function.Consumer -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary @@ -74,7 +72,7 @@ class AggregateIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new AggregateOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateOperation(namespace, [new BsonDocument('$match', new BsonInt32(1))], new DocumentCodec()) .retryReads(true)) readPreference == secondary() @@ -91,7 +89,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateOperation then: 'should use the overrides' - expect operation, isTheSameAs(new 
AggregateOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME, namespace, + expect operation, isTheSameAs(new AggregateOperation(namespace, [new BsonDocument('$match', new BsonInt32(1))], new DocumentCodec()) .retryReads(true) .collation(collation) @@ -110,7 +108,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateOperation then: 'should use hint not hint string' - expect operation, isTheSameAs(new AggregateOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateOperation(namespace, [new BsonDocument('$match', new BsonInt32(1))], new DocumentCodec()) .hint(new BsonDocument('a', new BsonInt32(1)))) } @@ -135,7 +133,7 @@ class AggregateIterableSpecification extends Specification { def operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern, AggregationLevel.COLLECTION) .allowDiskUse(true) @@ -166,7 +164,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern, AggregationLevel.DATABASE) @@ -197,7 +195,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern) .allowDiskUse(true) @@ -220,7 +218,7 @@ class AggregateIterableSpecification extends Specification { def operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern, AggregationLevel.COLLECTION) .hint(new BsonString('x_1'))) @@ -235,7 +233,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the hint not the hint string' - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern, AggregationLevel.COLLECTION) .hint(new BsonDocument('x', new BsonInt32(1)))) @@ -262,7 +260,7 @@ class AggregateIterableSpecification extends Specification { def operation = executor.getReadOperation() as 
AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))], readConcern, writeConcern, @@ -294,7 +292,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$merge', new BsonDocument('into', new BsonDocument('db', new BsonString('db2')).append('coll', new BsonString(collectionName))))], @@ -327,7 +325,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))], readConcern, writeConcern, @@ -358,7 +356,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))], readConcern, writeConcern) @@ -383,7 +381,7 @@ class AggregateIterableSpecification extends Specification { def operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, namespace, pipeline, readConcern, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, pipeline, readConcern, writeConcern, AggregationLevel.COLLECTION)) when: @@ -426,7 +424,7 @@ class AggregateIterableSpecification extends Specification { def operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), BsonDocument.parse('{$out: {s3: true}}')], readConcern, writeConcern, AggregationLevel.COLLECTION) ) @@ -445,7 +443,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), BsonDocument.parse('{$out: {s3: true}}')], readConcern, writeConcern, AggregationLevel.DATABASE) ) @@ -464,7 +462,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as 
AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), BsonDocument.parse('{$out: {s3: true}}')], readConcern, writeConcern)) @@ -482,7 +480,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, [new BsonDocument('$match', new BsonInt32(1)), BsonDocument.parse('{$out: {db: "testDB", coll: "testCollection"}}')], readConcern, writeConcern)) diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy index 42e5fd48fe0..b66373b221f 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy @@ -42,7 +42,6 @@ import spock.lang.Specification import java.util.function.Consumer -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary @@ -72,7 +71,7 @@ class ChangeStreamIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new ChangeStreamOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new ChangeStreamOperation(namespace, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [BsonDocument.parse('{$match: 1}')], codec, ChangeStreamLevel.COLLECTION) .retryReads(true)) @@ -91,7 +90,7 @@ class ChangeStreamIterableSpecification extends Specification { operation = executor.getReadOperation() as ChangeStreamOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ChangeStreamOperation(TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME, namespace, + expect operation, isTheSameAs(new ChangeStreamOperation(namespace, FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.WHEN_AVAILABLE, [BsonDocument.parse('{$match: 1}')], codec, ChangeStreamLevel.COLLECTION) .retryReads(true) diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy index 3a4e601a32d..3baac05653a 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy @@ -37,7 +37,6 @@ import spock.lang.Specification import java.util.function.Consumer -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary @@ -66,7 +65,7 @@ class DistinctIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new DistinctOperation(TIMEOUT_SETTINGS, namespace, 'field', new DocumentCodec()) + expect 
operation, isTheSameAs(new DistinctOperation(namespace, 'field', new DocumentCodec()) .filter(new BsonDocument()).retryReads(true)) readPreference == secondary() @@ -77,7 +76,7 @@ class DistinctIterableSpecification extends Specification { then: 'should use the overrides' expect operation, isTheSameAs( - new DistinctOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, 'field', new DocumentCodec()) + new DistinctOperation(namespace, 'field', new DocumentCodec()) .filter(new BsonDocument('field', new BsonInt32(1))).collation(collation).retryReads(true)) } diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy index ae41f65c96a..e2f7cae2d62 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy @@ -39,7 +39,6 @@ import spock.lang.Specification import java.util.function.Consumer import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -84,7 +83,7 @@ class FindIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, namespace, new DocumentCodec()) + expect operation, isTheSameAs(new FindOperation(namespace, new DocumentCodec()) .filter(new BsonDocument('filter', new BsonInt32(1))) .sort(new BsonDocument('sort', new BsonInt32(1))) .projection(new BsonDocument('projection', new BsonInt32(1))) @@ -129,7 +128,7 @@ class FindIterableSpecification extends Specification { then: 'should use the overrides' expect operation, isTheSameAs( - new FindOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME, namespace, new DocumentCodec()) + new FindOperation(namespace, new DocumentCodec()) .filter(new BsonDocument('filter', new BsonInt32(2))) .sort(new BsonDocument('sort', new BsonInt32(2))) .projection(new BsonDocument('projection', new BsonInt32(2))) @@ -166,7 +165,7 @@ class FindIterableSpecification extends Specification { operation = executor.getReadOperation() as FindOperation then: 'should set an empty doc for the filter' - expect operation, isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, namespace, new DocumentCodec()) + expect operation, isTheSameAs(new FindOperation(namespace, new DocumentCodec()) .filter(new BsonDocument()).retryReads(true)) } @@ -209,7 +208,7 @@ class FindIterableSpecification extends Specification { def operation = executor.getReadOperation() as FindOperation then: - expect operation, isTheSameAs(new FindOperation(TIMEOUT_SETTINGS, namespace, new DocumentCodec()) + expect operation, isTheSameAs(new FindOperation(namespace, new DocumentCodec()) .filter(new BsonDocument('filter', new BsonInt32(1))) .sort(new BsonDocument('sort', new BsonInt32(1))) .cursorType(CursorType.NonTailable) diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy index 3c92c8a1138..559935c05ee 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy +++ 
b/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy @@ -33,7 +33,6 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit import java.util.function.Consumer -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary @@ -63,7 +62,7 @@ class ListCollectionsIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new ListCollectionsOperation(TIMEOUT_SETTINGS, 'db', new DocumentCodec()) + expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()) .filter(new BsonDocument('filter', new BsonInt32(1))).batchSize(100) .retryReads(true) .authorizedCollections(false)) @@ -75,7 +74,7 @@ class ListCollectionsIterableSpecification extends Specification { operation = executor.getReadOperation() as ListCollectionsOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ListCollectionsOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, 'db', new DocumentCodec()) + expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()) .filter(new BsonDocument('filter', new BsonInt32(2))).batchSize(99) .retryReads(true)) @@ -85,7 +84,7 @@ class ListCollectionsIterableSpecification extends Specification { operation = executor.getReadOperation() as ListCollectionsOperation then: 'should create operation with nameOnly' - expect operation, isTheSameAs(new ListCollectionsOperation(TIMEOUT_SETTINGS, 'db', new DocumentCodec()).nameOnly(true) + expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()).nameOnly(true) .retryReads(true)) when: 'requesting `authorizedCollections`' @@ -93,7 +92,7 @@ class ListCollectionsIterableSpecification extends Specification { operation = executor.getReadOperation() as ListCollectionsOperation then: 'should create operation with `authorizedCollections`' - expect operation, isTheSameAs(new ListCollectionsOperation(TIMEOUT_SETTINGS, 'db', new DocumentCodec()) + expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()) .authorizedCollections(true) .nameOnly(true) .retryReads(true)) diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy index 6c93d0c84e1..8df91709486 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy @@ -30,7 +30,6 @@ import spock.lang.Specification import java.util.function.Consumer -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary @@ -57,7 +56,7 @@ class ListDatabasesIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new ListDatabasesOperation(TIMEOUT_SETTINGS, new DocumentCodec()) + expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()) .retryReads(true)) readPreference == secondary() @@ -67,7 +66,7 @@ class ListDatabasesIterableSpecification extends 
Specification { operation = executor.getReadOperation() as ListDatabasesOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ListDatabasesOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, new DocumentCodec()) + expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()) .filter(BsonDocument.parse('{a: 1}')).nameOnly(true).retryReads(true)) when: 'overriding initial options' @@ -76,7 +75,7 @@ class ListDatabasesIterableSpecification extends Specification { operation = executor.getReadOperation() as ListDatabasesOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ListDatabasesOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, new DocumentCodec()) + expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()) .filter(BsonDocument.parse('{a: 1}')).nameOnly(true).authorizedDatabasesOnly(true).retryReads(true)) } diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy index d5b1b21c355..d11c59d46d2 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy @@ -31,7 +31,6 @@ import spock.lang.Specification import java.util.function.Consumer -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary @@ -59,7 +58,7 @@ class ListIndexesIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new ListIndexesOperation(TIMEOUT_SETTINGS, namespace, new DocumentCodec()) + expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()) .batchSize(100).retryReads(true)) readPreference == secondary() @@ -71,7 +70,7 @@ class ListIndexesIterableSpecification extends Specification { operation = executor.getReadOperation() as ListIndexesOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ListIndexesOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, new DocumentCodec()) + expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()) .batchSize(99).retryReads(true)) } diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy index ca51b42aaaf..b6cb01d31cb 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy @@ -42,7 +42,6 @@ import spock.lang.Specification import java.util.function.Consumer -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary @@ -73,7 +72,7 @@ class MapReduceIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(namespace, new 
BsonJavaScript('map'), new BsonJavaScript('reduce'), new DocumentCodec()) .verbose(true)) readPreference == secondary() @@ -92,7 +91,7 @@ class MapReduceIterableSpecification extends Specification { operation = (executor.getReadOperation() as MapReduceIterableImpl.WrappedMapReduceReadOperation).getOperation() then: 'should use the overrides' - expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, + expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(namespace, new BsonJavaScript('map'), new BsonJavaScript('reduce'), new DocumentCodec()) .filter(new BsonDocument('filter', new BsonInt32(1))) .finalizeFunction(new BsonJavaScript('finalize')) @@ -129,7 +128,7 @@ class MapReduceIterableSpecification extends Specification { mapReduceIterable.iterator() def operation = executor.getWriteOperation() as MapReduceToCollectionOperation - def expectedOperation = new MapReduceToCollectionOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, + def expectedOperation = new MapReduceToCollectionOperation(namespace, new BsonJavaScript('map'), new BsonJavaScript('reduce'), 'collName', writeConcern) .databaseName(collectionNamespace.getDatabaseName()) .filter(new BsonDocument('filter', new BsonInt32(1))) diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy index a4e2c785315..9200f3a7f5d 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy @@ -92,7 +92,6 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit -import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.primary @@ -211,7 +210,7 @@ class MongoCollectionSpecification extends Specification { def filter = new BsonDocument() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) - def expectedOperation = new CountDocumentsOperation(TIMEOUT_SETTINGS, namespace) + def expectedOperation = new CountDocumentsOperation(namespace) .filter(filter).retryReads(true) def countMethod = collection.&countDocuments @@ -252,7 +251,7 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([1L, 2L]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) - def expectedOperation = new EstimatedDocumentCountOperation(TIMEOUT_SETTINGS, namespace) + def expectedOperation = new EstimatedDocumentCountOperation(namespace) .retryReads(true) def countMethod = collection.&estimatedDocumentCount @@ -266,7 +265,7 @@ class MongoCollectionSpecification extends Specification { expect operation, isTheSameAs(expectedOperation) when: - expectedOperation = new EstimatedDocumentCountOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace).retryReads(true) + expectedOperation = new EstimatedDocumentCountOperation(namespace).retryReads(true) execute(countMethod, session, new EstimatedDocumentCountOptions().maxTime(100, MILLISECONDS)) operation = 
executor.getReadOperation() as EstimatedDocumentCountOperation @@ -476,7 +475,7 @@ class MongoCollectionSpecification extends Specification { def collection = new MongoCollectionImpl(namespace, BsonDocument, codecRegistry, readPreference, writeConcern, retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { boolean ordered, WriteConcern wc, Boolean bypassValidation, List filters -> - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, [ + new MixedBulkWriteOperation(namespace, [ new InsertRequest(BsonDocument.parse('{_id: 1}')), new UpdateRequest(BsonDocument.parse('{a: 2}'), BsonDocument.parse('{a: 200}'), REPLACE) .multi(false).upsert(true).collation(collation).hint(hint).hintString(hintString), @@ -570,7 +569,7 @@ class MongoCollectionSpecification extends Specification { def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { WriteConcern wc, Boolean bypassDocumentValidation -> - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], + new MixedBulkWriteOperation(namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], true, wc, retryWrites).bypassDocumentValidation(bypassDocumentValidation) } def insertOneMethod = collection.&insertOne @@ -615,7 +614,7 @@ class MongoCollectionSpecification extends Specification { def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { boolean ordered, WriteConcern wc, Boolean bypassDocumentValidation -> - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + new MixedBulkWriteOperation(namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new InsertRequest(new BsonDocument('_id', new BsonInt32(2)))], ordered, wc, retryWrites).bypassDocumentValidation(bypassDocumentValidation) @@ -690,7 +689,7 @@ class MongoCollectionSpecification extends Specification { then: result.wasAcknowledged() == writeConcern.isAcknowledged() - expect operation, isTheSameAs(new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace, [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(false)], true, writeConcern, retryWrites)) result == expectedResult @@ -702,7 +701,7 @@ class MongoCollectionSpecification extends Specification { then: result.wasAcknowledged() == writeConcern.isAcknowledged() - expect operation, isTheSameAs(new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace, [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(false).collation(collation)], true, writeConcern, retryWrites)) result == expectedResult @@ -753,7 +752,7 @@ class MongoCollectionSpecification extends Specification { then: result.wasAcknowledged() == writeConcern.isAcknowledged() - expect operation, isTheSameAs(new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace, [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(true)], true, writeConcern, retryWrites)) result == expectedResult @@ -764,7 +763,7 @@ class MongoCollectionSpecification extends Specification { then: 
result.wasAcknowledged() == writeConcern.isAcknowledged() - expect operation, isTheSameAs(new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace, [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(true).collation(collation)], true, writeConcern, retryWrites)) result == expectedResult @@ -791,7 +790,7 @@ class MongoCollectionSpecification extends Specification { retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassValidation, Collation collation -> - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + new MixedBulkWriteOperation(namespace, [new UpdateRequest(new BsonDocument('a', new BsonInt32(1)), new BsonDocument('a', new BsonInt32(10)), REPLACE) .collation(collation).upsert(upsert).hint(hint).hintString(hintString)], true, wc, retryWrites) .bypassDocumentValidation(bypassValidation) @@ -861,7 +860,7 @@ class MongoCollectionSpecification extends Specification { retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassDocumentValidation, Collation collation, List filters, BsonDocument hintDoc, String hintStr -> - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + new MixedBulkWriteOperation(namespace, [new UpdateRequest(new BsonDocument('a', new BsonInt32(1)), new BsonDocument('a', new BsonInt32(10)), UPDATE) .multi(false).upsert(upsert).collation(collation).arrayFilters(filters) .hint(hintDoc).hintString(hintStr)], true, wc, retryWrites) @@ -910,7 +909,7 @@ class MongoCollectionSpecification extends Specification { retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassDocumentValidation, Collation collation, List filters, BsonDocument hintDoc, String hintStr -> - new MixedBulkWriteOperation(TIMEOUT_SETTINGS, namespace, + new MixedBulkWriteOperation(namespace, [new UpdateRequest(new BsonDocument('a', new BsonInt32(1)), new BsonDocument('a', new BsonInt32(10)), UPDATE) .multi(true).upsert(upsert).collation(collation).arrayFilters(filters) .hint(hintDoc).hintString(hintStr)], true, wc, retryWrites) @@ -990,7 +989,7 @@ class MongoCollectionSpecification extends Specification { }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) - def expectedOperation = new FindAndDeleteOperation(TIMEOUT_SETTINGS, namespace, ACKNOWLEDGED, retryWrites, + def expectedOperation = new FindAndDeleteOperation(namespace, ACKNOWLEDGED, retryWrites, new DocumentCodec()) .filter(new BsonDocument('a', new BsonInt32(1))) def findOneAndDeleteMethod = collection.&findOneAndDelete @@ -1004,7 +1003,7 @@ class MongoCollectionSpecification extends Specification { when: expectedOperation = - new FindAndDeleteOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, ACKNOWLEDGED, retryWrites, new DocumentCodec()) + new FindAndDeleteOperation(namespace, ACKNOWLEDGED, retryWrites, new DocumentCodec()) .filter(new BsonDocument('a', new BsonInt32(1))) .projection(new BsonDocument('projection', new BsonInt32(1))) .collation(collation) @@ -1033,7 +1032,7 @@ class MongoCollectionSpecification extends Specification { }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, 
writeConcern, retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) - def expectedOperation = new FindAndReplaceOperation(TIMEOUT_SETTINGS, namespace, writeConcern, + def expectedOperation = new FindAndReplaceOperation(namespace, writeConcern, retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) .filter(new BsonDocument('a', new BsonInt32(1))) def findOneAndReplaceMethod = collection.&findOneAndReplace @@ -1046,7 +1045,7 @@ class MongoCollectionSpecification extends Specification { expect operation, isTheSameAs(expectedOperation) when: - expectedOperation = new FindAndReplaceOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, writeConcern, + expectedOperation = new FindAndReplaceOperation(namespace, writeConcern, retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) .filter(new BsonDocument('a', new BsonInt32(1))) .projection(new BsonDocument('projection', new BsonInt32(1))) @@ -1078,7 +1077,7 @@ class MongoCollectionSpecification extends Specification { }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) - def expectedOperation = new FindAndUpdateOperation(TIMEOUT_SETTINGS, namespace, writeConcern, retryWrites, + def expectedOperation = new FindAndUpdateOperation(namespace, writeConcern, retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) .filter(new BsonDocument('a', new BsonInt32(1))) def findOneAndUpdateMethod = collection.&findOneAndUpdate @@ -1091,7 +1090,7 @@ class MongoCollectionSpecification extends Specification { expect operation, isTheSameAs(expectedOperation) when: - expectedOperation = new FindAndUpdateOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, writeConcern, retryWrites, + expectedOperation = new FindAndUpdateOperation(namespace, writeConcern, retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) .filter(new BsonDocument('a', new BsonInt32(1))) .projection(new BsonDocument('projection', new BsonInt32(1))) @@ -1126,7 +1125,7 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) - def expectedOperation = new DropCollectionOperation(TIMEOUT_SETTINGS, namespace, ACKNOWLEDGED) + def expectedOperation = new DropCollectionOperation(namespace, ACKNOWLEDGED) def dropMethod = collection.&drop when: @@ -1150,7 +1149,7 @@ class MongoCollectionSpecification extends Specification { def createIndexesMethod = collection.&createIndexes when: - def expectedOperation = new CreateIndexesOperation(TIMEOUT_SETTINGS, namespace, + def expectedOperation = new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('key', new BsonInt32(1)))], ACKNOWLEDGED) def indexName = execute(createIndexMethod, session, new Document('key', 1)) def operation = executor.getWriteOperation() as CreateIndexesOperation @@ -1160,7 +1159,7 @@ class MongoCollectionSpecification extends Specification { indexName == 'key_1' when: - expectedOperation = new CreateIndexesOperation(TIMEOUT_SETTINGS, namespace, + expectedOperation = new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('key', new BsonInt32(1))), new IndexRequest(new BsonDocument('key1', new BsonInt32(1)))], ACKNOWLEDGED) def indexNames = 
execute(createIndexesMethod, session, [new IndexModel(new Document('key', 1)), @@ -1173,7 +1172,7 @@ class MongoCollectionSpecification extends Specification { indexNames == ['key_1', 'key1_1'] when: - expectedOperation = new CreateIndexesOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, + expectedOperation = new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('key', new BsonInt32(1))), new IndexRequest(new BsonDocument('key1', new BsonInt32(1)))], ACKNOWLEDGED) indexNames = execute(createIndexesMethod, session, @@ -1187,7 +1186,7 @@ class MongoCollectionSpecification extends Specification { indexNames == ['key_1', 'key1_1'] when: - expectedOperation = new CreateIndexesOperation(TIMEOUT_SETTINGS, namespace, + expectedOperation = new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('key', new BsonInt32(1))), new IndexRequest(new BsonDocument('key1', new BsonInt32(1)))], ACKNOWLEDGED) .commitQuorum(CreateIndexCommitQuorum.VOTING_MEMBERS) @@ -1202,7 +1201,7 @@ class MongoCollectionSpecification extends Specification { indexNames == ['key_1', 'key1_1'] when: - expectedOperation = new CreateIndexesOperation(TIMEOUT_SETTINGS, namespace, + expectedOperation = new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('key', new BsonInt32(1))) .background(true) .unique(true) @@ -1258,7 +1257,7 @@ class MongoCollectionSpecification extends Specification { def 'should validate the createIndexes data correctly'() { given: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, null, Stub(OperationExecutor)) + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor)) when: collection.createIndexes(null) @@ -1286,7 +1285,7 @@ class MongoCollectionSpecification extends Specification { def operation = executor.getReadOperation() as ListIndexesOperation then: - expect operation, isTheSameAs(new ListIndexesOperation(TIMEOUT_SETTINGS, namespace, new DocumentCodec()).retryReads(true)) + expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()).retryReads(true)) executor.getClientSession() == session when: @@ -1295,7 +1294,7 @@ class MongoCollectionSpecification extends Specification { indexes == [] then: - expect operation, isTheSameAs(new ListIndexesOperation(TIMEOUT_SETTINGS, namespace, new BsonDocumentCodec()).retryReads(true)) + expect operation, isTheSameAs(new ListIndexesOperation(namespace, new BsonDocumentCodec()).retryReads(true)) executor.getClientSession() == session when: @@ -1303,7 +1302,7 @@ class MongoCollectionSpecification extends Specification { operation = executor.getReadOperation() as ListIndexesOperation then: - expect operation, isTheSameAs(new ListIndexesOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, new DocumentCodec()).batchSize(10) + expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()).batchSize(10) .retryReads(true)) executor.getClientSession() == session @@ -1319,7 +1318,7 @@ class MongoCollectionSpecification extends Specification { def dropIndexMethod = collection.&dropIndex when: - def expectedOperation = new DropIndexOperation(TIMEOUT_SETTINGS, namespace, 'indexName', ACKNOWLEDGED) + def expectedOperation = new DropIndexOperation(namespace, 'indexName', ACKNOWLEDGED) execute(dropIndexMethod, session, 'indexName') def operation = executor.getWriteOperation() as DropIndexOperation @@ -1329,7 +1328,7 @@ class 
MongoCollectionSpecification extends Specification { when: def keys = new BsonDocument('x', new BsonInt32(1)) - expectedOperation = new DropIndexOperation(TIMEOUT_SETTINGS, namespace, keys, ACKNOWLEDGED) + expectedOperation = new DropIndexOperation(namespace, keys, ACKNOWLEDGED) execute(dropIndexMethod, session, keys) operation = executor.getWriteOperation() as DropIndexOperation @@ -1338,7 +1337,7 @@ class MongoCollectionSpecification extends Specification { executor.getClientSession() == session when: - expectedOperation = new DropIndexOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, keys, ACKNOWLEDGED) + expectedOperation = new DropIndexOperation(namespace, keys, ACKNOWLEDGED) execute(dropIndexMethod, session, keys, new DropIndexOptions().maxTime(100, MILLISECONDS)) operation = executor.getWriteOperation() as DropIndexOperation @@ -1355,7 +1354,7 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([null, null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) - def expectedOperation = new DropIndexOperation(TIMEOUT_SETTINGS, namespace, '*', ACKNOWLEDGED) + def expectedOperation = new DropIndexOperation(namespace, '*', ACKNOWLEDGED) def dropIndexesMethod = collection.&dropIndexes when: @@ -1367,7 +1366,7 @@ class MongoCollectionSpecification extends Specification { executor.getClientSession() == session when: - expectedOperation = new DropIndexOperation(TIMEOUT_SETTINGS_WITH_MAX_TIME, namespace, '*', ACKNOWLEDGED) + expectedOperation = new DropIndexOperation(namespace, '*', ACKNOWLEDGED) execute(dropIndexesMethod, session, new DropIndexOptions().maxTime(100, MILLISECONDS)) operation = executor.getWriteOperation() as DropIndexOperation @@ -1386,7 +1385,7 @@ class MongoCollectionSpecification extends Specification { true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) def newNamespace = new MongoNamespace(namespace.getDatabaseName(), 'newName') def renameCollectionOptions = new RenameCollectionOptions().dropTarget(dropTarget) - def expectedOperation = new RenameCollectionOperation(TIMEOUT_SETTINGS, namespace, newNamespace, ACKNOWLEDGED) + def expectedOperation = new RenameCollectionOperation(namespace, newNamespace, ACKNOWLEDGED) def renameCollection = collection.&renameCollection when: diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy index 174c78a28c9..67679700734 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy @@ -214,7 +214,7 @@ class MongoDatabaseSpecification extends Specification { def operation = executor.getWriteOperation() as DropDatabaseOperation then: - expect operation, isTheSameAs(new DropDatabaseOperation(TIMEOUT_SETTINGS, name, writeConcern)) + expect operation, isTheSameAs(new DropDatabaseOperation(name, writeConcern)) executor.getClientSession() == session where: @@ -268,7 +268,7 @@ class MongoDatabaseSpecification extends Specification { def operation = executor.getWriteOperation() as CreateCollectionOperation then: - expect operation, isTheSameAs(new CreateCollectionOperation(TIMEOUT_SETTINGS, name, collectionName, writeConcern)) + expect operation, isTheSameAs(new 
CreateCollectionOperation(name, collectionName, writeConcern)) executor.getClientSession() == session when: @@ -287,7 +287,7 @@ class MongoDatabaseSpecification extends Specification { operation = executor.getWriteOperation() as CreateCollectionOperation then: - expect operation, isTheSameAs(new CreateCollectionOperation(TIMEOUT_SETTINGS, name, collectionName, writeConcern) + expect operation, isTheSameAs(new CreateCollectionOperation(name, collectionName, writeConcern) .collation(collation) .capped(true) .maxDocuments(100) @@ -319,7 +319,7 @@ class MongoDatabaseSpecification extends Specification { def operation = executor.getWriteOperation() as CreateViewOperation then: - expect operation, isTheSameAs(new CreateViewOperation(TIMEOUT_SETTINGS, name, viewName, viewOn, + expect operation, isTheSameAs(new CreateViewOperation(name, viewName, viewOn, [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern)) executor.getClientSession() == session @@ -328,7 +328,7 @@ class MongoDatabaseSpecification extends Specification { operation = executor.getWriteOperation() as CreateViewOperation then: - expect operation, isTheSameAs(new CreateViewOperation(TIMEOUT_SETTINGS, name, viewName, viewOn, + expect operation, isTheSameAs(new CreateViewOperation(name, viewName, viewOn, [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern).collation(collation)) executor.getClientSession() == session @@ -341,7 +341,7 @@ class MongoDatabaseSpecification extends Specification { def viewName = 'view1' def viewOn = 'col1' def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, null, Stub(OperationExecutor)) + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor)) when: database.createView(viewName, viewOn, null) diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java b/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java index a605d6542e7..ebbf0b4cb65 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java @@ -19,6 +19,7 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.client.ClientSession; +import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.operation.ReadOperation; import com.mongodb.internal.operation.WriteOperation; import com.mongodb.lang.Nullable; @@ -68,6 +69,11 @@ public T execute(final WriteOperation operation, final ReadConcern readCo return getResponse(); } + @Override + public OperationExecutor withTimeoutSettings(final TimeoutSettings timeoutSettings) { + return this; + } + @SuppressWarnings("unchecked") private T getResponse() { Object response = responses.remove(0);
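
Taken together, the hunks above follow one pattern: the TimeoutSettings constructor argument is dropped from the individual operations (FindOperation, AggregateOperation, MixedBulkWriteOperation, ListCollectionsOperation, and so on), and timeout configuration instead travels with the OperationExecutor via the new withTimeoutSettings(TimeoutSettings) method. Below is a minimal, self-contained sketch of that executor-level contract. The types Settings, Executor and SimpleExecutor are hypothetical stand-ins invented for illustration, not driver classes; only the withTimeoutSettings name and its re-scoping behaviour come from the diff.

    import java.util.concurrent.Callable;

    public final class WithTimeoutSettingsSketch {

        // Hypothetical stand-in for com.mongodb.internal.TimeoutSettings; only a single timeout value is modelled.
        static final class Settings {
            final long timeoutMs;

            Settings(final long timeoutMs) {
                this.timeoutMs = timeoutMs;
            }
        }

        // Hypothetical stand-in for OperationExecutor: runs operations and can be re-scoped with new settings.
        interface Executor {
            <T> T execute(Callable<T> operation) throws Exception;

            // Mirrors the withTimeoutSettings contract added above: returns an executor bound to the given settings.
            Executor withTimeoutSettings(Settings settings);
        }

        static final class SimpleExecutor implements Executor {
            private final Settings settings;

            SimpleExecutor(final Settings settings) {
                this.settings = settings;
            }

            @Override
            public <T> T execute(final Callable<T> operation) throws Exception {
                // A real executor would derive the per-operation deadline from its settings here;
                // the operation itself no longer carries any timeout configuration.
                System.out.println("executing with timeoutMs=" + settings.timeoutMs);
                return operation.call();
            }

            @Override
            public Executor withTimeoutSettings(final Settings newSettings) {
                // Immutable re-scoping: the original executor keeps its own settings.
                return new SimpleExecutor(newSettings);
            }
        }

        public static void main(final String[] args) throws Exception {
            Executor base = new SimpleExecutor(new Settings(30_000));
            // Re-scope for a single logical operation, e.g. a transaction that needs a tighter timeout.
            Executor scoped = base.withTimeoutSettings(new Settings(5_000));
            System.out.println(scoped.execute(() -> "find result"));
        }
    }

As in the TestOperationExecutor change above, a test double can satisfy the contract by simply returning this, while a production executor would presumably return an executor bound to the new settings rather than mutating itself.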