diff --git a/.evergreen/run-load-balancer-tests.sh b/.evergreen/run-load-balancer-tests.sh index 871e9c2f003..2440cd31ccc 100755 --- a/.evergreen/run-load-balancer-tests.sh +++ b/.evergreen/run-load-balancer-tests.sh @@ -79,7 +79,8 @@ echo $second -Dorg.mongodb.test.uri=${SINGLE_MONGOS_LB_URI} \ -Dorg.mongodb.test.multi.mongos.uri=${MULTI_MONGOS_LB_URI} \ ${GRADLE_EXTRA_VARS} --stacktrace --info --continue driver-core:test \ - --tests QueryBatchCursorFunctionalSpecification + --tests CommandBatchCursorFunctionalTest \ + --tests AsyncCommandBatchCursorFunctionalTest third=$? echo $third diff --git a/config/codenarc/codenarc.xml b/config/codenarc/codenarc.xml index 2d11b03296a..2bab2315e97 100644 --- a/config/codenarc/codenarc.xml +++ b/config/codenarc/codenarc.xml @@ -34,12 +34,6 @@ - - - - - - diff --git a/driver-core/src/main/com/mongodb/assertions/Assertions.java b/driver-core/src/main/com/mongodb/assertions/Assertions.java index 205345bdf7d..98100f65b45 100644 --- a/driver-core/src/main/com/mongodb/assertions/Assertions.java +++ b/driver-core/src/main/com/mongodb/assertions/Assertions.java @@ -21,6 +21,7 @@ import com.mongodb.lang.Nullable; import java.util.Collection; +import java.util.function.Supplier; /** *

 * <p>Design by contract assertions.</p>
 * <p>This class is not part of the public API and may be removed or changed at any time.</p>

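The hunk below introduces a doesNotThrow(Supplier) assertion helper. A minimal usage sketch follows (illustrative only, not part of this change set; it assumes nothing beyond the signature added in the hunk below): an internal call that is never expected to throw is wrapped so that any unexpected exception surfaces as an AssertionError, which is how the new cursor ResourceManagers call it later in this diff via doesNotThrow(this::tryStartOperation).

    import static com.mongodb.assertions.Assertions.doesNotThrow;

    // Hypothetical caller, for illustration only.
    final class DoesNotThrowSketch {
        public static void main(final String[] args) {
            // The supplier result is returned unchanged when nothing throws.
            int ok = doesNotThrow(() -> Integer.parseInt("42"));
            System.out.println(ok);

            // Any exception from the supplier is rethrown wrapped in an AssertionError,
            // signalling an internal invariant violation rather than an expected runtime failure.
            doesNotThrow(() -> Integer.parseInt("not a number"));
        }
    }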
@@ -226,6 +227,19 @@ public static AssertionError fail(final String msg) throws AssertionError { throw new AssertionError(assertNotNull(msg)); } + /** + * @param supplier the supplier to check + * @return {@code supplier.get()} + * @throws AssertionError If {@code supplier.get()} throws an exception + */ + public static T doesNotThrow(final Supplier supplier) throws AssertionError { + try { + return supplier.get(); + } catch (Exception e) { + throw new AssertionError(e.getMessage(), e); + } + } + private Assertions() { } } diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncAggregateResponseBatchCursor.java b/driver-core/src/main/com/mongodb/internal/async/AsyncAggregateResponseBatchCursor.java index 2e9da84550c..ccfc9f7a956 100644 --- a/driver-core/src/main/com/mongodb/internal/async/AsyncAggregateResponseBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncAggregateResponseBatchCursor.java @@ -16,6 +16,7 @@ package com.mongodb.internal.async; +import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonTimestamp; @@ -25,8 +26,10 @@ *

 * <p>This class is not part of the public API and may be removed or changed at any time</p>

*/ public interface AsyncAggregateResponseBatchCursor extends AsyncBatchCursor { + @Nullable BsonDocument getPostBatchResumeToken(); + @Nullable BsonTimestamp getOperationTime(); boolean isFirstBatchEmpty(); diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncBatchCursor.java b/driver-core/src/main/com/mongodb/internal/async/AsyncBatchCursor.java index 14cc3faa71b..89260ac7b52 100644 --- a/driver-core/src/main/com/mongodb/internal/async/AsyncBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncBatchCursor.java @@ -16,6 +16,8 @@ package com.mongodb.internal.async; +import com.mongodb.internal.operation.BatchCursor; + import java.io.Closeable; import java.util.List; @@ -28,9 +30,9 @@ */ public interface AsyncBatchCursor extends Closeable { /** - * Returns the next batch of results. A tailable cursor will block until another batch exists. After the last batch, the next call - * to this method will execute the callback with a null result to indicate that there are no more batches available and the cursor - * has been closed. + * Returns the next batch of results. A tailable cursor will block until another batch exists. + * Unlike the {@link BatchCursor} this method will automatically mark the cursor as closed when there are no more expected results. + * Care should be taken to check {@link #isClosed()} between calls. * * @param callback callback to receive the next batch of results * @throws java.util.NoSuchElementException if no next batch exists diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java index 4379845bdd1..ff6b55bac48 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java @@ -18,13 +18,11 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; -import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.internal.client.model.AggregationLevel; -import com.mongodb.internal.connection.QueryResult; import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonArray; @@ -40,7 +38,6 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; @@ -48,7 +45,6 @@ import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.OperationHelper.LOGGER; -import static com.mongodb.internal.operation.OperationHelper.cursorDocumentToQueryResult; import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; @@ -239,25 +235,16 @@ BsonDocument getCommand(final SessionContext sessionContext, final 
int maxWireVe return commandDocument; } - private QueryResult createQueryResult(final BsonDocument result, final ConnectionDescription description) { - assertNotNull(result); - return cursorDocumentToQueryResult(result.getDocument(CURSOR), description.getServerAddress()); - } - - private CommandReadTransformer> transformer() { - return (result, source, connection) -> { - QueryResult queryResult = createQueryResult(result, connection.getDescription()); - return new QueryBatchCursor<>(queryResult, 0, batchSize != null ? batchSize : 0, maxAwaitTimeMS, decoder, comment, - source, connection, result); - }; + private CommandReadTransformer> transformer() { + return (result, source, connection) -> + new CommandBatchCursor<>(result, batchSize != null ? batchSize : 0, maxAwaitTimeMS, decoder, + comment, source, connection); } private CommandReadTransformerAsync> asyncTransformer() { - return (result, source, connection) -> { - QueryResult queryResult = createQueryResult(result, connection.getDescription()); - return new AsyncQueryBatchCursor<>(queryResult, 0, batchSize != null ? batchSize : 0, maxAwaitTimeMS, decoder, - comment, source, connection, result); - }; + return (result, source, connection) -> + new AsyncCommandBatchCursor<>(result, batchSize != null ? batchSize : 0, maxAwaitTimeMS, decoder, + comment, source, connection); } interface AggregateTarget { diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateResponseBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateResponseBatchCursor.java index 5ec7d00bb26..e12a2249123 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AggregateResponseBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateResponseBatchCursor.java @@ -17,6 +17,7 @@ package com.mongodb.internal.operation; import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.BsonTimestamp; @@ -27,8 +28,10 @@ */ @NotThreadSafe public interface AggregateResponseBatchCursor extends BatchCursor { + @Nullable BsonDocument getPostBatchResumeToken(); + @Nullable BsonTimestamp getOperationTime(); boolean isFirstBatchEmpty(); diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java index 9ccd2f17b0a..7e55f05cac5 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java @@ -18,6 +18,7 @@ import com.mongodb.MongoException; import com.mongodb.internal.async.AsyncAggregateResponseBatchCursor; +import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.lang.NonNull; @@ -50,11 +51,11 @@ final class AsyncChangeStreamBatchCursor implements AsyncAggregateResponseBat * {@code wrapped} containing {@code null} and {@link #isClosed} being {@code false}. * This represents a situation in which the wrapped object was closed by {@code this} but {@code this} remained open. 
*/ - private final AtomicReference> wrapped; + private final AtomicReference> wrapped; private final AtomicBoolean isClosed; AsyncChangeStreamBatchCursor(final ChangeStreamOperation changeStreamOperation, - final AsyncAggregateResponseBatchCursor wrapped, + final AsyncCommandBatchCursor wrapped, final AsyncReadBinding binding, @Nullable final BsonDocument resumeToken, final int maxWireVersion) { @@ -68,13 +69,13 @@ final class AsyncChangeStreamBatchCursor implements AsyncAggregateResponseBat } @NonNull - AsyncAggregateResponseBatchCursor getWrapped() { + AsyncCommandBatchCursor getWrapped() { return assertNotNull(wrapped.get()); } @Override public void next(final SingleResultCallback> callback) { - resumeableOperation((cursor, callback1) -> cursor.next(callback1), callback, false); + resumeableOperation(AsyncBatchCursor::next, callback, false); } @Override @@ -129,15 +130,15 @@ private void nullifyAndCloseWrapped() { /** * This method guarantees that the {@code newValue} argument is closed even if - * {@link #setWrappedOrCloseIt(AsyncAggregateResponseBatchCursor)} is called concurrently with or after (in the happens-before order) + * {@code setWrappedOrCloseIt(AsyncCommandBatchCursor)} is called concurrently with or after (in the happens-before order) * the method {@link #close()}. */ - private void setWrappedOrCloseIt(final AsyncAggregateResponseBatchCursor newValue) { + private void setWrappedOrCloseIt(final AsyncCommandBatchCursor newValue) { if (isClosed()) { - assertNull(this.wrapped.get()); + assertNull(wrapped.get()); newValue.close(); } else { - assertNull(this.wrapped.getAndSet(newValue)); + assertNull(wrapped.getAndSet(newValue)); if (isClosed()) { nullifyAndCloseWrapped(); } @@ -164,8 +165,8 @@ public int getMaxWireVersion() { return maxWireVersion; } - private void cachePostBatchResumeToken(final AsyncAggregateResponseBatchCursor queryBatchCursor) { - BsonDocument resumeToken = queryBatchCursor.getPostBatchResumeToken(); + private void cachePostBatchResumeToken(final AsyncCommandBatchCursor cursor) { + BsonDocument resumeToken = cursor.getPostBatchResumeToken(); if (resumeToken != null) { this.resumeToken = resumeToken; } @@ -182,13 +183,13 @@ private void resumeableOperation(final AsyncBlock asyncBlock, final SingleResult tryNext ? 
"tryNext()" : "next()"))); return; } - AsyncAggregateResponseBatchCursor wrappedCursor = getWrapped(); + AsyncCommandBatchCursor wrappedCursor = getWrapped(); asyncBlock.apply(wrappedCursor, (result, t) -> { if (t == null) { try { List convertedResults; try { - convertedResults = convertAndProduceLastId(result, changeStreamOperation.getDecoder(), + convertedResults = convertAndProduceLastId(assertNotNull(result), changeStreamOperation.getDecoder(), lastId -> resumeToken = lastId); } finally { cachePostBatchResumeToken(wrappedCursor); @@ -215,14 +216,15 @@ private void retryOperation(final AsyncBlock asyncBlock, final SingleResultCallb if (t != null) { callback.onResult(null, t); } else { - changeStreamOperation.setChangeStreamOptionsForResume(resumeToken, source.getServerDescription().getMaxWireVersion()); + changeStreamOperation.setChangeStreamOptionsForResume(resumeToken, + assertNotNull(source).getServerDescription().getMaxWireVersion()); source.release(); changeStreamOperation.executeAsync(binding, (result, t1) -> { if (t1 != null) { callback.onResult(null, t1); } else { try { - setWrappedOrCloseIt(((AsyncChangeStreamBatchCursor) result).getWrapped()); + setWrappedOrCloseIt(assertNotNull((AsyncChangeStreamBatchCursor) result).getWrapped()); } finally { try { binding.release(); // release the new change stream batch cursor's reference to the binding diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java new file mode 100644 index 00000000000..4831650f7ff --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java @@ -0,0 +1,313 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoSocketException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.AsyncAggregateResponseBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.operation.AsyncOperationHelper.AsyncCallableConnectionWithCallback; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; + +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.doesNotThrow; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.FIRST_BATCH; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.NEXT_BATCH; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.NO_OP_FIELD_NAME_VALIDATOR; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.getKillCursorsCommand; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.getMoreCommandDocument; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.logCommandCursorResult; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.translateCommandException; +import static java.util.Collections.emptyList; + +class AsyncCommandBatchCursor implements AsyncAggregateResponseBatchCursor { + + private final MongoNamespace namespace; + private final long maxTimeMS; + private final Decoder decoder; + @Nullable + private final BsonValue comment; + private final int maxWireVersion; + private final boolean firstBatchEmpty; + private final ResourceManager resourceManager; + private final AtomicBoolean processedInitial = new AtomicBoolean(); + private int batchSize; + private volatile CommandCursorResult commandCursorResult; + + AsyncCommandBatchCursor( + final BsonDocument commandCursorDocument, + final int batchSize, final long maxTimeMS, + final Decoder decoder, + @Nullable final BsonValue comment, + final AsyncConnectionSource connectionSource, + final AsyncConnection connection) { + ConnectionDescription connectionDescription = connection.getDescription(); + this.commandCursorResult = toCommandCursorResult(connectionDescription.getServerAddress(), FIRST_BATCH, commandCursorDocument); + this.namespace = commandCursorResult.getNamespace(); + this.batchSize = batchSize; + this.maxTimeMS = maxTimeMS; + this.decoder = decoder; + this.comment = comment; + this.maxWireVersion = connectionDescription.getMaxWireVersion(); + this.firstBatchEmpty = commandCursorResult.getResults().isEmpty(); + + AsyncConnection connectionToPin = 
connectionSource.getServerDescription().getType() == ServerType.LOAD_BALANCER + ? connection : null; + resourceManager = new ResourceManager(namespace, connectionSource, connectionToPin, commandCursorResult.getServerCursor()); + } + + @Override + public void next(final SingleResultCallback> callback) { + resourceManager.execute(funcCallback -> { + ServerCursor localServerCursor = resourceManager.getServerCursor(); + boolean serverCursorIsNull = localServerCursor == null; + List batchResults = emptyList(); + if (!processedInitial.getAndSet(true) && !firstBatchEmpty) { + batchResults = commandCursorResult.getResults(); + } + + if (serverCursorIsNull || !batchResults.isEmpty()) { + funcCallback.onResult(batchResults, null); + } else { + getMore(localServerCursor, funcCallback); + } + }, callback); + } + + @Override + public boolean isClosed() { + return !resourceManager.operable(); + } + + @Override + public void setBatchSize(final int batchSize) { + this.batchSize = batchSize; + } + + @Override + public int getBatchSize() { + return batchSize; + } + + @Override + public void close() { + resourceManager.close(); + } + + @Nullable + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + ServerCursor getServerCursor() { + if (!resourceManager.operable()) { + return null; + } + return resourceManager.getServerCursor(); + } + + @Override + public BsonDocument getPostBatchResumeToken() { + return commandCursorResult.getPostBatchResumeToken(); + } + + @Override + public BsonTimestamp getOperationTime() { + return commandCursorResult.getOperationTime(); + } + + @Override + public boolean isFirstBatchEmpty() { + return firstBatchEmpty; + } + + @Override + public int getMaxWireVersion() { + return maxWireVersion; + } + + private void getMore(final ServerCursor cursor, final SingleResultCallback> callback) { + resourceManager.executeWithConnection((connection, wrappedCallback) -> + getMoreLoop(assertNotNull(connection), cursor, wrappedCallback), callback); + } + + private void getMoreLoop(final AsyncConnection connection, final ServerCursor serverCursor, + final SingleResultCallback> callback) { + connection.commandAsync(namespace.getDatabaseName(), + getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, maxTimeMS, comment), + NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), + CommandResultDocumentCodec.create(decoder, NEXT_BATCH), + assertNotNull(resourceManager.getConnectionSource()), + (commandResult, t) -> { + if (t != null) { + Throwable translatedException = + t instanceof MongoCommandException + ? 
translateCommandException((MongoCommandException) t, serverCursor) + : t; + callback.onResult(null, translatedException); + return; + } + commandCursorResult = toCommandCursorResult( + connection.getDescription().getServerAddress(), NEXT_BATCH, assertNotNull(commandResult)); + ServerCursor nextServerCursor = commandCursorResult.getServerCursor(); + resourceManager.setServerCursor(nextServerCursor); + List nextBatch = commandCursorResult.getResults(); + if (nextServerCursor == null || !nextBatch.isEmpty()) { + callback.onResult(nextBatch, null); + return; + } + + if (!resourceManager.operable()) { + callback.onResult(emptyList(), null); + return; + } + + getMoreLoop(connection, nextServerCursor, callback); + }); + } + + private CommandCursorResult toCommandCursorResult(final ServerAddress serverAddress, final String fieldNameContainingBatch, + final BsonDocument commandCursorDocument) { + CommandCursorResult commandCursorResult = new CommandCursorResult<>(serverAddress, fieldNameContainingBatch, + commandCursorDocument); + logCommandCursorResult(commandCursorResult); + return commandCursorResult; + } + + @ThreadSafe + private static final class ResourceManager extends CursorResourceManager { + + ResourceManager( + final MongoNamespace namespace, + final AsyncConnectionSource connectionSource, + @Nullable final AsyncConnection connectionToPin, + @Nullable final ServerCursor serverCursor) { + super(namespace, connectionSource, connectionToPin, serverCursor); + } + + /** + * Thread-safe. + * Executes {@code operation} within the {@link #tryStartOperation()}/{@link #endOperation()} bounds. + */ + void execute(final AsyncCallbackSupplier operation, final SingleResultCallback callback) { + boolean canStartOperation = doesNotThrow(this::tryStartOperation); + if (!canStartOperation) { + callback.onResult(null, new IllegalStateException(MESSAGE_IF_CLOSED_AS_CURSOR)); + } else { + operation.whenComplete(() -> { + endOperation(); + if (getServerCursor() == null) { + // At this point all resources have been released, + // but `isClose` may still be returning `false` if `close` have not been called. + // Self-close to update the state managed by `ResourceManger`, and so that `isClosed` return `true`. 
+ close(); + } + }).get(callback); + } + } + + @Override + void markAsPinned(final AsyncConnection connectionToPin, final Connection.PinningMode pinningMode) { + connectionToPin.markAsPinned(pinningMode); + } + + @Override + void doClose() { + if (isSkipReleasingServerResourcesOnClose()) { + unsetServerCursor(); + } + + if (getServerCursor() != null) { + getConnection((connection, t) -> { + if (connection != null) { + releaseServerAndClientResources(connection); + } else { + unsetServerCursor(); + releaseClientResources(); + } + }); + } else { + releaseClientResources(); + } + } + + void executeWithConnection(final AsyncCallableConnectionWithCallback callable, final SingleResultCallback callback) { + getConnection((connection, t) -> { + if (t != null) { + callback.onResult(null, t); + return; + } + callable.call(assertNotNull(connection), (result, t1) -> { + if (t1 instanceof MongoSocketException) { + onCorruptedConnection(connection, (MongoSocketException) t1); + } + connection.release(); + callback.onResult(result, t1); + }); + }); + } + + private void getConnection(final SingleResultCallback callback) { + assertTrue(getState() != State.IDLE); + AsyncConnection pinnedConnection = getPinnedConnection(); + if (pinnedConnection != null) { + callback.onResult(assertNotNull(pinnedConnection).retain(), null); + } else { + assertNotNull(getConnectionSource()).getConnection(callback); + } + } + + private void releaseServerAndClientResources(final AsyncConnection connection) { + AsyncCallbackSupplier callbackSupplier = funcCallback -> { + ServerCursor localServerCursor = getServerCursor(); + if (localServerCursor != null) { + killServerCursor(getNamespace(), localServerCursor, connection, funcCallback); + } + }; + callbackSupplier.whenComplete(() -> { + unsetServerCursor(); + releaseClientResources(); + }).whenComplete(connection::release).get((r, t) -> { /* do nothing */ }); + } + + private void killServerCursor(final MongoNamespace namespace, final ServerCursor localServerCursor, + final AsyncConnection localConnection, final SingleResultCallback callback) { + localConnection.commandAsync(namespace.getDatabaseName(), getKillCursorsCommand(namespace, localServerCursor), + NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), + assertNotNull(getConnectionSource()), (r, t) -> callback.onResult(null, null)); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java index 21b10cdff08..163521631d2 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java @@ -18,9 +18,7 @@ import com.mongodb.Function; import com.mongodb.MongoException; -import com.mongodb.MongoNamespace; import com.mongodb.ReadPreference; -import com.mongodb.ServerAddress; import com.mongodb.assertions.Assertions; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; @@ -35,7 +33,6 @@ import com.mongodb.internal.binding.ReferenceCounted; import com.mongodb.internal.connection.AsyncConnection; import com.mongodb.internal.connection.OperationContext; -import com.mongodb.internal.connection.QueryResult; import com.mongodb.internal.operation.retry.AttachmentKeys; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; @@ -56,7 +53,6 @@ import static 
com.mongodb.internal.operation.CommandOperationHelper.isRetryWritesEnabled; import static com.mongodb.internal.operation.CommandOperationHelper.logRetryExecute; import static com.mongodb.internal.operation.CommandOperationHelper.transformWriteException; -import static com.mongodb.internal.operation.OperationHelper.cursorDocumentToQueryResult; import static com.mongodb.internal.operation.WriteConcernHelper.throwOnWriteConcernError; final class AsyncOperationHelper { @@ -65,6 +61,10 @@ interface AsyncCallableWithConnection { void call(@Nullable AsyncConnection connection, @Nullable Throwable t); } + interface AsyncCallableConnectionWithCallback { + void call(AsyncConnection connection, SingleResultCallback callback); + } + interface AsyncCallableWithSource { void call(@Nullable AsyncConnectionSource source, @Nullable Throwable t); } @@ -309,15 +309,14 @@ static CommandWriteTransformerAsync writeConcernErrorTransfo }; } - static AsyncBatchCursor createEmptyAsyncBatchCursor(final MongoNamespace namespace, final ServerAddress serverAddress) { - return new AsyncSingleBatchQueryCursor<>(new QueryResult<>(namespace, Collections.emptyList(), 0L, serverAddress)); + static CommandReadTransformerAsync> asyncSingleBatchCursorTransformer(final String fieldName) { + return (result, source, connection) -> + new AsyncSingleBatchCursor<>(BsonDocumentWrapperHelper.toList(result, fieldName), 0); } static AsyncBatchCursor cursorDocumentToAsyncBatchCursor(final BsonDocument cursorDocument, final Decoder decoder, final BsonValue comment, final AsyncConnectionSource source, final AsyncConnection connection, final int batchSize) { - return new AsyncQueryBatchCursor<>(cursorDocumentToQueryResult(cursorDocument, - source.getServerDescription().getAddress()), - 0, batchSize, 0, decoder, comment, source, connection, cursorDocument); + return new AsyncCommandBatchCursor<>(cursorDocument, batchSize, 0, decoder, comment, source, connection); } static SingleResultCallback releasingCallback(final SingleResultCallback wrapped, final AsyncConnection connection) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncQueryBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncQueryBatchCursor.java deleted file mode 100644 index 96b841283b8..00000000000 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncQueryBatchCursor.java +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb.internal.operation; - -import com.mongodb.MongoCommandException; -import com.mongodb.MongoException; -import com.mongodb.MongoNamespace; -import com.mongodb.ReadPreference; -import com.mongodb.ServerCursor; -import com.mongodb.connection.ConnectionDescription; -import com.mongodb.connection.ServerType; -import com.mongodb.internal.async.AsyncAggregateResponseBatchCursor; -import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.binding.AsyncConnectionSource; -import com.mongodb.internal.connection.AsyncConnection; -import com.mongodb.internal.connection.Connection; -import com.mongodb.internal.connection.QueryResult; -import com.mongodb.internal.diagnostics.logging.Logger; -import com.mongodb.internal.diagnostics.logging.Loggers; -import com.mongodb.internal.validator.NoOpFieldNameValidator; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.bson.BsonInt32; -import org.bson.BsonInt64; -import org.bson.BsonString; -import org.bson.BsonTimestamp; -import org.bson.BsonValue; -import org.bson.FieldNameValidator; -import org.bson.codecs.BsonDocumentCodec; -import org.bson.codecs.Decoder; - -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import static com.mongodb.assertions.Assertions.assertFalse; -import static com.mongodb.assertions.Assertions.assertNotNull; -import static com.mongodb.assertions.Assertions.isTrueArgument; -import static com.mongodb.assertions.Assertions.notNull; -import static com.mongodb.internal.Locks.withLock; -import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; -import static com.mongodb.internal.operation.CursorHelper.getNumberToReturn; -import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; -import static com.mongodb.internal.operation.SyncOperationHelper.getMoreCursorDocumentToQueryResult; -import static com.mongodb.internal.operation.QueryHelper.translateCommandException; -import static com.mongodb.internal.operation.ServerVersionHelper.serverIsAtLeastVersionFourDotFour; -import static java.lang.String.format; -import static java.util.Collections.singletonList; - -class AsyncQueryBatchCursor implements AsyncAggregateResponseBatchCursor { - private static final Logger LOGGER = Loggers.getLogger("operation"); - private static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); - private static final String CURSOR = "cursor"; - private static final String POST_BATCH_RESUME_TOKEN = "postBatchResumeToken"; - private static final String OPERATION_TIME = "operationTime"; - - private final MongoNamespace namespace; - private final int limit; - private final Decoder decoder; - private final long maxTimeMS; - private volatile AsyncConnectionSource connectionSource; - private volatile AsyncConnection pinnedConnection; - private final AtomicReference cursor; - private volatile QueryResult firstBatch; - private volatile int batchSize; - private final AtomicInteger count = new AtomicInteger(); - private volatile BsonDocument postBatchResumeToken; - private final BsonTimestamp operationTime; - private final BsonValue comment; - private final boolean firstBatchEmpty; - private final int maxWireVersion; - - private final Lock lock = new ReentrantLock(); - /* protected by `lock` */ - private boolean 
isOperationInProgress = false; - private boolean isClosed = false; - /* protected by `lock` */ - private volatile boolean isClosePending = false; - - AsyncQueryBatchCursor(final QueryResult firstBatch, final int limit, final int batchSize, final long maxTimeMS, - final Decoder decoder, final BsonValue comment, final AsyncConnectionSource connectionSource, - final AsyncConnection connection) { - this(firstBatch, limit, batchSize, maxTimeMS, decoder, comment, connectionSource, connection, null); - } - - AsyncQueryBatchCursor(final QueryResult firstBatch, final int limit, final int batchSize, final long maxTimeMS, - final Decoder decoder, final BsonValue comment, final AsyncConnectionSource connectionSource, - @Nullable final AsyncConnection connection, @Nullable final BsonDocument result) { - isTrueArgument("maxTimeMS >= 0", maxTimeMS >= 0); - this.maxTimeMS = maxTimeMS; - this.namespace = firstBatch.getNamespace(); - this.firstBatch = firstBatch; - this.limit = limit; - this.batchSize = batchSize; - this.decoder = decoder; - this.comment = comment; - this.cursor = new AtomicReference<>(firstBatch.getCursor()); - this.count.addAndGet(firstBatch.getResults().size()); - if (result != null) { - this.operationTime = result.getTimestamp(OPERATION_TIME, null); - this.postBatchResumeToken = getPostBatchResumeTokenFromResponse(result); - } else { - this.operationTime = null; - } - - firstBatchEmpty = firstBatch.getResults().isEmpty(); - if (cursor.get() != null) { - this.connectionSource = notNull("connectionSource", connectionSource).retain(); - assertNotNull(connection); - if (limitReached()) { - killCursor(connection); - } else { - if (connectionSource.getServerDescription().getType() == ServerType.LOAD_BALANCER) { - this.pinnedConnection = connection.retain(); - this.pinnedConnection.markAsPinned(Connection.PinningMode.CURSOR); - } - } - } - this.maxWireVersion = connection == null ? 0 : connection.getDescription().getMaxWireVersion(); - logQueryResult(firstBatch); - } - - /** - * {@inheritDoc} - *

- * From the perspective of the code external to this class, this method is idempotent as required by its specification. - * However, if this method sets {@link #isClosePending}, - * then it must be called by {@code this} again to release resources. - * This behavior does not violate externally observable idempotence because this method is allowed to release resources "eventually". - */ - @Override - public void close() { - boolean doClose = withLock(lock, () -> { - if (isOperationInProgress) { - isClosePending = true; - return false; - } else if (!isClosed) { - isClosed = true; - isClosePending = false; - return true; - } - return false; - }); - - if (doClose) { - killCursorOnClose(); - } - } - - @Override - public void next(final SingleResultCallback> callback) { - if (isClosed()) { - callback.onResult(null, new MongoException("next() called after the cursor was closed.")); - } else if (firstBatch != null && (!firstBatch.getResults().isEmpty())) { - // May be empty for a tailable cursor - List results = firstBatch.getResults(); - firstBatch = null; - if (getServerCursor() == null) { - close(); - } - callback.onResult(results, null); - } else { - ServerCursor localCursor = getServerCursor(); - if (localCursor == null) { - close(); - callback.onResult(null, null); - } else { - boolean doGetMore = withLock(lock, () -> { - if (isClosed()) { - callback.onResult(null, new MongoException("next() called after the cursor was closed.")); - return false; - } - isOperationInProgress = true; - return true; - }); - if (doGetMore) { - getMore(localCursor, callback); - } - } - } - } - - @Override - public void setBatchSize(final int batchSize) { - assertFalse(isClosed()); - this.batchSize = batchSize; - } - - @Override - public int getBatchSize() { - assertFalse(isClosed()); - return batchSize; - } - - @Override - public boolean isClosed() { - return withLock(lock, () -> isClosed || isClosePending); - } - - @Override - public BsonDocument getPostBatchResumeToken() { - return postBatchResumeToken; - } - - @Override - public BsonTimestamp getOperationTime() { - return operationTime; - } - - @Override - public boolean isFirstBatchEmpty() { - return firstBatchEmpty; - } - - @Override - public int getMaxWireVersion() { - return maxWireVersion; - } - - private boolean limitReached() { - return Math.abs(limit) != 0 && count.get() >= Math.abs(limit); - } - - private void getMore(final ServerCursor cursor, final SingleResultCallback> callback) { - if (pinnedConnection != null) { - getMore(pinnedConnection.retain(), cursor, callback); - } else { - connectionSource.getConnection((connection, t) -> { - if (t != null) { - endOperationInProgress(); - callback.onResult(null, t); - } else { - getMore(assertNotNull(connection), cursor, callback); - } - }); - } - } - - private void getMore(final AsyncConnection connection, final ServerCursor cursor, final SingleResultCallback> callback) { - connection.commandAsync(namespace.getDatabaseName(), asGetMoreCommandDocument(cursor.getId(), connection.getDescription()), - NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), CommandResultDocumentCodec.create(decoder, "nextBatch"), - connectionSource, new CommandResultSingleResultCallback(connection, cursor, callback)); - } - - private BsonDocument asGetMoreCommandDocument(final long cursorId, final ConnectionDescription connectionDescription) { - BsonDocument document = new BsonDocument("getMore", new BsonInt64(cursorId)) - .append("collection", new BsonString(namespace.getCollectionName())); - - int batchSizeForGetMoreCommand = 
Math.abs(getNumberToReturn(limit, this.batchSize, count.get())); - if (batchSizeForGetMoreCommand != 0) { - document.append("batchSize", new BsonInt32(batchSizeForGetMoreCommand)); - } - if (maxTimeMS != 0) { - document.append("maxTimeMS", new BsonInt64(maxTimeMS)); - } - if (serverIsAtLeastVersionFourDotFour(connectionDescription)) { - putIfNotNull(document, "comment", comment); - } - return document; - } - - private void killCursorOnClose() { - ServerCursor localCursor = getServerCursor(); - if (localCursor != null) { - if (pinnedConnection != null) { - killCursorAsynchronouslyAndReleaseConnectionAndSource(pinnedConnection, localCursor); - } else { - connectionSource.getConnection((connection, t) -> { - if (t != null) { - connectionSource.release(); - } else { - killCursorAsynchronouslyAndReleaseConnectionAndSource(assertNotNull(connection), localCursor); - } - }); - } - } else if (pinnedConnection != null) { - pinnedConnection.release(); - } - } - - private void killCursor(final AsyncConnection connection) { - ServerCursor localCursor = cursor.getAndSet(null); - if (localCursor != null) { - killCursorAsynchronouslyAndReleaseConnectionAndSource(connection.retain(), localCursor); - } else { - connectionSource.release(); - } - } - - private void killCursorAsynchronouslyAndReleaseConnectionAndSource(final AsyncConnection connection, final ServerCursor localCursor) { - connection.commandAsync(namespace.getDatabaseName(), asKillCursorsCommandDocument(localCursor), NO_OP_FIELD_NAME_VALIDATOR, - ReadPreference.primary(), new BsonDocumentCodec(), connectionSource, (result, t) -> { - connection.release(); - connectionSource.release(); - }); - } - - private BsonDocument asKillCursorsCommandDocument(final ServerCursor localCursor) { - return new BsonDocument("killCursors", new BsonString(namespace.getCollectionName())) - .append("cursors", new BsonArray(singletonList(new BsonInt64(localCursor.getId())))); - } - - private void endOperationInProgress() { - boolean closePending = withLock(lock, () -> { - isOperationInProgress = false; - return this.isClosePending; - }); - if (closePending) { - close(); - } - } - - - private void handleGetMoreQueryResult(final AsyncConnection connection, final SingleResultCallback> callback, - final QueryResult result) { - logQueryResult(result); - cursor.set(result.getCursor()); - if (isClosePending) { - try { - connection.release(); - if (result.getCursor() == null) { - connectionSource.release(); - } - endOperationInProgress(); - } finally { - callback.onResult(null, null); - } - } else if (result.getResults().isEmpty() && result.getCursor() != null) { - getMore(connection, assertNotNull(result.getCursor()), callback); - } else { - count.addAndGet(result.getResults().size()); - if (limitReached()) { - killCursor(connection); - connection.release(); - } else { - connection.release(); - if (result.getCursor() == null) { - connectionSource.release(); - } - } - endOperationInProgress(); - - if (result.getResults().isEmpty()) { - callback.onResult(null, null); - } else { - callback.onResult(result.getResults(), null); - } - } - } - - private void logQueryResult(final QueryResult result) { - LOGGER.debug(format("Received batch of %d documents with cursorId %d from server %s", result.getResults().size(), - result.getCursorId(), result.getAddress())); - } - - private class CommandResultSingleResultCallback implements SingleResultCallback { - private final AsyncConnection connection; - private final ServerCursor cursor; - private final SingleResultCallback> callback; - - 
CommandResultSingleResultCallback(final AsyncConnection connection, final ServerCursor cursor, - final SingleResultCallback> callback) { - this.connection = connection; - this.cursor = cursor; - this.callback = errorHandlingCallback(callback, LOGGER); - } - - @Override - public void onResult(@Nullable final BsonDocument result, @Nullable final Throwable t) { - if (t != null) { - Throwable translatedException = t instanceof MongoCommandException - ? translateCommandException((MongoCommandException) t, cursor) - : t; - connection.release(); - endOperationInProgress(); - callback.onResult(null, translatedException); - } else { - assertNotNull(result); - QueryResult queryResult = getMoreCursorDocumentToQueryResult(result.getDocument(CURSOR), - connection.getDescription().getServerAddress()); - postBatchResumeToken = getPostBatchResumeTokenFromResponse(result); - handleGetMoreQueryResult(connection, callback, queryResult); - } - } - } - - @Nullable - ServerCursor getServerCursor() { - return cursor.get(); - } - - @Nullable - private BsonDocument getPostBatchResumeTokenFromResponse(final BsonDocument result) { - BsonDocument cursor = result.getDocument(CURSOR, null); - if (cursor != null) { - return cursor.getDocument(POST_BATCH_RESUME_TOKEN, null); - } - return null; - } -} diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncSingleBatchQueryCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncSingleBatchCursor.java similarity index 63% rename from driver-core/src/main/com/mongodb/internal/operation/AsyncSingleBatchQueryCursor.java rename to driver-core/src/main/com/mongodb/internal/operation/AsyncSingleBatchCursor.java index f29cda04dae..57b20ff1711 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncSingleBatchQueryCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncSingleBatchCursor.java @@ -19,19 +19,26 @@ import com.mongodb.MongoException; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.internal.connection.QueryResult; import java.util.List; -import static com.mongodb.assertions.Assertions.isTrue; +import static java.util.Collections.emptyList; -class AsyncSingleBatchQueryCursor implements AsyncBatchCursor { - private volatile QueryResult firstBatch; - private volatile boolean closed; +class AsyncSingleBatchCursor implements AsyncBatchCursor { - AsyncSingleBatchQueryCursor(final QueryResult firstBatch) { - this.firstBatch = firstBatch; - isTrue("Empty Cursor", firstBatch.getCursor() == null); + static AsyncSingleBatchCursor createEmptyAsyncSingleBatchCursor(final int batchSize) { + return new AsyncSingleBatchCursor<>(emptyList(), batchSize); + } + + private final List batch; + private final int batchSize; + + private volatile boolean hasNext = true; + private volatile boolean closed = false; + + AsyncSingleBatchCursor(final List batch, final int batchSize) { + this.batch = batch; + this.batchSize = batchSize; } @Override @@ -43,13 +50,12 @@ public void close() { public void next(final SingleResultCallback> callback) { if (closed) { callback.onResult(null, new MongoException("next() called after the cursor was closed.")); - } else if (firstBatch != null && !firstBatch.getResults().isEmpty()) { - List results = firstBatch.getResults(); - firstBatch = null; - callback.onResult(results, null); + } else if (hasNext && !batch.isEmpty()) { + hasNext = false; + callback.onResult(batch, null); } else { closed = true; - callback.onResult(null, 
null); + callback.onResult(emptyList(), null); } } @@ -60,7 +66,7 @@ public void setBatchSize(final int batchSize) { @Override public int getBatchSize() { - return 0; + return batchSize; } @Override diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java index acf70090457..a3c134b720c 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java @@ -33,6 +33,7 @@ import java.util.function.Consumer; import java.util.function.Function; +import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.internal.operation.ChangeStreamBatchCursorHelper.isResumableError; import static com.mongodb.internal.operation.SyncOperationHelper.withReadConnectionSource; @@ -41,12 +42,12 @@ final class ChangeStreamBatchCursor implements AggregateResponseBatchCursor changeStreamOperation; private final int maxWireVersion; - private AggregateResponseBatchCursor wrapped; + private CommandBatchCursor wrapped; private BsonDocument resumeToken; private final AtomicBoolean closed; ChangeStreamBatchCursor(final ChangeStreamOperation changeStreamOperation, - final AggregateResponseBatchCursor wrapped, + final CommandBatchCursor wrapped, final ReadBinding binding, @Nullable final BsonDocument resumeToken, final int maxWireVersion) { @@ -58,29 +59,29 @@ final class ChangeStreamBatchCursor implements AggregateResponseBatchCursor getWrapped() { + CommandBatchCursor getWrapped() { return wrapped; } @Override public boolean hasNext() { - return resumeableOperation(queryBatchCursor -> { + return resumeableOperation(commandBatchCursor -> { try { - return queryBatchCursor.hasNext(); + return commandBatchCursor.hasNext(); } finally { - cachePostBatchResumeToken(queryBatchCursor); + cachePostBatchResumeToken(commandBatchCursor); } }); } @Override public List next() { - return resumeableOperation(queryBatchCursor -> { + return resumeableOperation(commandBatchCursor -> { try { - return convertAndProduceLastId(queryBatchCursor.next(), changeStreamOperation.getDecoder(), + return convertAndProduceLastId(commandBatchCursor.next(), changeStreamOperation.getDecoder(), lastId -> resumeToken = lastId); } finally { - cachePostBatchResumeToken(queryBatchCursor); + cachePostBatchResumeToken(commandBatchCursor); } }); } @@ -92,12 +93,13 @@ public int available() { @Override public List tryNext() { - return resumeableOperation(queryBatchCursor -> { + return resumeableOperation(commandBatchCursor -> { try { - return convertAndProduceLastId(queryBatchCursor.tryNext(), changeStreamOperation.getDecoder(), - lastId -> resumeToken = lastId); + List tryNext = commandBatchCursor.tryNext(); + return tryNext == null ? 
null + : convertAndProduceLastId(tryNext, changeStreamOperation.getDecoder(), lastId -> resumeToken = lastId); } finally { - cachePostBatchResumeToken(queryBatchCursor); + cachePostBatchResumeToken(commandBatchCursor); } }); } @@ -155,9 +157,9 @@ public int getMaxWireVersion() { return maxWireVersion; } - private void cachePostBatchResumeToken(final AggregateResponseBatchCursor queryBatchCursor) { - if (queryBatchCursor.getPostBatchResumeToken() != null) { - resumeToken = queryBatchCursor.getPostBatchResumeToken(); + private void cachePostBatchResumeToken(final AggregateResponseBatchCursor commandBatchCursor) { + if (commandBatchCursor.getPostBatchResumeToken() != null) { + resumeToken = commandBatchCursor.getPostBatchResumeToken(); } } @@ -165,19 +167,17 @@ private void cachePostBatchResumeToken(final AggregateResponseBatchCursor List convertAndProduceLastId(@Nullable final List rawDocuments, + static List convertAndProduceLastId(final List rawDocuments, final Decoder decoder, final Consumer lastIdConsumer) { - List results = null; - if (rawDocuments != null) { - results = new ArrayList<>(); - for (RawBsonDocument rawDocument : rawDocuments) { - if (!rawDocument.containsKey("_id")) { - throw new MongoChangeStreamException("Cannot provide resume functionality when the resume token is missing."); - } - results.add(rawDocument.decode(decoder)); + List results = new ArrayList<>(); + for (RawBsonDocument rawDocument : assertNotNull(rawDocuments)) { + if (!rawDocument.containsKey("_id")) { + throw new MongoChangeStreamException("Cannot provide resume functionality when the resume token is missing."); } + results.add(rawDocument.decode(decoder)); + } + if (!rawDocuments.isEmpty()) { lastIdConsumer.accept(rawDocuments.get(rawDocuments.size() - 1).getDocument("_id")); } return results; diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java index a2ba029eb56..8df093a6e9a 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java @@ -20,7 +20,6 @@ import com.mongodb.client.model.Collation; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; -import com.mongodb.internal.async.AsyncAggregateResponseBatchCursor; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -42,9 +41,8 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.notNull; -import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncReadConnectionSource; -import static com.mongodb.internal.operation.SyncOperationHelper.withReadConnectionSource; /** * An operation that executes an {@code $changeStream} aggregation. 
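With the change above, ChangeStreamBatchCursor.tryNext() forwards a null batch from the wrapped cursor instead of passing it into convertAndProduceLastId(...). A hypothetical caller-side sketch of that contract (illustrative only; it assumes just the BatchCursor#tryNext semantics used above, where null means "no batch available yet" rather than "cursor exhausted"):

    import com.mongodb.internal.operation.BatchCursor;

    import java.util.List;
    import java.util.function.Consumer;

    // Hypothetical helper, for illustration only.
    final class TryNextPollingSketch {
        // Polls the cursor once: a null batch means nothing was available at this moment,
        // while a non-null batch has already been decoded (and, for change streams, the
        // wrapper has cached the post-batch resume token in the finally block shown above).
        static <T> boolean pollOnce(final BatchCursor<T> cursor, final Consumer<? super T> sink) {
            List<T> batch = cursor.tryNext();
            if (batch == null) {
                return false;
            }
            batch.forEach(sink);
            return true;
        }
    }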
@@ -179,16 +177,12 @@ public ChangeStreamOperation showExpandedEvents(final boolean showExpandedEve return this; } - @Override public BatchCursor execute(final ReadBinding binding) { - return withReadConnectionSource(binding, source -> { - AggregateResponseBatchCursor cursor = - (AggregateResponseBatchCursor) wrapped.execute(binding); + CommandBatchCursor cursor = (CommandBatchCursor) wrapped.execute(binding); return new ChangeStreamBatchCursor<>(ChangeStreamOperation.this, cursor, binding, setChangeStreamOptions(cursor.getPostBatchResumeToken(), cursor.getOperationTime(), cursor.getMaxWireVersion(), cursor.isFirstBatchEmpty()), cursor.getMaxWireVersion()); - }); } @Override @@ -197,25 +191,17 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb if (t != null) { callback.onResult(null, t); } else { - AsyncAggregateResponseBatchCursor cursor = - (AsyncAggregateResponseBatchCursor) result; - withAsyncReadConnectionSource(binding, (source, t1) -> { - if (t1 != null) { - callback.onResult(null, t1); - } else { - callback.onResult(new AsyncChangeStreamBatchCursor<>(ChangeStreamOperation.this, cursor, binding, - setChangeStreamOptions(cursor.getPostBatchResumeToken(), cursor.getOperationTime(), - cursor.getMaxWireVersion(), cursor.isFirstBatchEmpty()), cursor.getMaxWireVersion()), null); - } - source.release(); // TODO: can this be null? - }); + AsyncCommandBatchCursor cursor = (AsyncCommandBatchCursor) assertNotNull(result); + callback.onResult(new AsyncChangeStreamBatchCursor<>(ChangeStreamOperation.this, cursor, binding, + setChangeStreamOptions(cursor.getPostBatchResumeToken(), cursor.getOperationTime(), + cursor.getMaxWireVersion(), cursor.isFirstBatchEmpty()), cursor.getMaxWireVersion()), null); } }); } @Nullable - private BsonDocument setChangeStreamOptions(@Nullable final BsonDocument postBatchResumeToken, final BsonTimestamp operationTime, - final int maxWireVersion, final boolean firstBatchEmpty) { + private BsonDocument setChangeStreamOptions(@Nullable final BsonDocument postBatchResumeToken, + @Nullable final BsonTimestamp operationTime, final int maxWireVersion, final boolean firstBatchEmpty) { BsonDocument resumeToken = null; if (startAfter != null) { resumeToken = startAfter; diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java new file mode 100644 index 00000000000..f71cce0527b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java @@ -0,0 +1,352 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoSocketException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.connection.Connection; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; + +import java.util.List; +import java.util.NoSuchElementException; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.FIRST_BATCH; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_ITERATOR; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.NEXT_BATCH; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.NO_OP_FIELD_NAME_VALIDATOR; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.getKillCursorsCommand; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.getMoreCommandDocument; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.logCommandCursorResult; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.translateCommandException; + +class CommandBatchCursor implements AggregateResponseBatchCursor { + + private final MongoNamespace namespace; + private final long maxTimeMS; + private final Decoder decoder; + @Nullable + private final BsonValue comment; + private final int maxWireVersion; + private final boolean firstBatchEmpty; + private final ResourceManager resourceManager; + + private int batchSize; + private CommandCursorResult commandCursorResult; + @Nullable + private List nextBatch; + + CommandBatchCursor( + final BsonDocument commandCursorDocument, + final int batchSize, final long maxTimeMS, + final Decoder decoder, + @Nullable final BsonValue comment, + final ConnectionSource connectionSource, + final Connection connection) { + ConnectionDescription connectionDescription = connection.getDescription(); + this.commandCursorResult = toCommandCursorResult(connectionDescription.getServerAddress(), FIRST_BATCH, commandCursorDocument); + this.namespace = commandCursorResult.getNamespace(); + this.batchSize = batchSize; + this.maxTimeMS = maxTimeMS; + this.decoder = decoder; + this.comment = comment; + this.maxWireVersion = connectionDescription.getMaxWireVersion(); + this.firstBatchEmpty = commandCursorResult.getResults().isEmpty(); + + Connection connectionToPin = connectionSource.getServerDescription().getType() == ServerType.LOAD_BALANCER ? 
connection : null; + resourceManager = new ResourceManager(namespace, connectionSource, connectionToPin, commandCursorResult.getServerCursor()); + } + + @Override + public boolean hasNext() { + return assertNotNull(resourceManager.execute(MESSAGE_IF_CLOSED_AS_CURSOR, this::doHasNext)); + } + + private boolean doHasNext() { + if (nextBatch != null) { + return true; + } + + while (resourceManager.getServerCursor() != null) { + getMore(); + if (!resourceManager.operable()) { + throw new IllegalStateException(MESSAGE_IF_CLOSED_AS_CURSOR); + } + if (nextBatch != null) { + return true; + } + } + + return false; + } + + @Override + public List next() { + return assertNotNull(resourceManager.execute(MESSAGE_IF_CLOSED_AS_ITERATOR, this::doNext)); + } + + @Override + public int available() { + return !resourceManager.operable() || nextBatch == null ? 0 : nextBatch.size(); + } + + @Nullable + private List doNext() { + if (!doHasNext()) { + throw new NoSuchElementException(); + } + + List retVal = nextBatch; + nextBatch = null; + return retVal; + } + + @VisibleForTesting(otherwise = PRIVATE) + boolean isClosed() { + return !resourceManager.operable(); + } + + @Override + public void setBatchSize(final int batchSize) { + this.batchSize = batchSize; + } + + @Override + public int getBatchSize() { + return batchSize; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public void close() { + resourceManager.close(); + } + + @Nullable + @Override + public List tryNext() { + return resourceManager.execute(MESSAGE_IF_CLOSED_AS_CURSOR, () -> { + if (!tryHasNext()) { + return null; + } + return doNext(); + }); + } + + private boolean tryHasNext() { + if (nextBatch != null) { + return true; + } + + if (resourceManager.getServerCursor() != null) { + getMore(); + } + + return nextBatch != null; + } + + @Override + @Nullable + public ServerCursor getServerCursor() { + if (!resourceManager.operable()) { + throw new IllegalStateException(MESSAGE_IF_CLOSED_AS_ITERATOR); + } + return resourceManager.getServerCursor(); + } + + @Override + public ServerAddress getServerAddress() { + if (!resourceManager.operable()) { + throw new IllegalStateException(MESSAGE_IF_CLOSED_AS_ITERATOR); + } + + return commandCursorResult.getServerAddress(); + } + + @Override + public BsonDocument getPostBatchResumeToken() { + return commandCursorResult.getPostBatchResumeToken(); + } + + @Override + public BsonTimestamp getOperationTime() { + return commandCursorResult.getOperationTime(); + } + + @Override + public boolean isFirstBatchEmpty() { + return firstBatchEmpty; + } + + @Override + public int getMaxWireVersion() { + return maxWireVersion; + } + + private void getMore() { + ServerCursor serverCursor = assertNotNull(resourceManager.getServerCursor()); + resourceManager.executeWithConnection(connection -> { + ServerCursor nextServerCursor; + try { + this.commandCursorResult = toCommandCursorResult(connection.getDescription().getServerAddress(), NEXT_BATCH, + assertNotNull( + connection.command(namespace.getDatabaseName(), + getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, + maxTimeMS, comment), + NO_OP_FIELD_NAME_VALIDATOR, + ReadPreference.primary(), + CommandResultDocumentCodec.create(decoder, NEXT_BATCH), + assertNotNull(resourceManager.getConnectionSource())))); + nextServerCursor = commandCursorResult.getServerCursor(); + } catch (MongoCommandException e) { + throw translateCommandException(e, serverCursor); 
+ } + resourceManager.setServerCursor(nextServerCursor); + }); + } + + private CommandCursorResult toCommandCursorResult(final ServerAddress serverAddress, final String fieldNameContainingBatch, + final BsonDocument commandCursorDocument) { + CommandCursorResult commandCursorResult = new CommandCursorResult<>(serverAddress, fieldNameContainingBatch, + commandCursorDocument); + logCommandCursorResult(commandCursorResult); + this.nextBatch = commandCursorResult.getResults().isEmpty() ? null : commandCursorResult.getResults(); + return commandCursorResult; + } + + @ThreadSafe + private static final class ResourceManager extends CursorResourceManager { + + ResourceManager( + final MongoNamespace namespace, + final ConnectionSource connectionSource, + @Nullable final Connection connectionToPin, + @Nullable final ServerCursor serverCursor) { + super(namespace, connectionSource, connectionToPin, serverCursor); + } + + /** + * Thread-safe. + * Executes {@code operation} within the {@link #tryStartOperation()}/{@link #endOperation()} bounds. + * + * @throws IllegalStateException If {@linkplain CommandBatchCursor#close() closed}. + */ + @Nullable + R execute(final String exceptionMessageIfClosed, final Supplier operation) throws IllegalStateException { + if (!tryStartOperation()) { + throw new IllegalStateException(exceptionMessageIfClosed); + } + try { + return operation.get(); + } finally { + endOperation(); + } + } + + @Override + void markAsPinned(final Connection connectionToPin, final Connection.PinningMode pinningMode) { + connectionToPin.markAsPinned(pinningMode); + } + + @Override + void doClose() { + if (isSkipReleasingServerResourcesOnClose()) { + unsetServerCursor(); + } + try { + if (getServerCursor() != null) { + Connection connection = getConnection(); + try { + releaseServerResources(connection); + } finally { + connection.release(); + } + } + } catch (MongoException e) { + // ignore exceptions when releasing server resources + } finally { + // guarantee that regardless of exceptions, `serverCursor` is null and client resources are released + unsetServerCursor(); + releaseClientResources(); + } + } + + void executeWithConnection(final Consumer action) { + Connection connection = getConnection(); + try { + action.accept(connection); + } catch (MongoSocketException e) { + onCorruptedConnection(connection, e); + throw e; + } finally { + connection.release(); + } + } + + private Connection getConnection() { + assertTrue(getState() != State.IDLE); + Connection pinnedConnection = getPinnedConnection(); + if (pinnedConnection == null) { + return assertNotNull(getConnectionSource()).getConnection(); + } else { + return pinnedConnection.retain(); + } + } + + private void releaseServerResources(final Connection connection) { + try { + ServerCursor localServerCursor = getServerCursor(); + if (localServerCursor != null) { + killServerCursor(getNamespace(), localServerCursor, connection); + } + } finally { + unsetServerCursor(); + } + } + + private void killServerCursor(final MongoNamespace namespace, final ServerCursor localServerCursor, + final Connection localConnection) { + localConnection.command(namespace.getDatabaseName(), getKillCursorsCommand(namespace, localServerCursor), + NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), + assertNotNull(getConnectionSource())); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java new file 
mode 100644 index 00000000000..eaf03c68ec3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoCursorNotFoundException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoQueryException; +import com.mongodb.ServerCursor; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.FieldNameValidator; + +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.ServerVersionHelper.serverIsAtLeastVersionFourDotFour; +import static java.lang.String.format; +import static java.util.Collections.singletonList; + +final class CommandBatchCursorHelper { + + static final String FIRST_BATCH = "firstBatch"; + static final String NEXT_BATCH = "nextBatch"; + static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); + static final String MESSAGE_IF_CLOSED_AS_CURSOR = "Cursor has been closed"; + static final String MESSAGE_IF_CLOSED_AS_ITERATOR = "Iterator has been closed"; + + static final String MESSAGE_IF_CONCURRENT_OPERATION = "Another operation is currently in progress, concurrent operations are not " + + "supported"; + + static BsonDocument getMoreCommandDocument( + final long cursorId, final ConnectionDescription connectionDescription, final MongoNamespace namespace, final int batchSize, + final long maxTimeMS, @Nullable final BsonValue comment) { + BsonDocument document = new BsonDocument("getMore", new BsonInt64(cursorId)) + .append("collection", new BsonString(namespace.getCollectionName())); + + if (batchSize != 0) { + document.append("batchSize", new BsonInt32(batchSize)); + } + if (maxTimeMS != 0) { + document.append("maxTimeMS", new BsonInt64(maxTimeMS)); + } + if (serverIsAtLeastVersionFourDotFour(connectionDescription)) { + putIfNotNull(document, "comment", comment); + } + return document; + } + + static CommandCursorResult logCommandCursorResult(final CommandCursorResult commandCursorResult) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(format("Received batch of %d documents with cursorId %d from server %s", commandCursorResult.getResults().size(), + commandCursorResult.getCursorId(), commandCursorResult.getServerAddress())); + } + return commandCursorResult; + } + + static BsonDocument getKillCursorsCommand(final MongoNamespace namespace, final ServerCursor serverCursor) { + return new BsonDocument("killCursors", new 
BsonString(namespace.getCollectionName())) + .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))); + } + + + static MongoQueryException translateCommandException(final MongoCommandException commandException, final ServerCursor cursor) { + if (commandException.getErrorCode() == 43) { + return new MongoCursorNotFoundException(cursor.getId(), commandException.getResponse(), cursor.getAddress()); + } else { + return new MongoQueryException(commandException.getResponse(), commandException.getServerAddress()); + } + } + + private CommandBatchCursorHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/QueryResult.java b/driver-core/src/main/com/mongodb/internal/operation/CommandCursorResult.java similarity index 52% rename from driver-core/src/main/com/mongodb/internal/connection/QueryResult.java rename to driver-core/src/main/com/mongodb/internal/operation/CommandCursorResult.java index 52970ba7b94..7bfbfb33cbe 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/QueryResult.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandCursorResult.java @@ -14,40 +14,50 @@ * limitations under the License. */ -package com.mongodb.internal.connection; +package com.mongodb.internal.operation; import com.mongodb.MongoNamespace; import com.mongodb.ServerAddress; import com.mongodb.ServerCursor; import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; import java.util.List; +import static com.mongodb.assertions.Assertions.isTrue; + /** - * A batch of query results. + * The command cursor result * *

This class is not part of the public API and may be removed or changed at any time

*/ -public class QueryResult { - private final MongoNamespace namespace; +public class CommandCursorResult { + + private static final String CURSOR = "cursor"; + private static final String POST_BATCH_RESUME_TOKEN = "postBatchResumeToken"; + private static final String OPERATION_TIME = "operationTime"; + private final ServerAddress serverAddress; private final List results; + private final MongoNamespace namespace; private final long cursorId; - private final ServerAddress serverAddress; + @Nullable + private final BsonTimestamp operationTime; + @Nullable + private final BsonDocument postBatchResumeToken; - /** - * Construct an instance. - * - * @param namespace the namespace - * @param results the query results - * @param cursorId the cursor id - * @param serverAddress the server address - */ - public QueryResult(@Nullable final MongoNamespace namespace, final List results, final long cursorId, - final ServerAddress serverAddress) { - this.namespace = namespace; - this.results = results; - this.cursorId = cursorId; + public CommandCursorResult( + final ServerAddress serverAddress, + final String fieldNameContainingBatch, + final BsonDocument commandCursorDocument) { + isTrue("Contains cursor", commandCursorDocument.isDocument(CURSOR)); this.serverAddress = serverAddress; + BsonDocument cursorDocument = commandCursorDocument.getDocument(CURSOR); + this.results = BsonDocumentWrapperHelper.toList(cursorDocument, fieldNameContainingBatch); + this.namespace = new MongoNamespace(cursorDocument.getString("ns").getValue()); + this.cursorId = cursorDocument.getNumber("id").longValue(); + this.operationTime = cursorDocument.getTimestamp(OPERATION_TIME, null); + this.postBatchResumeToken = cursorDocument.getDocument(POST_BATCH_RESUME_TOKEN, null); } /** @@ -55,7 +65,6 @@ public QueryResult(@Nullable final MongoNamespace namespace, final List resul * * @return the namespace */ - @Nullable public MongoNamespace getNamespace() { return namespace; } @@ -66,7 +75,7 @@ public MongoNamespace getNamespace() { * @return the cursor, which may be null if it's been exhausted */ @Nullable - public ServerCursor getCursor() { + public ServerCursor getServerCursor() { return cursorId == 0 ? null : new ServerCursor(cursorId, serverAddress); } @@ -84,11 +93,21 @@ public List getResults() { * * @return the server address */ - public ServerAddress getAddress() { + public ServerAddress getServerAddress() { return serverAddress; } public long getCursorId() { return cursorId; } + + @Nullable + public BsonDocument getPostBatchResumeToken() { + return postBatchResumeToken; + } + + @Nullable + public BsonTimestamp getOperationTime() { + return operationTime; + } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CursorHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CursorHelper.java index aea2d2df213..26511c86885 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CursorHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CursorHelper.java @@ -22,34 +22,6 @@ final class CursorHelper { - /** - *

Gets the limit of the number of documents in the OP_REPLY response to the get more request. A value of zero tells the server to - * use the default size. A negative value tells the server to return no more than that number and immediately close the cursor. - * Otherwise, the server will return no more than that number and return the same cursorId to allow the rest of the documents to be - * fetched, if it turns out there are more documents. - * - * The value returned by this method is based on the limit, the batch size, both of which can be positive, negative, or zero, and the - * number of documents fetched so far.
- * - * @return the value for numberToReturn in the OP_GET_MORE wire protocol message. - * @mongodb.driver.manual ../meta-driver/latest/legacy/mongodb-wire-protocol/#op-get-more OP_GET_MORE - * @param limit the user-specified limit on the number of results returned - * @param batchSize the user-specified batch size - * @param numReturnedSoFar the number of results returned so far - */ - static int getNumberToReturn(final int limit, final int batchSize, final int numReturnedSoFar) { - int numberToReturn; - if (Math.abs(limit) != 0) { - numberToReturn = Math.abs(limit) - numReturnedSoFar; - if (batchSize != 0 && numberToReturn > Math.abs(batchSize)) { - numberToReturn = batchSize; - } - } else { - numberToReturn = batchSize; - } - return numberToReturn; - } - static BsonDocument getCursorDocumentFromBatchSize(@Nullable final Integer batchSize) { return batchSize == null ? new BsonDocument() : new BsonDocument("batchSize", new BsonInt32(batchSize)); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java b/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java new file mode 100644 index 00000000000..cb2e5c58e84 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java @@ -0,0 +1,277 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.MongoSocketException; +import com.mongodb.ServerCursor; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.internal.binding.ReferenceCounted; +import com.mongodb.internal.connection.Connection; +import com.mongodb.lang.Nullable; + +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.fail; +import static com.mongodb.internal.Locks.withLock; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CONCURRENT_OPERATION; + +/** + * This is the resource manager for {@link CommandBatchCursor} or {@link AsyncCommandBatchCursor} implementations. + *

+ * This class maintains all resources that must be released in {@link CommandBatchCursor#close()} / + * {@link AsyncCommandBatchCursor#close()}. The abstract {@linkplain #doClose() deferred close action} is such that it is totally + * ordered with other operations of {@link CommandBatchCursor} / {@link AsyncCommandBatchCursor} (methods {@link #tryStartOperation()}/ + * {@link #endOperation()} must be used properly to enforce the order) despite the method {@link CommandBatchCursor#close()} / + * {@link AsyncCommandBatchCursor#close()} being called concurrently with those operations. + *

+ * This total order induces the happens-before order. + *

+ * The deferred close action does not violate externally observable idempotence of {@link CommandBatchCursor#close()} / + * {@link AsyncCommandBatchCursor#close()}, because the close method is allowed to release resources "eventually". + *

+ * Only methods explicitly documented as thread-safe are thread-safe, + * others are not and rely on the total order mentioned above. + */ +@ThreadSafe +abstract class CursorResourceManager { + private final Lock lock; + private final MongoNamespace namespace; + private volatile State state; + @Nullable + private volatile CS connectionSource; + @Nullable + private volatile C pinnedConnection; + @Nullable + private volatile ServerCursor serverCursor; + private volatile boolean skipReleasingServerResourcesOnClose; + + CursorResourceManager( + final MongoNamespace namespace, + final CS connectionSource, + @Nullable final C connectionToPin, + @Nullable final ServerCursor serverCursor) { + this.lock = new ReentrantLock(); + this.namespace = namespace; + this.state = State.IDLE; + if (serverCursor != null) { + connectionSource.retain(); + this.connectionSource = connectionSource; + if (connectionToPin != null) { + connectionToPin.retain(); + markAsPinned(connectionToPin, Connection.PinningMode.CURSOR); + this.pinnedConnection = connectionToPin; + } + } + this.skipReleasingServerResourcesOnClose = false; + this.serverCursor = serverCursor; + } + + /** + * Thread-safe. + */ + MongoNamespace getNamespace() { + return namespace; + } + + /** + * Thread-safe. + */ + State getState() { + return state; + } + + /** + * Thread-safe. + */ + @Nullable + CS getConnectionSource() { + return connectionSource; + } + + /** + * Thread-safe. + */ + @Nullable + C getPinnedConnection() { + return pinnedConnection; + } + + /** + * Thread-safe. + */ + boolean isSkipReleasingServerResourcesOnClose() { + return skipReleasingServerResourcesOnClose; + } + + @SuppressWarnings("SameParameterValue") + abstract void markAsPinned(C connectionToPin, Connection.PinningMode pinningMode); + + /** + * Thread-safe. + */ + boolean operable() { + return state.operable(); + } + + /** + * Thread-safe. + * Returns {@code true} iff started an operation. + * If {@linkplain #operable() closed}, then returns false, otherwise completes abruptly. + * + * @throws IllegalStateException Iff another operation is in progress. + */ + boolean tryStartOperation() throws IllegalStateException { + return withLock(lock, () -> { + State localState = state; + if (!localState.operable()) { + return false; + } else if (localState == State.IDLE) { + state = State.OPERATION_IN_PROGRESS; + return true; + } else if (localState == State.OPERATION_IN_PROGRESS) { + throw new IllegalStateException(MESSAGE_IF_CONCURRENT_OPERATION); + } else { + throw fail(state.toString()); + } + }); + } + + /** + * Thread-safe. + */ + void endOperation() { + boolean doClose = withLock(lock, () -> { + State localState = state; + if (localState == State.OPERATION_IN_PROGRESS) { + state = State.IDLE; + } else if (localState == State.CLOSE_PENDING) { + state = State.CLOSED; + return true; + } else if (localState != State.CLOSED) { + throw fail(localState.toString()); + } + return false; + }); + if (doClose) { + doClose(); + } + } + + /** + * Thread-safe. + */ + void close() { + boolean doClose = withLock(lock, () -> { + State localState = state; + if (localState == State.OPERATION_IN_PROGRESS) { + state = State.CLOSE_PENDING; + } else if (localState != State.CLOSED) { + state = State.CLOSED; + return true; + } + return false; + }); + if (doClose) { + doClose(); + } + } + + /** + * This method is never executed concurrently with either itself or other operations + * demarcated by {@link #tryStartOperation()}/{@link #endOperation()}. 
+ */ + abstract void doClose(); + + void onCorruptedConnection(@Nullable final C corruptedConnection, final MongoSocketException e) { + // if `pinnedConnection` is corrupted, then we cannot kill `serverCursor` via such a connection + C localPinnedConnection = pinnedConnection; + if (localPinnedConnection != null) { + if (corruptedConnection != localPinnedConnection) { + e.addSuppressed(new AssertionError("Corrupted connection does not equal the pinned connection.")); + } + skipReleasingServerResourcesOnClose = true; + } + } + + /** + * Thread-safe. + */ + @Nullable + ServerCursor getServerCursor() { + return serverCursor; + } + + void setServerCursor(@Nullable final ServerCursor serverCursor) { + assertTrue(state.inProgress()); + assertNotNull(this.serverCursor); + // without `connectionSource` we will not be able to kill `serverCursor` later + assertNotNull(connectionSource); + this.serverCursor = serverCursor; + if (serverCursor == null) { + releaseClientResources(); + } + } + + void unsetServerCursor() { + this.serverCursor = null; + } + + void releaseClientResources() { + assertNull(serverCursor); + CS localConnectionSource = connectionSource; + if (localConnectionSource != null) { + localConnectionSource.release(); + connectionSource = null; + } + C localPinnedConnection = pinnedConnection; + if (localPinnedConnection != null) { + localPinnedConnection.release(); + pinnedConnection = null; + } + } + + enum State { + IDLE(true, false), + OPERATION_IN_PROGRESS(true, true), + /** + * Implies {@link #OPERATION_IN_PROGRESS}. + */ + CLOSE_PENDING(false, true), + CLOSED(false, false); + + private final boolean operable; + private final boolean inProgress; + + State(final boolean operable, final boolean inProgress) { + this.operable = operable; + this.inProgress = inProgress; + } + + boolean operable() { + return operable; + } + + boolean inProgress() { + return inProgress; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java index a64c4cbfadd..d9fa0cfd72e 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java @@ -23,7 +23,6 @@ import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; -import com.mongodb.internal.connection.QueryResult; import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; @@ -36,15 +35,15 @@ import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; -import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.asyncSingleBatchCursorTransformer; import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; -import static 
com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; +import static com.mongodb.internal.operation.SyncOperationHelper.singleBatchCursorTransformer; /** * Finds the distinct values for a specified field across a single collection. @@ -116,42 +115,22 @@ public DistinctOperation comment(final BsonValue comment) { return this; } - @Override public BatchCursor execute(final ReadBinding binding) { return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - createCommandDecoder(), transformer(), retryReads); + createCommandDecoder(), singleBatchCursorTransformer(VALUES), retryReads); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - createCommandDecoder(), asyncTransformer(), retryReads, errorHandlingCallback(callback, LOGGER)); + createCommandDecoder(), asyncSingleBatchCursorTransformer(VALUES), retryReads, errorHandlingCallback(callback, LOGGER)); } private Codec createCommandDecoder() { return CommandResultDocumentCodec.create(decoder, VALUES); } - private QueryResult createQueryResult(final BsonDocument result, final ConnectionDescription description) { - return new QueryResult<>(namespace, BsonDocumentWrapperHelper.toList(result, VALUES), 0L, - description.getServerAddress()); - } - - private CommandReadTransformer> transformer() { - return (result, source, connection) -> { - QueryResult queryResult = createQueryResult(result, connection.getDescription()); - return new QueryBatchCursor<>(queryResult, 0, 0, decoder, comment, source); - }; - } - - private CommandReadTransformerAsync> asyncTransformer() { - return (result, source, connection) -> { - QueryResult queryResult = createQueryResult(result, connection.getDescription()); - return new AsyncSingleBatchQueryCursor<>(queryResult); - }; - } - private CommandCreator getCommandCreator(final SessionContext sessionContext) { return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java index dcb94211fcf..72d20835aa1 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java @@ -29,7 +29,6 @@ import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.internal.connection.NoOpSessionContext; -import com.mongodb.internal.connection.QueryResult; import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonBoolean; @@ -57,7 +56,6 @@ import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.canRetryRead; -import static com.mongodb.internal.operation.OperationHelper.cursorDocumentToQueryResult; import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; @@ 
-471,13 +469,9 @@ private boolean isAwaitData() { return cursorType == CursorType.TailableAwait; } - private CommandReadTransformer> transformer() { - return (result, source, connection) -> { - QueryResult queryResult = cursorDocumentToQueryResult(result.getDocument("cursor"), - connection.getDescription().getServerAddress()); - return new QueryBatchCursor<>(queryResult, limit, batchSize, getMaxTimeForCursor(), decoder, comment, source, connection, - result); - }; + private CommandReadTransformer> transformer() { + return (result, source, connection) -> + new CommandBatchCursor<>(result, batchSize, getMaxTimeForCursor(), decoder, comment, source, connection); } private long getMaxTimeForCursor() { @@ -485,11 +479,7 @@ private long getMaxTimeForCursor() { } private CommandReadTransformerAsync> asyncTransformer() { - return (result, source, connection) -> { - QueryResult queryResult = cursorDocumentToQueryResult(result.getDocument("cursor"), - connection.getDescription().getServerAddress()); - return new AsyncQueryBatchCursor<>(queryResult, limit, batchSize, getMaxTimeForCursor(), decoder, comment, source, - connection, result); - }; + return (result, source, connection) -> + new AsyncCommandBatchCursor<>(result, batchSize, getMaxTimeForCursor(), decoder, comment, source, connection); } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java index fa2a5dcd995..f8ef462b5d2 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java @@ -22,7 +22,6 @@ import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackSupplier; import com.mongodb.internal.async.function.RetryState; -import com.mongodb.internal.binding.AsyncConnectionSource; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.lang.Nullable; @@ -40,11 +39,11 @@ import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; -import static com.mongodb.internal.operation.AsyncOperationHelper.createEmptyAsyncBatchCursor; import static com.mongodb.internal.operation.AsyncOperationHelper.createReadCommandAndExecuteAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.cursorDocumentToAsyncBatchCursor; import static com.mongodb.internal.operation.AsyncOperationHelper.decorateReadWithRetriesAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; +import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor; import static com.mongodb.internal.operation.CommandOperationHelper.initialRetryState; import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError; @@ -52,7 +51,7 @@ import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.canRetryRead; -import static com.mongodb.internal.operation.OperationHelper.createEmptyBatchCursor; +import static 
com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.createReadCommandAndExecute; import static com.mongodb.internal.operation.SyncOperationHelper.cursorDocumentToBatchCursor; @@ -148,8 +147,8 @@ public BatchCursor execute(final ReadBinding binding) { return createReadCommandAndExecute(retryState, binding, source, databaseName, getCommandCreator(), createCommandDecoder(), commandTransformer(), connection); } catch (MongoCommandException e) { - return rethrowIfNotNamespaceError(e, createEmptyBatchCursor(createNamespace(), decoder, - source.getServerDescription().getAddress(), batchSize)); + return rethrowIfNotNamespaceError(e, + createEmptySingleBatchCursor(source.getServerDescription().getAddress(), batchSize)); } }) ); @@ -173,7 +172,8 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb if (t != null && !isNamespaceError(t)) { releasingCallback.onResult(null, t); } else { - releasingCallback.onResult(result != null ? result : emptyAsyncCursor(source), null); + releasingCallback.onResult(result != null + ? result : createEmptyAsyncSingleBatchCursor(getBatchSize()), null); } }); }) @@ -181,20 +181,16 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb asyncRead.get(errorHandlingCallback(callback, LOGGER)); } - private AsyncBatchCursor emptyAsyncCursor(final AsyncConnectionSource source) { - return createEmptyAsyncBatchCursor(createNamespace(), source.getServerDescription().getAddress()); - } - private MongoNamespace createNamespace() { return new MongoNamespace(databaseName, "$cmd.listCollections"); } private CommandReadTransformerAsync> asyncTransformer() { - return (result, source, connection) -> cursorDocumentToAsyncBatchCursor(result.getDocument("cursor"), decoder, comment, source, connection, batchSize); + return (result, source, connection) -> cursorDocumentToAsyncBatchCursor(result, decoder, comment, source, connection, batchSize); } private CommandReadTransformer> commandTransformer() { - return (result, source, connection) -> cursorDocumentToBatchCursor(result.getDocument("cursor"), decoder, comment, source, connection, batchSize); + return (result, source, connection) -> cursorDocumentToBatchCursor(result, decoder, comment, source, connection, batchSize); } private CommandOperationHelper.CommandCreator getCommandCreator() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java index bacf64601c9..fec689c938f 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java @@ -16,12 +16,11 @@ package com.mongodb.internal.operation; -import com.mongodb.connection.ConnectionDescription; + import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; -import com.mongodb.internal.connection.QueryResult; import com.mongodb.lang.Nullable; import org.bson.BsonBoolean; import org.bson.BsonDocument; @@ -34,13 +33,13 @@ import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; -import 
static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.asyncSingleBatchCursorTransformer; import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; import static com.mongodb.internal.operation.OperationHelper.LOGGER; -import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; +import static com.mongodb.internal.operation.SyncOperationHelper.singleBatchCursorTransformer; /** @@ -49,6 +48,9 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public class ListDatabasesOperation implements AsyncReadOperation>, ReadOperation> { + + private static final String DATABASES = "databases"; + private final Decoder decoder; private boolean retryReads; @@ -122,28 +124,16 @@ public ListDatabasesOperation comment(@Nullable final BsonValue comment) { @Override public BatchCursor execute(final ReadBinding binding) { return executeRetryableRead(binding, "admin", getCommandCreator(), - CommandResultDocumentCodec.create(decoder, "databases"), transformer(), retryReads); + CommandResultDocumentCodec.create(decoder, DATABASES), + singleBatchCursorTransformer(DATABASES), retryReads); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { executeRetryableReadAsync(binding, "admin", getCommandCreator(), - CommandResultDocumentCodec.create(decoder, "databases"), asyncTransformer(), - retryReads, errorHandlingCallback(callback, LOGGER)); - } - - private CommandReadTransformer> transformer() { - return (result, source, connection) -> new QueryBatchCursor<>(createQueryResult(result, connection.getDescription()), 0, 0, decoder, comment, source); - } - - private CommandReadTransformerAsync> asyncTransformer() { - return (result, source, connection) -> new AsyncQueryBatchCursor<>(createQueryResult(result, connection.getDescription()), 0, 0, 0, decoder, - comment, source, connection, result); - } - - private QueryResult createQueryResult(final BsonDocument result, final ConnectionDescription description) { - return new QueryResult<>(null, BsonDocumentWrapperHelper.toList(result, "databases"), 0, - description.getServerAddress()); + CommandResultDocumentCodec.create(decoder, DATABASES), + asyncSingleBatchCursorTransformer(DATABASES), retryReads, + errorHandlingCallback(callback, LOGGER)); } private CommandCreator getCommandCreator() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java index 62ecdc953bd..e4d0138121d 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java @@ -22,7 +22,6 @@ import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackSupplier; import com.mongodb.internal.async.function.RetryState; -import com.mongodb.internal.binding.AsyncConnectionSource; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.lang.Nullable; @@ -39,11 +38,11 @@ import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; -import static com.mongodb.internal.operation.AsyncOperationHelper.createEmptyAsyncBatchCursor; import static com.mongodb.internal.operation.AsyncOperationHelper.createReadCommandAndExecuteAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.cursorDocumentToAsyncBatchCursor; import static com.mongodb.internal.operation.AsyncOperationHelper.decorateReadWithRetriesAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; +import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor; import static 
com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.CommandOperationHelper.initialRetryState; import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; @@ -52,7 +51,7 @@ import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.canRetryRead; -import static com.mongodb.internal.operation.OperationHelper.createEmptyBatchCursor; +import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.createReadCommandAndExecute; import static com.mongodb.internal.operation.SyncOperationHelper.cursorDocumentToBatchCursor; @@ -127,8 +126,8 @@ public BatchCursor execute(final ReadBinding binding) { return createReadCommandAndExecute(retryState, binding, source, namespace.getDatabaseName(), getCommandCreator(), createCommandDecoder(), transformer(), connection); } catch (MongoCommandException e) { - return rethrowIfNotNamespaceError(e, createEmptyBatchCursor(namespace, decoder, - source.getServerDescription().getAddress(), batchSize)); + return rethrowIfNotNamespaceError(e, + createEmptySingleBatchCursor(source.getServerDescription().getAddress(), batchSize)); } }) ); @@ -152,7 +151,8 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb if (t != null && !isNamespaceError(t)) { releasingCallback.onResult(null, t); } else { - releasingCallback.onResult(result != null ? result : emptyAsyncCursor(source), null); + releasingCallback.onResult(result != null + ? 
result : createEmptyAsyncSingleBatchCursor(getBatchSize()), null); } }); }) @@ -160,9 +160,6 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb asyncRead.get(errorHandlingCallback(callback, LOGGER)); } - private AsyncBatchCursor emptyAsyncCursor(final AsyncConnectionSource source) { - return createEmptyAsyncBatchCursor(namespace, source.getServerDescription().getAddress()); - } private CommandCreator getCommandCreator() { return (serverDescription, connectionDescription) -> getCommand(); @@ -179,11 +176,11 @@ private BsonDocument getCommand() { } private CommandReadTransformer> transformer() { - return (result, source, connection) -> cursorDocumentToBatchCursor(result.getDocument("cursor"), decoder, comment, source, connection, batchSize); + return (result, source, connection) -> cursorDocumentToBatchCursor(result, decoder, comment, source, connection, batchSize); } private CommandReadTransformerAsync> asyncTransformer() { - return (result, source, connection) -> cursorDocumentToAsyncBatchCursor(result.getDocument("cursor"), decoder, comment, source, connection, batchSize); + return (result, source, connection) -> cursorDocumentToAsyncBatchCursor(result, decoder, comment, source, connection, batchSize); } private Codec createCommandDecoder() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java index 4c471a16bd4..74313059099 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java @@ -34,9 +34,9 @@ import java.util.Collections; import java.util.concurrent.TimeUnit; -import static com.mongodb.internal.operation.AsyncOperationHelper.createEmptyAsyncBatchCursor; +import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor; import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; -import static com.mongodb.internal.operation.OperationHelper.createEmptyBatchCursor; +import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor; /** * An operation that lists Atlas Search indexes with the help of {@value #STAGE_LIST_SEARCH_INDEXES} pipeline stage. @@ -90,7 +90,7 @@ public BatchCursor execute(final ReadBinding binding) { if (!isNamespaceError(exception)) { throw exception; } else { - return createEmptyBatchCursor(namespace, decoder, exception.getServerAddress(), cursorBatchSize); + return createEmptySingleBatchCursor(exception.getServerAddress(), cursorBatchSize); } } } @@ -101,9 +101,7 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb if (exception != null && !isNamespaceError(exception)) { callback.onResult(null, exception); } else if (exception != null) { - MongoCommandException commandException = (MongoCommandException) exception; - AsyncBatchCursor emptyAsyncBatchCursor = createEmptyAsyncBatchCursor(namespace, commandException.getServerAddress()); - callback.onResult(emptyAsyncBatchCursor, null); + callback.onResult(createEmptyAsyncSingleBatchCursor(batchSize == null ?
0 : batchSize), null); } else { callback.onResult(cursor, null); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsAsyncCursor.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsAsyncCursor.java index 1da84755100..ebf331fe47b 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsAsyncCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsAsyncCursor.java @@ -16,18 +16,21 @@ package com.mongodb.internal.operation; -import com.mongodb.internal.connection.QueryResult; +import com.mongodb.internal.async.SingleResultCallback; + +import java.util.List; /** * Cursor representation of the results of an inline map-reduce operation. This allows users to iterate over the results that were returned * from the operation, and also provides access to the statistics returned in the results. */ -class MapReduceInlineResultsAsyncCursor extends AsyncSingleBatchQueryCursor implements MapReduceAsyncBatchCursor { +class MapReduceInlineResultsAsyncCursor implements MapReduceAsyncBatchCursor { + private final AsyncSingleBatchCursor delegate; private final MapReduceStatistics statistics; - MapReduceInlineResultsAsyncCursor(final QueryResult queryResult, final MapReduceStatistics statistics) { - super(queryResult); + MapReduceInlineResultsAsyncCursor(final AsyncSingleBatchCursor delegate, final MapReduceStatistics statistics) { + this.delegate = delegate; this.statistics = statistics; } @@ -35,4 +38,29 @@ class MapReduceInlineResultsAsyncCursor extends AsyncSingleBatchQueryCursor> callback) { + delegate.next(callback); + } + + @Override + public void setBatchSize(final int batchSize) { + delegate.setBatchSize(batchSize); + } + + @Override + public int getBatchSize() { + return delegate.getBatchSize(); + } + + @Override + public boolean isClosed() { + return delegate.isClosed(); + } + + @Override + public void close() { + delegate.close(); + } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsCursor.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsCursor.java index caa2f7fd355..564eac4a8f0 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsCursor.java @@ -16,20 +16,21 @@ package com.mongodb.internal.operation; -import com.mongodb.internal.binding.ConnectionSource; -import com.mongodb.internal.connection.QueryResult; -import org.bson.codecs.Decoder; +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; + +import java.util.List; /** * Cursor representation of the results of an inline map-reduce operation. This allows users to iterate over the results that were returned * from the operation, and also provides access to the statistics returned in the results. 
*/ -class MapReduceInlineResultsCursor extends QueryBatchCursor implements MapReduceBatchCursor { +class MapReduceInlineResultsCursor implements MapReduceBatchCursor { + private final BatchCursor delegate; private final MapReduceStatistics statistics; - MapReduceInlineResultsCursor(final QueryResult queryResult, final Decoder decoder, final ConnectionSource connectionSource, - final MapReduceStatistics statistics) { - super(queryResult, 0, 0, decoder, null, connectionSource); + MapReduceInlineResultsCursor(final BatchCursor delegate, final MapReduceStatistics statistics) { + this.delegate = delegate; this.statistics = statistics; } @@ -37,4 +38,49 @@ class MapReduceInlineResultsCursor extends QueryBatchCursor implements Map public MapReduceStatistics getStatistics() { return statistics; } + + @Override + public boolean hasNext() { + return delegate.hasNext(); + } + + @Override + public List next() { + return delegate.next(); + } + + @Override + public int available() { + return delegate.available(); + } + + @Override + public void setBatchSize(final int batchSize) { + delegate.setBatchSize(batchSize); + } + + @Override + public int getBatchSize() { + return delegate.getBatchSize(); + } + + @Override + public List tryNext() { + return delegate.tryNext(); + } + + @Override + public ServerCursor getServerCursor() { + return delegate.getServerCursor(); + } + + @Override + public ServerAddress getServerAddress() { + return delegate.getServerAddress(); + } + + @Override + public void close() { + delegate.close(); + } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java index 131591dd6e2..7205a09dad6 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java @@ -19,12 +19,10 @@ import com.mongodb.ExplainVerbosity; import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; -import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.internal.connection.NoOpSessionContext; -import com.mongodb.internal.connection.QueryResult; import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; @@ -215,12 +213,16 @@ private CommandReadOperation createExplainableOperation(final Expl } private CommandReadTransformer> transformer() { - return (result, source, connection) -> new MapReduceInlineResultsCursor<>(createQueryResult(result, connection.getDescription()), decoder, source, - MapReduceHelper.createStatistics(result)); + return (result, source, connection) -> + new MapReduceInlineResultsCursor<>( + new SingleBatchCursor<>(BsonDocumentWrapperHelper.toList(result, "results"), 0, + connection.getDescription().getServerAddress()), + MapReduceHelper.createStatistics(result)); } private CommandReadTransformerAsync> asyncTransformer() { - return (result, source, connection) -> new MapReduceInlineResultsAsyncCursor<>(createQueryResult(result, connection.getDescription()), + return (result, source, connection) -> new MapReduceInlineResultsAsyncCursor<>( + new AsyncSingleBatchCursor<>(BsonDocumentWrapperHelper.toList(result, "results"), 0), MapReduceHelper.createStatistics(result)); } 
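A minimal consumption sketch (not part of this patch) for the cursor built by the sync transformer above; it assumes, based on the delegate methods shown earlier in this diff, that SingleBatchCursor yields its single inline batch once and is then exhausted:
// Illustrative only: `result` is the map-reduce command reply and `connection` the connection that ran it.
List<BsonDocument> inline = BsonDocumentWrapperHelper.toList(result, "results");
BatchCursor<BsonDocument> singleBatch = new SingleBatchCursor<>(inline, 0, connection.getDescription().getServerAddress());
MapReduceBatchCursor<BsonDocument> cursor = new MapReduceInlineResultsCursor<>(singleBatch, MapReduceHelper.createStatistics(result));
try {
    while (cursor.hasNext()) {
        List<BsonDocument> batch = cursor.next(); // assumed: the one inline batch, after which hasNext() returns false
    }
    MapReduceStatistics statistics = cursor.getStatistics(); // statistics carried alongside the delegated cursor
} finally {
    cursor.close();
}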
@@ -248,9 +250,4 @@ private BsonDocument getCommand(final SessionContext sessionContext, final int m } return commandDocument; } - - private QueryResult createQueryResult(final BsonDocument result, final ConnectionDescription description) { - return new QueryResult<>(namespace, BsonDocumentWrapperHelper.toList(result, "results"), 0, - description.getServerAddress()); - } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java index 387bb2f5da6..bfa1adbd97e 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java @@ -17,8 +17,6 @@ package com.mongodb.internal.operation; import com.mongodb.MongoClientException; -import com.mongodb.MongoNamespace; -import com.mongodb.ServerAddress; import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; @@ -30,18 +28,14 @@ import com.mongodb.internal.bulk.DeleteRequest; import com.mongodb.internal.bulk.UpdateRequest; import com.mongodb.internal.bulk.WriteRequest; -import com.mongodb.internal.connection.QueryResult; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.NonNull; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; -import org.bson.BsonInt64; -import org.bson.codecs.Decoder; import org.bson.conversions.Bson; -import java.util.Collections; import java.util.List; import java.util.function.Function; import java.util.function.Supplier; @@ -200,26 +194,6 @@ static boolean canRetryRead(final ServerDescription serverDescription, final Ses return true; } - static QueryBatchCursor createEmptyBatchCursor(final MongoNamespace namespace, final Decoder decoder, - final ServerAddress serverAddress, final int batchSize) { - return new QueryBatchCursor<>(new QueryResult<>(namespace, Collections.emptyList(), 0L, - serverAddress), - 0, batchSize, decoder); - } - - static QueryResult cursorDocumentToQueryResult(final BsonDocument cursorDocument, final ServerAddress serverAddress) { - return cursorDocumentToQueryResult(cursorDocument, serverAddress, "firstBatch"); - } - - static QueryResult cursorDocumentToQueryResult(final BsonDocument cursorDocument, final ServerAddress serverAddress, - final String fieldNameContainingBatch) { - long cursorId = ((BsonInt64) cursorDocument.get("id")).getValue(); - MongoNamespace queryResultNamespace = new MongoNamespace(cursorDocument.getString("ns").getValue()); - return new QueryResult<>(queryResultNamespace, BsonDocumentWrapperHelper.toList(cursorDocument, fieldNameContainingBatch), - cursorId, serverAddress); - } - - /** * This internal exception is used to *
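Before the deleted QueryBatchCursor below (which carried its own inline resource manager), a short usage sketch of the operation/close protocol that the extracted CursorResourceManager earlier in this patch enforces; the method names and state transitions are taken from the code above, while the operation body is illustrative:
// A cursor operation is bracketed by tryStartOperation()/endOperation(); a close() that races with it
// moves the state to CLOSE_PENDING, and endOperation() then performs the deferred doClose().
if (!resourceManager.tryStartOperation()) { // returns false only once the cursor has been closed
    throw new IllegalStateException("Cursor has been closed");
}
try {
    // ... issue the getMore and buffer the next batch (illustrative) ...
} finally {
    resourceManager.endOperation(); // runs doClose() if close() was called while the operation was in progress
}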
    diff --git a/driver-core/src/main/com/mongodb/internal/operation/QueryBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/QueryBatchCursor.java deleted file mode 100644 index 587237fcaf8..00000000000 --- a/driver-core/src/main/com/mongodb/internal/operation/QueryBatchCursor.java +++ /dev/null @@ -1,625 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.internal.operation; - -import com.mongodb.MongoCommandException; -import com.mongodb.MongoException; -import com.mongodb.MongoNamespace; -import com.mongodb.MongoSocketException; -import com.mongodb.ReadPreference; -import com.mongodb.ServerAddress; -import com.mongodb.ServerCursor; -import com.mongodb.annotations.ThreadSafe; -import com.mongodb.connection.ConnectionDescription; -import com.mongodb.connection.ServerType; -import com.mongodb.internal.binding.ConnectionSource; -import com.mongodb.internal.connection.Connection; -import com.mongodb.internal.connection.QueryResult; -import com.mongodb.internal.diagnostics.logging.Logger; -import com.mongodb.internal.diagnostics.logging.Loggers; -import com.mongodb.internal.validator.NoOpFieldNameValidator; -import com.mongodb.lang.Nullable; -import org.bson.BsonArray; -import org.bson.BsonDocument; -import org.bson.BsonInt32; -import org.bson.BsonInt64; -import org.bson.BsonString; -import org.bson.BsonTimestamp; -import org.bson.BsonValue; -import org.bson.FieldNameValidator; -import org.bson.codecs.BsonDocumentCodec; -import org.bson.codecs.Decoder; - -import java.util.List; -import java.util.NoSuchElementException; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.StampedLock; -import java.util.function.Consumer; -import java.util.function.Supplier; - -import static com.mongodb.assertions.Assertions.assertNotNull; -import static com.mongodb.assertions.Assertions.assertNull; -import static com.mongodb.assertions.Assertions.assertTrue; -import static com.mongodb.assertions.Assertions.fail; -import static com.mongodb.assertions.Assertions.isTrueArgument; -import static com.mongodb.assertions.Assertions.notNull; -import static com.mongodb.internal.Locks.withLock; -import static com.mongodb.internal.operation.CursorHelper.getNumberToReturn; -import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; -import static com.mongodb.internal.operation.SyncOperationHelper.getMoreCursorDocumentToQueryResult; -import static com.mongodb.internal.operation.QueryHelper.translateCommandException; -import static com.mongodb.internal.operation.ServerVersionHelper.serverIsAtLeastVersionFourDotFour; -import static java.lang.String.format; -import static java.util.Collections.singletonList; - -class QueryBatchCursor implements AggregateResponseBatchCursor { - private static final Logger LOGGER = Loggers.getLogger("operation"); - private static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); - private static final String CURSOR = 
"cursor"; - private static final String POST_BATCH_RESUME_TOKEN = "postBatchResumeToken"; - private static final String OPERATION_TIME = "operationTime"; - private static final String MESSAGE_IF_CLOSED_AS_CURSOR = "Cursor has been closed"; - private static final String MESSAGE_IF_CLOSED_AS_ITERATOR = "Iterator has been closed"; - - private final MongoNamespace namespace; - private final ServerAddress serverAddress; - private final int limit; - private final Decoder decoder; - private final long maxTimeMS; - private int batchSize; - private final BsonValue comment; - private List nextBatch; - private int count; - private BsonDocument postBatchResumeToken; - private BsonTimestamp operationTime; - private final boolean firstBatchEmpty; - private int maxWireVersion = 0; - private final ResourceManager resourceManager; - - QueryBatchCursor(final QueryResult firstQueryResult, final int limit, final int batchSize, final Decoder decoder) { - this(firstQueryResult, limit, batchSize, decoder, null, null); - } - - QueryBatchCursor(final QueryResult firstQueryResult, final int limit, final int batchSize, final Decoder decoder, - @Nullable final BsonValue comment, @Nullable final ConnectionSource connectionSource) { - this(firstQueryResult, limit, batchSize, 0, decoder, comment, connectionSource, null, null); - } - - QueryBatchCursor(final QueryResult firstQueryResult, final int limit, final int batchSize, final long maxTimeMS, - final Decoder decoder, @Nullable final BsonValue comment, @Nullable final ConnectionSource connectionSource, - @Nullable final Connection connection) { - this(firstQueryResult, limit, batchSize, maxTimeMS, decoder, comment, connectionSource, connection, null); - } - - QueryBatchCursor(final QueryResult firstQueryResult, final int limit, final int batchSize, final long maxTimeMS, - final Decoder decoder, @Nullable final BsonValue comment, @Nullable final ConnectionSource connectionSource, - @Nullable final Connection connection, @Nullable final BsonDocument result) { - isTrueArgument("maxTimeMS >= 0", maxTimeMS >= 0); - this.maxTimeMS = maxTimeMS; - this.namespace = firstQueryResult.getNamespace(); - this.serverAddress = firstQueryResult.getAddress(); - this.limit = limit; - this.comment = comment; - this.batchSize = batchSize; - this.decoder = notNull("decoder", decoder); - if (result != null) { - this.operationTime = result.getTimestamp(OPERATION_TIME, null); - this.postBatchResumeToken = getPostBatchResumeTokenFromResponse(result); - } - ServerCursor serverCursor = initFromQueryResult(firstQueryResult); - if (serverCursor != null) { - notNull("connectionSource", connectionSource); - } - firstBatchEmpty = firstQueryResult.getResults().isEmpty(); - Connection connectionToPin = null; - boolean releaseServerAndResources = false; - if (connection != null) { - this.maxWireVersion = connection.getDescription().getMaxWireVersion(); - if (limitReached()) { - releaseServerAndResources = true; - } else { - assertNotNull(connectionSource); - if (connectionSource.getServerDescription().getType() == ServerType.LOAD_BALANCER) { - connectionToPin = connection; - } - } - } - resourceManager = new ResourceManager(connectionSource, connectionToPin, serverCursor); - if (releaseServerAndResources) { - resourceManager.releaseServerAndClientResources(assertNotNull(connection)); - } - } - - @Override - public boolean hasNext() { - return assertNotNull(resourceManager.execute(MESSAGE_IF_CLOSED_AS_CURSOR, this::doHasNext)); - } - - private boolean doHasNext() { - if (nextBatch != null) { - return 
true; - } - - if (limitReached()) { - return false; - } - - while (resourceManager.serverCursor() != null) { - getMore(); - if (!resourceManager.operable()) { - throw new IllegalStateException(MESSAGE_IF_CLOSED_AS_CURSOR); - } - if (nextBatch != null) { - return true; - } - } - - return false; - } - - @Override - public List next() { - return assertNotNull(resourceManager.execute(MESSAGE_IF_CLOSED_AS_ITERATOR, this::doNext)); - } - - @Override - public int available() { - return !resourceManager.operable() || nextBatch == null ? 0 : nextBatch.size(); - } - - private List doNext() { - if (!doHasNext()) { - throw new NoSuchElementException(); - } - - List retVal = nextBatch; - nextBatch = null; - return retVal; - } - - @Override - public void setBatchSize(final int batchSize) { - this.batchSize = batchSize; - } - - @Override - public int getBatchSize() { - return batchSize; - } - - @Override - public void remove() { - throw new UnsupportedOperationException("Not implemented yet!"); - } - - @Override - public void close() { - resourceManager.close(); - } - - @Nullable - @Override - public List tryNext() { - return resourceManager.execute(MESSAGE_IF_CLOSED_AS_CURSOR, () -> { - if (!tryHasNext()) { - return null; - } - return doNext(); - }); - } - - private boolean tryHasNext() { - if (nextBatch != null) { - return true; - } - - if (limitReached()) { - return false; - } - - if (resourceManager.serverCursor() != null) { - getMore(); - } - - return nextBatch != null; - } - - @Override - @Nullable - public ServerCursor getServerCursor() { - if (!resourceManager.operable()) { - throw new IllegalStateException(MESSAGE_IF_CLOSED_AS_ITERATOR); - } - - return resourceManager.serverCursor(); - } - - @Override - public ServerAddress getServerAddress() { - if (!resourceManager.operable()) { - throw new IllegalStateException(MESSAGE_IF_CLOSED_AS_ITERATOR); - } - - return serverAddress; - } - - @Override - public BsonDocument getPostBatchResumeToken() { - return postBatchResumeToken; - } - - @Override - public BsonTimestamp getOperationTime() { - return operationTime; - } - - @Override - public boolean isFirstBatchEmpty() { - return firstBatchEmpty; - } - - @Override - public int getMaxWireVersion() { - return maxWireVersion; - } - - private void getMore() { - ServerCursor serverCursor = assertNotNull(resourceManager.serverCursor()); - resourceManager.executeWithConnection(connection -> { - ServerCursor nextServerCursor; - try { - nextServerCursor = initFromCommandResult(connection.command(namespace.getDatabaseName(), - asGetMoreCommandDocument(serverCursor.getId(), connection.getDescription()), - NO_OP_FIELD_NAME_VALIDATOR, - ReadPreference.primary(), - CommandResultDocumentCodec.create(decoder, "nextBatch"), - assertNotNull(resourceManager.connectionSource))); - } catch (MongoCommandException e) { - throw translateCommandException(e, serverCursor); - } - resourceManager.setServerCursor(nextServerCursor); - if (limitReached()) { - resourceManager.releaseServerAndClientResources(connection); - } - }); - } - - private BsonDocument asGetMoreCommandDocument(final long cursorId, final ConnectionDescription connectionDescription) { - BsonDocument document = new BsonDocument("getMore", new BsonInt64(cursorId)) - .append("collection", new BsonString(namespace.getCollectionName())); - - int batchSizeForGetMoreCommand = Math.abs(getNumberToReturn(limit, this.batchSize, count)); - if (batchSizeForGetMoreCommand != 0) { - document.append("batchSize", new BsonInt32(batchSizeForGetMoreCommand)); - } - if (maxTimeMS != 
0) { - document.append("maxTimeMS", new BsonInt64(maxTimeMS)); - } - if (serverIsAtLeastVersionFourDotFour(connectionDescription)) { - putIfNotNull(document, "comment", comment); - } - return document; - } - - @Nullable - private ServerCursor initFromQueryResult(final QueryResult queryResult) { - nextBatch = queryResult.getResults().isEmpty() ? null : queryResult.getResults(); - count += queryResult.getResults().size(); - LOGGER.debug(format("Received batch of %d documents with cursorId %d from server %s", queryResult.getResults().size(), - queryResult.getCursorId(), queryResult.getAddress())); - return queryResult.getCursor(); - } - - @Nullable - private ServerCursor initFromCommandResult(final BsonDocument getMoreCommandResultDocument) { - QueryResult queryResult = getMoreCursorDocumentToQueryResult(getMoreCommandResultDocument.getDocument(CURSOR), serverAddress); - postBatchResumeToken = getPostBatchResumeTokenFromResponse(getMoreCommandResultDocument); - operationTime = getMoreCommandResultDocument.getTimestamp(OPERATION_TIME, null); - return initFromQueryResult(queryResult); - } - - private boolean limitReached() { - return Math.abs(limit) != 0 && count >= Math.abs(limit); - } - - @Nullable - private BsonDocument getPostBatchResumeTokenFromResponse(final BsonDocument result) { - BsonDocument cursor = result.getDocument(CURSOR, null); - if (cursor != null) { - return cursor.getDocument(POST_BATCH_RESUME_TOKEN, null); - } - return null; - } - - /** - * This class maintains all resources that must be released in {@link QueryBatchCursor#close()}. - * It also implements a {@linkplain #doClose() deferred close action} such that it is totally ordered with other operations of - * {@link QueryBatchCursor} (methods {@link #tryStartOperation()}/{@link #endOperation()} must be used properly to enforce the order) - * despite the method {@link QueryBatchCursor#close()} being called concurrently with those operations. - * This total order induces the happens-before order. - *
* <p>
    - * The deferred close action does not violate externally observable idempotence of {@link QueryBatchCursor#close()}, - * because {@link QueryBatchCursor#close()} is allowed to release resources "eventually". - *
* <p>
    - * Only methods explicitly documented as thread-safe are thread-safe, - * others are not and rely on the total order mentioned above. - */ - @ThreadSafe - private final class ResourceManager { - private final Lock lock; - private volatile State state; - @Nullable - private volatile ConnectionSource connectionSource; - @Nullable - private volatile Connection pinnedConnection; - @Nullable - private volatile ServerCursor serverCursor; - private volatile boolean skipReleasingServerResourcesOnClose; - - ResourceManager(@Nullable final ConnectionSource connectionSource, - @Nullable final Connection connectionToPin, @Nullable final ServerCursor serverCursor) { - lock = new StampedLock().asWriteLock(); - state = State.IDLE; - if (serverCursor != null) { - this.connectionSource = (assertNotNull(connectionSource)).retain(); - if (connectionToPin != null) { - this.pinnedConnection = connectionToPin.retain(); - connectionToPin.markAsPinned(Connection.PinningMode.CURSOR); - } - } - skipReleasingServerResourcesOnClose = false; - this.serverCursor = serverCursor; - } - - /** - * Thread-safe. - */ - boolean operable() { - return state.operable(); - } - - /** - * Thread-safe. - * Executes {@code operation} within the {@link #tryStartOperation()}/{@link #endOperation()} bounds. - * - * @throws IllegalStateException If {@linkplain QueryBatchCursor#close() closed}. - */ - @Nullable - R execute(final String exceptionMessageIfClosed, final Supplier operation) throws IllegalStateException { - if (!tryStartOperation()) { - throw new IllegalStateException(exceptionMessageIfClosed); - } - try { - return operation.get(); - } finally { - endOperation(); - } - } - - /** - * Thread-safe. - * Returns {@code true} iff started an operation. - * If {@linkplain #operable() closed}, then returns false, otherwise completes abruptly. - * @throws IllegalStateException Iff another operation is in progress. - */ - private boolean tryStartOperation() throws IllegalStateException { - return withLock(lock, () -> { - State localState = state; - if (!localState.operable()) { - return false; - } else if (localState == State.IDLE) { - state = State.OPERATION_IN_PROGRESS; - return true; - } else if (localState == State.OPERATION_IN_PROGRESS) { - throw new IllegalStateException("Another operation is currently in progress, concurrent operations are not supported"); - } else { - throw fail(state.toString()); - } - }); - } - - /** - * Thread-safe. - */ - private void endOperation() { - boolean doClose = withLock(lock, () -> { - State localState = state; - if (localState == State.OPERATION_IN_PROGRESS) { - state = State.IDLE; - return false; - } else if (localState == State.CLOSE_PENDING) { - state = State.CLOSED; - return true; - } else { - throw fail(localState.toString()); - } - }); - if (doClose) { - doClose(); - } - } - - /** - * Thread-safe. - */ - void close() { - boolean doClose = withLock(lock, () -> { - State localState = state; - if (localState == State.OPERATION_IN_PROGRESS) { - state = State.CLOSE_PENDING; - return false; - } else if (localState != State.CLOSED) { - state = State.CLOSED; - return true; - } - return false; - }); - if (doClose) { - doClose(); - } - } - - /** - * This method is never executed concurrently with either itself or other operations - * demarcated by {@link #tryStartOperation()}/{@link #endOperation()}. 
- */ - private void doClose() { - try { - if (skipReleasingServerResourcesOnClose) { - serverCursor = null; - } else if (serverCursor != null) { - Connection connection = connection(); - try { - releaseServerResources(connection); - } finally { - connection.release(); - } - } - } catch (MongoException e) { - // ignore exceptions when releasing server resources - } finally { - // guarantee that regardless of exceptions, `serverCursor` is null and client resources are released - serverCursor = null; - releaseClientResources(); - } - } - - void onCorruptedConnection(final Connection corruptedConnection) { - assertTrue(state.inProgress()); - // if `pinnedConnection` is corrupted, then we cannot kill `serverCursor` via such a connection - Connection localPinnedConnection = pinnedConnection; - if (localPinnedConnection != null) { - assertTrue(corruptedConnection == localPinnedConnection); - skipReleasingServerResourcesOnClose = true; - } - } - - void executeWithConnection(final Consumer action) { - Connection connection = connection(); - try { - action.accept(connection); - } catch (MongoSocketException e) { - try { - onCorruptedConnection(connection); - } catch (Exception suppressed) { - e.addSuppressed(suppressed); - } - throw e; - } finally { - connection.release(); - } - } - - private Connection connection() { - assertTrue(state != State.IDLE); - if (pinnedConnection == null) { - return assertNotNull(connectionSource).getConnection(); - } else { - return assertNotNull(pinnedConnection).retain(); - } - } - - /** - * Thread-safe. - */ - @Nullable - ServerCursor serverCursor() { - return serverCursor; - } - - void setServerCursor(@Nullable final ServerCursor serverCursor) { - assertTrue(state.inProgress()); - assertNotNull(this.serverCursor); - // without `connectionSource` we will not be able to kill `serverCursor` later - assertNotNull(connectionSource); - this.serverCursor = serverCursor; - if (serverCursor == null) { - releaseClientResources(); - } - } - - - void releaseServerAndClientResources(final Connection connection) { - try { - releaseServerResources(assertNotNull(connection)); - } finally { - releaseClientResources(); - } - } - - private void releaseServerResources(final Connection connection) { - try { - ServerCursor localServerCursor = serverCursor; - if (localServerCursor != null) { - killServerCursor(namespace, localServerCursor, assertNotNull(connection)); - } - } finally { - serverCursor = null; - } - } - - private void killServerCursor(final MongoNamespace namespace, final ServerCursor serverCursor, final Connection connection) { - connection.command(namespace.getDatabaseName(), asKillCursorsCommandDocument(namespace, serverCursor), - NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), assertNotNull(connectionSource)); - } - - private BsonDocument asKillCursorsCommandDocument(final MongoNamespace namespace, final ServerCursor serverCursor) { - return new BsonDocument("killCursors", new BsonString(namespace.getCollectionName())) - .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))); - } - - private void releaseClientResources() { - assertNull(serverCursor); - ConnectionSource localConnectionSource = connectionSource; - if (localConnectionSource != null) { - localConnectionSource.release(); - connectionSource = null; - } - Connection localPinnedConnection = pinnedConnection; - if (localPinnedConnection != null) { - localPinnedConnection.release(); - pinnedConnection = null; - } - } - } - - private enum State { - 
IDLE(true, false), - OPERATION_IN_PROGRESS(true, true), - /** - * Implies {@link #OPERATION_IN_PROGRESS}. - */ - CLOSE_PENDING(false, true), - CLOSED(false, false); - - private final boolean operable; - private final boolean inProgress; - - State(final boolean operable, final boolean inProgress) { - this.operable = operable; - this.inProgress = inProgress; - } - - boolean operable() { - return operable; - } - - boolean inProgress() { - return inProgress; - } - } -} diff --git a/driver-core/src/main/com/mongodb/internal/operation/SingleBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/SingleBatchCursor.java new file mode 100644 index 00000000000..8a673ee93d9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/SingleBatchCursor.java @@ -0,0 +1,91 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.operation; + +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; + +import java.util.List; +import java.util.NoSuchElementException; + +import static java.util.Collections.emptyList; + +class SingleBatchCursor implements BatchCursor { + + static SingleBatchCursor createEmptySingleBatchCursor(final ServerAddress serverAddress, final int batchSize) { + return new SingleBatchCursor<>(emptyList(), batchSize, serverAddress); + } + + private final List batch; + private final ServerAddress serverAddress; + private final int batchSize; + private boolean hasNext; + + SingleBatchCursor(final List batch, final int batchSize, final ServerAddress serverAddress) { + this.batch = batch; + this.serverAddress = serverAddress; + this.batchSize = batchSize; + this.hasNext = !batch.isEmpty(); + } + + @Override + public boolean hasNext() { + return hasNext; + } + + @Override + public List next() { + if (hasNext) { + hasNext = false; + return batch; + } + throw new NoSuchElementException(); + } + + @Override + public int available() { + return hasNext ? 1 : 0; + } + + @Override + public void setBatchSize(final int batchSize) { + // NOOP + } + + @Override + public int getBatchSize() { + return batchSize; + } + + @Override + public List tryNext() { + return hasNext ? 
next() : null; + } + + @Override + public ServerCursor getServerCursor() { + return null; + } + + @Override + public ServerAddress getServerAddress() { + return serverAddress; + } + + @Override + public void close() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java index 67d5acf9c37..a10604bb717 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java @@ -18,7 +18,6 @@ import com.mongodb.MongoException; import com.mongodb.ReadPreference; -import com.mongodb.ServerAddress; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackBiFunction; @@ -32,7 +31,6 @@ import com.mongodb.internal.binding.WriteBinding; import com.mongodb.internal.connection.Connection; import com.mongodb.internal.connection.OperationContext; -import com.mongodb.internal.connection.QueryResult; import com.mongodb.internal.operation.retry.AttachmentKeys; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; @@ -56,7 +54,6 @@ import static com.mongodb.internal.operation.OperationHelper.ResourceSupplierInternalException; import static com.mongodb.internal.operation.OperationHelper.canRetryRead; import static com.mongodb.internal.operation.OperationHelper.canRetryWrite; -import static com.mongodb.internal.operation.OperationHelper.cursorDocumentToQueryResult; import static com.mongodb.internal.operation.WriteConcernHelper.throwOnWriteConcernError; final class SyncOperationHelper { @@ -303,14 +300,15 @@ static CommandWriteTransformer writeConcernErrorTransformer( }; } - static BatchCursor cursorDocumentToBatchCursor(final BsonDocument cursorDocument, final Decoder decoder, - final BsonValue comment, final ConnectionSource source, final Connection connection, final int batchSize) { - return new QueryBatchCursor<>(cursorDocumentToQueryResult(cursorDocument, source.getServerDescription().getAddress()), - 0, batchSize, 0, decoder, comment, source, connection); + static CommandReadTransformer> singleBatchCursorTransformer(final String fieldName) { + return (result, source, connection) -> + new SingleBatchCursor<>(BsonDocumentWrapperHelper.toList(result, fieldName), 0, + connection.getDescription().getServerAddress()); } - static QueryResult getMoreCursorDocumentToQueryResult(final BsonDocument cursorDocument, final ServerAddress serverAddress) { - return cursorDocumentToQueryResult(cursorDocument, serverAddress, "nextBatch"); + static BatchCursor cursorDocumentToBatchCursor(final BsonDocument cursorDocument, final Decoder decoder, + final BsonValue comment, final ConnectionSource source, final Connection connection, final int batchSize) { + return new CommandBatchCursor<>(cursorDocument, batchSize, 0, decoder, comment, source, connection); } private SyncOperationHelper() { diff --git a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java index ba7acd78704..c97ac18a358 100644 --- a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java +++ b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java @@ -728,7 +728,7 @@ public static int getReferenceCountAfterTimeout(final ReferenceCounted reference int count = referenceCounted.getCount(); while (count > target) 
{ try { - if (System.currentTimeMillis() > startTime + 5000) { + if (System.currentTimeMillis() > startTime + TIMEOUT_DURATION.toMillis()) { return count; } sleep(10); diff --git a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy index ddbb9f29a0d..372fdd4b82d 100644 --- a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy @@ -202,13 +202,6 @@ class OperationFunctionalSpecification extends Specification { } } - def consumeAsyncResults(cursor) { - def batch = next(cursor, true) - while (batch != null) { - batch = next(cursor, true) - } - } - void testOperation(Map params) { params.async = params.async != null ? params.async : false params.result = params.result != null ? params.result : null diff --git a/driver-core/src/test/functional/com/mongodb/client/model/OperationTest.java b/driver-core/src/test/functional/com/mongodb/client/model/OperationTest.java index 9a215c7260c..5aaac1f70bb 100644 --- a/driver-core/src/test/functional/com/mongodb/client/model/OperationTest.java +++ b/driver-core/src/test/functional/com/mongodb/client/model/OperationTest.java @@ -18,14 +18,17 @@ import com.mongodb.ClusterFixture; import com.mongodb.MongoNamespace; +import com.mongodb.async.FutureResultCallback; import com.mongodb.client.test.CollectionHelper; import com.mongodb.internal.connection.ServerHelper; +import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonDocument; import org.bson.BsonDouble; import org.bson.BsonValue; import org.bson.Document; +import org.bson.FieldNameValidator; import org.bson.codecs.BsonDocumentCodec; import org.bson.codecs.DecoderContext; import org.bson.codecs.DocumentCodec; @@ -39,8 +42,11 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import java.util.stream.Collectors; +import static com.mongodb.ClusterFixture.TIMEOUT; import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget; import static com.mongodb.ClusterFixture.getAsyncBinding; import static com.mongodb.ClusterFixture.getBinding; @@ -50,14 +56,17 @@ import static com.mongodb.client.model.Aggregates.sort; import static java.util.stream.Collectors.toList; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assumptions.assumeTrue; public abstract class OperationTest { protected static final DocumentCodec DOCUMENT_DECODER = new DocumentCodec(); + protected static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); @BeforeEach public void beforeEach() { - ServerHelper.checkPool(getPrimary()); + assumeTrue(ServerHelper.checkPoolCount(getPrimary()) == 0, "Sync Pool count not zero"); + assumeTrue(ServerHelper.checkAsyncPoolCount(getPrimary()) == 0, "Async Pool count not zero"); CollectionHelper.drop(getNamespace()); } @@ -77,15 +86,15 @@ private CollectionHelper getCollectionHelper(final MongoNamespace return new CollectionHelper<>(new BsonDocumentCodec(), namespace); } - private String getDatabaseName() { + protected String getDatabaseName() { return ClusterFixture.getDefaultDatabaseName(); } - private String getCollectionName() { + protected String getCollectionName() { return "test"; } - MongoNamespace 
getNamespace() { + protected MongoNamespace getNamespace() { return new MongoNamespace(getDatabaseName(), getCollectionName()); } @@ -97,7 +106,6 @@ public static BsonDocument toBsonDocument(final BsonDocument bsonDocument) { return getDefaultCodecRegistry().get(BsonDocument.class).decode(bsonDocument.asBsonReader(), DecoderContext.builder().build()); } - protected List assertPipeline(final String stageAsString, final Bson stage) { List pipeline = Collections.singletonList(stage); return assertPipeline(stageAsString, pipeline); @@ -159,4 +167,25 @@ protected List aggregateWithWindowFields(@Nullable final Object partitio .map(doc -> doc.get("result")) .collect(toList()); } + + protected void ifNotNull(@Nullable final T maybeNull, final Consumer consumer) { + if (maybeNull != null) { + consumer.accept(maybeNull); + } + } + + protected void sleep(final long ms) { + try { + Thread.sleep(ms); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + } + + protected T block(final Consumer> consumer) { + FutureResultCallback cb = new FutureResultCallback<>(); + consumer.accept(cb); + return cb.get(TIMEOUT, TimeUnit.SECONDS); + } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java index ecbf4befb73..17dc3b6cfcf 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java @@ -35,6 +35,14 @@ public static void checkPool(final ServerAddress address) { checkPool(address, getAsyncCluster()); } + public static int checkPoolCount(final ServerAddress address) { + return getConnectionPool(address, getCluster()).getInUseCount(); + } + + public static int checkAsyncPoolCount(final ServerAddress address) { + return getConnectionPool(address, getAsyncCluster()).getInUseCount(); + } + public static void waitForLastRelease(final Cluster cluster) { for (ServerDescription cur : cluster.getCurrentDescription().getServerDescriptions()) { if (cur.isOk()) { @@ -44,13 +52,11 @@ public static void waitForLastRelease(final Cluster cluster) { } public static void waitForLastRelease(final ServerAddress address, final Cluster cluster) { - OperationContext operationContext = new OperationContext(); - ConcurrentPool pool = connectionPool( - cluster.selectServer(new ServerAddressSelector(address), operationContext).getServer()); + ConcurrentPool pool = getConnectionPool(address, cluster); long startTime = System.currentTimeMillis(); while (pool.getInUseCount() > 0) { try { - sleep(10); + sleep(100); if (System.currentTimeMillis() > startTime + ClusterFixture.TIMEOUT * 1000) { throw new MongoTimeoutException("Timed out waiting for pool in use count to drop to 0. 
Now at: " + pool.getInUseCount()); @@ -61,11 +67,15 @@ public static void waitForLastRelease(final ServerAddress address, final Cluster } } + private static ConcurrentPool getConnectionPool(final ServerAddress address, final Cluster cluster) { + return connectionPool(cluster.selectServer(new ServerAddressSelector(address), new OperationContext()).getServer()); + } + private static void checkPool(final ServerAddress address, final Cluster cluster) { - ConcurrentPool pool = connectionPool( - cluster.selectServer(new ServerAddressSelector(address), new OperationContext()).getServer()); - if (pool.getInUseCount() > 0) { - throw new IllegalStateException("Connection pool in use count is " + pool.getInUseCount()); + try { + waitForLastRelease(address, cluster); + } catch (MongoTimeoutException e) { + throw new IllegalStateException(e.getMessage()); } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy index fa688f0b57f..8477a91cc43 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.internal.operation - import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification @@ -51,7 +50,7 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import static QueryOperationHelper.getKeyPattern +import static TestOperationHelper.getKeyPattern import static com.mongodb.ClusterFixture.collectCursorResults import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java new file mode 100644 index 00000000000..3b8addf6596 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java @@ -0,0 +1,434 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + + +import com.mongodb.MongoCursorNotFoundException; +import com.mongodb.MongoQueryException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerCursor; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.OperationTest; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.connection.AsyncConnection; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.bson.Document; +import org.bson.codecs.BsonDocumentCodec; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget; +import static com.mongodb.ClusterFixture.getAsyncBinding; +import static com.mongodb.ClusterFixture.getConnection; +import static com.mongodb.ClusterFixture.getReferenceCountAfterTimeout; +import static com.mongodb.ClusterFixture.getWriteConnectionSource; +import static com.mongodb.ClusterFixture.isSharded; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.FIRST_BATCH; +import static com.mongodb.internal.operation.TestOperationHelper.makeAdditionalGetMoreCall; +import static java.util.Collections.singletonList; +import static java.util.stream.Stream.generate; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +public class AsyncCommandBatchCursorFunctionalTest extends OperationTest { + + private AsyncConnectionSource connectionSource; + private AsyncConnection connection; + private AsyncCommandBatchCursor cursor; + + @BeforeEach + void setup() throws Throwable { + List documents = IntStream.rangeClosed(1, 10) + .mapToObj(i -> new BsonDocument("i", new BsonInt32(i))) + .collect(Collectors.toList()); + getCollectionHelper().insertDocuments(documents); + + connectionSource = getWriteConnectionSource(getAsyncBinding()); + connection = getConnection(connectionSource); + } + + @AfterEach + void cleanup() { + ifNotNull(cursor, AsyncCommandBatchCursor::close); + ifNotNull(connectionSource, cs -> { + getReferenceCountAfterTimeout(cs, 1); + cs.release(); + }); + ifNotNull(connection, c -> { + getReferenceCountAfterTimeout(c, 1); + c.release(); + }); + } + + @Test + @DisplayName("server cursor should not be null") + void theServerCursorShouldNotBeNull() { + BsonDocument commandResult = 
executeFindCommand(2); + cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursor.getServerCursor()); + } + + @Test + @DisplayName("should get Exceptions for operations on the cursor after closing") + void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() { + BsonDocument commandResult = executeFindCommand(5); + cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + cursor.close(); + assertDoesNotThrow(() -> cursor.close()); + + checkReferenceCountReachesTarget(connectionSource, 1); + assertThrows(IllegalStateException.class, this::cursorNext); + assertNull(cursor.getServerCursor()); + } + + @Test + @DisplayName("should throw an Exception when going off the end") + void shouldThrowAnExceptionWhenGoingOffTheEnd() { + BsonDocument commandResult = executeFindCommand(2, 1); + cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + cursorNext(); + cursorNext(); + + assertThrows(IllegalStateException.class, this::cursorNext); + } + + + @Test + @DisplayName("test normal exhaustion") + void testNormalExhaustion() { + BsonDocument commandResult = executeFindCommand(); + cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(10, cursorFlatten().size()); + } + + @ParameterizedTest(name = "{index} => limit={0}, batchSize={1}, expectedTotal={2}") + @MethodSource + @DisplayName("test limit exhaustion") + void testLimitExhaustion(final int limit, final int batchSize, final int expectedTotal) { + BsonDocument commandResult = executeFindCommand(limit, batchSize); + cursor = new AsyncCommandBatchCursor<>(commandResult, batchSize, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + + assertEquals(expectedTotal, cursorFlatten().size()); + + checkReferenceCountReachesTarget(connectionSource, 1); + checkReferenceCountReachesTarget(connection, 1); + } + + @ParameterizedTest(name = "{index} => awaitData={0}, maxTimeMS={1}") + @MethodSource + @DisplayName("should block waiting for next batch on a tailable cursor") + void shouldBlockWaitingForNextBatchOnATailableCursor(final boolean awaitData, final int maxTimeMS) { + getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000)); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0))); + + BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", + new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, awaitData); + cursor = new AsyncCommandBatchCursor<>(commandResult, 2, maxTimeMS, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertFalse(cursor.isClosed()); + assertEquals(1, cursorNext().get(0).get("_id")); + + new Thread(() -> { + sleep(100); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 2).append("ts", new BsonTimestamp(6, 0))); + }).start(); + + assertFalse(cursor.isClosed()); + assertEquals(2, cursorNext().get(0).get("_id")); + } + + @Test + @DisplayName("test tailable interrupt") + void testTailableInterrupt() throws InterruptedException { + getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000)); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 
0))); + + BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", + new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true); + cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + CountDownLatch latch = new CountDownLatch(1); + AtomicInteger seen = new AtomicInteger(); + Thread thread = new Thread(() -> { + try { + cursorNext(); + seen.incrementAndGet(); + cursorNext(); + seen.incrementAndGet(); + } catch (Exception e) { + // pass + } finally { + latch.countDown(); + } + }); + + thread.start(); + sleep(1000); + thread.interrupt(); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 2)); + latch.await(); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertEquals(1, seen.intValue()); + } + + @Test + @DisplayName("should kill cursor if limit is reached on initial query") + void shouldKillCursorIfLimitIsReachedOnInitialQuery() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 10); + cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursorNext()); + assertTrue(cursor.isClosed()); + assertNull(cursor.getServerCursor()); + } + + @Test + @DisplayName("should kill cursor if limit is reached on getMore") + void shouldKillCursorIfLimitIsReachedOnGetMore() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 3); + cursor = new AsyncCommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + ServerCursor serverCursor = cursor.getServerCursor(); + assertNotNull(serverCursor); + assertNotNull(cursorNext()); + assertNotNull(cursorNext()); + + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1)); + assertThrows(MongoQueryException.class, () -> + makeAdditionalGetMoreCall(getNamespace(), serverCursor, connection) + ); + } + + @Test + @DisplayName("should release connection source if limit is reached on initial query") + void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 10); + cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connectionSource, 1)); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1)); + assertNull(cursor.getServerCursor()); + } + + @Test + @DisplayName("should release connection source if limit is reached on getMore") + void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 3); + cursor = new AsyncCommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursorNext()); + assertNotNull(cursorNext()); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connectionSource, 1)); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1)); + } + + @Test + @DisplayName("test limit with get more") + void testLimitWithGetMore() { + BsonDocument commandResult = executeFindCommand(5, 2); + cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursorNext()); + assertNotNull(cursorNext()); + assertNotNull(cursorNext()); + + assertDoesNotThrow(() -> 
checkReferenceCountReachesTarget(connectionSource, 1)); + assertTrue(cursor.isClosed()); + } + + @Test + @DisplayName("test limit with large documents") + void testLimitWithLargeDocuments() { + String bigString = generate(() -> "x") + .limit(16000) + .collect(Collectors.joining()); + + IntStream.range(11, 1000).forEach(i -> + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", i).append("s", bigString)) + ); + + BsonDocument commandResult = executeFindCommand(300, 0); + cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(300, cursorFlatten().size()); + } + + @Test + @DisplayName("should respect batch size") + void shouldRespectBatchSize() { + BsonDocument commandResult = executeFindCommand(2); + cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(2, cursor.getBatchSize()); + assertEquals(2, cursorNext().size()); + assertEquals(2, cursorNext().size()); + + cursor.setBatchSize(3); + assertEquals(3, cursor.getBatchSize()); + assertEquals(3, cursorNext().size()); + assertEquals(3, cursorNext().size()); + } + + @Test + @DisplayName("should throw cursor not found exception") + void shouldThrowCursorNotFoundException() throws Throwable { + BsonDocument commandResult = executeFindCommand(2); + cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + ServerCursor serverCursor = cursor.getServerCursor(); + assertNotNull(serverCursor); + AsyncConnection localConnection = getConnection(connectionSource); + this.block(cb -> localConnection.commandAsync(getNamespace().getDatabaseName(), + new BsonDocument("killCursors", new BsonString(getNamespace().getCollectionName())) + .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))), + NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource, cb)); + localConnection.release(); + + cursorNext(); + + MongoCursorNotFoundException exception = assertThrows(MongoCursorNotFoundException.class, this::cursorNext); + assertEquals(serverCursor.getId(), exception.getCursorId()); + assertEquals(serverCursor.getAddress(), exception.getServerAddress()); + } + + + private static Stream shouldBlockWaitingForNextBatchOnATailableCursor() { + return Stream.of( + arguments(true, 0), + arguments(true, 100), + arguments(false, 0)); + } + + private static Stream testLimitExhaustion() { + return Stream.of( + arguments(5, 2, 5), + arguments(5, -2, 2), + arguments(-5, -2, 5), + arguments(-5, 2, 5), + arguments(2, 5, 2), + arguments(2, -5, 2), + arguments(-2, 5, 2), + arguments(-2, -5, 2) + ); + } + + private BsonDocument executeFindCommand() { + return executeFindCommand(0); + } + + private BsonDocument executeFindCommand(final int batchSize) { + return executeFindCommand(new BsonDocument(), 0, batchSize, false, false); + } + + private BsonDocument executeFindCommand(final int limit, final int batchSize) { + return executeFindCommand(new BsonDocument(), limit, batchSize, false, false); + } + + private BsonDocument executeFindCommand(final BsonDocument filter, final int limit, final int batchSize, final boolean tailable, + final boolean awaitData) { + return executeFindCommand(filter, limit, batchSize, tailable, awaitData, ReadPreference.primary()); + } + + private BsonDocument executeFindCommand(final BsonDocument filter, final int limit, final int batchSize, + final 
boolean tailable, final boolean awaitData, final ReadPreference readPreference) { + BsonDocument findCommand = new BsonDocument("find", new BsonString(getCollectionName())) + .append("filter", filter) + .append("tailable", BsonBoolean.valueOf(tailable)) + .append("awaitData", BsonBoolean.valueOf(awaitData)); + + findCommand.append("limit", new BsonInt32(Math.abs(limit))); + if (limit >= 0) { + if (batchSize < 0 && Math.abs(batchSize) < limit) { + findCommand.append("limit", new BsonInt32(Math.abs(batchSize))); + } else { + findCommand.append("batchSize", new BsonInt32(Math.abs(batchSize))); + } + } + + BsonDocument results = block(cb -> connection.commandAsync(getDatabaseName(), findCommand, + NO_OP_FIELD_NAME_VALIDATOR, readPreference, + CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH), + connectionSource, cb)); + + assertNotNull(results); + return results; + } + + private List cursorNext() { + return block(cb -> cursor.next(cb)); + } + + private List cursorFlatten() { + List results = new ArrayList<>(); + while (!cursor.isClosed()) { + results.addAll(cursorNext()); + } + return results; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncQueryBatchCursorFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncQueryBatchCursorFunctionalSpecification.groovy deleted file mode 100644 index 3d6f0c8b7a7..00000000000 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncQueryBatchCursorFunctionalSpecification.groovy +++ /dev/null @@ -1,445 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb.internal.operation - -import com.mongodb.MongoCursorNotFoundException -import com.mongodb.MongoException -import com.mongodb.MongoTimeoutException -import com.mongodb.OperationFunctionalSpecification -import com.mongodb.ReadPreference -import com.mongodb.ServerCursor -import com.mongodb.WriteConcern -import com.mongodb.async.FutureResultCallback -import com.mongodb.client.model.CreateCollectionOptions -import com.mongodb.client.syncadapter.SyncConnection -import com.mongodb.internal.binding.AsyncConnectionSource -import com.mongodb.internal.binding.AsyncReadBinding -import com.mongodb.internal.connection.AsyncConnection -import com.mongodb.internal.connection.Connection -import com.mongodb.internal.connection.QueryResult -import com.mongodb.internal.validator.NoOpFieldNameValidator -import org.bson.BsonArray -import org.bson.BsonBoolean -import org.bson.BsonDocument -import org.bson.BsonInt32 -import org.bson.BsonInt64 -import org.bson.BsonNull -import org.bson.BsonString -import org.bson.BsonTimestamp -import org.bson.Document -import org.bson.codecs.BsonDocumentCodec -import org.bson.codecs.DocumentCodec -import spock.lang.IgnoreIf -import util.spock.annotations.Slow - -import java.util.concurrent.CountDownLatch - -import static com.mongodb.ClusterFixture.getAsyncBinding -import static com.mongodb.ClusterFixture.getAsyncCluster -import static com.mongodb.ClusterFixture.getBinding -import static com.mongodb.ClusterFixture.getConnection -import static com.mongodb.ClusterFixture.getReadConnectionSource -import static com.mongodb.ClusterFixture.getReferenceCountAfterTimeout -import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet -import static com.mongodb.ClusterFixture.isSharded -import static com.mongodb.ClusterFixture.serverVersionLessThan -import static com.mongodb.internal.connection.ServerHelper.waitForLastRelease -import static com.mongodb.internal.connection.ServerHelper.waitForRelease -import static com.mongodb.internal.operation.OperationHelper.cursorDocumentToQueryResult -import static com.mongodb.internal.operation.QueryOperationHelper.makeAdditionalGetMoreCall -import static java.util.Collections.singletonList -import static java.util.concurrent.TimeUnit.SECONDS -import static org.junit.Assert.assertEquals -import static org.junit.Assert.fail - -@IgnoreIf({ isSharded() && serverVersionLessThan(3, 2) }) -class AsyncQueryBatchCursorFunctionalSpecification extends OperationFunctionalSpecification { - AsyncConnectionSource connectionSource - AsyncQueryBatchCursor cursor - AsyncConnection connection - - def setup() { - def documents = [] - for (int i = 0; i < 10; i++) { - documents.add(new BsonDocument('_id', new BsonInt32(i))) - } - collectionHelper.insertDocuments(documents, - isDiscoverableReplicaSet() ? 
WriteConcern.MAJORITY : WriteConcern.ACKNOWLEDGED, - getBinding()) - setUpConnectionAndSource(getAsyncBinding()) - } - - private void setUpConnectionAndSource(final AsyncReadBinding binding) { - connectionSource = getReadConnectionSource(binding) - connection = getConnection(connectionSource) - } - - def cleanup() { - cursor?.close() - cleanupConnectionAndSource() - } - - private void cleanupConnectionAndSource() { - connection?.release() - connectionSource?.release() - waitForLastRelease(connectionSource.getServerDescription().getAddress(), getAsyncCluster()) - waitForRelease(connectionSource, 0) - } - - def 'should exhaust single batch'() { - given: - cursor = new AsyncQueryBatchCursor(executeQuery(), 0, 0, 0, new DocumentCodec(), null, connectionSource, connection) - - expect: - nextBatch().size() == 10 - } - - def 'should not retain connection and source after cursor is exhausted on first batch'() { - given: - cursor = new AsyncQueryBatchCursor(executeQuery(), 0, 0, 0, new DocumentCodec(), null, connectionSource, connection) - - when: - nextBatch() - - then: - connection.count == 1 - connectionSource.count == 1 - } - - def 'should not retain connection and source after cursor is exhausted on getMore'() { - given: - cursor = new AsyncQueryBatchCursor(executeQuery(1, 0), 1, 1, 0, new DocumentCodec(), null, connectionSource, connection) - - when: - nextBatch() - - then: - getReferenceCountAfterTimeout(connection, 1) == 1 - getReferenceCountAfterTimeout(connectionSource, 1) == 1 - } - - def 'should not retain connection and source after cursor is exhausted after first batch'() { - when: - cursor = new AsyncQueryBatchCursor(executeQuery(10, 10), 10, 10, 0, new DocumentCodec(), null, connectionSource, - connection) - - then: - getReferenceCountAfterTimeout(connection, 1) == 1 - getReferenceCountAfterTimeout(connectionSource, 1) == 1 - } - - def 'should exhaust single batch with limit'() { - given: - cursor = new AsyncQueryBatchCursor(executeQuery(1, 0), 1, 0, 0, new DocumentCodec(), null, connectionSource, connection) - - expect: - nextBatch().size() == 1 - cursor.isClosed() || !nextBatch() && cursor.isClosed() - } - - def 'should exhaust multiple batches with limit'() { - given: - cursor = new AsyncQueryBatchCursor(executeQuery(limit, batchSize), limit, batchSize, 0, new DocumentCodec(), null, - connectionSource, connection) - - when: - def next = nextBatch() - def total = 0 - while (next) { - total += next.size() - if (cursor.isClosed()) { - break - } - next = nextBatch() - } - - then: - total == expectedTotal - - where: - limit | batchSize | expectedTotal - 5 | 2 | 5 - 5 | -2 | 2 - -5 | 2 | 5 - -5 | -2 | 5 - 2 | 5 | 2 - 2 | -5 | 2 - -2 | 5 | 2 - -2 | -5 | 2 - } - - def 'should exhaust multiple batches'() { - given: - cursor = new AsyncQueryBatchCursor(executeQuery(3), 0, 2, 0, new DocumentCodec(), null, connectionSource, connection) - - expect: - nextBatch().size() == 3 - nextBatch().size() == 2 - nextBatch().size() == 2 - nextBatch().size() == 2 - nextBatch().size() == 1 - !nextBatch() - } - - def 'should respect batch size'() { - when: - cursor = new AsyncQueryBatchCursor(executeQuery(3), 0, 2, 0, new DocumentCodec(), null, connectionSource, connection) - - then: - cursor.batchSize == 2 - - when: - nextBatch() - cursor.batchSize = 4 - - then: - nextBatch().size() == 4 - } - - def 'should close when exhausted'() { - given: - cursor = new AsyncQueryBatchCursor(executeQuery(), 0, 2, 0, new DocumentCodec(), null, connectionSource, connection) - - when: - cursor.close() - 
waitForRelease(connectionSource, 1) - - then: - connectionSource.count == 1 - - when: - nextBatch() - - then: - thrown(MongoException) - } - - def 'should close when not exhausted'() { - given: - cursor = new AsyncQueryBatchCursor(executeQuery(3), 0, 2, 0, new DocumentCodec(), null, connectionSource, connection) - - when: - cursor.close() - - then: - waitForRelease(connectionSource, 1) - } - - @Slow - def 'should block waiting for first batch on a tailable cursor'() { - given: - collectionHelper.create(collectionName, new CreateCollectionOptions().capped(true).sizeInBytes(1000)) - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 1).append('ts', new BsonTimestamp(4, 0))) - def firstBatch = executeQuery(new BsonDocument('ts', new BsonDocument('$gte', new BsonTimestamp(5, 0))), 0, 2, true, false) - - when: - cursor = new AsyncQueryBatchCursor(firstBatch, 0, 2, 0, new DocumentCodec(), null, connectionSource, connection) - def latch = new CountDownLatch(1) - Thread.start { - sleep(500) - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 2).append('ts', new BsonTimestamp(5, 0))) - latch.countDown() - } - - def batch = nextBatch() - - then: - batch.size() == 1 - batch[0].get('_id') == 2 - - cleanup: - def cleanedUp = latch.await(10, SECONDS) // Workaround for codenarc bug - if (!cleanedUp) { - throw new MongoTimeoutException('Timed out waiting for documents to be inserted') - } - } - - @Slow - def 'should block waiting for next batch on a tailable cursor'() { - collectionHelper.create(collectionName, new CreateCollectionOptions().capped(true).sizeInBytes(1000)) - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 1).append('ts', new BsonTimestamp(5, 0))) - def firstBatch = executeQuery(new BsonDocument('ts', new BsonDocument('$gte', new BsonTimestamp(5, 0))), 0, 2, true, awaitData) - - - when: - cursor = new AsyncQueryBatchCursor(firstBatch, 0, 2, maxTimeMS, new DocumentCodec(), null, connectionSource, connection) - def batch = nextBatch() - - then: - batch.size() == 1 - batch[0].get('_id') == 1 - - when: - def latch = new CountDownLatch(1) - Thread.start { - sleep(500) - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 2).append('ts', new BsonTimestamp(6, 0))) - latch.countDown() - } - - batch = nextBatch() - - then: - batch.size() == 1 - batch[0].get('_id') == 2 - - cleanup: - def cleanedUp = latch.await(10, SECONDS) - if (!cleanedUp) { - throw new MongoTimeoutException('Timed out waiting for documents to be inserted') - } - - where: - awaitData | maxTimeMS - true | 0 - true | 100 - false | 0 - } - - @Slow - def 'should unblock if closed while waiting for more data from tailable cursor'() { - given: - collectionHelper.create(collectionName, new CreateCollectionOptions().capped(true).sizeInBytes(1000)) - collectionHelper.insertDocuments(new DocumentCodec(), Document.parse('{}')) - def firstBatch = executeQuery(new BsonDocument('_id', BsonNull.VALUE), 0, 1, true, true) - - when: - cursor = new AsyncQueryBatchCursor(firstBatch, 0, 1, 500, new DocumentCodec(), null, connectionSource, connection) - Thread.start { - Thread.sleep(SECONDS.toMillis(2)) - cursor.close() - } - def batch = nextBatch() - - then: - cursor.isClosed() - batch == null - //both connection and connectionSource have reference count 1 when we pass them to the AsyncQueryBatchCursor constructor - connection.getCount() == 1 - waitForRelease(connectionSource, 1) - } - - def 'should respect limit'() { - given: - cursor = new 
AsyncQueryBatchCursor(executeQuery(6, 3), 6, 2, 0, new DocumentCodec(), null, connectionSource, connection) - - expect: - nextBatch().size() == 3 - nextBatch().size() == 2 - nextBatch().size() == 1 - !nextBatch() - } - - @IgnoreIf({ isSharded() }) - def 'should kill cursor if limit is reached on initial query'() throws InterruptedException { - given: - def firstBatch = executeQuery(5) - - cursor = new AsyncQueryBatchCursor(firstBatch, 5, 0, 0, new DocumentCodec(), null, connectionSource, connection) - - when: - while (connection.getCount() > 1) { - Thread.sleep(5) - } - makeAdditionalGetMoreCall(getNamespace(), firstBatch.cursor, new SyncConnection(connection)) - - then: - thrown(MongoCursorNotFoundException) - } - - @SuppressWarnings('BracesForTryCatchFinally') - @IgnoreIf({ isSharded() }) - def 'should throw cursor not found exception'() { - given: - def firstBatch = executeQuery(2) - - when: - cursor = new AsyncQueryBatchCursor(firstBatch, 0, 2, 0, new DocumentCodec(), null, connectionSource, connection) - - def connection = new SyncConnection(getConnection(connectionSource)) - def serverCursor = cursor.cursor.get() - connection.command(getNamespace().databaseName, - new BsonDocument('killCursors', new BsonString(namespace.getCollectionName())) - .append('cursors', new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))), - new NoOpFieldNameValidator(), ReadPreference.primary(), - new BsonDocumentCodec() - , connectionSource) - connection.release() - nextBatch() - - then: - try { - nextBatch() - fail('expected MongoCursorNotFoundException but no exception was thrown') - } catch (MongoCursorNotFoundException e) { - assertEquals(serverCursor.getId(), e.getCursorId()) - assertEquals(serverCursor.getAddress(), e.getServerAddress()) - } catch (ignored) { - fail('Expected MongoCursorNotFoundException to be thrown but got ' + ignored.getClass()) - } - } - - List nextBatch() { - def futureResultCallback = new FutureResultCallback() - cursor.next(futureResultCallback) - futureResultCallback.get() - } - - private QueryResult executeQuery() { - executeQuery(0) - } - - private QueryResult executeQuery(int batchSize) { - executeQuery(0, batchSize) - } - - private QueryResult executeQuery(int limit, int batchSize) { - executeQuery(new BsonDocument(), limit, batchSize, false, false) - } - - private QueryResult executeQuery(BsonDocument filter, int limit, int batchSize, boolean tailable, boolean awaitData) { - def findCommand = new BsonDocument('find', new BsonString(getCollectionName())) - .append('filter', filter) - .append('tailable', BsonBoolean.valueOf(tailable)) - .append('awaitData', BsonBoolean.valueOf(awaitData)) - - findCommand.append('limit', new BsonInt32(Math.abs(limit))) - - if (limit >= 0) { - if (batchSize < 0 && Math.abs(batchSize) < limit) { - findCommand.append('limit', new BsonInt32(Math.abs(batchSize))) - } else { - findCommand.append('batchSize', new BsonInt32(Math.abs(batchSize))) - } - } - - def futureResultCallback = new FutureResultCallback() - connection.commandAsync(getDatabaseName(), findCommand, NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), - CommandResultDocumentCodec.create(new DocumentCodec(), 'firstBatch'), connectionSource, - futureResultCallback) - def response = futureResultCallback.get() - cursorDocumentToQueryResult(response.getDocument('cursor'), connection.getDescription().getServerAddress()) - } - - private void makeAdditionalGetMoreCall(ServerCursor serverCursor, Connection connection) { - connection.command(getNamespace().databaseName, - new 
BsonDocument('getMore', new BsonInt64(serverCursor.getId())) - .append('collection', new BsonString(namespace.getCollectionName())), - NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource.getSessionContext(), - connectionSource.getServerApi()) - } -} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java new file mode 100644 index 00000000000..30a74443633 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java @@ -0,0 +1,550 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + + +import com.mongodb.MongoCursorNotFoundException; +import com.mongodb.MongoQueryException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerCursor; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.OperationTest; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.connection.Connection; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.bson.Document; +import org.bson.codecs.BsonDocumentCodec; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.ArrayList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget; +import static com.mongodb.ClusterFixture.getBinding; +import static com.mongodb.ClusterFixture.getReferenceCountAfterTimeout; +import static com.mongodb.ClusterFixture.isSharded; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.FIRST_BATCH; +import static com.mongodb.internal.operation.TestOperationHelper.makeAdditionalGetMoreCall; +import static java.util.Collections.singletonList; +import static java.util.stream.Stream.generate; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static 
org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +public class CommandBatchCursorFunctionalTest extends OperationTest { + + private ConnectionSource connectionSource; + private Connection connection; + private CommandBatchCursor cursor; + + @BeforeEach + void setup() { + List documents = IntStream.rangeClosed(1, 10) + .mapToObj(i -> new BsonDocument("i", new BsonInt32(i))) + .collect(Collectors.toList()); + getCollectionHelper().insertDocuments(documents); + + connectionSource = getBinding().getWriteConnectionSource(); + connection = connectionSource.getConnection(); + } + + @AfterEach + void cleanup() { + ifNotNull(cursor, CommandBatchCursor::close); + ifNotNull(connectionSource, cs -> { + getReferenceCountAfterTimeout(cs, 1); + cs.release(); + }); + ifNotNull(connection, c -> { + getReferenceCountAfterTimeout(c, 1); + c.release(); + }); + } + + @Test + @DisplayName("server cursor should not be null") + void theServerCursorShouldNotBeNull() { + BsonDocument commandResult = executeFindCommand(2); + cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursor.getServerCursor()); + } + + @Test + @DisplayName("test server address should not be null") + void theServerAddressShouldNotBeNull() { + BsonDocument commandResult = executeFindCommand(); + cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursor.getServerAddress()); + } + + @Test + @DisplayName("should get Exceptions for operations on the cursor after closing") + void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() { + BsonDocument commandResult = executeFindCommand(); + cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + cursor.close(); + + assertDoesNotThrow(() -> cursor.close()); + assertThrows(IllegalStateException.class, () -> cursor.hasNext()); + assertThrows(IllegalStateException.class, () -> cursor.next()); + assertThrows(IllegalStateException.class, () -> cursor.getServerCursor()); + } + + @Test + @DisplayName("should throw an Exception when going off the end") + void shouldThrowAnExceptionWhenGoingOffTheEnd() { + BsonDocument commandResult = executeFindCommand(1); + cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + cursor.next(); + cursor.next(); + assertThrows(NoSuchElementException.class, () -> cursor.next()); + } + + @Test + @DisplayName("test cursor remove") + void testCursorRemove() { + BsonDocument commandResult = executeFindCommand(); + cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertThrows(UnsupportedOperationException.class, () -> cursor.remove()); + } + + @Test + @DisplayName("test normal exhaustion") + void testNormalExhaustion() { + BsonDocument commandResult = executeFindCommand(); + cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(10, cursorFlatten().size()); + } + + @ParameterizedTest(name = "{index} => limit={0}, batchSize={1}, expectedTotal={2}") + @MethodSource + @DisplayName("test limit exhaustion") + void testLimitExhaustion(final int limit, final int batchSize, final int 
expectedTotal) { + BsonDocument commandResult = executeFindCommand(limit, batchSize); + cursor = new CommandBatchCursor<>(commandResult, batchSize, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(expectedTotal, cursorFlatten().size()); + + checkReferenceCountReachesTarget(connectionSource, 1); + checkReferenceCountReachesTarget(connection, 1); + } + + @ParameterizedTest(name = "{index} => awaitData={0}, maxTimeMS={1}") + @MethodSource + @DisplayName("should block waiting for next batch on a tailable cursor") + void shouldBlockWaitingForNextBatchOnATailableCursor(final boolean awaitData, final int maxTimeMS) { + + getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000)); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0))); + + BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", + new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, awaitData); + cursor = new CommandBatchCursor<>(commandResult, 2, maxTimeMS, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertTrue(cursor.hasNext()); + assertEquals(1, cursor.next().get(0).get("_id")); + + new Thread(() -> { + sleep(100); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 2).append("ts", new BsonTimestamp(6, 0))); + }).start(); + + assertTrue(cursor.hasNext()); + assertEquals(2, cursor.next().get(0).get("_id")); + } + + @Test + @DisplayName("test tryNext with tailable") + void testTryNextWithTailable() { + getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000)); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0))); + + BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", + new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true); + cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + + List nextBatch = cursor.tryNext(); + assertNotNull(nextBatch); + assertEquals(1, nextBatch.get(0).get("_id")); + + nextBatch = cursor.tryNext(); + assertNull(nextBatch); + + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 2).append("ts", new BsonTimestamp(6, 0))); + + nextBatch = cursor.tryNext(); + assertNotNull(nextBatch); + assertEquals(2, nextBatch.get(0).get("_id")); + } + + @Test + @DisplayName("hasNext should throw when cursor is closed in another thread") + void hasNextShouldThrowWhenCursorIsClosedInAnotherThread() throws InterruptedException { + + getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000)); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0))); + + BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", + new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true); + cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertTrue(cursor.hasNext()); + assertEquals(1, cursor.next().get(0).get("_id")); + + CountDownLatch latch = new CountDownLatch(1); + new Thread(() -> { + sleep(100); + cursor.close(); + latch.countDown(); + }).start(); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertThrows(IllegalStateException.class, () -> cursor.hasNext()); + } + + @Test + 
@DisplayName("test maxTimeMS") + void testMaxTimeMS() { + assumeFalse(isSharded()); + getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000)); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0))); + + long maxTimeMS = 500; + BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", + new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true); + cursor = new CommandBatchCursor<>(commandResult, 2, maxTimeMS, DOCUMENT_DECODER, + null, connectionSource, connection); + + + List nextBatch = cursor.tryNext(); + assertNotNull(nextBatch); + + long startTime = System.currentTimeMillis(); + nextBatch = cursor.tryNext(); + long endTime = System.currentTimeMillis(); + + assertNull(nextBatch); + + // RACY TEST: no guarantee assertion will fire within the given timeframe + assertTrue(endTime - startTime < (maxTimeMS + 200)); + } + + @Test + @DisplayName("test tailable interrupt") + void testTailableInterrupt() throws InterruptedException { + getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000)); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0))); + + BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", + new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true); + cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + CountDownLatch latch = new CountDownLatch(1); + AtomicInteger seen = new AtomicInteger(); + Thread thread = new Thread(() -> { + try { + cursor.next(); + seen.incrementAndGet(); + cursor.next(); + seen.incrementAndGet(); + } catch (Exception e) { + // pass + } finally { + latch.countDown(); + } + }); + + thread.start(); + sleep(1000); + thread.interrupt(); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 2)); + latch.await(); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertEquals(1, seen.intValue()); + } + + @Test + @DisplayName("should kill cursor if limit is reached on initial query") + void shouldKillCursorIfLimitIsReachedOnInitialQuery() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 10); + cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursor.next()); + assertFalse(cursor.hasNext()); + assertNull(cursor.getServerCursor()); + } + + @Test + @DisplayName("should kill cursor if limit is reached on getMore") + void shouldKillCursorIfLimitIsReachedOnGetMore() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 3); + cursor = new CommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + ServerCursor serverCursor = cursor.getServerCursor(); + assertNotNull(serverCursor); + assertNotNull(cursor.next()); + assertNotNull(cursor.next()); + + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1)); + assertThrows(MongoQueryException.class, () -> + makeAdditionalGetMoreCall(getNamespace(), serverCursor, connection) + ); + } + + @Test + @DisplayName("should release connection source if limit is reached on initial query") + void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 10); + cursor = new 
CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNull(cursor.getServerCursor()); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connectionSource, 1)); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1)); + } + + @Test + @DisplayName("should release connection source if limit is reached on getMore") + void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 3); + cursor = new CommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursor.next()); + assertNotNull(cursor.next()); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connectionSource, 1)); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1)); + } + + @Test + @DisplayName("test limit with get more") + void testLimitWithGetMore() { + BsonDocument commandResult = executeFindCommand(5, 2); + cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursor.next()); + assertNotNull(cursor.next()); + assertNotNull(cursor.next()); + assertFalse(cursor.hasNext()); + } + + @Test + @DisplayName("test limit with large documents") + void testLimitWithLargeDocuments() { + String bigString = generate(() -> "x") + .limit(16000) + .collect(Collectors.joining()); + + IntStream.range(11, 1000).forEach(i -> + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", i).append("s", bigString)) + ); + + BsonDocument commandResult = executeFindCommand(300, 0); + cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(300, cursorFlatten().size()); + } + + @Test + @DisplayName("should respect batch size") + void shouldRespectBatchSize() { + BsonDocument commandResult = executeFindCommand(2); + cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(2, cursor.getBatchSize()); + assertEquals(2, cursor.next().size()); + assertEquals(2, cursor.next().size()); + + cursor.setBatchSize(3); + assertEquals(3, cursor.getBatchSize()); + assertEquals(3, cursor.next().size()); + assertEquals(3, cursor.next().size()); + } + + @Test + @DisplayName("should throw cursor not found exception") + void shouldThrowCursorNotFoundException() { + BsonDocument commandResult = executeFindCommand(2); + cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + ServerCursor serverCursor = cursor.getServerCursor(); + assertNotNull(serverCursor); + Connection localConnection = connectionSource.getConnection(); + localConnection.command(getNamespace().getDatabaseName(), + new BsonDocument("killCursors", new BsonString(getNamespace().getCollectionName())) + .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))), + NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource); + localConnection.release(); + + cursor.next(); + + MongoCursorNotFoundException exception = assertThrows(MongoCursorNotFoundException.class, () -> cursor.next()); + assertEquals(serverCursor.getId(), exception.getCursorId()); + assertEquals(serverCursor.getAddress(), exception.getServerAddress()); + } + + @Test + @DisplayName("should report available documents") + void 
shouldReportAvailableDocuments() { + BsonDocument commandResult = executeFindCommand(3); + cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(3, cursor.available()); + + cursor.next(); + assertEquals(0, cursor.available()); + + assertTrue(cursor.hasNext()); + assertEquals(2, cursor.available()); + + cursor.next(); + assertEquals(0, cursor.available()); + + assertTrue(cursor.hasNext()); + assertEquals(2, cursor.available()); + + cursor.close(); + assertEquals(0, cursor.available()); + } + + + private static Stream shouldBlockWaitingForNextBatchOnATailableCursor() { + return Stream.of( + arguments(true, 0), + arguments(true, 100), + arguments(false, 0)); + } + + private static Stream testLimitExhaustion() { + return Stream.of( + arguments(5, 2, 5), + arguments(5, -2, 2), + arguments(-5, -2, 5), + arguments(-5, 2, 5), + arguments(2, 5, 2), + arguments(2, -5, 2), + arguments(-2, 5, 2), + arguments(-2, -5, 2) + ); + } + + private BsonDocument executeFindCommand() { + return executeFindCommand(0); + } + + private BsonDocument executeFindCommand(final int batchSize) { + return executeFindCommand(new BsonDocument(), 0, batchSize, false, false); + } + + private BsonDocument executeFindCommand(final int limit, final int batchSize) { + return executeFindCommand(new BsonDocument(), limit, batchSize, false, false); + } + + private BsonDocument executeFindCommand(final BsonDocument filter, final int limit, final int batchSize, final boolean tailable, + final boolean awaitData) { + return executeFindCommand(filter, limit, batchSize, tailable, awaitData, ReadPreference.primary()); + } + + private BsonDocument executeFindCommand(final BsonDocument filter, final int limit, final int batchSize, + final boolean tailable, final boolean awaitData, final ReadPreference readPreference) { + BsonDocument findCommand = new BsonDocument("find", new BsonString(getCollectionName())) + .append("filter", filter) + .append("tailable", BsonBoolean.valueOf(tailable)) + .append("awaitData", BsonBoolean.valueOf(awaitData)); + + findCommand.append("limit", new BsonInt32(Math.abs(limit))); + if (limit >= 0) { + if (batchSize < 0 && Math.abs(batchSize) < limit) { + findCommand.append("limit", new BsonInt32(Math.abs(batchSize))); + } else { + findCommand.append("batchSize", new BsonInt32(Math.abs(batchSize))); + } + } + + BsonDocument results = connection.command(getDatabaseName(), findCommand, + NO_OP_FIELD_NAME_VALIDATOR, readPreference, + CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH), + connectionSource); + + assertNotNull(results); + return results; + } + + private List cursorFlatten() { + List results = new ArrayList<>(); + while (cursor.hasNext()) { + results.addAll(cursor.next()); + } + return results; + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy index 9e2d8937818..a5e965f4685 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy @@ -34,7 +34,6 @@ import com.mongodb.internal.binding.ConnectionSource import com.mongodb.internal.binding.ReadBinding import com.mongodb.internal.connection.AsyncConnection import com.mongodb.internal.connection.Connection -import 
com.mongodb.internal.connection.QueryResult import org.bson.BsonBoolean import org.bson.BsonDocument import org.bson.BsonDouble @@ -84,7 +83,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica cursor.next(callback) then: - callback.get() == null + callback.get() == [] cleanup: collectionHelper.dropDatabase(madeUpDatabase) @@ -380,7 +379,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica cursor.getBatchSize() == 2 cleanup: - consumeAsyncResults(cursor) + cursor?.close() } @IgnoreIf({ isSharded() }) @@ -479,7 +478,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica threeSixConnectionDescription : Stub(ConnectionDescription) { getMaxWireVersion() >> 3 }, - queryResult: Stub(QueryResult) { + queryResult: Stub(CommandCursorResult) { getNamespace() >> new MongoNamespace('db', 'coll') getResults() >> [] getCursor() >> new ServerCursor(1, Stub(ServerAddress)) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy index 4ca91524e9f..51280de9b45 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy @@ -34,7 +34,6 @@ import com.mongodb.internal.binding.ReadBinding import com.mongodb.internal.bulk.IndexRequest import com.mongodb.internal.connection.AsyncConnection import com.mongodb.internal.connection.Connection -import com.mongodb.internal.connection.QueryResult import org.bson.BsonDocument import org.bson.BsonDouble import org.bson.BsonInt32 @@ -76,7 +75,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification cursor.next(callback) then: - callback.get() == null + callback.get() == [] } @@ -210,7 +209,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification cursor.getBatchSize() == 2 cleanup: - consumeAsyncResults(cursor) + cursor?.close() } @IgnoreIf({ isSharded() }) @@ -310,7 +309,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification threeSixConnectionDescription : Stub(ConnectionDescription) { getMaxWireVersion() >> 3 }, - queryResult: Stub(QueryResult) { + queryResult: Stub(CommandCursorResult) { getNamespace() >> new MongoNamespace('db', 'coll') getResults() >> [] getCursor() >> new ServerCursor(1, Stub(ServerAddress)) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/QueryBatchCursorFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/QueryBatchCursorFunctionalSpecification.groovy deleted file mode 100644 index 9c77bb41b89..00000000000 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/QueryBatchCursorFunctionalSpecification.groovy +++ /dev/null @@ -1,642 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.internal.operation - -import com.mongodb.MongoCursorNotFoundException -import com.mongodb.MongoTimeoutException -import com.mongodb.OperationFunctionalSpecification -import com.mongodb.ReadPreference -import com.mongodb.ServerCursor -import com.mongodb.WriteConcern -import com.mongodb.client.model.CreateCollectionOptions -import com.mongodb.internal.binding.ConnectionSource -import com.mongodb.internal.connection.Connection -import com.mongodb.internal.connection.QueryResult -import com.mongodb.internal.validator.NoOpFieldNameValidator -import org.bson.BsonArray -import org.bson.BsonBoolean -import org.bson.BsonDocument -import org.bson.BsonInt32 -import org.bson.BsonInt64 -import org.bson.BsonString -import org.bson.BsonTimestamp -import org.bson.Document -import org.bson.codecs.BsonDocumentCodec -import org.bson.codecs.DocumentCodec -import spock.lang.IgnoreIf -import util.spock.annotations.Slow - -import java.util.concurrent.CountDownLatch -import java.util.concurrent.TimeUnit - -import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget -import static com.mongodb.ClusterFixture.getBinding -import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet -import static com.mongodb.ClusterFixture.isSharded -import static com.mongodb.ClusterFixture.serverVersionLessThan -import static com.mongodb.internal.operation.OperationHelper.cursorDocumentToQueryResult -import static com.mongodb.internal.operation.QueryOperationHelper.makeAdditionalGetMoreCall -import static java.util.Collections.singletonList -import static org.junit.Assert.assertEquals -import static org.junit.Assert.fail - -class QueryBatchCursorFunctionalSpecification extends OperationFunctionalSpecification { - ConnectionSource connectionSource - QueryBatchCursor cursor - - def setup() { - def documents = [] - for (int i = 0; i < 10; i++) { - documents.add(new BsonDocument('_id', new BsonInt32(i))) - } - collectionHelper.insertDocuments(documents, - isDiscoverableReplicaSet() ? 
WriteConcern.MAJORITY : WriteConcern.ACKNOWLEDGED, - getBinding()) - connectionSource = getBinding().getReadConnectionSource() - } - - def cleanup() { - cursor?.close() - connectionSource?.release() - } - - def 'server cursor should not be null'() { - given: - def firstBatch = executeQuery(2) - - when: - cursor = new QueryBatchCursor(firstBatch, 0, 0, new DocumentCodec(), null, connectionSource) - - then: - cursor.getServerCursor() != null - } - - def 'test server address'() { - given: - def firstBatch = executeQuery() - - when: - cursor = new QueryBatchCursor(firstBatch, 0, 0, new DocumentCodec(), null, connectionSource) - then: - cursor.getServerAddress() != null - } - - def 'should get Exceptions for operations on the cursor after closing'() { - given: - def firstBatch = executeQuery() - - cursor = new QueryBatchCursor(firstBatch, 0, 0, new DocumentCodec(), null, connectionSource) - - when: - cursor.close() - cursor.close() - - and: - cursor.next() - - then: - thrown(IllegalStateException) - - when: - cursor.hasNext() - - then: - thrown(IllegalStateException) - - when: - cursor.getServerCursor() - - then: - thrown(IllegalStateException) - } - - def 'should throw an Exception when going off the end'() { - given: - def firstBatch = executeQuery(1) - - cursor = new QueryBatchCursor(firstBatch, 2, 0, new DocumentCodec(), null, connectionSource) - when: - cursor.next() - cursor.next() - cursor.next() - - then: - thrown(NoSuchElementException) - } - - def 'test normal exhaustion'() { - given: - def firstBatch = executeQuery() - - when: - cursor = new QueryBatchCursor(firstBatch, 0, 0, new DocumentCodec(), null, connectionSource) - - then: - cursor.iterator().sum { it.size() } == 10 - } - - def 'test limit exhaustion'() { - given: - def firstBatch = executeQuery(limit, batchSize) - def connection = connectionSource.getConnection() - - when: - cursor = new QueryBatchCursor(firstBatch, limit, batchSize, 0, new DocumentCodec(), null, connectionSource, connection) - - then: - cursor.iterator().sum { it.size() } == expectedTotal - - cleanup: - connection?.release() - - where: - limit | batchSize | expectedTotal - 5 | 2 | 5 - 5 | -2 | 2 - -5 | 2 | 5 - -5 | -2 | 5 - 2 | 5 | 2 - 2 | -5 | 2 - -2 | 5 | 2 - -2 | -5 | 2 - } - - def 'test remove'() { - given: - def firstBatch = executeQuery() - - cursor = new QueryBatchCursor(firstBatch, 0, 0, new DocumentCodec(), null, connectionSource) - - when: - cursor.remove() - - then: - thrown(UnsupportedOperationException) - } - - @SuppressWarnings('EmptyCatchBlock') - @Slow - def 'should block waiting for next batch on a tailable cursor'() { - given: - def connection = connectionSource.getConnection() - collectionHelper.create(collectionName, new CreateCollectionOptions().capped(true).sizeInBytes(1000)) - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 1).append('ts', new BsonTimestamp(5, 0))) - def firstBatch = executeQuery(new BsonDocument('ts', new BsonDocument('$gte', new BsonTimestamp(5, 0))), 0, 2, true, awaitData) - - when: - cursor = new QueryBatchCursor(firstBatch, 0, 2, maxTimeMS, new DocumentCodec(), null, connectionSource, connection) - - then: - cursor.hasNext() - cursor.next().iterator().next().get('_id') == 1 - - when: - def latch = new CountDownLatch(1) - Thread.start { - try { - sleep(500) - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 2).append('ts', new BsonTimestamp(6, 0))) - } catch (ignored) { - //pass - } finally { - latch.countDown() - } - } - - // Note: this test is racy. 
- // The sleep above does not guarantee that we're testing what we're trying to, which is the loop in the hasNext() method. - then: - cursor.hasNext() - cursor.next().iterator().next().get('_id') == 2 - - cleanup: - def cleanedUp = latch.await(10, TimeUnit.SECONDS) - if (!cleanedUp) { - throw new MongoTimeoutException('Timed out waiting for documents to be inserted') - } - connection?.release() - - where: - awaitData | maxTimeMS - true | 0 - true | 100 - false | 0 - } - - @Slow - def 'test try next with tailable'() { - collectionHelper.create(collectionName, new CreateCollectionOptions().capped(true).sizeInBytes(1000)) - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 1).append('ts', new BsonTimestamp(5, 0))) - def firstBatch = executeQuery(new BsonDocument('ts', new BsonDocument('$gte', new BsonTimestamp(5, 0))), 0, 2, true, true) - - - when: - cursor = new QueryBatchCursor(firstBatch, 0, 2, new DocumentCodec(), null, connectionSource) - - then: - cursor.tryNext().iterator().next().get('_id') == 1 - - then: - !cursor.tryNext() - - when: - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 2).append('ts', new BsonTimestamp(6, 0))) - def nextBatch = cursor.tryNext() - - then: - nextBatch - nextBatch.iterator().next().get('_id') == 2 - } - - @Slow - def 'hasNext should throw when cursor is closed in another thread'() { - Connection conn = connectionSource.getConnection() - collectionHelper.create(collectionName, new CreateCollectionOptions().capped(true).sizeInBytes(1000)) - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 1).append('ts', new BsonTimestamp(5, 0))) - def firstBatch = executeQuery(new BsonDocument('ts', new BsonDocument('$gte', new BsonTimestamp(5, 0))), 0, 2, true, true) - cursor = new QueryBatchCursor(firstBatch, 0, 2, 0, new DocumentCodec(), null, connectionSource, conn) - cursor.next() - def closeCompleted = new CountDownLatch(1) - - // wait a second then close the cursor - new Thread({ - sleep(1000) - cursor.close() - closeCompleted.countDown() - } as Runnable).start() - - when: - cursor.hasNext() - - then: - thrown(Exception) - closeCompleted.await(5, TimeUnit.SECONDS) - conn.getCount() == 1 - - cleanup: - conn.release() - } - - @IgnoreIf({ serverVersionLessThan(3, 2) || isSharded() }) - @Slow - def 'test maxTimeMS'() { - collectionHelper.create(collectionName, new CreateCollectionOptions().capped(true).sizeInBytes(1000)) - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 1).append('ts', new BsonTimestamp(5, 0))) - def firstBatch = executeQuery(new BsonDocument('ts', new BsonDocument('$gte', new BsonTimestamp(5, 0))), 0, 2, true, true) - - def connection = connectionSource.getConnection() - def maxTimeMS = 10 - cursor = new QueryBatchCursor(firstBatch, 0, 2, maxTimeMS, new DocumentCodec(), null, connectionSource, connection) - cursor.tryNext() - long startTime = System.currentTimeMillis() - - when: - def result = cursor.tryNext() - - then: - result == null - // RACY TEST: no guarantee assertion will fire within the given timeframe - System.currentTimeMillis() - startTime < (maxTimeMS + 200) - - cleanup: - connection?.release() - } - - @SuppressWarnings('EmptyCatchBlock') - @Slow - def 'test tailable interrupt'() throws InterruptedException { - collectionHelper.create(collectionName, new CreateCollectionOptions().capped(true).sizeInBytes(1000)) - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 1)) - - def firstBatch = executeQuery(new 
BsonDocument(), 0, 2, true, true) - - when: - cursor = new QueryBatchCursor(firstBatch, 0, 2, new DocumentCodec(), null, connectionSource) - - CountDownLatch latch = new CountDownLatch(1) - def seen = 0 - def thread = Thread.start { - try { - cursor.next() - seen = 1 - cursor.next() - seen = 2 - } catch (ignored) { - // pass - } finally { - latch.countDown() - } - } - sleep(1000) - thread.interrupt() - collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', 2)) - latch.await() - - then: - seen == 1 - } - - @IgnoreIf({ isSharded() }) - def 'should kill cursor if limit is reached on initial query'() throws InterruptedException { - given: - def firstBatch = executeQuery(5) - def connection = connectionSource.getConnection() - - cursor = new QueryBatchCursor(firstBatch, 5, 0, 0, new DocumentCodec(), null, connectionSource, connection) - - when: - makeAdditionalGetMoreCall(getNamespace(), firstBatch.cursor, connection) - - then: - thrown(MongoCursorNotFoundException) - - cleanup: - connection?.release() - } - - @IgnoreIf({ isSharded() }) - @Slow - def 'should kill cursor if limit is reached on get more'() throws InterruptedException { - given: - def firstBatch = executeQuery(3) - - cursor = new QueryBatchCursor(firstBatch, 5, 3, new DocumentCodec(), null, connectionSource) - ServerCursor serverCursor = cursor.getServerCursor() - - cursor.next() - cursor.next() - - Thread.sleep(1000) //Note: waiting for some time for killCursor operation to be performed on a server. - when: - makeAdditionalGetMoreCall(getNamespace(), serverCursor, connectionSource) - - then: - thrown(MongoCursorNotFoundException) - } - - def 'should release connection source if limit is reached on initial query'() throws InterruptedException { - given: - def firstBatch = executeQuery(5) - def connection = connectionSource.getConnection() - - when: - cursor = new QueryBatchCursor(firstBatch, 5, 0, 0, new DocumentCodec(), null, connectionSource, connection) - - then: - checkReferenceCountReachesTarget(connectionSource, 1) - - cleanup: - connection?.release() - } - - def 'should release connection source if limit is reached on get more'() throws InterruptedException { - given: - def firstBatch = executeQuery(3) - - cursor = new QueryBatchCursor(firstBatch, 5, 3, new DocumentCodec(), null, connectionSource) - - when: - cursor.next() - cursor.next() - - then: - checkReferenceCountReachesTarget(connectionSource, 1) - } - - def 'test limit with get more'() { - given: - def firstBatch = executeQuery(2) - - when: - cursor = new QueryBatchCursor(firstBatch, 5, 2, new DocumentCodec(), null, connectionSource) - - then: - cursor.next() != null - cursor.next() != null - cursor.next() != null - !cursor.hasNext() - } - - @Slow - def 'test limit with large documents'() { - given: - char[] array = 'x' * 16000 - String bigString = new String(array) - - (11..1000).each { collectionHelper.insertDocuments(new DocumentCodec(), new Document('_id', it).append('s', bigString)) } - def firstBatch = executeQuery(300, 0) - - when: - cursor = new QueryBatchCursor(firstBatch, 300, 0, new DocumentCodec(), null, connectionSource) - - then: - cursor.iterator().sum { it.size() } == 300 - } - - def 'should respect batch size'() { - given: - def firstBatch = executeQuery(2) - - when: - cursor = new QueryBatchCursor(firstBatch, 0, 2, new DocumentCodec(), null, connectionSource) - - then: - cursor.batchSize == 2 - - when: - def nextBatch = cursor.next() - - then: - nextBatch.size() == 2 - - when: - nextBatch = cursor.next() - - then: - 
nextBatch.size() == 2 - - when: - cursor.batchSize = 3 - nextBatch = cursor.next() - - then: - cursor.batchSize == 3 - nextBatch.size() == 3 - - when: - nextBatch = cursor.next() - - then: - nextBatch.size() == 3 - } - - def 'test normal loop with get more'() { - given: - def firstBatch = executeQuery(2) - - when: - cursor = new QueryBatchCursor(firstBatch, 0, 2, new DocumentCodec(), null, connectionSource) - def results = cursor.iterator().collectMany { it*.get('_id') } - - then: - results == (0..9).toList() - !cursor.hasNext() - } - - def 'test next without has next with get more'() { - given: - def firstBatch = executeQuery(2) - - when: - cursor = new QueryBatchCursor(firstBatch, 0, 2, new DocumentCodec(), null, connectionSource) - - then: - (0..4).each { cursor.next() } - !cursor.hasNext() - !cursor.hasNext() - - when: - cursor.next() - - then: - thrown(NoSuchElementException) - } - - @SuppressWarnings('BracesForTryCatchFinally') - @IgnoreIf({ isSharded() }) - def 'should throw cursor not found exception'() { - given: - def firstBatch = executeQuery(2) - - when: - cursor = new QueryBatchCursor(firstBatch, 0, 2, new DocumentCodec(), null, connectionSource) - def serverCursor = cursor.getServerCursor() - def connection = connectionSource.getConnection() - connection.command(getNamespace().databaseName, - new BsonDocument('killCursors', new BsonString(namespace.getCollectionName())) - .append('cursors', new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))), - new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), connectionSource) - connection.release() - cursor.next() - - then: - try { - cursor.next() - } catch (MongoCursorNotFoundException e) { - assertEquals(serverCursor.getId(), e.getCursorId()) - assertEquals(serverCursor.getAddress(), e.getServerAddress()) - } catch (ignored) { - fail('Expected MongoCursorNotFoundException to be thrown but got ' + ignored.getClass()) - } - } - - def 'should report available documents'() { - given: - def firstBatch = executeQuery(3) - - when: - cursor = new QueryBatchCursor(firstBatch, 0, 2, new DocumentCodec(), null, connectionSource) - - then: - cursor.available() == 3 - - when: - cursor.hasNext() - - then: - cursor.available() == 3 - - when: - cursor.next() - - then: - cursor.available() == 0 - - when: - cursor.hasNext() - - then: - cursor.available() == 2 - - when: - cursor.next() - - then: - cursor.available() == 0 - - when: - cursor.hasNext() - - then: - cursor.available() == 2 - - when: - cursor.close() - - then: - cursor.available() == 0 - } - - private QueryResult executeQuery() { - executeQuery(0) - } - - private QueryResult executeQuery(int batchSize) { - executeQuery(new BsonDocument(), 0, batchSize, false, false, ReadPreference.primary()) - } - - private QueryResult executeQuery(int batchSize, ReadPreference readPreference) { - executeQuery(new BsonDocument(), 0, batchSize, false, false, readPreference) - } - - private QueryResult executeQuery(int limit, int batchSize) { - executeQuery(new BsonDocument(), limit, batchSize, false, false, ReadPreference.primary()) - } - - - private QueryResult executeQuery(BsonDocument filter, int limit, int batchSize, boolean tailable, boolean awaitData) { - executeQuery(filter, limit, batchSize, tailable, awaitData, ReadPreference.primary()) - } - - private QueryResult executeQuery(BsonDocument filter, int limit, int batchSize, boolean tailable, boolean awaitData, - ReadPreference readPreference) { - def connection = connectionSource.getConnection() - try { - def 
findCommand = new BsonDocument('find', new BsonString(getCollectionName())) - .append('filter', filter) - .append('tailable', BsonBoolean.valueOf(tailable)) - .append('awaitData', BsonBoolean.valueOf(awaitData)) - - findCommand.append('limit', new BsonInt32(Math.abs(limit))) - - if (limit >= 0) { - if (batchSize < 0 && Math.abs(batchSize) < limit) { - findCommand.append('limit', new BsonInt32(Math.abs(batchSize))) - } else { - findCommand.append('batchSize', new BsonInt32(Math.abs(batchSize))) - } - } - - def response = connection.command(getDatabaseName(), findCommand, - NO_OP_FIELD_NAME_VALIDATOR, readPreference, - CommandResultDocumentCodec.create(new DocumentCodec(), 'firstBatch'), connectionSource) - cursorDocumentToQueryResult(response.getDocument('cursor'), connection.getDescription().getServerAddress()) - } finally { - connection.release() - } - } -} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java b/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java new file mode 100644 index 00000000000..731f83c3c53 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java @@ -0,0 +1,97 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoCursorNotFoundException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoQueryException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerCursor; +import com.mongodb.async.FutureResultCallback; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.binding.StaticBindingContext; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.NoOpSessionContext; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.codecs.BsonDocumentCodec; + +import static com.mongodb.ClusterFixture.getServerApi; + +final class TestOperationHelper { + + static BsonDocument getKeyPattern(final BsonDocument explainPlan) { + BsonDocument winningPlan = explainPlan.getDocument("queryPlanner").getDocument("winningPlan"); + if (winningPlan.containsKey("queryPlan")) { + BsonDocument queryPlan = winningPlan.getDocument("queryPlan"); + if (queryPlan.containsKey("inputStage")) { + return queryPlan.getDocument("inputStage").getDocument("keyPattern"); + } + } else if (winningPlan.containsKey("inputStage")) { + return winningPlan.getDocument("inputStage").getDocument("keyPattern"); + } else if (winningPlan.containsKey("shards")) { + // recurse on shards[0] to get its query plan + return getKeyPattern(new BsonDocument("queryPlanner", winningPlan.getArray("shards").get(0).asDocument())); + } + throw new IllegalArgumentException("Unexpected explain plan: " + explainPlan.toJson()); + } + + static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final ServerCursor serverCursor, final Connection connection) { + makeAdditionalGetMoreCallHandleError(serverCursor, () -> + connection.command(namespace.getDatabaseName(), + new BsonDocument("getMore", new BsonInt64(serverCursor.getId())) + .append("collection", new BsonString(namespace.getCollectionName())), + new NoOpFieldNameValidator(), ReadPreference.primary(), + new BsonDocumentCodec(), + new StaticBindingContext(new NoOpSessionContext(), getServerApi(), IgnorableRequestContext.INSTANCE, + new OperationContext()))); + } + + static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final ServerCursor serverCursor, + final AsyncConnection connection) { + FutureResultCallback callback = new FutureResultCallback<>(); + makeAdditionalGetMoreCallHandleError(serverCursor, () -> { + connection.commandAsync(namespace.getDatabaseName(), + new BsonDocument("getMore", new BsonInt64(serverCursor.getId())) + .append("collection", new BsonString(namespace.getCollectionName())), + new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), + new StaticBindingContext(new NoOpSessionContext(), getServerApi(), IgnorableRequestContext.INSTANCE, + new OperationContext()), callback); + callback.get(); + }); + } + + static void makeAdditionalGetMoreCallHandleError(final ServerCursor serverCursor, final Runnable runnable) { + try { + runnable.run(); + } catch (MongoCommandException e) { + if (e.getErrorCode() == 43) { + throw new MongoCursorNotFoundException(serverCursor.getId(), e.getResponse(), serverCursor.getAddress()); + } else { + throw new MongoQueryException(e.getResponse(), e.getServerAddress()); + } + } + } + + private 
TestOperationHelper() { + } +} diff --git a/driver-core/src/test/resources/unified-test-format/command-monitoring/find.json b/driver-core/src/test/resources/unified-test-format/command-monitoring/find.json index bc9668499b3..68ce294240e 100644 --- a/driver-core/src/test/resources/unified-test-format/command-monitoring/find.json +++ b/driver-core/src/test/resources/unified-test-format/command-monitoring/find.json @@ -390,6 +390,7 @@ }, { "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", + "comment": "UPDATED final batchSize to 3 as batchSize is no longer calculated see: DRIVERS-1448 ", "runOnRequirements": [ { "minServerVersion": "3.1", @@ -483,7 +484,7 @@ ] }, "collection": "test", - "batchSize": 1 + "batchSize": 3 }, "commandName": "getMore", "databaseName": "command-monitoring-tests" diff --git a/driver-core/src/test/resources/unified-test-format/valid-pass/poc-command-monitoring.json b/driver-core/src/test/resources/unified-test-format/valid-pass/poc-command-monitoring.json index fe0a5ae9913..b62f08a35e7 100644 --- a/driver-core/src/test/resources/unified-test-format/valid-pass/poc-command-monitoring.json +++ b/driver-core/src/test/resources/unified-test-format/valid-pass/poc-command-monitoring.json @@ -58,6 +58,7 @@ "tests": [ { "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", + "comment": "UPDATED final batchSize to 3 as batchSize is no longer calculated see: DRIVERS-1448 ", "runOnRequirements": [ { "minServerVersion": "3.1", @@ -150,7 +151,7 @@ ] }, "collection": "test", - "batchSize": 1 + "batchSize": 3 }, "commandName": "getMore", "databaseName": "command-monitoring-tests" diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy index 1142ce5f91c..4381e54f2e5 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy @@ -27,11 +27,11 @@ import static java.util.concurrent.TimeUnit.SECONDS class AsyncChangeStreamBatchCursorSpecification extends Specification { - def 'should call the underlying AsyncQueryBatchCursor'() { + def 'should call the underlying AsyncCommandBatchCursor'() { given: def changeStreamOpertation = Stub(ChangeStreamOperation) def binding = Mock(AsyncReadBinding) - def wrapped = Mock(AsyncQueryBatchCursor) + def wrapped = Mock(AsyncCommandBatchCursor) def callback = Stub(SingleResultCallback) def cursor = new AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null, ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION) @@ -46,7 +46,7 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification { cursor.next(callback) then: - 1 * wrapped.next(_) >> { it[0].onResult(null, null) } + 1 * wrapped.next(_) >> { it[0].onResult([], null) } when: cursor.close() @@ -66,7 +66,7 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification { def 'should not close the cursor in next if the cursor was closed before next completed'() { def changeStreamOpertation = Stub(ChangeStreamOperation) def binding = Mock(AsyncReadBinding) - def wrapped = Mock(AsyncQueryBatchCursor) + def wrapped = Mock(AsyncCommandBatchCursor) def callback = Stub(SingleResultCallback) def cursor = new 
AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null, ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION) @@ -78,7 +78,7 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification { 1 * wrapped.next(_) >> { // Simulate the user calling close while wrapped.next() is in flight cursor.close() - it[0].onResult(null, null) + it[0].onResult([], null) } then: @@ -91,7 +91,7 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification { def 'should throw a MongoException when next/tryNext is called after the cursor is closed'() { def changeStreamOpertation = Stub(ChangeStreamOperation) def binding = Mock(AsyncReadBinding) - def wrapped = Mock(AsyncQueryBatchCursor) + def wrapped = Mock(AsyncCommandBatchCursor) def cursor = new AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null, ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION) diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncQueryBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy similarity index 61% rename from driver-core/src/test/unit/com/mongodb/internal/operation/AsyncQueryBatchCursorSpecification.groovy rename to driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy index 5efcbc736ab..7ba7db42a01 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncQueryBatchCursorSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy @@ -30,7 +30,6 @@ import com.mongodb.connection.ServerVersion import com.mongodb.internal.async.SingleResultCallback import com.mongodb.internal.binding.AsyncConnectionSource import com.mongodb.internal.connection.AsyncConnection -import com.mongodb.internal.connection.QueryResult import org.bson.BsonArray import org.bson.BsonDocument import org.bson.BsonInt32 @@ -42,17 +41,18 @@ import spock.lang.Specification import static OperationUnitSpecification.getMaxWireVersionForServerVersion import static com.mongodb.ReadPreference.primary +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CONCURRENT_OPERATION -class AsyncQueryBatchCursorSpecification extends Specification { +class AsyncCommandBatchCursorSpecification extends Specification { def 'should generate expected command with batchSize and maxTimeMS'() { given: + def initialConnection = referenceCountedAsyncConnection() def connection = referenceCountedAsyncConnection() def connectionSource = getAsyncConnectionSource(connection) - - def firstBatch = new QueryResult(NAMESPACE, [], 42, SERVER_ADDRESS) - def cursor = new AsyncQueryBatchCursor(firstBatch, 0, batchSize, maxTimeMS, CODEC, null, connectionSource, - connection) + def cursor = new AsyncCommandBatchCursor(createCommandResult([], 42), batchSize, maxTimeMS, CODEC, + null, connectionSource, initialConnection) def expectedCommand = new BsonDocument('getMore': new BsonInt64(CURSOR_ID)) .append('collection', new BsonString(NAMESPACE.getCollectionName())) if (batchSize != 0) { @@ -62,7 +62,7 @@ class AsyncQueryBatchCursorSpecification extends Specification { expectedCommand.append('maxTimeMS', new BsonInt64(expectedMaxTimeFieldValue)) } - def reply = documentResponse([], 0) + def reply = getMoreResponse([], 0) when: def batch = nextBatch(cursor) @@ -71,16 +71,17 @@ class 
AsyncQueryBatchCursorSpecification extends Specification { 1 * connection.commandAsync(NAMESPACE.getDatabaseName(), expectedCommand, *_) >> { it.last().onResult(reply, null) } - batch == null + batch.isEmpty() then: - !cursor.isClosed() + cursor.isClosed() then: cursor.close() then: connection.getCount() == 0 + initialConnection.getCount() == 0 connectionSource.getCount() == 0 where: @@ -92,35 +93,41 @@ class AsyncQueryBatchCursorSpecification extends Specification { def 'should close the cursor'() { given: + def initialConnection = referenceCountedAsyncConnection() def serverVersion = new ServerVersion([3, 6, 0]) def connection = referenceCountedAsyncConnection(serverVersion) def connectionSource = getAsyncConnectionSource(connection) + def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) when: - def cursor = new AsyncQueryBatchCursor(firstBatch, 0, 0, 0, CODEC, null, connectionSource, connection) cursor.close() then: - if (firstBatch.getCursor() != null) { - 1 * connection.commandAsync(NAMESPACE.databaseName, createKillCursorsDocument(firstBatch.cursor), _, primary(), *_) >> { + if (cursor.getServerCursor() != null) { + 1 * connection.commandAsync(NAMESPACE.databaseName, createKillCursorsDocument(cursor.getServerCursor()), _, primary(), *_) >> { it.last().onResult(null, null) } } then: connection.getCount() == 0 + initialConnection.getCount() == 0 connectionSource.getCount() == 0 where: - firstBatch << [queryResult(), queryResult(FIRST_BATCH, 0)] + firstBatch << [createCommandResult(), createCommandResult(FIRST_BATCH, 0)] } def 'should return the expected results from next'() { given: - def connectionSource = getAsyncConnectionSource(referenceCountedAsyncConnection()) + def initialConnection = referenceCountedAsyncConnection() + def connection = referenceCountedAsyncConnection() + def connectionSource = getAsyncConnectionSource(connection) when: - def cursor = new AsyncQueryBatchCursor(queryResult(FIRST_BATCH, 0), 0, 0, 0, CODEC, null, connectionSource, null) + def cursor = new AsyncCommandBatchCursor(createCommandResult(FIRST_BATCH, 0), 0, 0, CODEC, + null, connectionSource, initialConnection) then: nextBatch(cursor) == FIRST_BATCH @@ -135,140 +142,67 @@ class AsyncQueryBatchCursorSpecification extends Specification { nextBatch(cursor) then: - def exception = thrown(MongoException) - exception.getMessage() == 'next() called after the cursor was closed.' 
- } - - def 'should respect the limit'() { - given: - def serverVersion = new ServerVersion([3, 6, 0]) - def connectionA = referenceCountedAsyncConnection(serverVersion) - def connectionB = referenceCountedAsyncConnection(serverVersion) - def connectionSource = getAsyncConnectionSource(connectionA, connectionB) - - def firstBatch = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3)] - def secondBatch = [new Document('_id', 4), new Document('_id', 5)] - def thirdBatch = [new Document('_id', 6)] - - when: - def cursor = new AsyncQueryBatchCursor(queryResult(firstBatch), 6, 2, 0, CODEC, null, connectionSource, - connectionA) - def batch = nextBatch(cursor) - - then: - batch == firstBatch - - when: - batch = nextBatch(cursor) - - then: - 1 * connectionA.commandAsync(*_) >> { it.last().onResult(documentResponse(secondBatch), null) } - - then: - batch == secondBatch - connectionA.getCount() == 0 - connectionSource.getCount() == 1 - - when: - batch = nextBatch(cursor) - - then: - 1 * connectionB.commandAsync(*_) >> { - connectionB.getCount() == 1 - connectionSource.getCount() == 1 - it.last().onResult(documentResponse(thirdBatch, 0), null) - } - - then: - batch == thirdBatch - connectionB.getCount() == 0 - connectionSource.getCount() == 0 - - when: - batch = nextBatch(cursor) - - then: - batch == null - connectionSource.getCount() == 0 - } - - - def 'should close the cursor immediately if the limit has been reached'() { - given: - def serverVersion = new ServerVersion([3, 6, 0]) - def connection = referenceCountedAsyncConnection(serverVersion) - def connectionSource = getAsyncConnectionSource(connection) - def queryResult = queryResult() - - when: - def cursor = new AsyncQueryBatchCursor(queryResult, 1, 0, 0, CODEC, null, connectionSource, connection) - - then: - 1 * connection.commandAsync(NAMESPACE.databaseName, createKillCursorsDocument(queryResult.cursor), _, primary(), - *_) >> { - it.last().onResult(null, null) - } - - when: - cursor.close() - - then: - 0 * connection.commandAsync(_, _, _, _, _) - - then: - connection.getCount() == 0 + def exception = thrown(IllegalStateException) + exception.getMessage() == MESSAGE_IF_CLOSED_AS_CURSOR + initialConnection.getCount() == 0 connectionSource.getCount() == 0 } def 'should handle getMore when there are empty results but there is a cursor'() { given: - def connection = referenceCountedAsyncConnection(serverVersion) + def initialConnection = referenceCountedAsyncConnection() + def connection = referenceCountedAsyncConnection() def connectionSource = getAsyncConnectionSource(connection) when: - def cursor = new AsyncQueryBatchCursor(queryResult([], 42), 3, 0, 0, CODEC, null, connectionSource, connection) + def firstBatch = createCommandResult([], CURSOR_ID) + def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) def batch = nextBatch(cursor) then: 1 * connection.commandAsync(*_) >> { connection.getCount() == 1 connectionSource.getCount() == 1 - it.last().onResult(response, null) + it.last().onResult(response, null) } 1 * connection.commandAsync(*_) >> { connection.getCount() == 1 connectionSource.getCount() == 1 - it.last().onResult(response2, null) + it.last().onResult(response2, null) } then: batch == SECOND_BATCH - then: - connection.getCount() == 0 - connectionSource.getCount() == 0 - when: cursor.close() then: 0 * connection._ + initialConnection.getCount() == 0 connectionSource.getCount() == 0 where: - serverVersion | response | response2 - new ServerVersion([3, 6, 
0]) | documentResponse([]) | documentResponse(SECOND_BATCH, 0) + response | response2 + getMoreResponse([]) | getMoreResponse(SECOND_BATCH, 0) } - def 'should kill the cursor in the getMore if limit is reached'() { + def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore returns a response'() { given: - def connection = referenceCountedAsyncConnection(serverVersion) - def connectionSource = getAsyncConnectionSource(connection) - def initialResult = queryResult() + def serverVersion = new ServerVersion([3, 6, 0]) + def initialConnection = referenceCountedAsyncConnection(serverVersion, 'connectionOri', serverType) + def connectionA = referenceCountedAsyncConnection(serverVersion, 'connectionA', serverType) + def connectionB = referenceCountedAsyncConnection(serverVersion, 'connectionB', serverType) + def connectionSource = getAsyncConnectionSource(serverType, connectionA, connectionB) + + def firstConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionA + def secondConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionB when: - def cursor = new AsyncQueryBatchCursor(initialResult, 3, 0, 0, CODEC, null, connectionSource, connection) + def cursor = new AsyncCommandBatchCursor(createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC, + null, connectionSource, initialConnection) def batch = nextBatch(cursor) then: @@ -278,40 +212,51 @@ class AsyncQueryBatchCursorSpecification extends Specification { nextBatch(cursor) then: - 1 * connection.commandAsync(*_) >> { - it.last().onResult(response, null) - } - 1 * connection.commandAsync(NAMESPACE.databaseName, createKillCursorsDocument(initialResult.cursor), _, primary(), _, - connectionSource, *_) >> { - it.last().onResult(null, null) + // simulate the user calling `close` while `getMore` is in flight + // in LB mode the same connection is used to execute both `getMore` and `killCursors` + 1 * firstConnection.commandAsync(*_) >> { + // `getMore` command + cursor.close() + ((SingleResultCallback) it.last()).onResult(getMoreResponse([], responseCursorId), null) } then: - connection.getCount() == 0 - connectionSource.getCount() == 0 + if (responseCursorId > 0) { + 1 * secondConnection.commandAsync(*_) >> { + // `killCursors` command + ((SingleResultCallback) it.last()).onResult(null, null) + } + } - when: - cursor.close() + then: + noExceptionThrown() then: - 0 * connection.commandAsync(*_) + connectionA.getCount() == 0 + connectionB.getCount() == 0 + initialConnection.getCount() == 0 connectionSource.getCount() == 0 + cursor.isClosed() where: - serverVersion | response - new ServerVersion([3, 2, 0]) | documentResponse(SECOND_BATCH) + serverType | responseCursorId + ServerType.LOAD_BALANCER | 42 + ServerType.LOAD_BALANCER | 0 + ServerType.STANDALONE | 42 + ServerType.STANDALONE | 0 } - def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore returns a response'() { + def 'should throw concurrent operation assertion error'() { given: def serverVersion = new ServerVersion([3, 6, 0]) - def connectionA = referenceCountedAsyncConnection(serverVersion) - def connectionB = referenceCountedAsyncConnection(serverVersion) - def connectionSource = getAsyncConnectionSource(serverType, connectionA, connectionB) - def initialResult = queryResult() + def initialConnection = referenceCountedAsyncConnection(serverVersion, 'connectionOri') + def connectionA = referenceCountedAsyncConnection(serverVersion, 
'connectionA') + def connectionB = referenceCountedAsyncConnection(serverVersion, 'connectionB') + def connectionSource = getAsyncConnectionSource(connectionA, connectionB) when: - def cursor = new AsyncQueryBatchCursor(initialResult, 0, 0, 0, CODEC, null, connectionSource, connectionA) + def cursor = new AsyncCommandBatchCursor(createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC, + null, connectionSource, initialConnection) def batch = nextBatch(cursor) then: @@ -321,40 +266,33 @@ class AsyncQueryBatchCursorSpecification extends Specification { nextBatch(cursor) then: - numberOfInvocations * connectionA.commandAsync(*_) >> { - // Simulate the user calling close while the getMore is in flight - cursor.close() - ((SingleResultCallback) it.last()).onResult(response, null) - } >> { - // `killCursors` command - ((SingleResultCallback) it.last()).onResult(response2, null) + // simulate the user calling `cursor.next()` while `getMore` is in flight + 1 * connectionA.commandAsync(*_) >> { + // `getMore` command + nextBatch(cursor) } then: - noExceptionThrown() - - then: - connectionA.getCount() == 0 - cursor.isClosed() - - where: - response | response2 | getMoreResponseHasCursor | serverType | numberOfInvocations - documentResponse([]) | documentResponse([], 0) | true | ServerType.LOAD_BALANCER | 2 - documentResponse([], 0) | null | false | ServerType.LOAD_BALANCER | 1 - documentResponse([]) | documentResponse([], 0) | true | ServerType.STANDALONE | 1 - documentResponse([], 0) | null | false | ServerType.STANDALONE | 1 + def exception = thrown(AssertionError) + exception.getMessage() == MESSAGE_IF_CONCURRENT_OPERATION } def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore throws exception'() { given: - def serverVersion = new ServerVersion([3, 2, 0]) - def connectionA = referenceCountedAsyncConnection(serverVersion) - def connectionB = referenceCountedAsyncConnection(serverVersion) + def serverVersion = new ServerVersion([4, 4, 0]) + def initialConnection = referenceCountedAsyncConnection(serverVersion, 'connectionOri', serverType) + def connectionA = referenceCountedAsyncConnection(serverVersion, 'connectionA', serverType) + def connectionB = referenceCountedAsyncConnection(serverVersion, 'connectionB', serverType) def connectionSource = getAsyncConnectionSource(serverType, connectionA, connectionB) - def initialResult = queryResult() + + def firstConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionA + def secondConnection = serverType == ServerType.LOAD_BALANCER ? 
initialConnection : connectionB + + def firstBatch = createCommandResult() when: - def cursor = new AsyncQueryBatchCursor(initialResult, 0, 0, 0, CODEC, null, connectionSource, connectionA) + def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) def batch = nextBatch(cursor) then: @@ -364,13 +302,16 @@ class AsyncQueryBatchCursorSpecification extends Specification { nextBatch(cursor) then: - numberOfInvocations * connectionA.commandAsync(*_) >> { + 1 * firstConnection.commandAsync(*_) >> { // Simulate the user calling close while the getMore is throwing a MongoException cursor.close() - ((SingleResultCallback) it.last()).onResult(null, MONGO_EXCEPTION) - } >> { - // `killCursors` command - ((SingleResultCallback) it.last()).onResult(null, null) + ((SingleResultCallback) it.last()).onResult(null, MONGO_EXCEPTION) + } + + then: + 1 * secondConnection.commandAsync(*_) >> { + // `killCursors` command + ((SingleResultCallback) it.last()).onResult(null, null) } then: @@ -378,41 +319,40 @@ class AsyncQueryBatchCursorSpecification extends Specification { then: connectionA.getCount() == 0 + initialConnection.getCount() == 0 cursor.isClosed() where: - serverType | numberOfInvocations - ServerType.LOAD_BALANCER | 2 - ServerType.STANDALONE | 1 + serverType << [ServerType.LOAD_BALANCER, ServerType.STANDALONE] } def 'should handle errors when calling close'() { given: - def connection = referenceCountedAsyncConnection() + def initialConnection = referenceCountedAsyncConnection() def connectionSource = getAsyncConnectionSourceWithResult(ServerType.STANDALONE) { [null, MONGO_EXCEPTION] } - def cursor = new AsyncQueryBatchCursor(queryResult(), 0, 0, 0, CODEC, null, connectionSource, connection) + def firstBatch = createCommandResult() + def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) when: cursor.close() - nextBatch(cursor) - - then: - def exception = thrown(MongoException) - exception.getMessage() == 'next() called after the cursor was closed.' 
then: cursor.isClosed() + initialConnection.getCount() == 0 connectionSource.getCount() == 0 } def 'should handle errors when getting a connection for getMore'() { given: - def connection = referenceCountedAsyncConnection() + def initialConnection = referenceCountedAsyncConnection() def connectionSource = getAsyncConnectionSourceWithResult(ServerType.STANDALONE) { [null, MONGO_EXCEPTION] } when: - def cursor = new AsyncQueryBatchCursor(queryResult(), 0, 0, 0, CODEC, null, connectionSource, connection) + def firstBatch = createCommandResult() + def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) then: nextBatch(cursor) @@ -424,39 +364,36 @@ class AsyncQueryBatchCursorSpecification extends Specification { thrown(MongoException) then: + initialConnection.getCount() == 0 connectionSource.getCount() == 1 - - when: - cursor.close() - - then: - connectionSource.getCount() == 0 } def 'should handle errors when calling getMore'() { given: def serverVersion = new ServerVersion([3, 6, 0]) - def connectionA = referenceCountedAsyncConnection(serverVersion) - def connectionB = referenceCountedAsyncConnection(serverVersion) + def initialConnection = referenceCountedAsyncConnection() + def connectionA = referenceCountedAsyncConnection(serverVersion, 'connectionA') + def connectionB = referenceCountedAsyncConnection(serverVersion, 'connectionB') def connectionSource = getAsyncConnectionSource(connectionA, connectionB) when: - def cursor = new AsyncQueryBatchCursor(queryResult([]), 0, 0, 0, CODEC, null, connectionSource, - connectionA) + def firstBatch = createCommandResult() + def cursor = new AsyncCommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) then: connectionSource.getCount() == 1 when: nextBatch(cursor) + nextBatch(cursor) then: 1 * connectionA.commandAsync(*_) >> { connectionA.getCount() == 1 connectionSource.getCount() == 1 - it.last().onResult(null, exception) + it.last().onResult(null, exception) } - then: thrown(MongoException) @@ -468,13 +405,23 @@ class AsyncQueryBatchCursorSpecification extends Specification { cursor.close() then: - connectionSource.getCount() == 1 + 1 * connectionB.commandAsync(*_) >> { + connectionB.getCount() == 1 + connectionSource.getCount() == 1 + it.last().onResult(null, null) + } + + then: + connectionA.getCount() == 0 + connectionB.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 where: exception << [COMMAND_EXCEPTION, MONGO_EXCEPTION] } - List nextBatch(AsyncQueryBatchCursor cursor) { + List nextBatch(AsyncCommandBatchCursor cursor) { def futureResultCallback = new FutureResultCallback() cursor.next(futureResultCallback) futureResultCallback.get() @@ -490,27 +437,37 @@ class AsyncQueryBatchCursorSpecification extends Specification { private static final COMMAND_EXCEPTION = new MongoCommandException(BsonDocument.parse('{"ok": false, "errmsg": "error"}'), SERVER_ADDRESS) - private static BsonDocument documentResponse(results, cursorId = 42) { - new BsonDocument('ok', new BsonInt32(1)).append('cursor', - new BsonDocument('id', new BsonInt64(cursorId)).append('ns', - new BsonString(NAMESPACE.getFullName())) - .append('nextBatch', new BsonArrayWrapper(results))) + private static BsonDocument getMoreResponse(results, cursorId = CURSOR_ID) { + createCommandResult(results, cursorId, "nextBatch") } - private static QueryResult queryResult(results = FIRST_BATCH, cursorId = 42) { - new QueryResult(NAMESPACE, results, cursorId, SERVER_ADDRESS) 
+ private static BsonDocument createCommandResult(List results = FIRST_BATCH, Long cursorId = CURSOR_ID, + String fieldNameContainingBatch = "firstBatch") { + new BsonDocument("ok", new BsonInt32(1)) + .append("cursor", + new BsonDocument("ns", new BsonString(NAMESPACE.fullName)) + .append("id", new BsonInt64(cursorId)) + .append(fieldNameContainingBatch, new BsonArrayWrapper(results))) } - def referenceCountedAsyncConnection() { - referenceCountedAsyncConnection(new ServerVersion([3, 2, 0])) + private static BsonDocument createKillCursorsDocument(ServerCursor serverCursor) { + new BsonDocument('killCursors', new BsonString(NAMESPACE.getCollectionName())) + .append('cursors', new BsonArray(Collections.singletonList(new BsonInt64(serverCursor.id)))) } - def referenceCountedAsyncConnection(ServerVersion serverVersion) { + AsyncConnection referenceCountedAsyncConnection() { + referenceCountedAsyncConnection(new ServerVersion([3, 6, 0])) + } + + AsyncConnection referenceCountedAsyncConnection(ServerVersion serverVersion, String name = 'connection', + ServerType serverType = ServerType.STANDALONE) { def released = false def counter = 0 - def mock = Mock(AsyncConnection) { + def mock = Mock(AsyncConnection, name: name) { _ * getDescription() >> Stub(ConnectionDescription) { getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion.getVersionList()) + getServerAddress() >> SERVER_ADDRESS + getServerType() >> serverType } } mock.retain() >> { @@ -581,10 +538,4 @@ class AsyncQueryBatchCursorSpecification extends Specification { mock.getCount() >> { counter } mock } - - BsonDocument createKillCursorsDocument(ServerCursor serverCursor) { - new BsonDocument('killCursors', new BsonString(NAMESPACE.getCollectionName())) - .append('cursors', new BsonArray(Collections.singletonList(new BsonInt64(serverCursor.id)))) - } - } diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncSingleBatchCursorTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncSingleBatchCursorTest.java new file mode 100644 index 00000000000..561a4cf9f31 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncSingleBatchCursorTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoException; +import com.mongodb.async.FutureResultCallback; +import org.bson.Document; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT; +import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertIterableEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + + +class AsyncSingleBatchCursorTest { + + private static final List SINGLE_BATCH = asList(new Document("a", 1), new Document("b", 2)); + + @Test + @DisplayName("should work as expected") + void shouldWorkAsExpected() { + try (AsyncSingleBatchCursor cursor = new AsyncSingleBatchCursor<>(SINGLE_BATCH, 0)) { + + assertIterableEquals(SINGLE_BATCH, nextBatch(cursor)); + assertIterableEquals(emptyList(), nextBatch(cursor)); + assertTrue(cursor.isClosed()); + + assertThrows(MongoException.class, () -> nextBatch(cursor)); + } + } + + @Test + @DisplayName("should work as expected emptyCursor") + void shouldWorkAsExpectedEmptyCursor() { + try (AsyncSingleBatchCursor cursor = createEmptyAsyncSingleBatchCursor(0)) { + assertIterableEquals(emptyList(), nextBatch(cursor)); + assertTrue(cursor.isClosed()); + + assertThrows(MongoException.class, () -> nextBatch(cursor)); + } + } + + @Test + @DisplayName("should not support setting batch size") + void shouldNotSupportSettingBatchSize() { + try (AsyncSingleBatchCursor cursor = new AsyncSingleBatchCursor<>(SINGLE_BATCH, 0)) { + + assertEquals(0, cursor.getBatchSize()); + + cursor.setBatchSize(1); + assertEquals(0, cursor.getBatchSize()); + } + } + + List nextBatch(final AsyncSingleBatchCursor cursor) { + FutureResultCallback> futureResultCallback = new FutureResultCallback<>(); + cursor.next(futureResultCallback); + return futureResultCallback.get(TIMEOUT, TimeUnit.MILLISECONDS); + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncSingleBatchQueryCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncSingleBatchQueryCursorSpecification.groovy deleted file mode 100644 index 22f9035404f..00000000000 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncSingleBatchQueryCursorSpecification.groovy +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb.internal.operation - -import com.mongodb.MongoException -import com.mongodb.MongoNamespace -import com.mongodb.ServerAddress -import com.mongodb.async.FutureResultCallback -import com.mongodb.internal.connection.QueryResult -import org.bson.Document -import spock.lang.Specification - -class AsyncSingleBatchQueryCursorSpecification extends Specification { - - def 'should work as expected'() { - given: - def cursor = new AsyncSingleBatchQueryCursor(firstBatch) - - when: - def batch = nextBatch(cursor) - - then: - batch == firstBatch.getResults() - - then: - nextBatch(cursor) == null - - when: - nextBatch(cursor) - - then: - thrown(MongoException) - } - - def 'should not support setting batchsize'() { - given: - def cursor = new AsyncSingleBatchQueryCursor(firstBatch) - - when: - cursor.setBatchSize(1) - - then: - cursor.getBatchSize() == 0 - } - - - List nextBatch(AsyncSingleBatchQueryCursor cursor) { - def futureResultCallback = new FutureResultCallback() - cursor.next(futureResultCallback) - futureResultCallback.get() - } - - def firstBatch = new QueryResult(new MongoNamespace('db', 'coll'), [new Document('a', 1)], 0, new ServerAddress()) -} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorSpecification.groovy index e654c2ef5ca..09c6ff221b6 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorSpecification.groovy @@ -21,13 +21,15 @@ import org.bson.BsonDocument import org.bson.BsonInt32 import spock.lang.Specification +import static java.util.Collections.emptyList + class ChangeStreamBatchCursorSpecification extends Specification { - def 'should call the underlying QueryBatchCursor'() { + def 'should call the underlying CommandBatchCursor'() { given: def changeStreamOperation = Stub(ChangeStreamOperation) def binding = Stub(ReadBinding) - def wrapped = Mock(QueryBatchCursor) + def wrapped = Mock(CommandBatchCursor) def resumeToken = new BsonDocument('_id': new BsonInt32(1)) def cursor = new ChangeStreamBatchCursor(changeStreamOperation, wrapped, binding, resumeToken, ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION) @@ -49,7 +51,7 @@ class ChangeStreamBatchCursorSpecification extends Specification { cursor.next() then: - 1 * wrapped.next() + 1 * wrapped.next() >> emptyList() 1 * wrapped.getPostBatchResumeToken() when: diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy new file mode 100644 index 00000000000..38496f02552 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy @@ -0,0 +1,593 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoCommandException +import com.mongodb.MongoException +import com.mongodb.MongoNamespace +import com.mongodb.MongoSocketException +import com.mongodb.MongoSocketOpenException +import com.mongodb.ServerAddress +import com.mongodb.ServerCursor +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ServerConnectionState +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerType +import com.mongodb.connection.ServerVersion +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.connection.Connection +import org.bson.BsonArray +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.DocumentCodec +import spock.lang.Specification + +import static com.mongodb.ReadPreference.primary +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CONCURRENT_OPERATION +import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion + +class CommandBatchCursorSpecification extends Specification { + + def 'should generate expected command with batchSize and maxTimeMS'() { + given: + def initialConnection = referenceCountedConnection() + def connection = referenceCountedConnection() + def connectionSource = getConnectionSource(connection) + + def firstBatch = createCommandResult([]) + def cursor = new CommandBatchCursor(firstBatch, batchSize, maxTimeMS, CODEC, + null, connectionSource, initialConnection) + def expectedCommand = new BsonDocument('getMore': new BsonInt64(CURSOR_ID)) + .append('collection', new BsonString(NAMESPACE.getCollectionName())) + if (batchSize != 0) { + expectedCommand.append('batchSize', new BsonInt32(batchSize)) + } + if (expectedMaxTimeFieldValue != null) { + expectedCommand.append('maxTimeMS', new BsonInt64(expectedMaxTimeFieldValue)) + } + + def reply = getMoreResponse([], 0) + + when: + cursor.hasNext() + + then: + 1 * connection.command(NAMESPACE.getDatabaseName(), expectedCommand, *_) >> reply + + then: + !cursor.isClosed() + + when: + cursor.close() + + then: + connection.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + batchSize | maxTimeMS | expectedMaxTimeFieldValue + 0 | 0 | null + 2 | 0 | null + 0 | 100 | 100 + } + + def 'should close the cursor'() { + given: + def initialConnection = referenceCountedConnection() + def serverVersion = new ServerVersion([3, 6, 0]) + def connection = referenceCountedConnection(serverVersion) + def connectionSource = getConnectionSource(connection) + def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + when: + cursor.close() + + then: + if (cursor.getServerCursor() != null) { + 1 * connection.command(NAMESPACE.databaseName, createKillCursorsDocument(cursor.getServerCursor()), _, primary(), *_) + } + + then: + connection.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + firstBatch << [createCommandResult(FIRST_BATCH, 42), createCommandResult(FIRST_BATCH, 0)] + } + + def 'should return the expected results from next'() { + given: + def initialConnection = 
referenceCountedConnection() + def connection = referenceCountedConnection() + def connectionSource = getConnectionSource(connection) + + when: + def firstBatch = createCommandResult(FIRST_BATCH, 0) + def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + then: + cursor.next() == FIRST_BATCH + + then: + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + then: + // Unlike the AsyncCommandBatchCursor - the cursor isn't automatically closed + !cursor.isClosed() + } + + def 'should handle getMore when there are empty results but there is a cursor'() { + given: + def initialConnection = referenceCountedConnection() + def connectionA = referenceCountedConnection(serverVersion, 'connectionA') + def connectionB = referenceCountedConnection(serverVersion, 'connectionB') + def connectionSource = getConnectionSource(connectionA, connectionB) + + when: + def firstBatch = createCommandResult([], CURSOR_ID) + def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + def batch = cursor.next() + + then: + 1 * connectionA.command(*_) >> { + connectionA.getCount() == 1 + connectionSource.getCount() == 1 + response + } + + 1 * connectionB.command(*_) >> { + connectionB.getCount() == 1 + connectionSource.getCount() == 1 + response2 + } + + then: + batch == SECOND_BATCH + + then: + connectionA.getCount() == 0 + connectionB.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + when: + cursor.close() + + then: + 0 * connectionA._ + 0 * connectionB._ + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + serverVersion | response | response2 + new ServerVersion([3, 6, 0]) | getMoreResponse([]) | getMoreResponse(SECOND_BATCH, 0) + } + + def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore returns a response'() { + given: + def serverVersion = new ServerVersion([3, 6, 0]) + def initialConnection = referenceCountedConnection(serverVersion, 'connectionOri', serverType) + def connectionA = referenceCountedConnection(serverVersion, 'connectionA', serverType) + def connectionB = referenceCountedConnection(serverVersion, 'connectionB', serverType) + def connectionSource = getConnectionSource(serverType, connectionA, connectionB) + + def firstConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionA + def secondConnection = serverType == ServerType.LOAD_BALANCER ? 
initialConnection : connectionB + + def firstBatch = createCommandResult() + + when: + CommandBatchCursor cursor = new CommandBatchCursor<>(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + List batch = cursor.next() + + then: + batch == FIRST_BATCH + + when: + cursor.next() + + then: + // simulate the user calling `close` while `getMore` is in flight + // in LB mode the same connection is used to execute both `getMore` and `killCursors` + 1 * firstConnection.command(*_) >> { + // `getMore` command + cursor.close() + getMoreResponse([], responseCursorId) + } + + then: + if (responseCursorId > 0) { + 1 * secondConnection.command(*_) >> null + } + + then: + IllegalStateException e = thrown() + e.getMessage() == MESSAGE_IF_CLOSED_AS_CURSOR + + then: + connectionA.getCount() == 0 + connectionB.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + cursor.isClosed() + + where: + serverType | responseCursorId + ServerType.LOAD_BALANCER | 42 + ServerType.LOAD_BALANCER | 0 + ServerType.STANDALONE | 42 + ServerType.STANDALONE | 0 + } + + def 'should throw concurrent operation illegal state exception'() { + given: + def serverVersion = new ServerVersion([3, 6, 0]) + def initialConnection = referenceCountedConnection(serverVersion, 'connectionOri') + def connectionA = referenceCountedConnection(serverVersion, 'connectionA') + def connectionB = referenceCountedConnection(serverVersion, 'connectionB') + def connectionSource = getConnectionSource(connectionA, connectionB) + + when: + def cursor = new CommandBatchCursor(createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC, + null, connectionSource, initialConnection) + def batch = cursor.next() + + then: + batch == FIRST_BATCH + + when: + cursor.next() + + then: + // simulate the user calling `cursor.next()` while `getMore` is in flight + 1 * connectionA.command(*_) >> { + // `getMore` command + cursor.next() + } + + then: + def exception = thrown(IllegalStateException) + exception.getMessage() == MESSAGE_IF_CONCURRENT_OPERATION + } + + def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore throws exception'() { + given: + def serverVersion = new ServerVersion([4, 4, 0]) + def initialConnection = referenceCountedConnection(serverVersion, 'connectionOri', serverType) + def connectionA = referenceCountedConnection(serverVersion, 'connectionA', serverType) + def connectionB = referenceCountedConnection(serverVersion, 'connectionB', serverType) + def connectionSource = getConnectionSource(serverType, connectionA, connectionB) + + def firstConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionA + def secondConnection = serverType == ServerType.LOAD_BALANCER ? 
initialConnection : connectionB + + def firstBatch = createCommandResult() + + when: + def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + def batch = cursor.next() + + then: + batch == FIRST_BATCH + + when: + cursor.next() + + then: + 1 * firstConnection.command(*_) >> { + // Simulate the user calling close while the getMore is throwing a MongoException + cursor.close() + throw MONGO_EXCEPTION + } + + then: + 1 * secondConnection.command(*_) >> { + // `killCursors` command + null + } + + then: + thrown(MongoException) + + then: + connectionA.getCount() == 0 + cursor.isClosed() + + where: + serverType << [ServerType.LOAD_BALANCER, ServerType.STANDALONE] + } + + def 'should handle errors when calling close'() { + given: + def initialConnection = referenceCountedConnection() + def connectionSource = getConnectionSourceWithResult(ServerType.STANDALONE) { throw MONGO_EXCEPTION } + def firstBatch = createCommandResult() + def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + when: + cursor.close() + + then: + cursor.isClosed() + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + } + + + def 'should handle errors when getting a connection for getMore'() { + given: + def initialConnection = referenceCountedConnection() + def connection = referenceCountedConnection() + def connectionSource = getConnectionSourceWithResult(ServerType.STANDALONE) { throw MONGO_EXCEPTION } + + when: + def firstBatch = createCommandResult() + def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + then: + cursor.next() + + when: + cursor.hasNext() + + then: + thrown(MongoException) + + then: + connection.getCount() == 0 + connectionSource.getCount() == 1 + } + + def 'should handle errors when calling getMore'() { + given: + def initialConnection = referenceCountedConnection() + def serverVersion = new ServerVersion([3, 6, 0]) + def connectionA = referenceCountedConnection(serverVersion, 'connectionA') + def connectionB = referenceCountedConnection(serverVersion, 'connectionB') + def connectionSource = getConnectionSource(connectionA, connectionB) + + when: + def firstBatch = createCommandResult() + def cursor = new CommandBatchCursor(firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + then: + connectionSource.getCount() == 1 + + when: + cursor.next() + cursor.next() + + then: + 1 * connectionA.command(*_) >> { + connectionA.getCount() == 1 + connectionSource.getCount() == 1 + throw exception + } + + then: + thrown(MongoException) + + then: + connectionA.getCount() == 0 + connectionSource.getCount() == 1 + + when: + cursor.close() + + then: + 1 * connectionB.command(*_) >> { + connectionB.getCount() == 1 + connectionSource.getCount() == 1 + null + } + + then: + connectionA.getCount() == 0 + connectionB.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + exception << [COMMAND_EXCEPTION, MONGO_EXCEPTION] + } + + def 'should handle exceptions when closing'() { + given: + def initialConnection = referenceCountedConnection() + def connection = Mock(Connection) { + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> 4 + } + _ * command(*_) >> { throw new MongoSocketException('No MongoD', SERVER_ADDRESS) } + } + def connectionSource = Stub(ConnectionSource) { + getServerApi() >> null + getConnection() >> { connection } + } + 
connectionSource.retain() >> connectionSource + + def initialResults = createCommandResult([]) + def cursor = new CommandBatchCursor(initialResults, 2, 100, new DocumentCodec(), + null, connectionSource, initialConnection) + + when: + cursor.close() + + then: + notThrown(MongoSocketException) + + when: + cursor.close() + + then: + notThrown(Exception) + } + + def 'should handle exceptions when killing cursor and a connection can not be obtained'() { + given: + def initialConnection = referenceCountedConnection() + def connectionSource = Stub(ConnectionSource) { + getConnection() >> { throw new MongoSocketOpenException("can't open socket", SERVER_ADDRESS, new IOException()) } + getServerApi() >> null + } + connectionSource.retain() >> connectionSource + + def initialResults = createCommandResult([]) + def cursor = new CommandBatchCursor(initialResults, 2, 100, new DocumentCodec(), + null, connectionSource, initialConnection) + + when: + cursor.close() + + then: + notThrown(MongoSocketException) + + when: + cursor.close() + + then: + notThrown(Exception) + } + + private static final MongoNamespace NAMESPACE = new MongoNamespace('db', 'coll') + private static final ServerAddress SERVER_ADDRESS = new ServerAddress() + private static final CURSOR_ID = 42 + private static final FIRST_BATCH = [new Document('_id', 1), new Document('_id', 2)] + private static final SECOND_BATCH = [new Document('_id', 3), new Document('_id', 4)] + private static final CODEC = new DocumentCodec() + private static final MONGO_EXCEPTION = new MongoException('error') + private static final COMMAND_EXCEPTION = new MongoCommandException(BsonDocument.parse('{"ok": false, "errmsg": "error"}'), + SERVER_ADDRESS) + + + private static BsonDocument getMoreResponse(results, cursorId = CURSOR_ID) { + createCommandResult(results, cursorId, "nextBatch") + } + + private static BsonDocument createCommandResult(List results = FIRST_BATCH, Long cursorId = CURSOR_ID, + String fieldNameContainingBatch = "firstBatch") { + new BsonDocument("ok", new BsonInt32(1)) + .append("cursor", + new BsonDocument("ns", new BsonString(NAMESPACE.fullName)) + .append("id", new BsonInt64(cursorId)) + .append(fieldNameContainingBatch, new BsonArrayWrapper(results))) + } + + private static BsonDocument createKillCursorsDocument(ServerCursor serverCursor) { + new BsonDocument('killCursors', new BsonString(NAMESPACE.getCollectionName())) + .append('cursors', new BsonArray(Collections.singletonList(new BsonInt64(serverCursor.id)))) + } + + Connection referenceCountedConnection() { + referenceCountedConnection(new ServerVersion([3, 6, 0])) + } + + Connection referenceCountedConnection(ServerVersion serverVersion, String name = 'connection', + ServerType serverType = ServerType.STANDALONE) { + def released = false + def counter = 0 + def mock = Mock(Connection, name: name) { + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion.getVersionList()) + getServerType() >> serverType + } + } + mock.retain() >> { + if (released) { + throw new IllegalStateException('Tried to retain Connection when already released') + } else { + counter += 1 + } + mock + } + mock.release() >> { + counter -= 1 + if (counter == 0) { + released = true + } else if (counter < 0) { + throw new IllegalStateException('Tried to release Connection below 0') + } + counter + } + mock.getCount() >> { counter } + mock + } + + ConnectionSource getConnectionSource(Connection... 
connections) { + getConnectionSource(ServerType.STANDALONE, connections) + } + + ConnectionSource getConnectionSource(ServerType serverType, Connection... connections) { + def index = -1 + getConnectionSourceWithResult(serverType) { index += 1; connections.toList().get(index).retain() } + } + + def getConnectionSourceWithResult(ServerType serverType, Closure connectionCallbackResults) { + def released = false + int counter = 0 + def mock = Mock(ConnectionSource) + mock.getServerDescription() >> { + ServerDescription.builder() + .address(new ServerAddress()) + .type(serverType) + .state(ServerConnectionState.CONNECTED) + .build() + } + mock.getConnection() >> { + if (counter == 0) { + throw new IllegalStateException('Tried to use released ConnectionSource') + } + connectionCallbackResults() + } + mock.retain() >> { + if (released) { + throw new IllegalStateException('Tried to retain ConnectionSource when already released') + } else { + counter += 1 + } + mock + } + mock.release() >> { + counter -= 1 + if (counter == 0) { + released = true + } else if (counter < 0) { + throw new IllegalStateException('Tried to release ConnectionSource below 0') + } + counter + } + mock.getCount() >> { counter } + mock + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CursorHelperTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/CursorHelperTest.java deleted file mode 100644 index cdcb33cead2..00000000000 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/CursorHelperTest.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.internal.operation; - -import org.junit.Test; - -import java.net.UnknownHostException; - -import static com.mongodb.internal.operation.CursorHelper.getNumberToReturn; -import static org.junit.Assert.assertEquals; - -public class CursorHelperTest { - - @Test - public void testNumberToReturn() throws UnknownHostException { - assertEquals(0, getNumberToReturn(0, 0, 5)); - assertEquals(40, getNumberToReturn(0, 40, 5)); - assertEquals(-40, getNumberToReturn(0, -40, 5)); - assertEquals(15, getNumberToReturn(20, 0, 5)); - assertEquals(10, getNumberToReturn(20, 10, 5)); - assertEquals(15, getNumberToReturn(20, -40, 5)); - } -} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/QueryBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/QueryBatchCursorSpecification.groovy deleted file mode 100644 index db6831138e1..00000000000 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/QueryBatchCursorSpecification.groovy +++ /dev/null @@ -1,357 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.internal.operation - -import com.mongodb.MongoException -import com.mongodb.MongoNamespace -import com.mongodb.MongoSocketException -import com.mongodb.MongoSocketOpenException -import com.mongodb.ServerAddress -import com.mongodb.connection.ConnectionDescription -import com.mongodb.connection.ServerConnectionState -import com.mongodb.connection.ServerDescription -import com.mongodb.connection.ServerType -import com.mongodb.connection.ServerVersion -import com.mongodb.internal.binding.ConnectionSource -import com.mongodb.internal.connection.Connection -import com.mongodb.internal.connection.QueryResult -import org.bson.BsonDocument -import org.bson.BsonInt32 -import org.bson.BsonInt64 -import org.bson.BsonString -import org.bson.Document -import org.bson.codecs.BsonDocumentCodec -import org.bson.codecs.DocumentCodec -import spock.lang.Specification - -import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion - -class QueryBatchCursorSpecification extends Specification { - private static final MongoNamespace NAMESPACE = new MongoNamespace('db', 'coll') - private static final ServerAddress SERVER_ADDRESS = new ServerAddress() - - def 'should generate expected command with batchSize and maxTimeMS'() { - given: - def connection = Mock(Connection) { - _ * getDescription() >> Stub(ConnectionDescription) { - getMaxWireVersion() >> 4 - } - } - def connectionSource = Stub(ConnectionSource) { - getConnection() >> { connection } - getServerApi() >> null - } - connectionSource.retain() >> connectionSource - - def cursorId = 42 - - def firstBatch = new QueryResult(NAMESPACE, [], cursorId, SERVER_ADDRESS) - def cursor = new QueryBatchCursor(firstBatch, 0, batchSize, maxTimeMS, new BsonDocumentCodec(), null, connectionSource, - connection) - def expectedCommand = new BsonDocument('getMore': new BsonInt64(cursorId)) - .append('collection', new BsonString(NAMESPACE.getCollectionName())) - if (batchSize != 0) { - expectedCommand.append('batchSize', new BsonInt32(batchSize)) - } - if (expectedMaxTimeFieldValue != null) { - expectedCommand.append('maxTimeMS', new BsonInt64(expectedMaxTimeFieldValue)) - } - - def reply = new BsonDocument('ok', new BsonInt32(1)) - .append('cursor', - new BsonDocument('id', new BsonInt64(0)) - .append('ns', new BsonString(NAMESPACE.getFullName())) - .append('nextBatch', new BsonArrayWrapper([]))) - - when: - cursor.hasNext() - - then: - 1 * connection.command(NAMESPACE.getDatabaseName(), expectedCommand, _, _, _, connectionSource) >> { - reply - } - 1 * connection.release() - - where: - batchSize | maxTimeMS | expectedMaxTimeFieldValue - 0 | 0 | null - 2 | 0 | null - 0 | 100 | 100 - } - - def 'should handle exceptions when closing'() { - given: - def connection = Mock(Connection) { - _ * getDescription() >> Stub(ConnectionDescription) { - getMaxWireVersion() >> 4 - } - _ * command(*_) >> { throw new MongoSocketException('No MongoD', SERVER_ADDRESS) } - } - def connectionSource = Stub(ConnectionSource) { - getServerApi() >> null - getConnection() >> { connection } - } - 
connectionSource.retain() >> connectionSource - - def firstBatch = new QueryResult(NAMESPACE, [], 42, SERVER_ADDRESS) - def cursor = new QueryBatchCursor(firstBatch, 0, 2, 100, new DocumentCodec(), null, connectionSource, connection) - - when: - cursor.close() - - then: - notThrown(MongoSocketException) - - when: - cursor.close() - - then: - notThrown(Exception) - } - - def 'should handle exceptions when killing cursor and a connection can not be obtained'() { - given: - def connection = Mock(Connection) { - _ * getDescription() >> Stub(ConnectionDescription) { - getMaxWireVersion() >> 4 - } - } - def connectionSource = Stub(ConnectionSource) { - getConnection() >> { throw new MongoSocketOpenException("can't open socket", SERVER_ADDRESS, new IOException()) } - getServerApi() >> null - } - connectionSource.retain() >> connectionSource - - def firstBatch = new QueryResult(NAMESPACE, [], 42, SERVER_ADDRESS) - def cursor = new QueryBatchCursor(firstBatch, 0, 2, 100, new DocumentCodec(), null, connectionSource, connection) - - when: - cursor.close() - - then: - notThrown(MongoSocketException) - - when: - cursor.close() - - then: - notThrown(Exception) - } - - def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore returns a response'() { - given: - Connection conn = mockConnection(serverVersion) - ConnectionSource connSource - if (serverType == ServerType.LOAD_BALANCER) { - connSource = mockConnectionSource(SERVER_ADDRESS, serverType) - } else { - connSource = mockConnectionSource(SERVER_ADDRESS, serverType, conn, mockConnection(serverVersion)) - } - List firstBatch = [new Document()] - QueryResult initialResult = new QueryResult<>(NAMESPACE, firstBatch, 1, SERVER_ADDRESS) - Object getMoreResponse = useCommand - ? emptyGetMoreCommandResponse(NAMESPACE, getMoreResponseHasCursor ? 42 : 0) - : emptyGetMoreQueryResponse(NAMESPACE, SERVER_ADDRESS, getMoreResponseHasCursor ? 42 : 0) - - when: - QueryBatchCursor cursor = new QueryBatchCursor<>(initialResult, 0, 0, 0, new DocumentCodec(), null, connSource, conn) - List batch = cursor.next() - - then: - batch == firstBatch - - when: - cursor.next() - - then: - // simulate the user calling `close` while `getMore` is in flight - if (useCommand) { - // in LB mode the same connection is used to execute both `getMore` and `killCursors` - int numberOfInvocations = serverType == ServerType.LOAD_BALANCER - ? getMoreResponseHasCursor ? 
2 : 1
-                    : 1
-            numberOfInvocations * conn.command(*_) >> {
-                // `getMore` command
-                cursor.close()
-                getMoreResponse
-            } >> {
-                // `killCursors` command
-                null
-            }
-        } else {
-            1 * conn.getMore(*_) >> {
-                cursor.close()
-                getMoreResponse
-            }
-        }
-
-        then:
-        IllegalStateException e = thrown()
-        e.getMessage() == 'Cursor has been closed'
-
-        then:
-        conn.getCount() == 1
-        connSource.getCount() == 1
-
-        where:
-        serverVersion                | useCommand | getMoreResponseHasCursor | serverType
-        new ServerVersion([5, 0, 0]) | true       | true                     | ServerType.LOAD_BALANCER
-        new ServerVersion([5, 0, 0]) | true       | false                    | ServerType.LOAD_BALANCER
-        new ServerVersion([3, 2, 0]) | true       | true                     | ServerType.STANDALONE
-        new ServerVersion([3, 2, 0]) | true       | false                    | ServerType.STANDALONE
-    }
-
-    def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore throws exception'() {
-        given:
-        Connection conn = mockConnection(serverVersion)
-        ConnectionSource connSource
-        if (serverType == ServerType.LOAD_BALANCER) {
-            connSource = mockConnectionSource(SERVER_ADDRESS, serverType)
-        } else {
-            connSource = mockConnectionSource(SERVER_ADDRESS, serverType, conn, mockConnection(serverVersion))
-        }
-        List<Document> firstBatch = [new Document()]
-        QueryResult<Document> initialResult = new QueryResult<>(NAMESPACE, firstBatch, 1, SERVER_ADDRESS)
-        String exceptionMessage = 'test'
-
-        when:
-        QueryBatchCursor<Document> cursor = new QueryBatchCursor<>(initialResult, 0, 0, 0, new DocumentCodec(), null, connSource, conn)
-        List<Document> batch = cursor.next()
-
-        then:
-        batch == firstBatch
-
-        when:
-        cursor.next()
-
-        then:
-        // simulate the user calling `close` while `getMore` is in flight
-        if (useCommand) {
-            // in LB mode the same connection is used to execute both `getMore` and `killCursors`
-            int numberOfInvocations = serverType == ServerType.LOAD_BALANCER ? 2 : 1
-            numberOfInvocations * conn.command(*_) >> {
-                // `getMore` command
-                cursor.close()
-                throw new MongoException(exceptionMessage)
-            } >> {
-                // `killCursors` command
-                null
-            }
-        } else {
-            1 * conn.getMore(*_) >> {
-                cursor.close()
-                throw new MongoException(exceptionMessage)
-            }
-        }
-
-        then:
-        MongoException e = thrown()
-        e.getMessage() == exceptionMessage
-
-        then:
-        conn.getCount() == 1
-        connSource.getCount() == 1
-
-        where:
-        serverVersion                | useCommand | serverType
-        new ServerVersion([5, 0, 0]) | true       | ServerType.LOAD_BALANCER
-        new ServerVersion([3, 2, 0]) | true       | ServerType.STANDALONE
-    }
-
-    /**
-     * Creates a {@link Connection} with {@link Connection#getCount()} returning 1.
-     */
-    private Connection mockConnection(ServerVersion serverVersion) {
-        int refCounter = 1
-        Connection mockConn = Mock(Connection) {
-            getDescription() >> Stub(ConnectionDescription) {
-                getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion.getVersionList())
-            }
-        }
-        mockConn.retain() >> {
-            if (refCounter == 0) {
-                throw new IllegalStateException('Tried to retain Connection when already released')
-            } else {
-                refCounter += 1
-            }
-            mockConn
-        }
-        mockConn.release() >> {
-            refCounter -= 1
-            if (refCounter < 0) {
-                throw new IllegalStateException('Tried to release Connection below 0')
-            }
-            refCounter
-        }
-        mockConn.getCount() >> { refCounter }
-        mockConn
-    }
-
-    private ConnectionSource mockConnectionSource(ServerAddress serverAddress, ServerType serverType, Connection... connections) {
-        int connIdx = 0
-        int refCounter = 1
-        ConnectionSource mockConnectionSource = Mock(ConnectionSource)
-        mockConnectionSource.getServerDescription() >> {
-            ServerDescription.builder()
-                    .address(serverAddress)
-                    .type(serverType)
-                    .state(ServerConnectionState.CONNECTED)
-                    .build()
-        }
-        mockConnectionSource.retain() >> {
-            if (refCounter == 0) {
-                throw new IllegalStateException('Tried to retain ConnectionSource when already released')
-            } else {
-                refCounter += 1
-            }
-            mockConnectionSource
-        }
-        mockConnectionSource.release() >> {
-            refCounter -= 1
-            if (refCounter < 0) {
-                throw new IllegalStateException('Tried to release ConnectionSource below 0')
-            }
-            refCounter
-        }
-        mockConnectionSource.getCount() >> { refCounter }
-        mockConnectionSource.getConnection() >> {
-            if (refCounter == 0) {
-                throw new IllegalStateException('Tried to use released ConnectionSource')
-            }
-            Connection conn
-            if (connIdx < connections.length) {
-                conn = connections[connIdx]
-            } else {
-                throw new IllegalStateException('Requested more than maxConnections=' + maxConnections)
-            }
-            connIdx++
-            conn.retain()
-        }
-        mockConnectionSource
-    }
-
-    private static BsonDocument emptyGetMoreCommandResponse(MongoNamespace namespace, long cursorId) {
-        new BsonDocument('ok', new BsonInt32(1))
-                .append('cursor', new BsonDocument('id', new BsonInt64(cursorId))
-                        .append('ns', new BsonString(namespace.getFullName()))
-                        .append('nextBatch', new BsonArrayWrapper([])))
-    }
-
-    private static QueryResult<Document> emptyGetMoreQueryResponse(MongoNamespace namespace, ServerAddress serverAddress, long cursorId) {
-        new QueryResult(namespace, [], cursorId, serverAddress)
-    }
-}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/SingleBatchCursorTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/SingleBatchCursorTest.java
new file mode 100644
index 00000000000..a71f067f5d6
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/SingleBatchCursorTest.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.operation;
+
+import com.mongodb.ServerAddress;
+import org.bson.Document;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import static com.mongodb.assertions.Assertions.assertFalse;
+import static com.mongodb.assertions.Assertions.assertNull;
+import static com.mongodb.internal.connection.tlschannel.util.Util.assertTrue;
+import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor;
+import static java.util.Arrays.asList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertIterableEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+
+class SingleBatchCursorTest {
+
+    private static final List<Document> SINGLE_BATCH = asList(new Document("a", 1), new Document("b", 2));
+    private static final ServerAddress SERVER_ADDRESS = new ServerAddress();
+
+    @Test
+    @DisplayName("should work as expected")
+    void shouldWorkAsExpected() {
+
+        try (SingleBatchCursor<Document> cursor = new SingleBatchCursor<>(SINGLE_BATCH, 0, SERVER_ADDRESS)) {
+            assertEquals(SERVER_ADDRESS, cursor.getServerAddress());
+            assertEquals(1, cursor.available());
+            assertNull(cursor.getServerCursor());
+
+            assertTrue(cursor.hasNext());
+            assertIterableEquals(SINGLE_BATCH, cursor.next());
+            assertEquals(0, cursor.available());
+
+            assertFalse(cursor.hasNext());
+            assertThrows(NoSuchElementException.class, cursor::next);
+        }
+    }
+
+    @Test
+    @DisplayName("should work as expected emptyCursor")
+    void shouldWorkAsExpectedEmptyCursor() {
+        try (SingleBatchCursor<Document> cursor = createEmptySingleBatchCursor(SERVER_ADDRESS, 0)) {
+            assertEquals(SERVER_ADDRESS, cursor.getServerAddress());
+            assertEquals(0, cursor.available());
+            assertNull(cursor.getServerCursor());
+
+            assertFalse(cursor.hasNext());
+            assertThrows(NoSuchElementException.class, cursor::next);
+        }
+    }
+
+    @Test
+    @DisplayName("should work as expected with try methods")
+    void shouldWorkAsExpectedWithTryMethods() {
+        try (SingleBatchCursor<Document> cursor = new SingleBatchCursor<>(SINGLE_BATCH, 0, SERVER_ADDRESS)) {
+            assertIterableEquals(SINGLE_BATCH, cursor.tryNext());
+            assertNull(cursor.tryNext());
+        }
+    }
+
+    @Test
+    @DisplayName("should not support setting batch size")
+    void shouldNotSupportSettingBatchSize() {
+        try (SingleBatchCursor<Document> cursor = new SingleBatchCursor<>(SINGLE_BATCH, 0, SERVER_ADDRESS)) {
+            assertEquals(0, cursor.getBatchSize());
+
+            cursor.setBatchSize(1);
+            assertEquals(0, cursor.getBatchSize());
+        }
+    }
+
+}
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursor.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursor.java
index 6e28551f48a..56e1ad54a15 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursor.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursor.java
@@ -44,8 +44,6 @@ public Publisher<List<T>> next(final Supplier<Boolean> hasBeenCancelled) {
             if (!hasBeenCancelled.get()) {
                 if (t != null) {
                     sink.error(t);
-                } else if (result == null) {
-                    sink.success();
                 } else {
                     sink.success(result);
                 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorFlux.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorFlux.java
index f4297c72102..9e28af92363 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorFlux.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorFlux.java
@@ -95,7 +95,7 @@ private void recurseCursor(){
                     }
                 })
                 .doOnSuccess(results -> {
-                    if (results != null) {
+                    if (!results.isEmpty()) {
                         results
                             .stream()
                             .filter(Objects::nonNull)
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java
index 91f44bfddb7..410dfd02fc4 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java
@@ -213,7 +213,7 @@ public void testBatchCursorCompletesAsExpectedWithLimit() {
         findPublisher.subscribe(subscriber);
         assertCommandNames(emptyList());
 
-        subscriber.requestMore(100);
+        subscriber.requestMore(101);
         subscriber.assertReceivedOnNext(docs);
         subscriber.assertNoErrors();
         subscriber.assertTerminalEvent();
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java
index b5f77d39941..c55d2dccac6 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java
@@ -53,6 +53,7 @@
 import java.util.function.Function;
 
 import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
+import static java.util.Collections.emptyList;
 import static java.util.stream.Collectors.toList;
 import static java.util.stream.Collectors.toMap;
 import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
@@ -250,7 +251,7 @@ void configureBatchCursor() {
         Mockito.lenient().doAnswer(i -> isClosed.get()).when(getBatchCursor()).isClosed();
         Mockito.lenient().doAnswer(invocation -> {
             isClosed.set(true);
-            invocation.getArgument(0, SingleResultCallback.class).onResult(null, null);
+            invocation.getArgument(0, SingleResultCallback.class).onResult(emptyList(), null);
             return null;
         }).when(getBatchCursor()).next(any(SingleResultCallback.class));
     }
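Note on the cursor-completion contract exercised above: the reactive BatchCursor no longer special-cases a null batch, BatchCursorFlux now tests results.isEmpty(), and the TestHelper mock completes with emptyList() while flipping isClosed to true. The following is a minimal sketch, written against the internal com.mongodb.internal.async interfaces that appear in this diff, of how a caller can drain a cursor under that contract; it assumes an exhausted cursor hands back an empty list (never null) and reports isClosed() afterwards. CursorDrainExample and collectAll are illustrative names only, not driver API, and the recursive style is for brevity rather than stack safety.

import com.mongodb.internal.async.AsyncBatchCursor;
import com.mongodb.internal.async.SingleResultCallback;

import java.util.ArrayList;
import java.util.List;

final class CursorDrainExample {

    // Requests batches until the cursor reports it is closed, then delivers the
    // accumulated documents to the supplied callback.
    static <T> void collectAll(final AsyncBatchCursor<T> cursor, final SingleResultCallback<List<T>> callback) {
        collect(cursor, new ArrayList<>(), callback);
    }

    private static <T> void collect(final AsyncBatchCursor<T> cursor, final List<T> accumulator,
                                    final SingleResultCallback<List<T>> callback) {
        cursor.next((batch, error) -> {
            if (error != null) {
                cursor.close();
                callback.onResult(null, error);
                return;
            }
            // Assumption reflected in the diff: the batch is an empty list (never null)
            // once the cursor is exhausted, and the cursor marks itself closed.
            accumulator.addAll(batch);
            if (cursor.isClosed()) {
                callback.onResult(accumulator, null);
            } else {
                collect(cursor, accumulator, callback);
            }
        });
    }

    private CursorDrainExample() {
    }
}

Checking isClosed() between calls, rather than probing for a null batch, mirrors what the updated TestHelper mock simulates and what the BatchCursorFlux change relies on.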