Skip to content

Commit 30d9944

Browse files
DATAMONGO-2393 - Remove capturing lambdas and extract methods.
Original Pull Request: #799
1 parent 839aece commit 30d9944

File tree

3 files changed

+110
-37
lines changed

3 files changed

+110
-37
lines changed

spring-data-mongodb/src/main/java/org/springframework/data/mongodb/gridfs/DataBufferPublisherAdapter.java

Lines changed: 97 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -29,10 +29,10 @@
2929

3030
import org.reactivestreams.Publisher;
3131
import org.reactivestreams.Subscription;
32-
3332
import org.springframework.core.io.buffer.DataBuffer;
3433
import org.springframework.core.io.buffer.DataBufferFactory;
3534

35+
import com.mongodb.reactivestreams.client.Success;
3636
import com.mongodb.reactivestreams.client.gridfs.AsyncInputStream;
3737

3838
/**
@@ -56,34 +56,98 @@ class DataBufferPublisherAdapter {
5656
static Flux<DataBuffer> createBinaryStream(AsyncInputStream inputStream, DataBufferFactory dataBufferFactory,
5757
int bufferSize) {
5858

59-
State state = new State(inputStream, dataBufferFactory, bufferSize);
59+
return Flux.usingWhen(Mono.just(new DelegatingAsyncInputStream(inputStream, dataBufferFactory, bufferSize)),
60+
DataBufferPublisherAdapter::doRead, AsyncInputStream::close, (it, err) -> it.close(), AsyncInputStream::close);
61+
}
62+
63+
/**
64+
* Use an {@link AsyncInputStreamHandler} to read data from the given {@link AsyncInputStream}.
65+
*
66+
* @param inputStream the source stream.
67+
* @return a {@link Flux} emitting data chunks one by one.
68+
* @since 2.2.1
69+
*/
70+
private static Flux<DataBuffer> doRead(DelegatingAsyncInputStream inputStream) {
6071

61-
return Flux.usingWhen(Mono.just(inputStream), it -> {
72+
AsyncInputStreamHandler streamHandler = new AsyncInputStreamHandler(inputStream, inputStream.dataBufferFactory,
73+
inputStream.bufferSize);
6274

63-
return Flux.<DataBuffer> create((sink) -> {
75+
return Flux.create((sink) -> {
6476

65-
sink.onDispose(state::close);
66-
sink.onCancel(state::close);
77+
sink.onDispose(streamHandler::close);
78+
sink.onCancel(streamHandler::close);
6779

68-
sink.onRequest(n -> {
69-
state.request(sink, n);
70-
});
80+
sink.onRequest(n -> {
81+
streamHandler.request(sink, n);
7182
});
72-
}, AsyncInputStream::close, (it, err) -> it.close(), AsyncInputStream::close) //
73-
.concatMap(Flux::just, 1);
83+
});
84+
}
85+
86+
/**
87+
* An {@link AsyncInputStream} also holding a {@link DataBufferFactory} and default {@literal bufferSize} for reading
88+
* from it, delegating operations on the {@link AsyncInputStream} to the reference instance. <br />
89+
* Used to pass on the {@link AsyncInputStream} and parameters to avoid capturing lambdas.
90+
*
91+
* @author Christoph Strobl
92+
* @since 2.2.1
93+
*/
94+
private static class DelegatingAsyncInputStream implements AsyncInputStream {
95+
96+
private final AsyncInputStream inputStream;
97+
private final DataBufferFactory dataBufferFactory;
98+
private int bufferSize;
99+
100+
/**
101+
* @param inputStream the source input stream.
102+
* @param dataBufferFactory
103+
* @param bufferSize
104+
*/
105+
DelegatingAsyncInputStream(AsyncInputStream inputStream, DataBufferFactory dataBufferFactory, int bufferSize) {
106+
107+
this.inputStream = inputStream;
108+
this.dataBufferFactory = dataBufferFactory;
109+
this.bufferSize = bufferSize;
110+
}
111+
112+
/*
113+
* (non-Javadoc)
114+
* @see com.mongodb.reactivestreams.client.gridfs.AsyncInputStream#read(java.nio.ByteBuffer)
115+
*/
116+
@Override
117+
public Publisher<Integer> read(ByteBuffer dst) {
118+
return inputStream.read(dst);
119+
}
120+
121+
/*
122+
* (non-Javadoc)
123+
* @see com.mongodb.reactivestreams.client.gridfs.AsyncInputStream#skip(long)
124+
*/
125+
@Override
126+
public Publisher<Long> skip(long bytesToSkip) {
127+
return inputStream.skip(bytesToSkip);
128+
}
129+
130+
/*
131+
* (non-Javadoc)
132+
* @see com.mongodb.reactivestreams.client.gridfs.AsyncInputStream#close()
133+
*/
134+
@Override
135+
public Publisher<Success> close() {
136+
return inputStream.close();
137+
}
74138
}
75139

76140
@RequiredArgsConstructor
77-
static class State {
141+
static class AsyncInputStreamHandler {
78142

79-
private static final AtomicLongFieldUpdater<State> DEMAND = AtomicLongFieldUpdater.newUpdater(State.class,
80-
"demand");
143+
private static final AtomicLongFieldUpdater<AsyncInputStreamHandler> DEMAND = AtomicLongFieldUpdater
144+
.newUpdater(AsyncInputStreamHandler.class, "demand");
81145

82-
private static final AtomicIntegerFieldUpdater<State> STATE = AtomicIntegerFieldUpdater.newUpdater(State.class,
83-
"state");
146+
private static final AtomicIntegerFieldUpdater<AsyncInputStreamHandler> STATE = AtomicIntegerFieldUpdater
147+
.newUpdater(AsyncInputStreamHandler.class, "state");
84148

85-
private static final AtomicIntegerFieldUpdater<State> READ = AtomicIntegerFieldUpdater.newUpdater(State.class,
86-
"read");
149+
private static final AtomicIntegerFieldUpdater<AsyncInputStreamHandler> READ = AtomicIntegerFieldUpdater
150+
.newUpdater(AsyncInputStreamHandler.class, "read");
87151

88152
private static final int STATE_OPEN = 0;
89153
private static final int STATE_CLOSED = 1;
@@ -188,6 +252,7 @@ public Context currentContext() {
188252

189253
@Override
190254
public void onSubscribe(Subscription s) {
255+
191256
this.subscription = s;
192257
s.request(1);
193258
}
@@ -203,14 +268,8 @@ public void onNext(Integer bytes) {
203268

204269
if (bytes > 0) {
205270

206-
transport.flip();
207-
208-
DataBuffer dataBuffer = factory.allocateBuffer(transport.remaining());
209-
dataBuffer.write(transport);
210-
211-
transport.clear();
212-
sink.next(dataBuffer);
213-
271+
DataBuffer buffer = readNextChunk();
272+
sink.next(buffer);
214273
decrementDemand();
215274
}
216275

@@ -226,6 +285,18 @@ public void onNext(Integer bytes) {
226285
subscription.request(1);
227286
}
228287

288+
private DataBuffer readNextChunk() {
289+
290+
transport.flip();
291+
292+
DataBuffer dataBuffer = factory.allocateBuffer(transport.remaining());
293+
dataBuffer.write(transport);
294+
295+
transport.clear();
296+
297+
return dataBuffer;
298+
}
299+
229300
@Override
230301
public void onError(Throwable t) {
231302

spring-data-mongodb/src/main/java/org/springframework/data/mongodb/gridfs/ReactiveGridFsResource.java

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@
2323
import java.util.function.IntFunction;
2424

2525
import org.reactivestreams.Publisher;
26-
2726
import org.springframework.core.io.AbstractResource;
2827
import org.springframework.core.io.Resource;
2928
import org.springframework.core.io.buffer.DataBuffer;
@@ -36,10 +35,13 @@
3635
* Reactive {@link GridFSFile} based {@link Resource} implementation.
3736
*
3837
* @author Mark Paluch
38+
* @author Christoph Strobl
3939
* @since 2.2
4040
*/
4141
public class ReactiveGridFsResource extends AbstractResource {
4242

43+
private static final Integer DEFAULT_CHUNK_SIZE = 256 * 1024;
44+
4345
private final @Nullable GridFSFile file;
4446
private final String filename;
4547
private final IntFunction<Flux<DataBuffer>> contentFunction;
@@ -176,19 +178,23 @@ public GridFSFile getGridFSFile() {
176178
}
177179

178180
/**
179-
* Retrieve the download stream using the default chunk size of 256kb.
181+
* Retrieve the download stream using the default chunk size of 256 kB.
180182
*
181-
* @return
183+
* @return a {@link Flux} emitting data chunks one by one. Please make sure to
184+
* {@link org.springframework.core.io.buffer.DataBufferUtils#release(DataBuffer) release} all
185+
* {@link DataBuffer buffers} when done.
182186
*/
183187
public Flux<DataBuffer> getDownloadStream() {
184-
return getDownloadStream(256 * 1024); // 256kb buffers
188+
return getDownloadStream(DEFAULT_CHUNK_SIZE);
185189
}
186190

187191
/**
188192
* Retrieve the download stream.
189193
*
190194
* @param chunkSize chunk size in bytes to use.
191-
* @return
195+
* @return a {@link Flux} emitting data chunks one by one. Please make sure to
196+
* {@link org.springframework.core.io.buffer.DataBufferUtils#release(DataBuffer) release} all
197+
* {@link DataBuffer buffers} when done.
192198
* @since 2.2.1
193199
*/
194200
public Flux<DataBuffer> getDownloadStream(int chunkSize) {

spring-data-mongodb/src/test/java/org/springframework/data/mongodb/gridfs/ReactiveGridFsTemplateTests.java

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,6 @@
3333
import org.junit.Before;
3434
import org.junit.Test;
3535
import org.junit.runner.RunWith;
36-
3736
import org.springframework.beans.factory.annotation.Autowired;
3837
import org.springframework.core.io.ClassPathResource;
3938
import org.springframework.core.io.Resource;
@@ -101,13 +100,10 @@ public void storesAndFindsSimpleDocument() {
101100
@Test // DATAMONGO-1855
102101
public void storesAndLoadsLargeFileCorrectly() {
103102

104-
ByteBuffer buffer = ByteBuffer.allocate(1000 * 1000 * 1); // 1 mb
105-
103+
ByteBuffer buffer = ByteBuffer.allocate(1000 * 1000); // 1 mb
106104
int i = 0;
107105
while (buffer.remaining() != 0) {
108-
byte b = (byte) (i++ % 16);
109-
String string = HexUtils.toHex(new byte[] { b });
110-
buffer.put(string.getBytes());
106+
buffer.put(HexUtils.toHex(new byte[] { (byte) (i++ % 16) }).getBytes());
111107
}
112108
buffer.flip();
113109

0 commit comments

Comments (0)