@@ -29,10 +29,10 @@ import java.util.concurrent.atomic.AtomicLongFieldUpdater;
 
 import org.reactivestreams.Publisher;
 import org.reactivestreams.Subscription;
 import org.springframework.core.io.buffer.DataBuffer;
 import org.springframework.core.io.buffer.DataBufferFactory;
 
+import com.mongodb.reactivestreams.client.Success;
 import com.mongodb.reactivestreams.client.gridfs.AsyncInputStream;
 
 /**
@@ -56,34 +56,98 @@ class DataBufferPublisherAdapter {
 
     static Flux<DataBuffer> createBinaryStream(AsyncInputStream inputStream, DataBufferFactory dataBufferFactory,
             int bufferSize) {
 
-        State state = new State(inputStream, dataBufferFactory, bufferSize);
-
-        return Flux.usingWhen(Mono.just(inputStream), it -> {
-
-            return Flux.<DataBuffer> create((sink) -> {
-
-                sink.onDispose(state::close);
-                sink.onCancel(state::close);
-
-                sink.onRequest(n -> {
-                    state.request(sink, n);
-                });
-            });
-        }, AsyncInputStream::close, (it, err) -> it.close(), AsyncInputStream::close) //
-                .concatMap(Flux::just, 1);
+        return Flux.usingWhen(Mono.just(new DelegatingAsyncInputStream(inputStream, dataBufferFactory, bufferSize)),
+                DataBufferPublisherAdapter::doRead, AsyncInputStream::close, (it, err) -> it.close(), AsyncInputStream::close);
+    }
+
+    /**
+     * Use an {@link AsyncInputStreamHandler} to read data from the given {@link AsyncInputStream}.
+     *
+     * @param inputStream the source stream.
+     * @return a {@link Flux} emitting data chunks one by one.
+     * @since 2.2.1
+     */
+    private static Flux<DataBuffer> doRead(DelegatingAsyncInputStream inputStream) {
+
+        AsyncInputStreamHandler streamHandler = new AsyncInputStreamHandler(inputStream, inputStream.dataBufferFactory,
+                inputStream.bufferSize);
+
+        return Flux.create((sink) -> {
+
+            sink.onDispose(streamHandler::close);
+            sink.onCancel(streamHandler::close);
+
+            sink.onRequest(n -> {
+                streamHandler.request(sink, n);
+            });
+        });
     }
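The rewritten createBinaryStream above hands a DelegatingAsyncInputStream to Flux.usingWhen and moves the actual read loop into doRead; usingWhen then invokes the close handlers when the derived Flux completes, errors, or is cancelled. Below is a minimal, self-contained sketch of that usingWhen contract (Reactor's five-argument overload, as used above) with a stand-in Resource type; Resource, read and close are illustrative names, not part of this change.

```java
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

class UsingWhenSketch {

    // Stand-in for AsyncInputStream: produces data and exposes an asynchronous close.
    static class Resource {

        Flux<String> read() {
            return Flux.just("chunk-1", "chunk-2");
        }

        Mono<String> close() {
            return Mono.just("closed");
        }
    }

    public static void main(String[] args) {

        Flux<String> data = Flux.usingWhen(Mono.just(new Resource()), // acquire the resource
                Resource::read, // derive the data publisher from the resource
                Resource::close, // close once the data publisher completes
                (resource, err) -> resource.close(), // close on error
                Resource::close); // close on cancellation

        data.subscribe(System.out::println);
    }
}
```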
+
+    /**
+     * An {@link AsyncInputStream} also holding a {@link DataBufferFactory} and default {@literal bufferSize} for reading
+     * from it, delegating operations on the {@link AsyncInputStream} to the reference instance. <br />
+     * Used to pass on the {@link AsyncInputStream} and parameters to avoid capturing lambdas.
+     *
+     * @author Christoph Strobl
+     * @since 2.2.1
+     */
+    private static class DelegatingAsyncInputStream implements AsyncInputStream {
+
+        private final AsyncInputStream inputStream;
+        private final DataBufferFactory dataBufferFactory;
+        private int bufferSize;
+
+        /**
+         * @param inputStream the source input stream.
+         * @param dataBufferFactory
+         * @param bufferSize
+         */
+        DelegatingAsyncInputStream(AsyncInputStream inputStream, DataBufferFactory dataBufferFactory, int bufferSize) {
+
+            this.inputStream = inputStream;
+            this.dataBufferFactory = dataBufferFactory;
+            this.bufferSize = bufferSize;
+        }
+
+        /*
+         * (non-Javadoc)
+         * @see com.mongodb.reactivestreams.client.gridfs.AsyncInputStream#read(java.nio.ByteBuffer)
+         */
+        @Override
+        public Publisher<Integer> read(ByteBuffer dst) {
+            return inputStream.read(dst);
+        }
+
+        /*
+         * (non-Javadoc)
+         * @see com.mongodb.reactivestreams.client.gridfs.AsyncInputStream#skip(long)
+         */
+        @Override
+        public Publisher<Long> skip(long bytesToSkip) {
+            return inputStream.skip(bytesToSkip);
+        }
+
+        /*
+         * (non-Javadoc)
+         * @see com.mongodb.reactivestreams.client.gridfs.AsyncInputStream#close()
+         */
+        @Override
+        public Publisher<Success> close() {
+            return inputStream.close();
+        }
+    }
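The DelegatingAsyncInputStream added above bundles the stream with the DataBufferFactory and bufferSize, so createBinaryStream can pass the non-capturing method reference DataBufferPublisherAdapter::doRead instead of a lambda that closes over three locals (the "avoid capturing lambdas" note in the javadoc). A rough, hypothetical illustration of that trade-off; Source and describe are made-up names:

```java
import java.util.function.Function;

class CaptureAvoidanceSketch {

    // Hypothetical wrapper carrying a source together with its read parameters,
    // in the spirit of DelegatingAsyncInputStream.
    static class Source {

        final String name;
        final int bufferSize;

        Source(String name, int bufferSize) {
            this.name = name;
            this.bufferSize = bufferSize;
        }
    }

    // Static method usable as a non-capturing method reference,
    // in the spirit of DataBufferPublisherAdapter::doRead.
    static String describe(Source source) {
        return source.name + " (bufferSize=" + source.bufferSize + ")";
    }

    public static void main(String[] args) {

        int bufferSize = 256;

        // A lambda closing over 'bufferSize' captures local state at the call site.
        Function<String, String> capturing = name -> name + " (bufferSize=" + bufferSize + ")";

        // A method reference needs no capture; the state travels inside the argument.
        Function<Source, String> nonCapturing = CaptureAvoidanceSketch::describe;

        System.out.println(capturing.apply("gridfs-stream"));
        System.out.println(nonCapturing.apply(new Source("gridfs-stream", bufferSize)));
    }
}
```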
 
     @RequiredArgsConstructor
-    static class State {
+    static class AsyncInputStreamHandler {
 
-        private static final AtomicLongFieldUpdater<State> DEMAND = AtomicLongFieldUpdater.newUpdater(State.class,
-                "demand");
+        private static final AtomicLongFieldUpdater<AsyncInputStreamHandler> DEMAND = AtomicLongFieldUpdater
+                .newUpdater(AsyncInputStreamHandler.class, "demand");
 
-        private static final AtomicIntegerFieldUpdater<State> STATE = AtomicIntegerFieldUpdater.newUpdater(State.class,
-                "state");
+        private static final AtomicIntegerFieldUpdater<AsyncInputStreamHandler> STATE = AtomicIntegerFieldUpdater
+                .newUpdater(AsyncInputStreamHandler.class, "state");
 
-        private static final AtomicIntegerFieldUpdater<State> READ = AtomicIntegerFieldUpdater.newUpdater(State.class,
-                "read");
+        private static final AtomicIntegerFieldUpdater<AsyncInputStreamHandler> READ = AtomicIntegerFieldUpdater
+                .newUpdater(AsyncInputStreamHandler.class, "read");
 
         private static final int STATE_OPEN = 0;
         private static final int STATE_CLOSED = 1;
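The renamed AsyncInputStreamHandler keeps its demand counter and open/closed flag in volatile fields managed through AtomicLongFieldUpdater and AtomicIntegerFieldUpdater, the usual way to get lock-free updates without allocating an AtomicLong or AtomicInteger per stream. The sketch below shows how such updaters are typically wired; the increaseDemand and close bodies are illustrative guesses, not the actual handler methods.

```java
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;

class FieldUpdaterSketch {

    private static final AtomicLongFieldUpdater<FieldUpdaterSketch> DEMAND = AtomicLongFieldUpdater
            .newUpdater(FieldUpdaterSketch.class, "demand");

    private static final AtomicIntegerFieldUpdater<FieldUpdaterSketch> STATE = AtomicIntegerFieldUpdater
            .newUpdater(FieldUpdaterSketch.class, "state");

    private static final int STATE_OPEN = 0;
    private static final int STATE_CLOSED = 1;

    // Field updaters require non-static volatile fields on the target class.
    volatile long demand;
    volatile int state = STATE_OPEN;

    long increaseDemand(long n) {
        return DEMAND.addAndGet(this, n);
    }

    boolean close() {
        // Only the first caller wins the OPEN -> CLOSED transition; later calls are no-ops.
        return STATE.compareAndSet(this, STATE_OPEN, STATE_CLOSED);
    }

    public static void main(String[] args) {

        FieldUpdaterSketch handler = new FieldUpdaterSketch();

        System.out.println(handler.increaseDemand(4)); // 4
        System.out.println(handler.close()); // true
        System.out.println(handler.close()); // false
    }
}
```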
@@ -188,6 +252,7 @@ class DataBufferPublisherAdapter {
 
         @Override
         public void onSubscribe(Subscription s) {
 
             this.subscription = s;
             s.request(1);
         }
@@ -203,14 +268,8 @@ class DataBufferPublisherAdapter {
 
                 if (bytes > 0) {
 
-                    transport.flip();
-
-                    DataBuffer dataBuffer = factory.allocateBuffer(transport.remaining());
-                    dataBuffer.write(transport);
-
-                    transport.clear();
-
-                    sink.next(dataBuffer);
+                    DataBuffer buffer = readNextChunk();
+                    sink.next(buffer);
 
                     decrementDemand();
                 }
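The block replaced above (and the readNextChunk() method added in the next hunk) performs the standard ByteBuffer-to-DataBuffer copy: flip the transport buffer into read mode, allocate a DataBuffer sized to the remaining bytes, write them into it, then clear the transport buffer for the next read. A standalone sketch of that sequence, assuming Spring's DefaultDataBufferFactory as the factory implementation:

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.springframework.core.io.buffer.DataBuffer;
import org.springframework.core.io.buffer.DataBufferFactory;
import org.springframework.core.io.buffer.DefaultDataBufferFactory;

class ReadNextChunkSketch {

    public static void main(String[] args) {

        DataBufferFactory factory = new DefaultDataBufferFactory();

        // 'transport' plays the role of the reusable buffer the driver reads into.
        ByteBuffer transport = ByteBuffer.allocate(256);
        transport.put("some gridfs bytes".getBytes(StandardCharsets.UTF_8));

        // Same sequence as readNextChunk(): flip to read mode, copy what was written, reset.
        transport.flip();
        DataBuffer dataBuffer = factory.allocateBuffer(transport.remaining());
        dataBuffer.write(transport);
        transport.clear();

        System.out.println(dataBuffer.readableByteCount()); // 17
    }
}
```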
@@ -226,6 +285,18 @@ class DataBufferPublisherAdapter {
 
             subscription.request(1);
         }
 
+        private DataBuffer readNextChunk() {
+
+            transport.flip();
+
+            DataBuffer dataBuffer = factory.allocateBuffer(transport.remaining());
+            dataBuffer.write(transport);
+
+            transport.clear();
+
+            return dataBuffer;
+        }
+
         @Override
         public void onError(Throwable t) {