diff --git a/pom.xml b/pom.xml
index 89da0a6..7bc17ad 100644
--- a/pom.xml
+++ b/pom.xml
@@ -72,12 +72,12 @@
 			<groupId>io.projectreactor</groupId>
 			<artifactId>reactor-core</artifactId>
-			<version>3.4.2</version>
+			<version>3.4.3</version>
 			<groupId>io.projectreactor</groupId>
 			<artifactId>reactor-test</artifactId>
-			<version>3.4.2</version>
+			<version>3.4.3</version>
 			<scope>test</scope>
diff --git a/src/main/lombok/org/warp/filesponge/DiskCache.java b/src/main/lombok/org/warp/filesponge/DiskCache.java
index a28a34f..d6b9acb 100644
--- a/src/main/lombok/org/warp/filesponge/DiskCache.java
+++ b/src/main/lombok/org/warp/filesponge/DiskCache.java
@@ -72,7 +72,7 @@ public class DiskCache implements URLsDiskHandler, URLsWriter {
return Optional
.of(new DiskMetadata(
metadata.getSize(),
- new BooleanArrayList(DiskMetadata.getBlocksCount(metadata.getSize(), BLOCK_SIZE))
+ BooleanArrayList.wrap(new boolean[DiskMetadata.getBlocksCount(metadata.getSize(), BLOCK_SIZE)])
))
.map(diskMetadataSerializer::serialize);
}
@@ -95,7 +95,7 @@ public class DiskCache implements URLsDiskHandler, URLsWriter {
.map(prevMeta -> {
if (!prevMeta.getDownloadedBlocks().getBoolean(dataBlock.getId())) {
BooleanArrayList bal = prevMeta.getDownloadedBlocks().clone();
- bal.add(dataBlock.getId(), true);
+ bal.set(dataBlock.getId(), true);
return new DiskMetadata(prevMeta.getSize(), bal);
} else {
return prevMeta;
@@ -111,35 +111,35 @@ public class DiskCache implements URLsDiskHandler, URLsWriter {
 	public Flux<DataBlock> requestContent(URL url) {
return requestDiskMetadata(url)
.filter(DiskMetadata::isDownloadedFully)
- .flatMapMany(meta -> Flux.fromIterable(meta.getDownloadedBlocks()))
- .index()
- // Get only downloaded blocks
- .filter(Tuple2::getT2)
- .flatMapSequential(blockMeta -> {
- int blockId = Math.toIntExact(blockMeta.getT1());
- boolean downloaded = blockMeta.getT2();
- if (!downloaded) {
- return Mono.empty();
- }
- return fileContent.get(null, getBlockKey(url, blockId)).map(data -> {
- int blockOffset = getBlockOffset(blockId);
- int blockLength = data.length;
- if (blockOffset + blockLength >= blockMeta.size()) {
- if (blockOffset + blockLength > blockMeta.size()) {
- throw new IllegalStateException("Overflowed data size");
+ .flatMapMany(meta -> Flux.fromIterable(meta.getDownloadedBlocks())
+ .index()
+ // Get only downloaded blocks
+ .filter(Tuple2::getT2)
+ .flatMapSequential(blockMeta -> {
+ int blockId = Math.toIntExact(blockMeta.getT1());
+ boolean downloaded = blockMeta.getT2();
+ if (!downloaded) {
+ return Mono.empty();
}
- } else if (data.length != BLOCK_SIZE) {
- throw new IllegalStateException("Block data length != block length");
- }
- return new DataBlock(blockOffset, blockLength, ByteBuffer.wrap(data, 0, blockLength));
- });
- });
+ return fileContent.get(null, getBlockKey(url, blockId)).map(data -> {
+ int blockOffset = getBlockOffset(blockId);
+ int blockLength = data.length;
+ if (blockOffset + blockLength >= meta.getSize()) {
+ if (blockOffset + blockLength > meta.getSize()) {
+ throw new IllegalStateException("Overflowed data size");
+ }
+ } else if (data.length != BLOCK_SIZE) {
+ throw new IllegalStateException("Block data length != block length");
+ }
+ return new DataBlock(blockOffset, blockLength, ByteBuffer.wrap(data, 0, blockLength));
+ });
+ }));
}
private byte[] getBlockKey(URL url, int blockId) {
byte[] urlBytes = url.getSerializer().serialize(url);
byte[] blockIdBytes = Ints.toByteArray(blockId);
- byte[] resultBytes = Arrays.copyOf(urlBytes, urlBytes.length);
+ byte[] resultBytes = Arrays.copyOf(urlBytes, urlBytes.length + blockIdBytes.length);
System.arraycopy(blockIdBytes, 0, resultBytes, urlBytes.length, blockIdBytes.length);
return resultBytes;
}
diff --git a/src/main/lombok/org/warp/filesponge/DiskMetadata.java b/src/main/lombok/org/warp/filesponge/DiskMetadata.java
index 1e14e0e..4feb7e7 100644
--- a/src/main/lombok/org/warp/filesponge/DiskMetadata.java
+++ b/src/main/lombok/org/warp/filesponge/DiskMetadata.java
@@ -53,7 +53,9 @@ public class DiskMetadata {
private int getBlocksCount() {
var expectedBlocksCount = getBlocksCount(size, FileSponge.BLOCK_SIZE);
if (this.getDownloadedBlocks().size() != expectedBlocksCount) {
- throw new IllegalStateException("Blocks array length != expected blocks count");
+ throw new IllegalStateException(
+ "Blocks array length (" + this.getDownloadedBlocks().size()
+ + ") != expected blocks count (" + expectedBlocksCount + ")");
}
return expectedBlocksCount;
}
@@ -75,22 +77,20 @@ public class DiskMetadata {
var dis = new DataInputStream(bais);
int size = dis.readInt();
int blocksCount = getBlocksCount(size, FileSponge.BLOCK_SIZE);
- boolean[] downloadedBlocks = new boolean[blocksCount];
+ var downloadedBlocks = new BooleanArrayList(blocksCount);
for (int i = 0; i < blocksCount; i++) {
- downloadedBlocks[i] = dis.readBoolean();
+ downloadedBlocks.add(dis.readBoolean());
}
- return new DiskMetadata(size, BooleanArrayList.wrap(downloadedBlocks, blocksCount));
+ return new DiskMetadata(size, downloadedBlocks);
}
@SneakyThrows
@Override
public byte @NotNull [] serialize(@NotNull DiskMetadata deserialized) {
- try (var bos = new ByteArrayOutputStream(Integer.BYTES * 2)) {
+ try (var bos = new ByteArrayOutputStream()) {
try (var dos = new DataOutputStream(bos)) {
dos.writeInt(deserialized.getSize());
- if (deserialized.getDownloadedBlocks().size() != deserialized.getBlocksCount()) {
- throw new IllegalStateException("Blocks array length != expected blocks count");
- }
+ deserialized.getBlocksCount();
for (boolean downloadedBlock : deserialized.getDownloadedBlocks()) {
dos.writeBoolean(downloadedBlock);
}
diff --git a/src/main/lombok/org/warp/filesponge/FileSponge.java b/src/main/lombok/org/warp/filesponge/FileSponge.java
index 0d0a3a2..62c9cff 100644
--- a/src/main/lombok/org/warp/filesponge/FileSponge.java
+++ b/src/main/lombok/org/warp/filesponge/FileSponge.java
@@ -20,13 +20,17 @@ package org.warp.filesponge;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
public class FileSponge implements URLsHandler {
- public static final int BLOCK_SIZE = 8 * 1024 * 1024; // 8 MiB
- public static final int MAX_BLOCKS = 256; // 2 GiB
+ private static final Logger logger = LoggerFactory.getLogger(FileSponge.class);
+
+ public static final int BLOCK_SIZE = 1024 * 1024; // 1 MiB
 	private final Set<URLsHandler> urlsHandlers = new ConcurrentHashMap<URLsHandler, Object>().keySet(new Object());
@@ -50,13 +54,20 @@ public class FileSponge implements URLsHandler {
@Override
 	public Flux<DataBlock> requestContent(URL url) {
+ AtomicBoolean alreadyPrintedDebug = new AtomicBoolean(false);
return Flux
.fromIterable(cacheAccess)
.map(urlsHandler -> urlsHandler.requestContent(url))
.collectList()
- .flatMapMany(Flux::firstWithValue)
+ .flatMapMany(monos -> FileSpongeUtils.firstWithValueFlux(monos))
+ .doOnNext(dataBlock -> {
+ if (alreadyPrintedDebug.compareAndSet(false, true)) {
+ logger.debug("File \"{}\" content has been found in the cache", url);
+ }
+ })
.switchIfEmpty(Flux
.fromIterable(urlsHandlers)
+ .doOnSubscribe(s -> logger.debug("Downloading file \"{}\" content", url))
.map(urlsHandler -> urlsHandler
.requestContent(url)
.flatMapSequential(dataBlock -> Flux
@@ -66,7 +77,8 @@ public class FileSponge implements URLsHandler {
)
)
.collectList()
- .flatMapMany(Flux::firstWithValue)
+ .flatMapMany(monos -> FileSpongeUtils.firstWithValueFlux(monos))
+ .doOnComplete(() -> logger.debug("Downloaded file \"{}\" content", url))
)
.distinct(DataBlock::getId);
}
@@ -77,9 +89,15 @@ public class FileSponge implements URLsHandler {
.fromIterable(cacheAccess)
.map(urlsHandler -> urlsHandler.requestMetadata(url))
.collectList()
- .flatMap(Mono::firstWithValue)
+ .flatMap(monos -> FileSpongeUtils.firstWithValueMono(monos))
+ .doOnSuccess(metadata -> {
+ if (metadata != null) {
+ logger.debug("File \"{}\" metadata has been found in the cache", url);
+ }
+ })
.switchIfEmpty(Flux
.fromIterable(urlsHandlers)
+ .doOnSubscribe(s -> logger.debug("Downloading file \"{}\" metadata", url))
.map(urlsHandler -> urlsHandler
.requestMetadata(url)
.flatMap(dataBlock -> Flux
@@ -89,7 +107,14 @@ public class FileSponge implements URLsHandler {
)
)
.collectList()
- .flatMap(Mono::firstWithValue)
+ .flatMap(monos -> FileSpongeUtils.firstWithValueMono(monos))
+ .doOnSuccess(s -> {
+ if (s != null) {
+ logger.debug("Downloaded file \"{}\" metadata", url);
+ } else {
+ logger.debug("File \"{}\" metadata has not been found anywhere", url);
+ }
+ })
);
}
}
diff --git a/src/main/lombok/org/warp/filesponge/FileSpongeUtils.java b/src/main/lombok/org/warp/filesponge/FileSpongeUtils.java
new file mode 100644
index 0000000..0f2482b
--- /dev/null
+++ b/src/main/lombok/org/warp/filesponge/FileSpongeUtils.java
@@ -0,0 +1,75 @@
+/*
+ * FileSponge
+ * Copyright (C) 2021 Andrea Cavalli
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <https://www.gnu.org/licenses/>.
+ */
+
+package org.warp.filesponge;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.time.Duration;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.concurrent.TimeUnit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import reactor.core.Exceptions;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import reactor.core.scheduler.Schedulers;
+
+public class FileSpongeUtils {
+
+ private static final Logger logger = LoggerFactory.getLogger(FileSponge.class);
+
+	public static <T> Mono<T> firstWithValueMono(List<Mono<T>> monos) {
+ return Mono.firstWithValue(monos).onErrorResume(FileSpongeUtils::ignoreFakeErrors);
+ }
+
+	public static <T> Flux<T> firstWithValueFlux(List<Mono<T>> monos) {
+ return Flux.firstWithValue(monos).onErrorResume(FileSpongeUtils::ignoreFakeErrors);
+ }
+
+	private static <T> Mono<T> ignoreFakeErrors(Throwable ex) {
+ return Mono.defer(() -> {
+ if (ex instanceof NoSuchElementException) {
+ var multiple = Exceptions.unwrapMultiple(ex.getCause());
+ for (Throwable throwable : multiple) {
+ if (!(throwable instanceof NoSuchElementException)) {
+ return Mono.error(ex);
+ }
+ }
+ return Mono.empty();
+ } else {
+ return Mono.error(ex);
+ }
+ });
+ }
+
+	public static Mono<Path> deleteFileAfter(Path path, Duration delay) {
+ return Mono.fromCallable(() -> {
+ Schedulers.boundedElastic().schedule(() -> {
+ try {
+ Files.deleteIfExists(path);
+ } catch (IOException e) {
+ logger.warn("Failed to delete file \"{}\"", path, e);
+ }
+ }, delay.toMillis(), TimeUnit.MILLISECONDS);
+ return path;
+ }).subscribeOn(Schedulers.boundedElastic());
+ }
+}