Fix refcounts

parent d6c591d397
commit bce04a20a4
@@ -32,6 +32,7 @@ import it.cavallium.dbengine.database.LLDictionaryResultType;
 import it.cavallium.dbengine.database.LLKeyValueDatabase;
 import it.cavallium.dbengine.database.UpdateMode;
 import it.cavallium.dbengine.database.UpdateReturnMode;
+import it.cavallium.dbengine.database.disk.LLLocalDictionary.ReleasableSlice;
 import it.unimi.dsi.fastutil.booleans.BooleanArrayList;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
@@ -76,17 +77,22 @@ public class DiskCache implements URLsDiskHandler, URLsWriter {
 
 	@Override
 	public Mono<Void> writeMetadata(URL url, Metadata metadata) {
-		return fileMetadata
-				.update(url.getSerializer(db.getAllocator()).serialize(url), oldValue -> {
-					if (oldValue != null) {
-						return oldValue;
-					} else {
-						return diskMetadataSerializer.serialize(new DiskMetadata(
-								metadata.getSize(),
-								BooleanArrayList.wrap(new boolean[DiskMetadata.getBlocksCount(metadata.getSize(), BLOCK_SIZE)])
-						));
-					}
-				}, UpdateReturnMode.NOTHING)
+		return Mono
+				.using(
+						() -> url.getSerializer(db.getAllocator()).serialize(url),
+						key -> fileMetadata
+								.update(key.retain(), oldValue -> {
+									if (oldValue != null) {
+										return oldValue;
+									} else {
+										return diskMetadataSerializer.serialize(new DiskMetadata(
+												metadata.getSize(),
+												BooleanArrayList.wrap(new boolean[DiskMetadata.getBlocksCount(metadata.getSize(), BLOCK_SIZE)])
+										));
+									}
+								}, UpdateReturnMode.NOTHING),
+						ReferenceCounted::release
+				)
 				.then();
 	}
 
@@ -95,31 +101,42 @@ public class DiskCache implements URLsDiskHandler, URLsWriter {
 		return Mono
 				.fromCallable(dataBlock::getData)
 				.subscribeOn(Schedulers.boundedElastic())
-				.flatMap(bytes -> fileContent
-						.put(getBlockKey(url, dataBlock.getId()), bytes, LLDictionaryResultType.VOID)
+				.flatMap(bytes -> Mono
+						.using(
+								() -> getBlockKey(url, dataBlock.getId()),
+								key -> fileContent
+										.put(key.retain(), bytes, LLDictionaryResultType.VOID),
+								ReferenceCounted::release
+						)
 						.doOnNext(ReferenceCounted::release)
 						.then()
 				)
-				.then(fileMetadata.update(url.getSerializer(db.getAllocator()).serialize(url), prevBytes -> {
-					@Nullable DiskMetadata result;
-					if (prevBytes != null) {
-						DiskMetadata prevMeta = diskMetadataSerializer.deserialize(prevBytes);
-						if (!prevMeta.getDownloadedBlocks().getBoolean(dataBlock.getId())) {
-							BooleanArrayList bal = prevMeta.getDownloadedBlocks().clone();
-							bal.set(dataBlock.getId(), true);
-							result = new DiskMetadata(prevMeta.getSize(), bal);
-						} else {
-							result = prevMeta;
-						}
-					} else {
-						result = null;
-					}
-					if (result != null) {
-						return diskMetadataSerializer.serialize(result);
-					} else {
-						return null;
-					}
-				}, UpdateReturnMode.NOTHING))
+				.then(Mono
+						.using(
+								() -> url.getSerializer(db.getAllocator()).serialize(url),
+								key -> fileMetadata.update(key.retain(), prevBytes -> {
+									@Nullable DiskMetadata result;
+									if (prevBytes != null) {
+										DiskMetadata prevMeta = diskMetadataSerializer.deserialize(prevBytes);
+										if (!prevMeta.getDownloadedBlocks().getBoolean(dataBlock.getId())) {
+											BooleanArrayList bal = prevMeta.getDownloadedBlocks().clone();
+											bal.set(dataBlock.getId(), true);
+											result = new DiskMetadata(prevMeta.getSize(), bal);
+										} else {
+											result = prevMeta;
+										}
+									} else {
+										result = null;
+									}
+									if (result != null) {
+										return diskMetadataSerializer.serialize(result);
+									} else {
+										return null;
+									}
+								}, UpdateReturnMode.NOTHING),
+								ReferenceCounted::release
+						)
+				)
 				.then();
 	}
 
@@ -137,23 +154,29 @@ public class DiskCache implements URLsDiskHandler, URLsWriter {
 					if (!downloaded) {
 						return Mono.<DataBlock>empty();
 					}
-					return fileContent.get(null, getBlockKey(url, blockId)).map(data -> {
-						try {
-							int blockOffset = getBlockOffset(blockId);
-							int blockLength = data.readableBytes();
-							if (blockOffset + blockLength >= meta.getSize()) {
-								if (blockOffset + blockLength > meta.getSize()) {
-									throw new IllegalStateException("Overflowed data size");
+					return Mono
+							.using(
+									() -> getBlockKey(url, blockId),
+									key -> fileContent.get(null, key.retain()),
+									ReferenceCounted::release
+							)
+							.map(data -> {
+								try {
+									int blockOffset = getBlockOffset(blockId);
+									int blockLength = data.readableBytes();
+									if (blockOffset + blockLength >= meta.getSize()) {
+										if (blockOffset + blockLength > meta.getSize()) {
+											throw new IllegalStateException("Overflowed data size");
+										}
+									} else {
+										// Intermediate blocks must be of max size
+										assert data.readableBytes() == BLOCK_SIZE;
+									}
+									return new DataBlock(blockOffset, blockLength, data.retain());
+								} finally {
+									data.release();
 								}
-							} else {
-								// Intermediate blocks must be of max size
-								assert data.readableBytes() == BLOCK_SIZE;
-							}
-							return new DataBlock(blockOffset, blockLength, data.retain());
-						} finally {
-							data.release();
-						}
-					});
+							});
 				}));
 	}
 
@@ -170,8 +193,12 @@ public class DiskCache implements URLsDiskHandler, URLsWriter {
 
 	@Override
 	public Mono<DiskMetadata> requestDiskMetadata(URL url) {
-		return fileMetadata
-				.get(null, url.getSerializer(db.getAllocator()).serialize(url))
+		return Mono
+				.using(
+						() -> url.getSerializer(db.getAllocator()).serialize(url),
+						key -> fileMetadata.get(null, key.retain()),
+						ReferenceCounted::release
+				)
 				.map(diskMetadataSerializer::deserialize);
 	}
 
@@ -183,8 +210,12 @@ public class DiskCache implements URLsDiskHandler, URLsWriter {
 
 	@Override
 	public Mono<Tuple2<Metadata, Flux<DataBlock>>> request(URL url) {
-		return fileMetadata
-				.get(null, url.getSerializer(db.getAllocator()).serialize(url))
+		return Mono
+				.using(
+						() -> url.getSerializer(db.getAllocator()).serialize(url),
+						key -> fileMetadata.get(null, key.retain()),
+						ReferenceCounted::release
+				)
 				.map(diskMetadataSerializer::deserialize)
 				.map(diskMeta -> {
 					var meta = diskMeta.asMetadata();
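Every hunk applies the same fix: a serialized key is a reference-counted Netty ByteBuf, and the old code handed it straight to the fileMetadata/fileContent call, so the buffer leaked whenever the chain errored or was never subscribed. Wrapping the call in Mono.using ties the key's lifetime to the subscription, while key.retain() hands the dictionary its own reference. The sketch below illustrates the pattern in isolation; serializeKey and lookup are hypothetical stand-ins for the DiskCache serializer and dictionary calls, not real project APIs.

// Minimal sketch of the refcount pattern applied in this commit.
// Assumptions: serializeKey and lookup are hypothetical stand-ins for the
// key serializer and the fileMetadata/fileContent dictionary calls.
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.ReferenceCounted;
import java.nio.charset.StandardCharsets;
import reactor.core.publisher.Mono;

public class RefCountSketch {

	// Hypothetical key serializer: allocates a buffer with refCnt == 1.
	static ByteBuf serializeKey(String url) {
		return Unpooled.copiedBuffer(url, StandardCharsets.UTF_8);
	}

	// Hypothetical consumer that takes ownership of one reference and
	// releases it when done, mirroring what the dictionary does with keys.
	static Mono<String> lookup(ByteBuf key) {
		return Mono.fromCallable(() -> {
			try {
				return key.toString(StandardCharsets.UTF_8);
			} finally {
				key.release(); // drop the reference the consumer owns
			}
		});
	}

	public static void main(String[] args) {
		// Mono.using ties the buffer's lifetime to the subscription: the
		// supplier runs only on subscribe, retain() hands the consumer its
		// own reference (refCnt 1 -> 2), and ReferenceCounted::release drops
		// the original reference on completion, error, or cancellation.
		String value = Mono
				.using(
						() -> serializeKey("http://example.org"),
						key -> lookup(key.retain()),
						ReferenceCounted::release
				)
				.block();
		System.out.println(value); // refCnt is back to 0: no leak
	}
}

The retain() before the consumer sees the buffer means the consumer's release and the using cleanup each drop exactly one reference, so they cannot double-free. Leaks of this kind can be surfaced in tests by running Netty with -Dio.netty.leakDetection.level=paranoid.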