// strangedb-core/src/main/java/it/cavallium/strangedb/database/references/DatabaseReferencesMetadata....
package it.cavallium.strangedb.database.references;

import it.cavallium.strangedb.database.IReferencesMetadata;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousFileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import static it.cavallium.strangedb.database.IBlocksMetadata.EMPTY_BLOCK_ID;
import static it.cavallium.strangedb.database.IBlocksMetadata.ERROR_BLOCK_ID;
import static it.cavallium.strangedb.database.IDatabase.DISK_BLOCK_SIZE;

public class DatabaseReferencesMetadata implements IReferencesMetadata {
public static final byte ERRORED_CLEANER = (byte) -1;
public static final byte BLANK_DATA_CLEANER = (byte) -2;
public static final ReferenceInfo NONEXISTENT_REFERENCE_INFO = new ReferenceInfo(ERRORED_CLEANER, ERROR_BLOCK_ID);
private static final int REF_META_BYTES_COUNT = Long.BYTES + Byte.BYTES;
public static final int REF_META_READS_AT_EVERY_READ = (DISK_BLOCK_SIZE - DISK_BLOCK_SIZE % REF_META_BYTES_COUNT) / REF_META_BYTES_COUNT;
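
// On-disk layout: each reference record is REF_META_BYTES_COUNT = 9 bytes,
// an 8-byte block id followed by a 1-byte cleaner id. Reads are batched to
// the largest whole number of records that fits in one disk block: for
// example, assuming DISK_BLOCK_SIZE is 4096, (4096 - 4096 % 9) / 9 = 455
// records are fetched per read.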
private final AsynchronousFileChannel metaFileChannel;
private final DatabaseReferencesMetadataCache cache;
private final AtomicLong firstFreeReference;
private final AtomicLong lastWrittenReference;
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(false);
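
// The next free reference id is derived from the metadata file size: the file
// holds exactly size / REF_META_BYTES_COUNT records.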
public DatabaseReferencesMetadata(Path refMetaFile) throws IOException {
metaFileChannel = AsynchronousFileChannel.open(refMetaFile, StandardOpenOption.READ, StandardOpenOption.WRITE);
firstFreeReference = new AtomicLong(metaFileChannel.size() / REF_META_BYTES_COUNT);
lastWrittenReference = new AtomicLong(firstFreeReference.get() - 1);
this.cache = new DatabaseReferencesMetadataCache(this::writeReferenceToDisk);
}
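
// Usage sketch (hypothetical caller; the path and blockId are illustrative):
//   IReferencesMetadata meta = new DatabaseReferencesMetadata(Paths.get("refs.meta"));
//   long ref = meta.newReference(blockId);      // map a new reference to a block
//   long block = meta.getReferenceBlockId(ref); // resolve it back
//   meta.close();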
@Override
public long getReferenceBlockId(long reference) throws IOException {
lock.readLock().lock();
try {
return getReferenceBlockId_(reference);
} finally {
lock.readLock().unlock();
}
}
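
// Resolves a reference to its block id. On a cache miss, a whole window of
// records around the reference is read in one request and bulk-loaded into
// the cache (see REF_META_READS_AT_EVERY_READ).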
private long getReferenceBlockId_(long reference) throws IOException {
long firstFreeReference = this.firstFreeReference.get();
if (reference >= firstFreeReference) {
return EMPTY_BLOCK_ID;
}
long block;
if ((block = cache.getBlock(reference)) != ERROR_BLOCK_ID) {
return block;
}
long position = reference * REF_META_BYTES_COUNT;
int size = REF_META_READS_AT_EVERY_READ * REF_META_BYTES_COUNT;
// Clamp the read window so it never extends past the last allocated reference.
if (reference + (size - 1) / REF_META_BYTES_COUNT >= firstFreeReference) {
size = (int) ((firstFreeReference - reference) * REF_META_BYTES_COUNT);
}
int referencesCount = size / REF_META_BYTES_COUNT;
if (referencesCount < 1) {
throw new IOException("Trying to read <1 references");
}
ByteBuffer buffer = ByteBuffer.allocate(size);
try {
metaFileChannel.read(buffer, position).get();
} catch (InterruptedException e) {
throw new IOException(e);
} catch (ExecutionException e) {
throw new IOException(e.getCause());
}
buffer.flip();
if (buffer.limit() % REF_META_BYTES_COUNT != 0 || buffer.limit() < REF_META_BYTES_COUNT) {
throw new IOException("The buffer is smaller than the data requested.");
} else if (buffer.limit() != size) {
// A short read is tolerated as long as it contains whole records.
size = buffer.limit();
referencesCount = size / REF_META_BYTES_COUNT;
}
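// Decode every record in the window and push the whole batch into the cache,
// so neighbouring references are resolved without further disk reads.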
long[] allReferences = new long[referencesCount];
byte[] allCleaners = new byte[referencesCount];
long[] allBlocks = new long[referencesCount];
block = EMPTY_BLOCK_ID;
for (int delta = 0; delta < referencesCount; delta++) {
long referenceToLoad = reference + delta;
long currentBlock = buffer.getLong();
byte cleanerId = buffer.get();
// An all-ones block id is the on-disk filler sentinel: treat it as empty.
if (currentBlock != 0xFFFFFFFFFFFFFFFFL) {
allReferences[delta] = referenceToLoad;
allCleaners[delta] = cleanerId;
allBlocks[delta] = currentBlock;
if (referenceToLoad == reference) {
block = currentBlock;
}
} else {
allReferences[delta] = referenceToLoad;
allCleaners[delta] = cleanerId;
allBlocks[delta] = EMPTY_BLOCK_ID;
if (referenceToLoad == reference) {
block = EMPTY_BLOCK_ID;
}
}
}
cache.putAll(allReferences, allCleaners, allBlocks);
return block;
}
/**
 * This method is <b>SLOW</b>! Use it only from the cleaner.
 *
 * @param reference the reference to look up
 * @return the cleaner id and block id recorded for {@code reference}
 * @throws IOException if the metadata file cannot be read
 */
@Deprecated
@Override
public ReferenceInfo getCleanReference(long reference) throws IOException {
lock.readLock().lock();
try {
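// Warm the cache for this reference, then return the full record from it.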
getReferenceBlockId_(reference);
return cache.get(reference);
} finally {
lock.readLock().unlock();
}
}
@Override
public long newReference(long blockId) throws IOException {
lock.writeLock().lock();
try {
long newReference = firstFreeReference.getAndIncrement();
// Publish the mapping before releasing the lock, so a concurrent read can
// never see an allocated reference that is still missing from the cache.
cache.put(newReference, BLANK_DATA_CLEANER, blockId);
return newReference;
} finally {
lock.writeLock().unlock();
}
}
@Override
public void editReference(long reference, byte cleanerId, long blockId) throws IOException {
lock.writeLock().lock();
try {
cache.put(reference, cleanerId, blockId);
} finally {
lock.writeLock().unlock();
}
}
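
// Deletion overwrites the record with the NONEXISTENT sentinel (ERRORED_CLEANER
// + ERROR_BLOCK_ID); firstFreeReference only grows, so the slot is not reused.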
@Override
public void deleteReference(long reference) throws IOException {
lock.writeLock().lock();
try {
cache.put(reference, NONEXISTENT_REFERENCE_INFO.getCleanerId(), NONEXISTENT_REFERENCE_INFO.getBlockId());
} finally {
lock.writeLock().unlock();
}
}
@Override
public void close() throws IOException {
lock.writeLock().lock();
try {
cache.close();
metaFileChannel.close();
} finally {
lock.writeLock().unlock();
}
}
@Override
public long getFirstFreeReference() {
lock.readLock().lock();
try {
return firstFreeReference.get();
} finally {
lock.readLock().unlock();
}
}
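
// Write-back callback registered with the cache: flushes a single reference
// record (block id + cleaner id) to its fixed offset in the metadata file.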
private void writeReferenceToDisk(long reference, byte cleanerId, long blockId) throws IOException {
if (cleanerId == ERRORED_CLEANER) {
throw new IOException("Passing a cleaner with the id of ERRORED_CLIENT");
}
ByteBuffer data = ByteBuffer.allocate(REF_META_BYTES_COUNT);
data.putLong(blockId);
data.put(cleanerId);
data.flip();
try {
metaFileChannel.write(data, reference * REF_META_BYTES_COUNT).get();
} catch (InterruptedException e) {
throw new IOException(e);
} catch (ExecutionException e) {
throw new IOException(e.getCause());
}
}
/*
// Alternative write-back (kept for reference): pads the gap between the last
// written reference and this one with ERROR_BLOCK_ID filler records, so the
// file never contains unwritten holes.
private void writeReferenceToDisk(long reference, byte cleanerId, long blockId) throws IOException {
if (cleanerId == ERRORED_CLEANER) {
throw new IOException("Passing a cleaner with the id of ERRORED_CLEANER");
}
long firstReferenceToWrite = 1 + lastWrittenReference.getAndUpdate((lastWrittenReferenceVal) -> reference > lastWrittenReferenceVal ? reference : lastWrittenReferenceVal);
if (firstReferenceToWrite > reference) {
firstReferenceToWrite = reference;
}
ByteBuffer data = ByteBuffer.allocate((int) ((reference + 1 - firstReferenceToWrite) * REF_META_BYTES_COUNT));
for (long i = firstReferenceToWrite; i < reference; i++) {
data.putLong(ERROR_BLOCK_ID);
data.put(ERRORED_CLEANER);
}
data.putLong(blockId);
data.put(cleanerId);
data.flip();
try {
metaFileChannel.write(data, firstReferenceToWrite * REF_META_BYTES_COUNT).get();
} catch (InterruptedException e) {
throw new IOException(e);
} catch (ExecutionException e) {
throw new IOException(e.getCause());
}
}
*/
}