|
|
|
package it.cavallium.strangedb.database.references;
|
|
|
|
|
|
|
|
import it.cavallium.strangedb.database.IReferencesMetadata;
|
|
|
|
|
|
|
|
import java.io.IOException;
|
|
|
|
import java.nio.ByteBuffer;
|
|
|
|
import java.nio.channels.AsynchronousFileChannel;
|
|
|
|
import java.nio.file.Path;
|
|
|
|
import java.nio.file.StandardOpenOption;
|
|
|
|
import java.util.Arrays;
|
|
|
|
import java.util.concurrent.CompletableFuture;
|
|
|
|
import java.util.concurrent.ExecutionException;
|
|
|
|
import java.util.concurrent.Future;
|
|
|
|
import java.util.concurrent.locks.ReentrantLock;
|
|
|
|
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
|
|
|
|
|
|
|
import static it.cavallium.strangedb.database.IBlocksMetadata.EMPTY_BLOCK_ID;
|
|
|
|
import static it.cavallium.strangedb.database.IBlocksMetadata.ERROR_BLOCK_ID;
|
|
|
|
import static it.cavallium.strangedb.database.IDatabase.DISK_BLOCK_SIZE;
|
|
|
|
|
|
|
|
public class DatabaseReferencesMetadata implements IReferencesMetadata {
|
2019-04-20 15:54:40 +02:00
|
|
|
public static final byte ERRORED_CLEANER = (byte) -1;
|
|
|
|
public static final byte BLANK_DATA_CLEANER = (byte) -2;
|
|
|
|
public static final ReferenceInfo NONEXISTENT_REFERENCE_INFO = new ReferenceInfo(ERRORED_CLEANER, ERROR_BLOCK_ID);
|
|
|
|
private static final int REF_META_BYTES_COUNT = Long.BYTES + Integer.BYTES;
|
|
|
|
public static final int REF_META_READS_AT_EVERY_READ = (DISK_BLOCK_SIZE - DISK_BLOCK_SIZE % REF_META_BYTES_COUNT) / REF_META_BYTES_COUNT;
|
|
|
|
|
2019-03-07 16:19:53 +01:00
|
|
|
private final AsynchronousFileChannel metaFileChannel;
|
|
|
|
private final DatabaseReferencesMetadataCache cache;
|
|
|
|
private long firstFreeReference;
|
2019-04-20 15:54:40 +02:00
|
|
|
private long lastWrittenReference;
|
|
|
|
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(false);
|
2019-03-07 16:19:53 +01:00
|
|
|
|
|
|
|
public DatabaseReferencesMetadata(Path refMetaFile) throws IOException {
|
|
|
|
metaFileChannel = AsynchronousFileChannel.open(refMetaFile, StandardOpenOption.READ, StandardOpenOption.WRITE);
|
|
|
|
firstFreeReference = metaFileChannel.size() / REF_META_BYTES_COUNT;
|
2019-04-20 15:54:40 +02:00
|
|
|
lastWrittenReference = firstFreeReference - 1;
|
2019-03-07 16:19:53 +01:00
|
|
|
this.cache = new DatabaseReferencesMetadataCache(this::writeReferenceToDisk);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2019-04-20 15:54:40 +02:00
|
|
|
public long getReferenceBlockId(long reference) throws IOException {
|
|
|
|
lock.readLock().lock();
|
|
|
|
try {
|
|
|
|
return getReferenceBlockId_(reference);
|
|
|
|
} finally {
|
|
|
|
lock.readLock().unlock();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private long getReferenceBlockId_(long reference) throws IOException {
|
2019-03-07 16:19:53 +01:00
|
|
|
if (reference >= firstFreeReference) {
|
|
|
|
return EMPTY_BLOCK_ID;
|
|
|
|
}
|
|
|
|
long block;
|
2019-04-20 15:54:40 +02:00
|
|
|
if ((block = cache.getBlock(reference)) != ERROR_BLOCK_ID) {
|
2019-03-07 16:19:53 +01:00
|
|
|
return block;
|
|
|
|
}
|
2019-04-20 15:54:40 +02:00
|
|
|
long position = reference * REF_META_BYTES_COUNT;
|
|
|
|
int size = REF_META_READS_AT_EVERY_READ * REF_META_BYTES_COUNT;
|
|
|
|
if (reference + (size - 1) / REF_META_BYTES_COUNT >= firstFreeReference) {
|
|
|
|
size = (int) ((firstFreeReference - reference) * REF_META_BYTES_COUNT);
|
|
|
|
}
|
|
|
|
int referencesCount = size / REF_META_BYTES_COUNT;
|
|
|
|
|
|
|
|
ByteBuffer buffer = ByteBuffer.allocate(size);
|
2019-03-07 16:19:53 +01:00
|
|
|
try {
|
2019-04-20 15:54:40 +02:00
|
|
|
metaFileChannel.read(buffer, position).get();
|
2019-03-07 16:19:53 +01:00
|
|
|
} catch (InterruptedException e) {
|
|
|
|
throw new IOException(e);
|
|
|
|
} catch (ExecutionException e) {
|
|
|
|
throw new IOException(e.getCause());
|
|
|
|
}
|
|
|
|
buffer.flip();
|
2019-04-20 15:54:40 +02:00
|
|
|
|
|
|
|
if (referencesCount < 1) {
|
|
|
|
throw new IOException("Trying to read <1 references");
|
|
|
|
}
|
|
|
|
if (buffer.limit() % REF_META_BYTES_COUNT != 0 || buffer.limit() < REF_META_BYTES_COUNT) {
|
|
|
|
throw new IOException("The buffer is smaller than the data requested.");
|
|
|
|
} else if (buffer.limit() != size) {
|
|
|
|
size = buffer.limit();
|
|
|
|
referencesCount = size / REF_META_BYTES_COUNT;
|
|
|
|
}
|
|
|
|
|
|
|
|
long[] allReferences = new long[referencesCount];
|
|
|
|
byte[] allCleaners = new byte[referencesCount];
|
|
|
|
long[] allBlocks = new long[referencesCount];
|
|
|
|
|
|
|
|
block = EMPTY_BLOCK_ID;
|
|
|
|
for (int delta = 0; delta < referencesCount; delta++) {
|
|
|
|
long referenceToLoad = reference + delta;
|
|
|
|
long currentBlock = buffer.getLong();
|
|
|
|
byte cleanerId = (byte) buffer.getInt();
|
|
|
|
if (buffer.limit() != 0 && currentBlock != 0xFFFFFFFFFFFFFFFFL) {
|
|
|
|
allReferences[delta] = referenceToLoad;
|
|
|
|
allCleaners[delta] = cleanerId;
|
|
|
|
allBlocks[delta] = currentBlock;
|
|
|
|
if (referenceToLoad == reference) {
|
|
|
|
block = currentBlock;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
allReferences[delta] = referenceToLoad;
|
|
|
|
allCleaners[delta] = cleanerId;
|
|
|
|
allBlocks[delta] = EMPTY_BLOCK_ID;
|
|
|
|
if (referenceToLoad == reference) {
|
|
|
|
block = EMPTY_BLOCK_ID;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (int delta = 0; delta < referencesCount; delta++) {
|
|
|
|
if (allCleaners[delta] == 0) {
|
|
|
|
System.out.println("ro");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
cache.putAll(allReferences, allCleaners, allBlocks);
|
|
|
|
return block;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This method is <b>SLOW</b>! Use this only for the cleaner
|
|
|
|
* @param reference reference
|
|
|
|
* @return
|
|
|
|
* @throws IOException
|
|
|
|
*/
|
|
|
|
@Deprecated
|
|
|
|
@Override
|
|
|
|
public ReferenceInfo getCleanReference(long reference) throws IOException {
|
|
|
|
lock.readLock().lock();
|
|
|
|
try {
|
|
|
|
getReferenceBlockId_(reference);
|
|
|
|
return cache.get(reference);
|
|
|
|
} finally {
|
|
|
|
lock.readLock().unlock();
|
2019-03-07 16:19:53 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long newReference(long blockId) throws IOException {
|
2019-04-20 15:54:40 +02:00
|
|
|
lock.writeLock().lock();
|
|
|
|
try {
|
|
|
|
long newReference = firstFreeReference++;
|
|
|
|
cache.put(newReference, BLANK_DATA_CLEANER, blockId);
|
|
|
|
return newReference;
|
|
|
|
} finally {
|
|
|
|
lock.writeLock().unlock();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void editReference(long reference, byte cleanerId, long blockId) throws IOException {
|
|
|
|
lock.writeLock().lock();
|
|
|
|
try {
|
|
|
|
cache.put(reference, cleanerId, blockId);
|
|
|
|
} finally {
|
|
|
|
lock.writeLock().unlock();
|
|
|
|
}
|
2019-03-07 16:19:53 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2019-04-20 15:54:40 +02:00
|
|
|
public void deleteReference(long reference) throws IOException {
|
|
|
|
lock.writeLock().lock();
|
|
|
|
try {
|
|
|
|
cache.put(reference, NONEXISTENT_REFERENCE_INFO.getCleanerId(), NONEXISTENT_REFERENCE_INFO.getBlockId());
|
|
|
|
} finally {
|
|
|
|
lock.writeLock().unlock();
|
|
|
|
}
|
2019-03-07 16:19:53 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void close() throws IOException {
|
2019-04-20 15:54:40 +02:00
|
|
|
lock.writeLock().lock();
|
|
|
|
try {
|
|
|
|
cache.close();
|
|
|
|
metaFileChannel.close();
|
|
|
|
} finally {
|
|
|
|
lock.writeLock().unlock();
|
|
|
|
}
|
2019-03-07 16:19:53 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long getFirstFreeReference() {
|
2019-04-20 15:54:40 +02:00
|
|
|
synchronized (lock.readLock()) {
|
|
|
|
return firstFreeReference;
|
|
|
|
}
|
2019-03-07 16:19:53 +01:00
|
|
|
}
|
|
|
|
|
2019-04-20 15:54:40 +02:00
|
|
|
private Future<Integer> writeReferenceToDisk(long reference, byte cleanerId, long blockId) {
|
|
|
|
if (cleanerId == ERRORED_CLEANER) {
|
|
|
|
return CompletableFuture.failedFuture(new IOException("Passing a cleaner with the id of ERRORED_CLIENT"));
|
|
|
|
}
|
2019-03-07 16:19:53 +01:00
|
|
|
ByteBuffer data = ByteBuffer.allocate(REF_META_BYTES_COUNT);
|
|
|
|
data.putLong(blockId);
|
2019-04-20 15:54:40 +02:00
|
|
|
data.putInt(cleanerId & 0xFF);
|
2019-03-07 16:19:53 +01:00
|
|
|
data.flip();
|
2019-04-20 15:54:40 +02:00
|
|
|
while (lastWrittenReference < reference - 1) {
|
|
|
|
ByteBuffer emptyData = ByteBuffer.allocate(REF_META_BYTES_COUNT);
|
|
|
|
emptyData.putLong(ERROR_BLOCK_ID);
|
|
|
|
emptyData.putInt(ERRORED_CLEANER & 0xFF);
|
|
|
|
emptyData.flip();
|
|
|
|
metaFileChannel.write(emptyData, ++lastWrittenReference * REF_META_BYTES_COUNT);
|
|
|
|
}
|
|
|
|
if (reference > lastWrittenReference) {
|
|
|
|
lastWrittenReference = reference;
|
|
|
|
}
|
2019-03-07 16:19:53 +01:00
|
|
|
return metaFileChannel.write(data, reference * REF_META_BYTES_COUNT);
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|