package it.cavallium.strangedb.database.references;

import it.cavallium.strangedb.database.IReferencesMetadata;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousFileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import static it.cavallium.strangedb.database.IBlocksMetadata.EMPTY_BLOCK_ID;
import static it.cavallium.strangedb.database.IBlocksMetadata.ERROR_BLOCK_ID;
import static it.cavallium.strangedb.database.IDatabase.DISK_BLOCK_SIZE;

/**
 * Reference metadata stored on disk as fixed-size entries of {blockId (long), cleanerId (int)},
 * fronted by a cache that flushes through {@link #writeReferenceToDisk(long, byte, long)}.
 */
public class DatabaseReferencesMetadata implements IReferencesMetadata {
	public static final byte ERRORED_CLEANER = (byte) -1;
	public static final byte BLANK_DATA_CLEANER = (byte) -2;
	public static final ReferenceInfo NONEXISTENT_REFERENCE_INFO = new ReferenceInfo(ERRORED_CLEANER, ERROR_BLOCK_ID);
	private static final int REF_META_BYTES_COUNT = Long.BYTES + Integer.BYTES;
	/** How many whole entries fit in one disk block; used as the batch size for reads. */
	public static final int REF_META_READS_AT_EVERY_READ = (DISK_BLOCK_SIZE - DISK_BLOCK_SIZE % REF_META_BYTES_COUNT) / REF_META_BYTES_COUNT;

	private final AsynchronousFileChannel metaFileChannel;
	private final DatabaseReferencesMetadataCache cache;
	private long firstFreeReference;
	private long lastWrittenReference;
	private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(false);

	public DatabaseReferencesMetadata(Path refMetaFile) throws IOException {
		metaFileChannel = AsynchronousFileChannel.open(refMetaFile, StandardOpenOption.READ, StandardOpenOption.WRITE);
		firstFreeReference = metaFileChannel.size() / REF_META_BYTES_COUNT;
		lastWrittenReference = firstFreeReference - 1;
		this.cache = new DatabaseReferencesMetadataCache(this::writeReferenceToDisk);
	}

	@Override
	public long getReferenceBlockId(long reference) throws IOException {
		lock.readLock().lock();
		try {
			return getReferenceBlockId_(reference);
		} finally {
			lock.readLock().unlock();
		}
	}

	private long getReferenceBlockId_(long reference) throws IOException {
		if (reference >= firstFreeReference) {
			return EMPTY_BLOCK_ID;
		}
		long block;
		if ((block = cache.getBlock(reference)) != ERROR_BLOCK_ID) {
			return block;
		}
		// Cache miss: batch-read up to REF_META_READS_AT_EVERY_READ entries starting at this reference.
		long position = reference * REF_META_BYTES_COUNT;
		int size = REF_META_READS_AT_EVERY_READ * REF_META_BYTES_COUNT;
		// Clamp the read so it doesn't go past the last allocated reference.
		if (reference + (size - 1) / REF_META_BYTES_COUNT >= firstFreeReference) {
			size = (int) ((firstFreeReference - reference) * REF_META_BYTES_COUNT);
		}
		int referencesCount = size / REF_META_BYTES_COUNT;
		ByteBuffer buffer = ByteBuffer.allocate(size);
		try {
			metaFileChannel.read(buffer, position).get();
		} catch (InterruptedException e) {
			throw new IOException(e);
		} catch (ExecutionException e) {
			throw new IOException(e.getCause());
		}
		buffer.flip();
		if (referencesCount < 1) {
			throw new IOException("Trying to read <1 references");
		}
		if (buffer.limit() % REF_META_BYTES_COUNT != 0 || buffer.limit() < REF_META_BYTES_COUNT) {
			throw new IOException("The buffer is smaller than the data requested.");
		} else if (buffer.limit() != size) {
			// The channel returned fewer bytes than requested: shrink the batch accordingly.
			size = buffer.limit();
			referencesCount = size / REF_META_BYTES_COUNT;
		}
		long[] allReferences = new long[referencesCount];
		byte[] allCleaners = new byte[referencesCount];
		long[] allBlocks = new long[referencesCount];
		block = EMPTY_BLOCK_ID;
		for (int delta = 0; delta < referencesCount; delta++) {
			long referenceToLoad = reference + delta;
			long currentBlock = buffer.getLong();
			byte cleanerId = (byte) buffer.getInt();
			if (buffer.limit() != 0 && currentBlock != 0xFFFFFFFFFFFFFFFFL) {
				allReferences[delta] = referenceToLoad;
				allCleaners[delta] = cleanerId;
				allBlocks[delta] = currentBlock;
				if (referenceToLoad == reference) {
					block = currentBlock;
				}
			} else {
				// Treat an all-ones block id as an empty slot.
				allReferences[delta] = referenceToLoad;
				allCleaners[delta] = cleanerId;
				allBlocks[delta] = EMPTY_BLOCK_ID;
				if (referenceToLoad == reference) {
					block = EMPTY_BLOCK_ID;
				}
			}
		}
		cache.putAll(allReferences, allCleaners, allBlocks);
		return block;
	}

	/**
	 * This method is slow! Use it only for the cleaner.
	 *
	 * @param reference reference
	 * @return the {@link ReferenceInfo} of the reference
	 * @throws IOException if the reference metadata can't be read
	 */
	@Deprecated
	@Override
	public ReferenceInfo getCleanReference(long reference) throws IOException {
		lock.readLock().lock();
		try {
			getReferenceBlockId_(reference);
			return cache.get(reference);
		} finally {
			lock.readLock().unlock();
		}
	}

	@Override
	public long newReference(long blockId) throws IOException {
		lock.writeLock().lock();
		try {
			long newReference = firstFreeReference++;
			cache.put(newReference, BLANK_DATA_CLEANER, blockId);
			return newReference;
		} finally {
			lock.writeLock().unlock();
		}
	}

	@Override
	public void editReference(long reference, byte cleanerId, long blockId) throws IOException {
		lock.writeLock().lock();
		try {
			cache.put(reference, cleanerId, blockId);
		} finally {
			lock.writeLock().unlock();
		}
	}

	@Override
	public void deleteReference(long reference) throws IOException {
		lock.writeLock().lock();
		try {
			cache.put(reference, NONEXISTENT_REFERENCE_INFO.getCleanerId(), NONEXISTENT_REFERENCE_INFO.getBlockId());
		} finally {
			lock.writeLock().unlock();
		}
	}

	@Override
	public void close() throws IOException {
		lock.writeLock().lock();
		try {
			cache.close();
			metaFileChannel.close();
		} finally {
			lock.writeLock().unlock();
		}
	}

	@Override
	public long getFirstFreeReference() {
		lock.readLock().lock();
		try {
			return firstFreeReference;
		} finally {
			lock.readLock().unlock();
		}
	}

	private Future<Integer> writeReferenceToDisk(long reference, byte cleanerId, long blockId) {
		if (cleanerId == ERRORED_CLEANER) {
			return CompletableFuture.failedFuture(new IOException("Passing a cleaner with the id of ERRORED_CLEANER"));
		}
		ByteBuffer data = ByteBuffer.allocate(REF_META_BYTES_COUNT);
		data.putLong(blockId);
		data.putInt(cleanerId & 0xFF);
		data.flip();
		// Fill any gap between the last written reference and this one with errored placeholder entries.
		while (lastWrittenReference < reference - 1) {
			ByteBuffer emptyData = ByteBuffer.allocate(REF_META_BYTES_COUNT);
			emptyData.putLong(ERROR_BLOCK_ID);
			emptyData.putInt(ERRORED_CLEANER & 0xFF);
			emptyData.flip();
			metaFileChannel.write(emptyData, ++lastWrittenReference * REF_META_BYTES_COUNT);
		}
		if (reference > lastWrittenReference) {
			lastWrittenReference = reference;
		}
		return metaFileChannel.write(data, reference * REF_META_BYTES_COUNT);
	}
}
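
/*
 * Usage sketch, assuming DatabaseReferencesMetadataCache and ReferenceInfo behave as implied
 * by the calls above; the file path, cleaner id and block ids are illustrative only.
 *
 *   DatabaseReferencesMetadata meta = new DatabaseReferencesMetadata(Path.of("references.meta"));
 *   long ref = meta.newReference(42L);              // associate a fresh reference with block 42
 *   long blockId = meta.getReferenceBlockId(ref);   // 42, served from the cache or read from disk
 *   meta.editReference(ref, (byte) 1, 43L);         // repoint the reference to block 43
 *   meta.deleteReference(ref);                      // marks it with ERRORED_CLEANER / ERROR_BLOCK_ID
 *   meta.close();                                   // closes the cache and the file channel
 */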