parent
d047d1b24a
commit
7fd7497464
15 changed files with 307 additions and 754 deletions
@ -1,53 +0,0 @@ |
||||
package it.cavallium.strangedb.database; |
||||
|
||||
import it.cavallium.strangedb.database.blocks.BlockInfo; |
||||
|
||||
import java.io.IOException; |
||||
|
||||
/**
 * Metadata registry for data blocks: maps a block id to its on-disk
 * position and size ({@link BlockInfo}), and allocates new ids.
 */
public interface IBlocksMetadata {
	/** Sentinel id for a zero-length block; no data is stored on disk for it. */
	long EMPTY_BLOCK_ID = -1;
	/** Sentinel id representing an invalid/errored block lookup. */
	long ERROR_BLOCK_ID = -2;
	/** Shared metadata returned for the empty block (index 0, size 0). */
	BlockInfo EMPTY_BLOCK_INFO = new BlockInfo(0, 0);

	/**
	 * Get block info
	 * @param blockId block id
	 * @return block metadata
	 */
	BlockInfo getBlockInfo(long blockId) throws IOException;

	/**
	 * New empty block info
	 * @return block id — always {@link #EMPTY_BLOCK_ID}, since an empty block needs no storage
	 */
	default long newBlock() {
		return EMPTY_BLOCK_ID;
	}

	/**
	 * Set block info
	 * @param index block index
	 * @param size block size
	 * @return block id
	 */
	long newBlock(long index, int size) throws IOException;

	/**
	 * Set block info
	 * @param blockInfo block info
	 * @return block id
	 */
	default long newBlock(BlockInfo blockInfo) throws IOException {
		return this.newBlock(blockInfo.getIndex(), blockInfo.getSize());
	}

	/**
	 * Close file
	 */
	void close() throws IOException;

	/**
	 * Get total count of blocks
	 * @return total number of block ids allocated so far
	 */
	long getTotalBlocksCount();
}
@ -1,44 +0,0 @@ |
||||
package it.cavallium.strangedb.database.blocks; |
||||
|
||||
import java.util.Objects; |
||||
import java.util.StringJoiner; |
||||
|
||||
public class BlockInfo { |
||||
private final long index; |
||||
private final int size; |
||||
|
||||
public BlockInfo(long index, int size) { |
||||
this.index = index; |
||||
this.size = size; |
||||
} |
||||
|
||||
public long getIndex() { |
||||
return index; |
||||
} |
||||
|
||||
public int getSize() { |
||||
return size; |
||||
} |
||||
|
||||
@Override |
||||
public boolean equals(Object o) { |
||||
if (this == o) return true; |
||||
if (o == null || getClass() != o.getClass()) return false; |
||||
BlockInfo blockInfo = (BlockInfo) o; |
||||
return index == blockInfo.index && |
||||
size == blockInfo.size; |
||||
} |
||||
|
||||
@Override |
||||
public int hashCode() { |
||||
return Objects.hash(index, size); |
||||
} |
||||
|
||||
@Override |
||||
public String toString() { |
||||
return new StringJoiner(", ", BlockInfo.class.getSimpleName() + "[", "]") |
||||
.add("index=" + index) |
||||
.add("size=" + size) |
||||
.toString(); |
||||
} |
||||
} |
@ -1,80 +0,0 @@ |
||||
package it.cavallium.strangedb.database.blocks; |
||||
|
||||
import it.cavallium.strangedb.database.DatabaseFileIO; |
||||
import it.cavallium.strangedb.database.IBlocksIO; |
||||
import it.cavallium.strangedb.database.IBlocksMetadata; |
||||
|
||||
import java.io.IOException; |
||||
import java.nio.ByteBuffer; |
||||
import java.util.concurrent.CompletionException; |
||||
import java.util.concurrent.ExecutionException; |
||||
import java.util.concurrent.Future; |
||||
import java.util.concurrent.locks.ReentrantLock; |
||||
import java.util.concurrent.locks.ReentrantReadWriteLock; |
||||
|
||||
import static it.cavallium.strangedb.database.IBlocksMetadata.EMPTY_BLOCK_ID; |
||||
import static it.cavallium.strangedb.database.IBlocksMetadata.ERROR_BLOCK_ID; |
||||
|
||||
public class DatabaseBlocksIO implements IBlocksIO { |
||||
|
||||
private final DatabaseFileIO fileIO; |
||||
private final IBlocksMetadata blocksMetadata; |
||||
|
||||
public DatabaseBlocksIO(DatabaseFileIO fileIO, IBlocksMetadata blocksMetadata) { |
||||
this.fileIO = fileIO; |
||||
this.blocksMetadata = blocksMetadata; |
||||
} |
||||
|
||||
@Override |
||||
public long newBlock(int size, ByteBuffer data) throws IOException { |
||||
if (size == 0) { |
||||
return EMPTY_BLOCK_ID; |
||||
} |
||||
if (size < 0) { |
||||
throw new IOException("Trying to create a block with size " + size); |
||||
} |
||||
if (data.limit() < size) { |
||||
throw new IOException("Trying to create a block with size " + size + " but with a buffer of size " + data.limit()); |
||||
} |
||||
long index = fileIO.writeAtEnd(size, data); |
||||
return blocksMetadata.newBlock(index, size); |
||||
} |
||||
|
||||
@Override |
||||
public ByteBuffer readBlock(long blockId) throws IOException { |
||||
if (blockId == EMPTY_BLOCK_ID) { |
||||
return ByteBuffer.wrap(new byte[0]); |
||||
} |
||||
if (blockId == ERROR_BLOCK_ID) { |
||||
throw new IOException("Errored block id"); |
||||
} |
||||
if (blockId < 0) { |
||||
throw new IOException("Block id " + blockId + " is not valid"); |
||||
} |
||||
BlockInfo blockInfo = blocksMetadata.getBlockInfo(blockId); |
||||
return fileIO.readAt(blockInfo.getIndex(), blockInfo.getSize()); |
||||
} |
||||
|
||||
public ByteBuffer readBlockSizeAndLastElementOfReferencesList(long blockId) throws IOException { |
||||
if (blockId == EMPTY_BLOCK_ID) { |
||||
return ByteBuffer.wrap(new byte[0]); |
||||
} |
||||
if (blockId == ERROR_BLOCK_ID) { |
||||
throw new IOException("Errored block id"); |
||||
} |
||||
if (blockId < 0) { |
||||
throw new IOException("Block id " + blockId + " is not valid"); |
||||
} |
||||
BlockInfo blockInfo = blocksMetadata.getBlockInfo(blockId); |
||||
if (blockInfo.getSize() >= Integer.BYTES * 2 + Long.BYTES) { |
||||
return fileIO.readAt(blockInfo.getIndex() + blockInfo.getSize() - (Integer.BYTES + Long.BYTES), Integer.BYTES + Long.BYTES); |
||||
} else { |
||||
return fileIO.readAt(blockInfo.getIndex(), blockInfo.getSize()); |
||||
} |
||||
} |
||||
|
||||
@Override |
||||
public void close() { |
||||
|
||||
} |
||||
} |
@ -1,128 +0,0 @@ |
||||
package it.cavallium.strangedb.database.blocks; |
||||
|
||||
|
||||
import it.cavallium.strangedb.database.IBlocksMetadata; |
||||
|
||||
import java.io.IOException; |
||||
import java.nio.ByteBuffer; |
||||
import java.nio.channels.AsynchronousFileChannel; |
||||
import java.nio.file.Path; |
||||
import java.nio.file.StandardOpenOption; |
||||
import java.util.concurrent.ExecutionException; |
||||
import java.util.concurrent.atomic.AtomicLong; |
||||
|
||||
import static it.cavallium.strangedb.database.IDatabase.DISK_BLOCK_SIZE; |
||||
|
||||
/**
 * Block metadata store persisted in a dedicated metadata file, fronted by a
 * write-back cache. Each block occupies one fixed-size record
 * (long index + int size) at offset {@code blockId * BLOCK_META_BYTES_COUNT}.
 */
public class DatabaseBlocksMetadata implements IBlocksMetadata {
	/** Sentinel returned by the cache when a block id has no cached entry. */
	public static final BlockInfo ERROR_BLOCK_INFO = new BlockInfo(-2, 0);
	/** On-disk footprint of one block's metadata record: long index + int size. */
	private static final int BLOCK_META_BYTES_COUNT = Long.BYTES + Integer.BYTES;
	/** Number of records fetched per disk read; reads are batched to warm the cache. */
	public static final int BLOCK_META_READS_AT_EVERY_READ = (DISK_BLOCK_SIZE - DISK_BLOCK_SIZE % BLOCK_META_BYTES_COUNT) / BLOCK_META_BYTES_COUNT;

	private final AsynchronousFileChannel metaFileChannel;
	private final DatabaseBlocksMetadataCache cache;
	// Next block id to hand out; equals the count of allocated blocks.
	private AtomicLong firstFreeBlock;

	public DatabaseBlocksMetadata(Path metaFile) throws IOException {
		metaFileChannel = AsynchronousFileChannel.open(metaFile, StandardOpenOption.READ, StandardOpenOption.WRITE);
		// One fixed-size record per block, so file length / record size = next free id.
		firstFreeBlock = new AtomicLong(metaFileChannel.size() / BLOCK_META_BYTES_COUNT);
		this.cache = new DatabaseBlocksMetadataCache(this::writeBlockToDisk);
	}

	/**
	 * Resolve a block id to its {@link BlockInfo}, serving from the cache when
	 * possible, otherwise reading a batch of records from disk and caching them.
	 *
	 * @param blockId block id; the EMPTY/ERROR sentinels are handled specially
	 * @throws IOException on read failure or for the error sentinel
	 */
	@Override
	public BlockInfo getBlockInfo(long blockId) throws IOException {
		if (blockId == EMPTY_BLOCK_ID) {
			return EMPTY_BLOCK_INFO;
		}
		if (blockId == ERROR_BLOCK_ID) {
			throw new IOException("Errored block id");
		}
		BlockInfo blockInfo;
		// Fast path: the write-back cache returns ERROR_BLOCK_INFO only on a miss.
		if ((blockInfo = cache.get(blockId)) != ERROR_BLOCK_INFO) {
			return blockInfo;
		}
		long position = blockId * BLOCK_META_BYTES_COUNT;
		int size = BLOCK_META_READS_AT_EVERY_READ * BLOCK_META_BYTES_COUNT;
		long currentFirstFreeBlock = this.firstFreeBlock.get();
		if (blockId > currentFirstFreeBlock) {
			// NOTE(review): blockId == currentFirstFreeBlock is NOT caught here; it
			// falls through with a clamped size of 0 and hits the "<1 blocks"
			// IOException below. Looks like this should be '>=' — TODO confirm.
			return EMPTY_BLOCK_INFO;
		}
		if (blockId + (size - 1) / BLOCK_META_BYTES_COUNT >= currentFirstFreeBlock) {
			// Clamp the batch so it does not extend past the last allocated record.
			size = (int) ((currentFirstFreeBlock - blockId) * BLOCK_META_BYTES_COUNT);
		}
		int blocksCount = size / BLOCK_META_BYTES_COUNT;

		ByteBuffer buffer = ByteBuffer.allocate(size);
		try {
			metaFileChannel.read(buffer, position).get();
		} catch (InterruptedException e) {
			throw new IOException(e);
		} catch (ExecutionException e) {
			// Unwrap the async-channel failure to surface the real I/O cause.
			throw new IOException(e.getCause());
		}
		buffer.flip();

		if (blocksCount < 1) {
			throw new IOException("Trying to read <1 blocks");
		}
		if (buffer.limit() % BLOCK_META_BYTES_COUNT != 0 || buffer.limit() < BLOCK_META_BYTES_COUNT) {
			throw new IOException("The buffer is smaller than the data requested.");
		} else if (buffer.limit() != size) {
			// Short read: shrink the batch to the records that were actually read.
			size = buffer.limit();
			blocksCount = size / BLOCK_META_BYTES_COUNT;
		}

		long[] allBlockIds = new long[blocksCount];
		BlockInfo[] allBlockInfo = new BlockInfo[blocksCount];

		blockInfo = EMPTY_BLOCK_INFO;
		// Decode each (long index, int size) record; remember the requested one.
		for (int delta = 0; delta < blocksCount; delta++) {
			long blockToLoad = blockId + delta;
			long blockIndex = buffer.getLong();
			int blockSize = buffer.getInt();
			BlockInfo currentBlockInfo = new BlockInfo(blockIndex, blockSize);
			allBlockIds[delta] = blockToLoad;
			allBlockInfo[delta] = currentBlockInfo;
			if (blockToLoad == blockId) {
				blockInfo = currentBlockInfo;
			}
		}
		cache.putAll(allBlockIds, allBlockInfo);
		return blockInfo;
	}

	/**
	 * Allocate a new block id for data already written at {@code index} and
	 * register it in the cache (persisted later by the cache flusher).
	 *
	 * @param index byte offset of the block data
	 * @param size block size in bytes; 0 yields the shared empty-block id
	 * @return the new block id
	 */
	@Override
	public long newBlock(long index, int size) throws IOException {
		if (size == 0) {
			return EMPTY_BLOCK_ID;
		}
		long newBlockId = firstFreeBlock.getAndIncrement();
		BlockInfo blockInfo = new BlockInfo(index, size);
		cache.put(newBlockId, blockInfo);
		return newBlockId;
	}

	/** Flush the cache (draining pending records to disk), then close the channel. */
	@Override
	public void close() throws IOException {
		cache.close();
		metaFileChannel.close();
	}

	/**
	 * Persist one metadata record at its fixed offset. Used as the cache's flusher.
	 *
	 * @param blockId record slot (offset = blockId * BLOCK_META_BYTES_COUNT)
	 * @param index block data offset to store
	 * @param size block size to store
	 */
	private void writeBlockToDisk(long blockId, long index, int size) throws IOException {
		ByteBuffer data = ByteBuffer.allocate(BLOCK_META_BYTES_COUNT);
		data.putLong(index);
		data.putInt(size);
		data.flip();
		try {
			metaFileChannel.write(data, blockId * BLOCK_META_BYTES_COUNT).get();
		} catch (InterruptedException e) {
			throw new IOException(e);
		} catch (ExecutionException e) {
			throw new IOException(e.getCause());
		}
	}

	/** @return total number of block ids allocated so far (persisted or cached). */
	@Override
	public long getTotalBlocksCount() {
		return firstFreeBlock.get();
	}
}
@ -1,143 +0,0 @@ |
||||
package it.cavallium.strangedb.database.blocks; |
||||
|
||||
import it.unimi.dsi.fastutil.longs.*; |
||||
import it.unimi.dsi.fastutil.objects.ObjectArrayList; |
||||
import it.unimi.dsi.fastutil.objects.ObjectIterator; |
||||
|
||||
import java.io.IOException; |
||||
import java.util.ArrayList; |
||||
import java.util.Collection; |
||||
import java.util.LinkedList; |
||||
import java.util.List; |
||||
import java.util.concurrent.*; |
||||
import java.util.concurrent.locks.ReentrantReadWriteLock; |
||||
|
||||
import static it.cavallium.strangedb.database.blocks.DatabaseBlocksMetadata.BLOCK_META_READS_AT_EVERY_READ; |
||||
|
||||
public class DatabaseBlocksMetadataCache { |
||||
|
||||
private static final int BASE_QUANTITY = (BLOCK_META_READS_AT_EVERY_READ < 500 ? BLOCK_META_READS_AT_EVERY_READ : 500); |
||||
private static final int GOOD_CACHE_SIZE = 140 * BASE_QUANTITY; |
||||
private static final int FLUSH_CACHE_SIZE = 300 * BLOCK_META_READS_AT_EVERY_READ; |
||||
private static final int MAX_CACHE_SIZE = 400 * BLOCK_META_READS_AT_EVERY_READ; |
||||
|
||||
private final Long2ObjectMap<BlockInfo> blocks2Info = Long2ObjectMaps.synchronize(new Long2ObjectLinkedOpenHashMap<>(MAX_CACHE_SIZE, 0.5f)); |
||||
|
||||
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(false); |
||||
private final DatabaseBlocksMetadataCacheFlusher flusher; |
||||
private volatile boolean closed; |
||||
ExecutorService flushExecutorService = Executors.newFixedThreadPool(ForkJoinPool.getCommonPoolParallelism(), (r) -> new Thread(r, "Blocks Flush Thread")); |
||||
|
||||
public DatabaseBlocksMetadataCache(DatabaseBlocksMetadataCacheFlusher flusher) { |
||||
this.flusher = flusher; |
||||
} |
||||
|
||||
public BlockInfo get(long block) throws IOException { |
||||
if (closed) throw new IOException("Cache already closed!"); |
||||
lock.readLock().lock(); |
||||
try { |
||||
return blocks2Info.getOrDefault(block, DatabaseBlocksMetadata.ERROR_BLOCK_INFO); |
||||
} finally { |
||||
lock.readLock().unlock(); |
||||
} |
||||
} |
||||
|
||||
public void put(long block, BlockInfo blockInfo) throws IOException { |
||||
if (closed) return; |
||||
lock.writeLock().lock(); |
||||
try { |
||||
blocks2Info.put(block, blockInfo); |
||||
flush(); |
||||
} finally { |
||||
lock.writeLock().unlock(); |
||||
} |
||||
} |
||||
|
||||
@SuppressWarnings("unchecked") |
||||
public void putAll(long[] blocks, BlockInfo[] blockInfos) throws IOException { |
||||
if (closed) return; |
||||
lock.writeLock().lock(); |
||||
try { |
||||
Long2ObjectMap blocksInfosToAdd = new Long2ObjectLinkedOpenHashMap<>(blocks, blockInfos, 0.5f); |
||||
blocks2Info.putAll(blocksInfosToAdd); |
||||
flush(); |
||||
} finally { |
||||
lock.writeLock().unlock(); |
||||
} |
||||
} |
||||
|
||||
private void flush() throws IOException { |
||||
if (closed) return; |
||||
int blocks2InfoSize = blocks2Info.size(); |
||||
if (blocks2InfoSize >= FLUSH_CACHE_SIZE) { |
||||
ObjectIterator<Long2ObjectMap.Entry<BlockInfo>> entriesIterator = blocks2Info.long2ObjectEntrySet().iterator(); |
||||
@SuppressWarnings("unchecked") |
||||
ObjectArrayList<Callable<Void>> tasks = ObjectArrayList.wrap(new Callable[blocks2InfoSize - GOOD_CACHE_SIZE], blocks2InfoSize - GOOD_CACHE_SIZE); |
||||
for (int i = 0; i < blocks2InfoSize - GOOD_CACHE_SIZE; i++) { |
||||
Long2ObjectMap.Entry<BlockInfo> entry = entriesIterator.next(); |
||||
BlockInfo blockInfo = entry.getValue(); |
||||
long blockId = entry.getLongKey(); |
||||
long blockPosition = blockInfo.getIndex(); |
||||
int blockSize = blockInfo.getSize(); |
||||
entriesIterator.remove(); |
||||
|
||||
tasks.set(i, () -> { |
||||
try { |
||||
flusher.flush(blockId, blockPosition, blockSize); |
||||
} catch (IOException e) { |
||||
throw new CompletionException(e); |
||||
} |
||||
return null; |
||||
}); |
||||
} |
||||
try { |
||||
flushExecutorService.invokeAll(tasks); |
||||
} catch (InterruptedException e) { |
||||
throw new IOException(e.getCause()); |
||||
} |
||||
} |
||||
} |
||||
|
||||
public void close() throws IOException { |
||||
if (!closed) { |
||||
closed = true; |
||||
lock.writeLock().lock(); |
||||
try { |
||||
int blocks2InfoSize = blocks2Info.size(); |
||||
ObjectIterator<Long2ObjectMap.Entry<BlockInfo>> entriesIterator = blocks2Info.long2ObjectEntrySet().iterator(); |
||||
@SuppressWarnings("unchecked") |
||||
ObjectArrayList<Callable<Void>> tasks = ObjectArrayList.wrap(new Callable[blocks2InfoSize], blocks2InfoSize); |
||||
for (int i = 0; i < blocks2InfoSize; i++) { |
||||
Long2ObjectMap.Entry<BlockInfo> entry = entriesIterator.next(); |
||||
BlockInfo blockInfo = entry.getValue(); |
||||
long blockId = entry.getLongKey(); |
||||
long blockPosition = blockInfo.getIndex(); |
||||
int blockSize = blockInfo.getSize(); |
||||
entriesIterator.remove(); |
||||
tasks.set(i, () -> { |
||||
try { |
||||
flusher.flush(blockId, blockPosition, blockSize); |
||||
} catch (IOException e) { |
||||
throw new CompletionException(e); |
||||
} |
||||
return null; |
||||
}); |
||||
} |
||||
try { |
||||
flushExecutorService.invokeAll(tasks); |
||||
} catch (InterruptedException e) { |
||||
throw new IOException(e.getCause()); |
||||
} |
||||
flushExecutorService.shutdown(); |
||||
try { |
||||
if (!flushExecutorService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS)) |
||||
flushExecutorService.shutdownNow(); |
||||
} catch (InterruptedException e) { |
||||
throw new IOException(e); |
||||
} |
||||
} finally { |
||||
lock.writeLock().unlock(); |
||||
} |
||||
} |
||||
} |
||||
} |
@ -1,8 +0,0 @@ |
||||
package it.cavallium.strangedb.database.blocks; |
||||
|
||||
import java.io.IOException; |
||||
import java.util.concurrent.Future; |
||||
|
||||
/**
 * Callback used by {@code DatabaseBlocksMetadataCache} to persist one evicted
 * cache entry (a block's metadata record) to the metadata file.
 */
@FunctionalInterface
public interface DatabaseBlocksMetadataCacheFlusher {
	/**
	 * Persist the metadata of one block.
	 *
	 * @param blockId id of the block whose record is written (previously {@code key})
	 * @param position byte offset of the block data in the data file (previously {@code value1})
	 * @param size block size in bytes (previously {@code value2})
	 * @throws IOException if the write fails
	 */
	void flush(long blockId, long position, int size) throws IOException;
}
@ -1,67 +1,69 @@ |
||||
package it.cavallium.strangedb.database.references; |
||||
|
||||
import it.cavallium.strangedb.database.DatabaseFileIO; |
||||
import it.cavallium.strangedb.database.IReferencesIO; |
||||
import it.cavallium.strangedb.database.blocks.DatabaseBlocksIO; |
||||
|
||||
import java.io.IOException; |
||||
import java.nio.ByteBuffer; |
||||
import java.util.concurrent.locks.ReentrantReadWriteLock; |
||||
|
||||
import static it.cavallium.strangedb.database.IBlocksMetadata.EMPTY_BLOCK_ID; |
||||
|
||||
public class DatabaseReferencesIO implements IReferencesIO { |
||||
|
||||
private final DatabaseBlocksIO blocksIO; |
||||
private final DatabaseFileIO fileIO; |
||||
private final DatabaseReferencesMetadata referencesMetadata; |
||||
private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(false); |
||||
|
||||
public DatabaseReferencesIO(DatabaseBlocksIO blocksIO, DatabaseReferencesMetadata referencesMetadata) { |
||||
this.blocksIO = blocksIO; |
||||
public DatabaseReferencesIO(DatabaseFileIO fileIO, DatabaseReferencesMetadata referencesMetadata) { |
||||
this.fileIO = fileIO; |
||||
this.referencesMetadata = referencesMetadata; |
||||
} |
||||
|
||||
@Override |
||||
public long allocateReference() throws IOException { |
||||
return referencesMetadata.newReference(EMPTY_BLOCK_ID); |
||||
return referencesMetadata.newReference(0, 0); |
||||
} |
||||
|
||||
@Override |
||||
public long allocateReference(int size, ByteBuffer data) throws IOException { |
||||
long blockId = (size == 0) ? EMPTY_BLOCK_ID : blocksIO.newBlock(size, data); |
||||
return referencesMetadata.newReference(blockId); |
||||
long index = writeToFile(size, data); |
||||
return referencesMetadata.newReference(index, size); |
||||
} |
||||
|
||||
@Override |
||||
public void writeToReference(long reference, byte cleanerId, int size, ByteBuffer data) throws IOException { |
||||
long blockId = (size == 0) ? EMPTY_BLOCK_ID : blocksIO.newBlock(size, data); |
||||
lock.writeLock().lock(); |
||||
try { |
||||
referencesMetadata.editReference(reference, cleanerId, blockId); |
||||
} finally { |
||||
lock.writeLock().unlock(); |
||||
} |
||||
long index = writeToFile(size, data); |
||||
referencesMetadata.editReference(reference, new ReferenceInfo(index, size, cleanerId)); |
||||
} |
||||
|
||||
@Override |
||||
public ByteBuffer readFromReference(long reference) throws IOException { |
||||
long blockId; |
||||
lock.readLock().lock(); |
||||
try { |
||||
blockId = referencesMetadata.getReferenceBlockId(reference); |
||||
} finally { |
||||
lock.readLock().unlock(); |
||||
} |
||||
return blocksIO.readBlock(blockId); |
||||
ReferenceInfo referenceInfo = referencesMetadata.getReferenceInfo(reference); |
||||
return fileIO.readAt(referenceInfo.getIndex(), referenceInfo.getSize()); |
||||
} |
||||
|
||||
public ByteBuffer readFromReferenceSizeAndLastElementOfReferencesList(long reference) throws IOException { |
||||
long blockId; |
||||
lock.readLock().lock(); |
||||
try { |
||||
blockId = referencesMetadata.getReferenceBlockId(reference); |
||||
} finally { |
||||
lock.readLock().unlock(); |
||||
ReferenceInfo referenceInfo = referencesMetadata.getReferenceInfo(reference); |
||||
if (referenceInfo.getSize() >= Integer.BYTES * 2 + Long.BYTES) { |
||||
return fileIO.readAt(referenceInfo.getIndex() + referenceInfo.getSize() - (Integer.BYTES + Long.BYTES), Integer.BYTES + Long.BYTES); |
||||
} else { |
||||
return fileIO.readAt(referenceInfo.getIndex(), referenceInfo.getSize()); |
||||
} |
||||
} |
||||
|
||||
/** |
||||
* |
||||
* @param size |
||||
* @param data |
||||
* @return index |
||||
* @throws IOException |
||||
*/ |
||||
private long writeToFile(int size, ByteBuffer data) throws IOException { |
||||
if (size == 0 && data == null) return 0; |
||||
if (size < 0) { |
||||
throw new IOException("Trying to create a block with size " + size); |
||||
} |
||||
if (data.limit() < size) { |
||||
throw new IOException("Trying to create a block with size " + size + " but with a buffer of size " + data.limit()); |
||||
} |
||||
return blocksIO.readBlockSizeAndLastElementOfReferencesList(blockId); |
||||
return fileIO.writeAtEnd(size, data); |
||||
} |
||||
} |
||||
|