apply new code formatter and save action
@@ -37,212 +37,213 @@ import org.slf4j.LoggerFactory;
 */
public class BSFile implements AutoCloseable {

    private static final Logger LOGGER = LoggerFactory.getLogger(BSFile.class);

    public static final int BLOCK_SIZE = 512;

    /*
     * The last disk block of this sequence. This is the block new values will be
     * appended to.
     */
    private BSFileDiskBlock buffer;

    private int offsetInBuffer = 0;

    private boolean dirty = false;

    private final long rootBlockOffset;

    private final DiskStorage diskStorage;

    private final BSFileDiskBlock rootDiskBlock;

    private final BSFileCustomizer customizer;

    BSFile(final long rootBlockOffset, final DiskStorage diskStorage, final BSFileCustomizer customizer) {
        this(new BSFileDiskBlock(diskStorage.getDiskBlock(rootBlockOffset, BLOCK_SIZE)), diskStorage, customizer);
    }

    BSFile(final BSFileDiskBlock rootDiskBlock, final DiskStorage diskStorage, final BSFileCustomizer customizer) {
        this.rootDiskBlock = rootDiskBlock;
        this.customizer = customizer;
        this.rootBlockOffset = rootDiskBlock.getBlockOffset();
        this.diskStorage = diskStorage;

        final long lastBlockNumber = rootDiskBlock.getLastBlockPointer();
        if (lastBlockNumber == rootBlockOffset || lastBlockNumber == 0) {
            buffer = rootDiskBlock;
        } else {
            buffer = new BSFileDiskBlock(diskStorage.getDiskBlock(lastBlockNumber, BLOCK_SIZE));
        }
        offsetInBuffer = determineWriteOffsetInExistingBuffer(buffer);
        customizer.init(buffer);
        LOGGER.trace("create bsFile={} lastBlockNumber={}", rootBlockOffset, lastBlockNumber);
    }

    private int determineWriteOffsetInExistingBuffer(final BSFileDiskBlock buffer) {
        final byte[] buf = buffer.getBuffer();

        int result = 0;
        while (result < buf.length && buf[result] != 0) {
            result++;
        }

        return result;
    }

    public static BSFile existingFile(final long blockNumber, final DiskStorage diskStorage,
            final BSFileCustomizer customizer) {
        return new BSFile(blockNumber, diskStorage, customizer);
    }

    public static BSFile newFile(final DiskStorage diskStorage, final BSFileCustomizer customizer) {
        final long rootBlockOffset = diskStorage.allocateBlock(BLOCK_SIZE);
        LOGGER.trace("create new bsFile={}", rootBlockOffset);
        return new BSFile(rootBlockOffset, diskStorage, customizer);
    }

    public void append(final long value1, final long value2) {
        final long val1 = customizer.preProcessWriteValue1(value1);
        final long val2 = customizer.preProcessWriteValue2(value2);

        final int bytesWritten = VariableByteEncoder.encodeInto(val1, val2, buffer.getBuffer(), offsetInBuffer);

        if (bytesWritten == 0) {
            flushFullBufferAndCreateNew();
            customizer.newBlock();

            append(value1, value2);
        }
        offsetInBuffer += bytesWritten;
        dirty = true;
    }

    public void append(final long value) {
        int bytesWritten = VariableByteEncoder.encodeInto(value, buffer.getBuffer(), offsetInBuffer);

        if (bytesWritten == 0) {
            flushFullBufferAndCreateNew();
            bytesWritten = VariableByteEncoder.encodeInto(value, buffer.getBuffer(), offsetInBuffer);
            assert bytesWritten > 0 : "after a flush the buffer is empty, so it should be possible to write a few bytes";
        }
        offsetInBuffer += bytesWritten;
        dirty = true;
    }

    private void flushFullBufferAndCreateNew() {
        final long newBlockOffset = diskStorage.allocateBlock(BLOCK_SIZE);

        if (buffer == rootDiskBlock) {
            // root block and current block are the same, so we need
            // to update only one
            buffer.setLastBlockOffset(newBlockOffset);
            buffer.setNextBlockOffset(newBlockOffset);
            buffer.writeAsync();
        } else {
            rootDiskBlock.writeLastBlockOffset(newBlockOffset);

            buffer.setNextBlockOffset(newBlockOffset);
            buffer.writeAsync();
        }

        // set the new buffer
        buffer = new BSFileDiskBlock(diskStorage.getDiskBlock(newBlockOffset, BLOCK_SIZE));
        offsetInBuffer = 0;
        dirty = false;
        LOGGER.trace("flushFullBufferAndCreateNew bsFile={} newBlock={}", rootBlockOffset, newBlockOffset);
    }

    public void flush() {
        LOGGER.trace("flush bsFile={} dirty={} file={}", rootBlockOffset, dirty,
                diskStorage.getRelativeDatabaseFileForLogging());
        if (dirty) {
            buffer.writeAsync();
        }
    }

    public Optional<Long> getLastValue() {
        final byte[] buf = buffer.getBuffer();
        final LongList bufferedLongs = VariableByteEncoder.decode(buf);

        final Optional<Long> result;
        if (bufferedLongs.isEmpty()) {
            result = Optional.empty();
        } else {
            final long lastValue = bufferedLongs.get(bufferedLongs.size() - 1);
            result = Optional.of(lastValue);
        }
        return result;
    }

    public Stream<LongList> streamOfLongLists() {
        final Iterator<LongList> iterator = new LongListIterator(rootBlockOffset, diskStorage);
        final Stream<LongList> stream = StreamSupport
                .stream(Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED), false);

        final Optional<Function<LongList, LongList>> mapper = customizer.getStreamMapper();
        if (mapper.isPresent()) {
            return stream.map(mapper.get());
        }
        return stream;
    }

    private static class LongListIterator implements Iterator<LongList> {

        private LongList next = null;
        private long nextBlockOffset;

        private final DiskStorage diskStorage;

        public LongListIterator(final long nextBlockNumber, final DiskStorage diskStorage) {
            this.nextBlockOffset = nextBlockNumber;
            this.diskStorage = diskStorage;
        }

        @Override
        public boolean hasNext() {
            return nextBlockOffset != BSFileDiskBlock.NO_NEXT_POINTER;
        }

        @Override
        public LongList next() {
            if (nextBlockOffset == BSFileDiskBlock.NO_NEXT_POINTER) {
                throw new NoSuchElementException();
            }

            final BSFileDiskBlock diskBlock = getDiskBlock(nextBlockOffset);
            nextBlockOffset = diskBlock.getNextBlockNumber();

            final byte[] buf = diskBlock.getBuffer();
            next = VariableByteEncoder.decode(buf);
            return next;
        }

        private BSFileDiskBlock getDiskBlock(final long blockOffset) {
            final DiskBlock diskBlock = diskStorage.getDiskBlock(blockOffset, BLOCK_SIZE);
            return new BSFileDiskBlock(diskBlock);
        }
    }

    public LongList asLongList() {
        final LongList result = new LongList();
        streamOfLongLists().forEachOrdered(result::addAll);
        return result;
    }

    public long getRootBlockOffset() {
        return rootBlockOffset;
    }

    @Override
    public void close() {
        flush();
    }
}
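For orientation (not part of this commit): a minimal sketch of how the BSFile API above is used, assuming the DiskStorage, NullCustomizer, and LongList types from the other files in this diff; the file name is hypothetical and imports are omitted.

    // Append a few values to a new block-sequence file and read them back.
    try (DiskStorage storage = new DiskStorage(Path.of("example.pdb"), null);
            BSFile file = BSFile.newFile(storage, NullCustomizer.INSTANCE)) {
        file.append(42L);
        file.append(43L);
        file.flush(); // writes the dirty tail block back to its mapped buffer
        final LongList values = file.asLongList(); // decodes all blocks: [42, 43]
    }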
@@ -6,13 +6,13 @@ import java.util.function.Function;
import org.lucares.collections.LongList;

public interface BSFileCustomizer {
    void init(BSFileDiskBlock lastDiskBlockOfStream);

    Optional<Function<LongList, LongList>> getStreamMapper();

    void newBlock();

    long preProcessWriteValue1(long value);

    long preProcessWriteValue2(long value);
}
@@ -8,90 +8,90 @@ import org.lucares.utils.byteencoder.VariableByteEncoder;

class BSFileDiskBlock {

    protected static final int NEXT_POINTER_OFFSET = 0;
    public static final long NO_NEXT_POINTER = 0;
    private static final int LAST_BLOCK_POINTER_POSITION = 8;
    public static final long NO_LAST_BLOCK = 0;
    private static final int INT_SEQUENCE_OFFSET = 8 // next block pointer
            + 8; // last block pointer

    private final DiskBlock diskBlock;
    private long nextBlockOffset = 0;
    private long lastBlockOffset = 0;

    private byte[] buffer = null;

    public BSFileDiskBlock(final DiskBlock diskBlock) {
        this.diskBlock = diskBlock;
    }

    public byte[] getBuffer() {
        if (buffer == null) {
            final ByteBuffer byteBuffer = diskBlock.getByteBuffer();
            this.buffer = new byte[byteBuffer.capacity() - INT_SEQUENCE_OFFSET];
            byteBuffer.position(INT_SEQUENCE_OFFSET);
            byteBuffer.get(buffer);
        }

        return buffer;
    }

    public long getBlockOffset() {
        return diskBlock.getBlockOffset();
    }

    public void setNextBlockOffset(final long nextBlockOffset) {
        this.nextBlockOffset = nextBlockOffset;
    }

    public long getLastBlockPointer() {
        if (lastBlockOffset <= 0) {
            lastBlockOffset = diskBlock.getByteBuffer().getLong(LAST_BLOCK_POINTER_POSITION);
        }

        return lastBlockOffset;
    }

    public long getNextBlockNumber() {
        if (nextBlockOffset <= 0) {
            nextBlockOffset = diskBlock.getByteBuffer().getLong(NEXT_POINTER_OFFSET);
        }
        return nextBlockOffset;
    }

    public void setLastBlockOffset(final long lastBlockOffset) {
        this.lastBlockOffset = lastBlockOffset;
    }

    public void writeLastBlockOffset(final long lastBlockOffset) {
        this.lastBlockOffset = lastBlockOffset;
        diskBlock.getByteBuffer().putLong(LAST_BLOCK_POINTER_POSITION, lastBlockOffset);
    }

    private void writeBufferToByteBuffer() {
        diskBlock.getByteBuffer().position(INT_SEQUENCE_OFFSET);
        diskBlock.getByteBuffer().put(buffer);
    }

    private void writeBlockHeader() {
        diskBlock.getByteBuffer().putLong(NEXT_POINTER_OFFSET, nextBlockOffset);
        diskBlock.getByteBuffer().putLong(LAST_BLOCK_POINTER_POSITION, lastBlockOffset);
    }

    public void writeAsync() {
        writeBlockHeader();
        writeBufferToByteBuffer();
    }

    public void force() {
        diskBlock.force();
    }

    @Override
    public String toString() {
        final LongList bufferDecoded = VariableByteEncoder.decode(buffer);
        return "BSFileDiskBlock[bufferDecoded=" + bufferDecoded + "]";
    }
}
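The header constants above imply the following per-block layout; this is a reading aid inferred from NEXT_POINTER_OFFSET, LAST_BLOCK_POINTER_POSITION, and INT_SEQUENCE_OFFSET, not something spelled out in the source:

    // bytes 0..7    next-block pointer (0 = NO_NEXT_POINTER, end of chain)
    // bytes 8..15   last-block pointer (maintained in the root block of the chain)
    // bytes 16..end variable-byte encoded payload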
@@ -8,41 +8,41 @@ import org.lucares.pdb.diskstorage.DiskStorage;

public class LongStreamFile implements AutoCloseable {

    private final BSFile bsFile;

    LongStreamFile(final BSFile bsFile) {
        this.bsFile = bsFile;
    }

    public static LongStreamFile existingFile(final long blockNumber, final DiskStorage diskStorage)
            throws IOException {
        final BSFile bsFile = BSFile.existingFile(blockNumber, diskStorage, NullCustomizer.INSTANCE);
        return new LongStreamFile(bsFile);
    }

    public static LongStreamFile newFile(final DiskStorage diskStorage) throws IOException {
        final BSFile bsFile = BSFile.newFile(diskStorage, NullCustomizer.INSTANCE);
        return new LongStreamFile(bsFile);
    }

    public void append(final long value) throws IOException {
        bsFile.append(value);
    }

    public Stream<LongList> streamOfLongLists() {
        return bsFile.streamOfLongLists();
    }

    public LongList asLongList() {
        final LongList result = new LongList();
        streamOfLongLists().forEachOrdered(result::addAll);
        return result;
    }

    @Override
    public void close() {
        bsFile.close();
    }
}
@@ -7,31 +7,31 @@ import org.lucares.collections.LongList;

public class NullCustomizer implements BSFileCustomizer {

    public static final NullCustomizer INSTANCE = new NullCustomizer();

    @Override
    public void init(final BSFileDiskBlock lastDiskBlockOfStream) {
        // nothing to do - this is a NullObject
    }

    @Override
    public Optional<Function<LongList, LongList>> getStreamMapper() {
        // no mapper to return - this is a NullObject
        return Optional.empty();
    }

    @Override
    public void newBlock() {
        // nothing to do - this is a NullObject
    }

    @Override
    public long preProcessWriteValue1(final long value) {
        return value;
    }

    @Override
    public long preProcessWriteValue2(final long value) {
        return value;
    }
}
@@ -8,71 +8,71 @@ import org.lucares.utils.byteencoder.VariableByteEncoder;

public class TimeSeriesCustomizer implements BSFileCustomizer {

    private static class TimeStampDeltaDecoder implements Function<LongList, LongList> {

        /**
         * Computes the inverse of the delta encoding in {@link BSFile#appendTimeValue}
         */
        @Override
        public LongList apply(final LongList t) {
            long lastTimeValue = 0;
            for (int i = 0; i < t.size(); i += 2) {
                lastTimeValue += t.get(i);
                t.set(i, lastTimeValue);
            }

            return t;
        }
    }

    private static final TimeStampDeltaDecoder TIME_DELTA_DECODER = new TimeStampDeltaDecoder();

    private long lastEpochMilli;

    @Override
    public void init(final BSFileDiskBlock lastDiskBlockOfStream) {
        lastEpochMilli = determineLastEpochMilli(lastDiskBlockOfStream);
    }

    private long determineLastEpochMilli(final BSFileDiskBlock diskBlock) {
        // get the time/value delta encoded longs
        final byte[] buf = diskBlock.getBuffer();
        LongList longList = VariableByteEncoder.decode(buf);
        final long result;
        if (longList.size() < 2) {
            // only new files have empty disk blocks
            // and empty disk blocks have time offset 0
            result = 0;
        } else {
            // decode the deltas to get the correct timestamps
            longList = TIME_DELTA_DECODER.apply(longList);

            // return the last timestamp
            result = longList.get(longList.size() - 2);
        }
        return result;
    }

    @Override
    public Optional<Function<LongList, LongList>> getStreamMapper() {
        return Optional.of(TIME_DELTA_DECODER);
    }

    @Override
    public void newBlock() {
        lastEpochMilli = 0;
    }

    @Override
    public long preProcessWriteValue1(final long epochMilli) {
        final long epochMilliDelta = epochMilli - lastEpochMilli;
        lastEpochMilli = epochMilli;
        return epochMilliDelta;
    }

    @Override
    public long preProcessWriteValue2(final long value) {
        return value;
    }
}
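A small self-contained sketch of the round trip performed by preProcessWriteValue1 and TimeStampDeltaDecoder above; the concrete timestamps are made up for illustration:

    final long[] timestamps = {1_000L, 1_005L, 1_012L};

    // encoding side: store the difference to the previous timestamp
    long last = 0;
    final long[] deltas = new long[timestamps.length];
    for (int i = 0; i < timestamps.length; i++) {
        deltas[i] = timestamps[i] - last; // 1000, 5, 7
        last = timestamps[i];
    }

    // decoding side: a running sum restores the absolute timestamps
    long sum = 0;
    for (int i = 0; i < deltas.length; i++) {
        sum += deltas[i]; // 1000, 1005, 1012
        assert sum == timestamps[i];
    }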
@@ -8,52 +8,52 @@ import org.lucares.pdb.diskstorage.DiskStorage;

public class TimeSeriesFile implements AutoCloseable {

    private final BSFile bsFile;

    private TimeSeriesFile(final BSFile bsFile) {
        this.bsFile = bsFile;
    }

    public static TimeSeriesFile existingFile(final long blockNumber, final DiskStorage diskStorage) {
        final BSFile bsFile = BSFile.existingFile(blockNumber, diskStorage, new TimeSeriesCustomizer());
        return new TimeSeriesFile(bsFile);
    }

    public static TimeSeriesFile newFile(final DiskStorage diskStorage) {
        final BSFile bsFile = BSFile.newFile(diskStorage, new TimeSeriesCustomizer());
        return new TimeSeriesFile(bsFile);
    }

    public void appendTimeValue(final long epochMilli, final long value) {
        bsFile.append(epochMilli, value);
    }

    public Stream<LongList> streamOfLongLists() {
        return bsFile.streamOfLongLists();
    }

    public LongList asTimeValueLongList() {
        final LongList result = new LongList();
        streamOfLongLists().forEachOrdered(result::addAll);
        return result;
    }

    @Override
    public void close() {
        bsFile.close();
    }

    public long getRootBlockOffset() {
        return bsFile.getRootBlockOffset();
    }

    public Optional<Long> getLastValue() {
        return bsFile.getLastValue();
    }

    public void flush() {
        bsFile.flush();
    }
}
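A minimal usage sketch for the time-series facade above (illustrative only; the file name and values are hypothetical, imports omitted):

    try (DiskStorage storage = new DiskStorage(Path.of("cpu-load.pdb"), null);
            TimeSeriesFile series = TimeSeriesFile.newFile(storage)) {
        series.appendTimeValue(1_000L, 21L);
        series.appendTimeValue(1_005L, 42L);
        series.flush();
        // the stream mapper undoes the delta encoding, so the list holds
        // alternating absolute timestamps and values: [1000, 21, 1005, 42]
        final LongList timeValuePairs = series.asTimeValueLongList();
    }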
@@ -5,52 +5,52 @@ import java.nio.MappedByteBuffer;

public class DiskBlock {

    private byte[] buffer = null;
    private final long blockOffset;

    private final ByteBuffer byteBuffer;

    public DiskBlock(final long blockOffset, final ByteBuffer byteBuffer) {
        this.blockOffset = blockOffset;
        this.byteBuffer = byteBuffer;
    }

    public byte[] getBuffer() {
        if (buffer == null) {
            this.buffer = new byte[byteBuffer.capacity()];
            byteBuffer.get(buffer);
        }

        return buffer;
    }

    public ByteBuffer getByteBuffer() {
        return byteBuffer;
    }

    public long getBlockOffset() {
        return blockOffset;
    }

    private void writeBufferToByteBuffer() {
        byteBuffer.position(0);
        byteBuffer.put(buffer);
    }

    public void writeAsync() {
        writeBufferToByteBuffer();
    }

    public void force() {
        // some tests use HeapByteBuffer and don't support force
        if (byteBuffer instanceof MappedByteBuffer) {
            ((MappedByteBuffer) byteBuffer).force();
        }
    }

    @Override
    public String toString() {
        return "DiskBlock[" + blockOffset + "]";
    }
}
@@ -14,273 +14,273 @@ import org.slf4j.LoggerFactory;

public class DiskStorage implements AutoCloseable {

    private static final Logger LOGGER = LoggerFactory.getLogger(DiskStorage.class);

    private static final long FREE_LIST_ROOT_OFFSET = 0;
    private static final long NO_POINTER = 0;
    private static final int FREE_LIST_NEXT_POINTER = 0;
    private static final int FREE_LIST_PREV_POINTER = 8;
    private static final int FREE_LIST_SIZE = 16;
    private static final int FREE_LIST_NODE_SIZE = 32;

    private final FileChannel fileChannel;

    private Path relativeDatabaseFileForLogging;

    public DiskStorage(final Path databaseFile, Path storageBasePath) {
        this.relativeDatabaseFileForLogging = storageBasePath != null ? storageBasePath.relativize(databaseFile)
                : databaseFile;
        try {
            Files.createDirectories(databaseFile.getParent());

            fileChannel = FileChannel.open(databaseFile, StandardOpenOption.READ, StandardOpenOption.WRITE,
                    StandardOpenOption.CREATE);

            initIfNew();
        } catch (final IOException e) {
            throw new DiskStorageException(e);
        }
    }

    private void initIfNew() throws IOException {
        if (fileChannel.size() == 0) {
            // file is new -> add root of the free list
            writeFreeListRootNodePosition(NO_POINTER);
        }
    }

    public DiskBlock getDiskBlock(final long blockOffset, final int blockSize) {
        try {
            LOGGER.trace("read block={} file={}", blockOffset, relativeDatabaseFileForLogging);

            final var byteBuffer = fileChannel.map(MapMode.READ_WRITE, blockOffset, blockSize);

            return new DiskBlock(blockOffset, byteBuffer);
        } catch (final IOException e) {
            throw new DiskStorageException(e);
        }
    }

    public Path getRelativeDatabaseFileForLogging() {
        return relativeDatabaseFileForLogging;
    }

    @Override
    public void close() {
        try {
            fileChannel.force(true);
            fileChannel.close();
        } catch (final IOException e) {
            throw new DiskStorageException(e);
        }
    }

    public synchronized long allocateBlock(final int blockSize) {
        if (blockSize < FREE_LIST_NODE_SIZE) {
            throw new IllegalArgumentException("The minimal allocation size is 32 bytes.");
        }

        try {
            final var optionalFreeBlock = findFreeBlockWithSize(blockSize);
            if (optionalFreeBlock.isPresent()) {
                final FreeListNode freeBlock = optionalFreeBlock.get();
                removeBlockFromFreeList(freeBlock);
                clearBlock(freeBlock);
                return freeBlock.getOffset();
            } else {
                return allocateNewBlock(blockSize);
            }
        } catch (final IOException e) {
            throw new DiskStorageException(e);
        }
    }

    private long allocateNewBlock(final int blockSize) throws IOException {
        final var buffer = new byte[blockSize];
        final var src = ByteBuffer.wrap(buffer);

        // block numbers start with 1, so that the uninitialized value
        // (0) means 'no block'. That way we do not have to write
        // data to a newly created block, which reduces IO.
        final var blockOffset = fileChannel.size();
        fileChannel.write(src, fileChannel.size());
        return blockOffset;
    }

    public synchronized void free(final long blockOffset, final int blockSize) throws IOException {

        final var neighboringFreeListNode = getNeighboringFreeListNode(blockOffset);

        if (neighboringFreeListNode.isPresent()) {
            // insert new free node into the free list
            final var prev = neighboringFreeListNode.get();

            insertFreeListNode(prev, blockOffset, blockSize);

        } else {
            // add new free list node as the first node in the list
            insertFreeListNodeAsNewRoot(blockOffset, blockSize);
        }
    }

    private void insertFreeListNodeAsNewRoot(final long blockOffset, final int blockSize) throws IOException {
        final var freeListRootNodePosition = readFreeListRootNodePosition();

        if (freeListRootNodePosition > 0) {
            // there are free list nodes, but they are after the new node

            final var next = readFreeListNode(freeListRootNodePosition);
            final var newNode = new FreeListNode(blockOffset, blockSize);

            FreeListNode.link(newNode, next);

            writeFreeListNode(newNode, next);
            writeFreeListRootNodePosition(blockOffset);

        } else {
            // this is the first free list node
            final var newNode = new FreeListNode(blockOffset, blockSize);
            writeFreeListNode(newNode);
            writeFreeListRootNodePosition(blockOffset);
        }
    }

    private void insertFreeListNode(final FreeListNode prev, final long blockOffset, final int blockSize)
            throws IOException {

        final var newNode = new FreeListNode(blockOffset, blockSize);
        final var next = prev.hasNext() ? readFreeListNode(prev.getNext()) : null;

        FreeListNode.link(prev, newNode, next);

        writeFreeListNode(prev, newNode, next);
    }

    /**
     *
     * @param blockOffset the offset of the block that is about to be freed
     * @return the free list node before the block
     * @throws IOException
     */
    private Optional<FreeListNode> getNeighboringFreeListNode(final long blockOffset) throws IOException {
        FreeListNode result = null;
        final long freeListRootNodePosition = readFreeListRootNodePosition();
        if (freeListRootNodePosition < blockOffset) {

            long nextFreeListNodeOffset = freeListRootNodePosition;
            while (nextFreeListNodeOffset > 0) {
                final var freeListNode = readFreeListNode(nextFreeListNodeOffset);

                if (freeListNode.getOffset() > blockOffset) {
                    break;
                }
                nextFreeListNodeOffset = freeListNode.getNext();
                result = freeListNode;
            }
        }
        return Optional.ofNullable(result);
    }

    private Optional<FreeListNode> findFreeBlockWithSize(final long blockSize) throws IOException {
        FreeListNode result = null;
        final long freeListRootNodePosition = readFreeListRootNodePosition();

        long nextFreeListNodeOffset = freeListRootNodePosition;
        while (nextFreeListNodeOffset > 0) {
            final var freeListNode = readFreeListNode(nextFreeListNodeOffset);

            if (freeListNode.getSize() == blockSize) {
                result = freeListNode;
                break;
            }
            nextFreeListNodeOffset = freeListNode.getNext();
        }

        return Optional.ofNullable(result);
    }

    private void clearBlock(final FreeListNode freeBlock) throws IOException {
        final var src = ByteBuffer.allocate(freeBlock.getSize());
        fileChannel.write(src, freeBlock.getOffset());
    }

    private void removeBlockFromFreeList(final FreeListNode freeBlock) throws IOException {

        if (freeBlock.getPrev() == 0) {
            writeFreeListRootNodePosition(freeBlock.getNext());
        }

        if (freeBlock.getNext() > 0) {
            final FreeListNode next = readFreeListNode(freeBlock.getNext());
            next.setPrev(freeBlock.getPrev());
            writeFreeListNode(next);
        }

        if (freeBlock.getPrev() > 0) {
            final FreeListNode prev = readFreeListNode(freeBlock.getPrev());
            prev.setNext(freeBlock.getNext());
            writeFreeListNode(prev);
        }
    }

    private FreeListNode readFreeListNode(final long freeListNodePosition) throws IOException {
        final var freeListNode = ByteBuffer.allocate(FREE_LIST_NODE_SIZE);
        fileChannel.read(freeListNode, freeListNodePosition);
        final long offset = freeListNodePosition;
        final long next = freeListNode.getLong(FREE_LIST_NEXT_POINTER);
        final long prev = freeListNode.getLong(FREE_LIST_PREV_POINTER);
        final int size = freeListNode.getInt(FREE_LIST_SIZE);
        return new FreeListNode(offset, next, prev, size);
    }

    private void writeFreeListNode(final FreeListNode... nodes) throws IOException {

        for (final FreeListNode node : nodes) {
            if (node != null) {
                final var src = ByteBuffer.allocate(FREE_LIST_NODE_SIZE);
                src.putLong(FREE_LIST_NEXT_POINTER, node.getNext());
                src.putLong(FREE_LIST_PREV_POINTER, node.getPrev());
                src.putInt(FREE_LIST_SIZE, node.getSize());
                fileChannel.write(src, node.getOffset());
            }
        }
    }

    private long readFreeListRootNodePosition() throws IOException {
        final var freeListFirstBlock = ByteBuffer.allocate(8);
        fileChannel.read(freeListFirstBlock, FREE_LIST_ROOT_OFFSET);
        return freeListFirstBlock.getLong(0);
    }

    private void writeFreeListRootNodePosition(final long freeListRootNodePosition) throws IOException {
        final var freeListFirstBlock = ByteBuffer.allocate(8);
        freeListFirstBlock.putLong(0, freeListRootNodePosition);
        fileChannel.write(freeListFirstBlock, FREE_LIST_ROOT_OFFSET);
    }

    public synchronized void ensureAlignmentForNewBlocks(final int alignment) {
        try {
            final long size = fileChannel.size();
            final int alignmentMismatch = Math.floorMod(size, alignment);
            if (alignmentMismatch != 0) {
                // The next allocated block would not be aligned. Therefore we allocate a
                // throw-away block.
                allocateNewBlock(alignment - alignmentMismatch);
            }
        } catch (final IOException e) {
            throw new DiskStorageException(e);
        }
    }

    public long size() {
        try {
            return fileChannel.size();
        } catch (final IOException e) {
            throw new DiskStorageException(e);
        }
    }

    public int minAllocationSize() {
        return FREE_LIST_NODE_SIZE;
    }
}
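A sketch of the allocate/free recycling behavior implemented above (illustrative; exact-size matching is what findFreeBlockWithSize does, and the file name is hypothetical):

    try (DiskStorage storage = new DiskStorage(Path.of("alloc-example.pdb"), null)) {
        final long first = storage.allocateBlock(512);  // appended at the end of the file
        storage.free(first, 512);                       // linked into the on-disk free list
        final long second = storage.allocateBlock(512); // exact size match: block is reused
        assert second == first;
    } catch (final IOException e) {
        throw new DiskStorageException(e);
    }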
@@ -2,18 +2,18 @@ package org.lucares.pdb.diskstorage;

public class DiskStorageException extends RuntimeException {

    private static final long serialVersionUID = 1683775743640383633L;

    public DiskStorageException(final String message, final Throwable cause) {
        super(message, cause);
    }

    public DiskStorageException(final String message) {
        super(message);
    }

    public DiskStorageException(final Throwable cause) {
        super(cause);
    }

}
@@ -1,82 +1,82 @@

package org.lucares.pdb.diskstorage;

public class FreeListNode {

	private final long offset;
	private long next;
	private long prev;
	private int size;

	public FreeListNode(final long offset, final int size) {
		this.offset = offset;
		this.size = size;
	}

	public FreeListNode(final long offset, final long next, final long prev, final int size) {
		this.offset = offset;
		this.next = next;
		this.prev = prev;
		this.size = size;
	}

	public long getOffset() {
		return offset;
	}

	public long getNext() {
		return next;
	}

	public void setNext(final long next) {
		this.next = next;
	}

	public void setNext(final FreeListNode next) {
		this.next = next != null ? next.getOffset() : 0;
	}

	public long getPrev() {
		return prev;
	}

	public void setPrev(final long prev) {
		this.prev = prev;
	}

	public void setPrev(final FreeListNode prev) {
		this.prev = prev != null ? prev.getOffset() : 0;
	}

	public int getSize() {
		return size;
	}

	public void setSize(final int size) {
		this.size = size;
	}

	@Override
	public String toString() {
		return "FreeListNode [offset=" + offset + ", next=" + next + ", prev=" + prev + ", size=" + size + "]";
	}

	public boolean hasNext() {
		return next != 0;
	}

	public static void link(final FreeListNode prev, final FreeListNode next) {
		prev.setNext(next);
		next.setPrev(prev);
	}

	public static void link(final FreeListNode prev, final FreeListNode middle, final FreeListNode next) {
		if (prev != null) {
			prev.setNext(middle);
		}
		middle.setPrev(prev);
		middle.setNext(next);
		if (next != null) {
			// point back at middle, so both directions of the list stay consistent
			next.setPrev(middle);
		}
	}

}

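
A hedged sketch of the three-argument link, built only on the FreeListNode API above; the offsets are made up. It shows why next.setPrev(middle), rather than prev, is what keeps both directions of the list consistent.

import org.lucares.pdb.diskstorage.FreeListNode;

public class FreeListLinkDemo {

	public static void main(final String[] args) {
		final FreeListNode prev = new FreeListNode(512, 64);
		final FreeListNode middle = new FreeListNode(1024, 64);
		final FreeListNode next = new FreeListNode(2048, 64);

		// middle ends up between prev and next in both directions
		FreeListNode.link(prev, middle, next);

		System.out.println(prev.getNext());   // 1024
		System.out.println(middle.getPrev()); // 512
		System.out.println(middle.getNext()); // 2048
		System.out.println(next.getPrev());   // 1024
	}
}
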
@@ -3,77 +3,77 @@ package org.lucares.pdb.map;

package org.lucares.pdb.map;

import java.util.Arrays;

public final class ByteArrayKey implements Comparable<ByteArrayKey> {

	private final byte[] bytes;

	public ByteArrayKey(final byte[] bytes) {
		this.bytes = bytes;
	}

	@Override
	public int compareTo(final ByteArrayKey o) {
		return compare(bytes, o.bytes);
	}

	public static int compare(final byte[] key, final byte[] otherKey) {
		return Arrays.compare(key, otherKey);
	}

	public static boolean isPrefix(final byte[] key, final byte[] keyPrefix) {
		return compareKeyPrefix(key, keyPrefix) == 0;
	}

	/**
	 * Same as {@link #compare(byte[], byte[])}, but returns 0 if {@code prefix} is
	 * a prefix of the key. {@link #compare(byte[], byte[])} returns values > 0 in
	 * that case, because the key is longer than the prefix.
	 *
	 * @param key    the key
	 * @param prefix the prefix
	 * @return 0 if {@code prefix} is a prefix of the key, otherwise the value is
	 *         defined by {@link #compare(byte[], byte[])}
	 */
	public static int compareKeyPrefix(final byte[] key, final byte[] prefix) {
		int i = 0;
		while (i < key.length && i < prefix.length) {
			if (key[i] != prefix[i]) {
				return key[i] - prefix[i];
			}
			i++;
		}

		return key.length > prefix.length ? 0 : key.length - prefix.length;
	}

	public static boolean equal(final byte[] key, final byte[] otherKey) {
		return compare(key, otherKey) == 0;
	}

	@Override
	public String toString() {
		return Arrays.toString(bytes);
	}

	@Override
	public int hashCode() {
		final int prime = 31;
		int result = 1;
		result = prime * result + Arrays.hashCode(bytes);
		return result;
	}

	@Override
	public boolean equals(final Object obj) {
		if (this == obj)
			return true;
		if (obj == null)
			return false;
		if (getClass() != obj.getClass())
			return false;
		final ByteArrayKey other = (ByteArrayKey) obj;
		if (!Arrays.equals(bytes, other.bytes))
			return false;
		return true;
	}

}

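
A small usage sketch of the prefix comparison above, using only the public static API of ByteArrayKey; the keys are hypothetical.

import java.nio.charset.StandardCharsets;

import org.lucares.pdb.map.ByteArrayKey;

public class PrefixCompareDemo {

	public static void main(final String[] args) {
		final byte[] key = "metric.cpu.load".getBytes(StandardCharsets.UTF_8);
		final byte[] prefix = "metric.cpu".getBytes(StandardCharsets.UTF_8);

		// 0: prefix is a prefix of key, even though key is longer
		System.out.println(ByteArrayKey.compareKeyPrefix(key, prefix));
		// negative: the first argument is shorter than the probe
		System.out.println(ByteArrayKey.compareKeyPrefix(prefix, key));
		// true
		System.out.println(ByteArrayKey.isPrefix(key, prefix));
	}
}
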
@@ -14,13 +14,13 @@ import org.lucares.pdb.map.PersistentMap.EncoderDecoder;

 * {@link Empty} solves this by providing a single unmodifiable value.
 */
public final class Empty {

	public static final Empty INSTANCE = new Empty();

	private Empty() {
	}

	@Override
	public String toString() {
		return "<empty>";
	}

}

@@ -9,158 +9,158 @@ import java.util.function.Predicate;

import org.lucares.utils.byteencoder.VariableByteEncoder;

class NodeEntry {

	enum ValueType {
		VALUE_INLINE((byte) 1), NODE_POINTER((byte) 2);

		private final byte b;

		ValueType(final byte b) {
			this.b = b;
		}

		static ValueType fromByte(final byte b) {
			for (final ValueType type : values()) {
				if (type.b == b) {
					return type;
				}
			}
			throw new IllegalStateException("Cannot map byte " + b + " to a value type.");
		}

		public byte asByte() {
			return b;
		}
	}

	static final class KeyMatches implements Predicate<NodeEntry> {

		private final byte[] key;

		public KeyMatches(final byte[] key) {
			this.key = key;
		}

		@Override
		public boolean test(final NodeEntry t) {
			return Arrays.equals(key, t.getKey());
		}
	}

	private final ValueType type;
	private final byte[] key;
	private final byte[] value;

	public NodeEntry(final ValueType type, final byte[] key, final byte[] value) {
		this.type = type;
		this.key = key;
		this.value = value;
	}

	public ValueType getType() {
		return type;
	}

	public byte[] getKey() {
		return key;
	}

	public byte[] getValue() {
		return value;
	}

	public int size() {
		return 1 + key.length + value.length;
	}

	@Override
	public String toString() {
		final String valueAsString = isInnerNode() ? String.valueOf(VariableByteEncoder.decodeFirstValue(value))
				: new String(value, StandardCharsets.UTF_8);

		return "NodeEntry [type=" + type + ", key=" + new String(key, StandardCharsets.UTF_8) + ", value="
				+ valueAsString + "]";
	}

	public <K, V> String toString(final Function<byte[], K> keyDecoder, final Function<byte[], V> valueDecoder) {
		final String valueAsString = isInnerNode() ? String.valueOf(VariableByteEncoder.decodeFirstValue(value))
				: String.valueOf(valueDecoder.apply(value));

		final String keyAsString;
		if (Arrays.equals(key, PersistentMap.MAX_KEY)) {
			keyAsString = "<<<MAX_KEY>>>";
		} else {
			keyAsString = String.valueOf(keyDecoder.apply(key));
		}

		return "NodeEntry [type=" + type + ", key=" + keyAsString + ", value=" + valueAsString + "]";
	}

	@Override
	public int hashCode() {
		final int prime = 31;
		int result = 1;
		result = prime * result + Arrays.hashCode(key);
		result = prime * result + ((type == null) ? 0 : type.hashCode());
		result = prime * result + Arrays.hashCode(value);
		return result;
	}

	@Override
	public boolean equals(final Object obj) {
		if (this == obj)
			return true;
		if (obj == null)
			return false;
		if (getClass() != obj.getClass())
			return false;
		final NodeEntry other = (NodeEntry) obj;
		if (!Arrays.equals(key, other.key))
			return false;
		if (type != other.type)
			return false;
		if (!Arrays.equals(value, other.value))
			return false;
		return true;
	}

	public static int neededBytes(final Collection<NodeEntry> entries) {
		return entries.stream().mapToInt(NodeEntry::size).sum();
	}

	public int compare(final byte[] otherKey) {
		return ByteArrayKey.compare(key, otherKey);
	}

	public boolean isPrefix(final byte[] keyPrefix) {
		return ByteArrayKey.compareKeyPrefix(key, keyPrefix) == 0;
	}

	/**
	 * Same as {@link #compare(byte[])}, but returns 0 if {@code prefix} is a
	 * prefix of the key. {@link #compare(byte[])} returns values > 0 in that case,
	 * because the key is longer than the prefix.
	 *
	 * @param prefix the prefix
	 * @return 0 if {@code prefix} is a prefix of the key, otherwise the value is
	 *         defined by {@link #compare(byte[])}
	 */
	public int compareKeyPrefix(final byte[] prefix) {
		return ByteArrayKey.compareKeyPrefix(key, prefix);
	}

	public boolean equal(final byte[] otherKey) {
		return compare(otherKey) == 0;
	}

	public boolean isDataNode() {
		return type == ValueType.VALUE_INLINE;
	}

	public boolean isInnerNode() {
		return type == ValueType.NODE_POINTER;
	}

}

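
A short sketch of the NodeEntry type byte round trip and the on-disk size accounting (one type byte plus key plus value). Since NodeEntry and ValueType are package-private, the demo is assumed to live in org.lucares.pdb.map.

public class NodeEntryDemo {

	public static void main(final String[] args) {
		// the type byte survives a round trip through fromByte
		final byte b = NodeEntry.ValueType.NODE_POINTER.asByte();
		System.out.println(NodeEntry.ValueType.fromByte(b)); // NODE_POINTER

		// size() = 1 type byte + key bytes + value bytes
		final NodeEntry entry = new NodeEntry(NodeEntry.ValueType.VALUE_INLINE,
				new byte[] { 1, 2, 3 }, new byte[] { 9 });
		System.out.println(entry.size()); // 5
	}
}
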
@@ -23,470 +23,470 @@ import org.slf4j.LoggerFactory;

public class PersistentMap<K, V> implements AutoCloseable {

	private static final Logger LOGGER = LoggerFactory.getLogger(PersistentMap.class);

	// the maximum key
	static final byte[] MAX_KEY;
	static {
		MAX_KEY = new byte[20];
		Arrays.fill(MAX_KEY, Byte.MAX_VALUE);
	}

	interface VisitorCallback {
		void visit(PersistentMapDiskNode node, PersistentMapDiskNode parentNode, NodeEntry nodeEntry, int depth);
	}

	public interface EncoderDecoder<O> {
		public byte[] encode(O object);

		public O decode(byte[] bytes);

		public default Function<byte[], O> asDecoder() {
			return bytes -> this.decode(bytes);
		}

		public default Function<O, byte[]> asEncoder() {
			return plain -> this.encode(plain);
		}

		public byte[] getEmptyValue();
	}

	private static final class StringCoder implements EncoderDecoder<String> {

		@Override
		public byte[] encode(final String object) {
			return object.getBytes(StandardCharsets.UTF_8);
		}

		@Override
		public String decode(final byte[] bytes) {
			return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
		}

		@Override
		public byte[] getEmptyValue() {
			return new byte[] { 0 };
		}
	}

	private static final class LongCoder implements EncoderDecoder<Long> {

		@Override
		public byte[] encode(final Long object) {
			return VariableByteEncoder.encode(object);
		}

		@Override
		public Long decode(final byte[] bytes) {
			return bytes == null ? null : VariableByteEncoder.decodeFirstValue(bytes);
		}

		@Override
		public byte[] getEmptyValue() {
			return new byte[] { 0 };
		}
	}

	private static final class UUIDCoder implements EncoderDecoder<UUID> {

		@Override
		public byte[] encode(final UUID uuid) {
			final long mostSignificantBits = uuid.getMostSignificantBits();
			final long leastSignificantBits = uuid.getLeastSignificantBits();
			return VariableByteEncoder.encode(mostSignificantBits, leastSignificantBits);
		}

		@Override
		public UUID decode(final byte[] bytes) {
			final LongList longs = VariableByteEncoder.decode(bytes);
			final long mostSignificantBits = longs.get(0);
			final long leastSignificantBits = longs.get(1);

			return new UUID(mostSignificantBits, leastSignificantBits);
		}

		@Override
		public byte[] getEmptyValue() {
			return new byte[] { 0 };
		}
	}

	private static final class EmptyCoder implements EncoderDecoder<Empty> {

		private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];

		@Override
		public byte[] encode(final Empty __) {
			return EMPTY_BYTE_ARRAY;
		}

		@Override
		public Empty decode(final byte[] bytes) {
			Preconditions.checkTrue(bytes.length == 0, "");

			return Empty.INSTANCE;
		}

		@Override
		public byte[] getEmptyValue() {
			return new byte[] {};
		}
	}

	public static final EncoderDecoder<Long> LONG_CODER = new LongCoder();
	public static final EncoderDecoder<UUID> UUID_ENCODER = new UUIDCoder();
	public static final EncoderDecoder<String> STRING_CODER = new StringCoder();
	public static final EncoderDecoder<Empty> EMPTY_ENCODER = new EmptyCoder();

	static final int BLOCK_SIZE = 4096;
	static final long NODE_OFFSET_TO_ROOT_NODE = 8;

	private final DiskStorage diskStore;

	private int maxEntriesInNode = Integer.MAX_VALUE;

	private final EncoderDecoder<K> keyEncoder;

	private final EncoderDecoder<V> valueEncoder;

	private final LRUCache<Long, PersistentMapDiskNode> nodeCache = new LRUCache<>(10_000);

	private final LRUCache<K, V> valueCache = new LRUCache<>(1_000);

	public PersistentMap(final Path path, final Path storageBasePath, final EncoderDecoder<K> keyEncoder,
			final EncoderDecoder<V> valueEncoder) {
		this.diskStore = new DiskStorage(path, storageBasePath);
		this.keyEncoder = keyEncoder;
		this.valueEncoder = valueEncoder;
		initIfNew();
	}

	@Override
	public void close() {
		diskStore.close();
	}

	public void setMaxEntriesInNode(final int maxEntriesInNode) {
		this.maxEntriesInNode = maxEntriesInNode;
	}

	private void initIfNew() {
		if (diskStore.size() < BLOCK_SIZE) {
			// 1. reserve the well-known offset that stores the pointer to the root node
			final long nodeOffsetToRootNode = diskStore.allocateBlock(diskStore.minAllocationSize());
			Preconditions.checkEqual(nodeOffsetToRootNode, NODE_OFFSET_TO_ROOT_NODE,
					"The offset of the pointer to the root node must be at a well known location. "
							+ "Otherwise we would not be able to find it in an already existing file.");

			// 2. make sure new blocks are aligned to the block size (for faster disk IO)
			diskStore.ensureAlignmentForNewBlocks(BLOCK_SIZE);

			// 3. initialize an empty root node
			final long blockOffset = diskStore.allocateBlock(BLOCK_SIZE);
			final var rootNode = PersistentMapDiskNode.emptyRootNode(blockOffset);
			writeNode(rootNode);

			// 4. update pointer to root node
			writeNodeOffsetOfRootNode(blockOffset);

			// 5. insert a dummy entry with a 'maximum' key
			putValue(MAX_KEY, valueEncoder.getEmptyValue());
		}
	}

	public synchronized void putAllValues(final Map<K, V> map) {
		for (final Entry<K, V> e : map.entrySet()) {
			putValue(e.getKey(), e.getValue());
		}
	}

	public synchronized V putValue(final K key, final V value) {

		final V cachedValue = valueCache.get(key);
		if (cachedValue != null && cachedValue == value) {
			return value;
		}

		final byte[] encodedKey = keyEncoder.encode(key);
		final byte[] encodedValue = valueEncoder.encode(value);
		final byte[] encodedOldValue = putValue(encodedKey, encodedValue);
		final V oldValue = encodedOldValue == null ? null : valueEncoder.decode(encodedOldValue);
		valueCache.put(key, value);
		return oldValue;
	}

	public synchronized V getValue(final K key) {

		final V cachedValue = valueCache.get(key);
		if (cachedValue != null) {
			return cachedValue;
		}

		final byte[] encodedKey = keyEncoder.encode(key);
		final byte[] foundValue = getValue(encodedKey);
		final V result = foundValue == null ? null : valueEncoder.decode(foundValue);
		valueCache.put(key, result);
		return result;
	}

	private byte[] putValue(final byte[] key, final byte[] value) {
		final long rootNodeOffset = readNodeOffsetOfRootNode();
		final Stack<PersistentMapDiskNode> parents = new Stack<>();
		return insert(parents, rootNodeOffset, key, value);
	}

	private byte[] getValue(final byte[] key) {
		final long rootNodeOffset = readNodeOffsetOfRootNode();
		final NodeEntry entry = findNodeEntry(rootNodeOffset, key);

		return entry == null ? null : entry.getValue();
	}

	private byte[] insert(final Stack<PersistentMapDiskNode> parents, final long nodeOffset, final byte[] key,
			final byte[] value) {
		final PersistentMapDiskNode node = getNode(nodeOffset);

		final NodeEntry entry = node.getNodeEntryTo(key);
		if (entry == null || entry.isDataNode()) {

			final byte[] oldValue;
			if (entry == null) {
				oldValue = null;
			} else {
				// found a NodeEntry that is either equal to key or at the insertion point
				final boolean entryIsForKey = entry.equal(key);

				oldValue = entryIsForKey ? entry.getValue() : null;

				// Early exit if the old value equals the new value (compared by content;
				// Objects.equals on byte arrays would only compare references).
				// We do not have to replace the value, because it would not change anything
				// (just cause unnecessary write operations). But we return the oldValue so
				// that the caller thinks we replaced the value.
				if (Arrays.equals(oldValue, value)) {
					return oldValue;
				}

				if (entryIsForKey) {
					node.removeKey(key);
				}
			}

			if (node.canAdd(key, value, maxEntriesInNode)) {
				// insert in existing node
				node.addKeyValue(key, value);
				writeNode(node);
				return oldValue;
			} else {
				// add a new node
				// 1. split the current node into A and B
				splitNode(parents, node);

				// 2. insert the value
				// start from the root, because we might have added a new root node
				return putValue(key, value);
			}
		} else {
			final long childNodeOffset = toNodeOffset(entry);
			parents.add(node);
			return insert(parents, childNodeOffset, key, value);
		}
	}

	private PersistentMapDiskNode splitNode(final Stack<PersistentMapDiskNode> parents,
			final PersistentMapDiskNode node) {

		// System.out.println("\n\npre split node: " + node + "\n");

		final long newBlockOffset = diskStore.allocateBlock(BLOCK_SIZE);

		final PersistentMapDiskNode newNode = node.split(newBlockOffset);

		final PersistentMapDiskNode parent = parents.isEmpty() ? null : parents.pop();

		if (parent != null) {
			final byte[] newNodeKey = newNode.getTopNodeEntry().getKey();
			if (parent.canAdd(newNodeKey, newBlockOffset, maxEntriesInNode)) {
				parent.addKeyNodePointer(newNodeKey, newBlockOffset);
				writeNode(parent);
				writeNode(newNode);
				writeNode(node);
				return parent;
			} else {
				final PersistentMapDiskNode grandParentNode = splitNode(parents, parent);

				final NodeEntry pointerToParentAfterSplit = grandParentNode.getNodeEntryTo(newNodeKey);

				Preconditions.checkEqual(pointerToParentAfterSplit.isInnerNode(), true, "{0} is pointer to inner node",
						pointerToParentAfterSplit);
				// the parent we have to add the newNode to
				final long parentNodeOffset = toNodeOffset(pointerToParentAfterSplit);
				final PersistentMapDiskNode parentNode = getNode(parentNodeOffset);
				parentNode.addKeyNodePointer(newNodeKey, newBlockOffset);
				writeNode(parentNode);
				writeNode(newNode);
				writeNode(node);
				return parentNode;
			}

		} else {
			// has no parent -> create a new parent (the new parent will also be the new
			// root)
			final long newRootNodeOffset = diskStore.allocateBlock(BLOCK_SIZE);
			final PersistentMapDiskNode rootNode = PersistentMapDiskNode.emptyRootNode(newRootNodeOffset);
			final byte[] newNodeKey = newNode.getTopNodeEntry().getKey();
			rootNode.addKeyNodePointer(newNodeKey, newBlockOffset);

			final byte[] oldNodeKey = node.getTopNodeEntry().getKey();
			rootNode.addKeyNodePointer(oldNodeKey, node.getNodeOffset());
			writeNode(rootNode);

			writeNode(newNode);
			writeNode(node);

			writeNodeOffsetOfRootNode(newRootNodeOffset);
			return rootNode;
		}
	}

	private NodeEntry findNodeEntry(final long nodeOffset, final byte[] key) {
		final PersistentMapDiskNode node = getNode(nodeOffset);

		final var entry = node.getNodeEntryTo(key);
		if (entry == null) {
			return null;
		} else if (entry.isDataNode()) {
			if (entry.equal(key)) {
				return entry;
			} else {
				return null;
			}
		} else {
			final long childNodeOffset = toNodeOffset(entry);
			return findNodeEntry(childNodeOffset, key);
		}
	}

	private long toNodeOffset(final NodeEntry entry) {
		Preconditions.checkEqual(entry.isInnerNode(), true);
		return VariableByteEncoder.decodeFirstValue(entry.getValue());
	}

	private PersistentMapDiskNode getNode(final long nodeOffset) {

		PersistentMapDiskNode node = nodeCache.get(nodeOffset);
		if (node == null) {

			final DiskBlock diskBlock = diskStore.getDiskBlock(nodeOffset, BLOCK_SIZE);

			node = PersistentMapDiskNode.parse(nodeOffset, diskBlock);
			nodeCache.put(nodeOffset, node);
		}

		return node;
	}

	private void writeNode(final PersistentMapDiskNode node) {
		if (LOGGER.isTraceEnabled()) {
			LOGGER.trace("writing node {}", node.toString(keyEncoder.asDecoder(), valueEncoder.asDecoder()));
		}
		final long nodeOffset = node.getNodeOffset();
		DiskBlock diskBlock = node.getDiskBlock();
		if (diskBlock == null) {
			diskBlock = diskStore.getDiskBlock(nodeOffset, BLOCK_SIZE);
		}
		final byte[] buffer = diskBlock.getBuffer();
		final byte[] newBuffer = node.serialize();
		System.arraycopy(newBuffer, 0, buffer, 0, buffer.length);
		diskBlock.writeAsync();
		// diskBlock.force(); // makes writing nodes slower by a factor of 800 (sic!)
	}

	public synchronized void print() {

		visitNodeEntriesPreOrder((node, parentNode, nodeEntry, depth) -> {

			final PrintStream writer = System.out;

			final String children = "#" + node.getEntries().size();

			writer.println(" ".repeat(depth) + "@" + node.getNodeOffset() + " " + children + " " + nodeEntry
					.toString(b -> String.valueOf(keyEncoder.decode(b)), b -> String.valueOf(valueEncoder.decode(b))));
		});
	}

	public synchronized void visitNodeEntriesPreOrder(final VisitorCallback visitor) {
		final long rootNodeOffset = readNodeOffsetOfRootNode();
		visitNodeEntriesPreOrderRecursively(rootNodeOffset, null, visitor, 0);
	}

	private void visitNodeEntriesPreOrderRecursively(final long nodeOffset, final PersistentMapDiskNode parentNode,
			final VisitorCallback visitor, final int depth) {
		final PersistentMapDiskNode node = getNode(nodeOffset);

		for (final NodeEntry child : node.getEntries()) {

			visitor.visit(node, parentNode, child, depth);
			if (child.isInnerNode()) {
				final long childNodeOffset = VariableByteEncoder.decodeFirstValue(child.getValue());
				visitNodeEntriesPreOrderRecursively(childNodeOffset, node, visitor, depth + 1);
			}
		}
	}

	enum VisitByPrefixMode {
		FIND, ITERATE
	}

	public synchronized void visitValues(final K keyPrefix, final Visitor<K, V> visitor) {
		final byte[] encodedKeyPrefix = keyEncoder.encode(keyPrefix);

		final long rootNodeOffset = readNodeOffsetOfRootNode();
		iterateNodeEntryByPrefix(rootNodeOffset, encodedKeyPrefix, visitor);
	}

	private void iterateNodeEntryByPrefix(final long nodeOffset, final byte[] keyPrefix, final Visitor<K, V> visitor) {
		final PersistentMapDiskNode node = getNode(nodeOffset);

		// list of children that might contain a key with the keyPrefix
		final List<NodeEntry> nodesForPrefix = node.getNodesByPrefix(keyPrefix);

		for (final NodeEntry entry : nodesForPrefix) {

			if (entry.isDataNode()) {
				final int prefixCompareResult = entry.compareKeyPrefix(keyPrefix);
				if (prefixCompareResult == 0) {

					if (Arrays.equals(entry.getKey(), MAX_KEY)) {
						continue;
					}
					final K key = keyEncoder.decode(entry.getKey());
					final V value = valueEncoder.decode(entry.getValue());
					visitor.visit(key, value);

					// System.out.println("--> " + key + "=" + value);
				} else if (prefixCompareResult > 0) {
					break;
				}
			} else {
				final long childNodeOffset = toNodeOffset(entry);
				iterateNodeEntryByPrefix(childNodeOffset, keyPrefix, visitor);
			}
		}
	}

	private long readNodeOffsetOfRootNode() {
		final DiskBlock diskBlock = diskStore.getDiskBlock(NODE_OFFSET_TO_ROOT_NODE, diskStore.minAllocationSize());

		return diskBlock.getByteBuffer().getLong(0);
	}

	private void writeNodeOffsetOfRootNode(final long newNodeOffsetToRootNode) {
		final DiskBlock diskBlock = diskStore.getDiskBlock(NODE_OFFSET_TO_ROOT_NODE, diskStore.minAllocationSize());
		diskBlock.getByteBuffer().putLong(0, newNodeOffsetToRootNode);
		diskBlock.force();
	}

}

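
A hedged usage sketch of PersistentMap as a whole: the file paths are hypothetical, and it assumes that Visitor<K, V> (referenced above but not shown in this diff) is a functional interface with a visit(K, V) method.

import java.nio.file.Path;

import org.lucares.pdb.map.PersistentMap;

public class PersistentMapDemo {

	public static void main(final String[] args) {
		final Path file = Path.of("/tmp/demo-map.pdb"); // hypothetical paths
		final Path base = Path.of("/tmp");

		try (PersistentMap<String, Long> map = new PersistentMap<>(file, base,
				PersistentMap.STRING_CODER, PersistentMap.LONG_CODER)) {
			map.putValue("cpu.load", 42L);
			System.out.println(map.getValue("cpu.load")); // 42

			// prefix scan over all keys starting with "cpu."
			map.visitValues("cpu.", (key, value) -> System.out.println(key + "=" + value));
		}
	}
}
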
@@ -42,256 +42,256 @@ import org.lucares.utils.byteencoder.VariableByteEncoder;
|
||||
*/
|
||||
public class PersistentMapDiskNode {
|
||||
|
||||
private final TreeMap<ByteArrayKey, NodeEntry> entries;
|
||||
private final long nodeOffset;
|
||||
private final DiskBlock diskBlock;
|
||||
private final TreeMap<ByteArrayKey, NodeEntry> entries;
|
||||
private final long nodeOffset;
|
||||
private final DiskBlock diskBlock;
|
||||
|
||||
public PersistentMapDiskNode(final long nodeOffset, final Collection<NodeEntry> entries,
|
||||
final DiskBlock diskBlock) {
|
||||
this.nodeOffset = nodeOffset;
|
||||
this.diskBlock = diskBlock;
|
||||
this.entries = toMap(entries);
|
||||
}
|
||||
public PersistentMapDiskNode(final long nodeOffset, final Collection<NodeEntry> entries,
|
||||
final DiskBlock diskBlock) {
|
||||
this.nodeOffset = nodeOffset;
|
||||
this.diskBlock = diskBlock;
|
||||
this.entries = toMap(entries);
|
||||
}
|
||||
|
||||
private static TreeMap<ByteArrayKey, NodeEntry> toMap(final Collection<NodeEntry> nodeEntries) {
|
||||
final TreeMap<ByteArrayKey, NodeEntry> result = new TreeMap<>();
|
||||
private static TreeMap<ByteArrayKey, NodeEntry> toMap(final Collection<NodeEntry> nodeEntries) {
|
||||
final TreeMap<ByteArrayKey, NodeEntry> result = new TreeMap<>();
|
||||
|
||||
for (final NodeEntry nodeEntry : nodeEntries) {
|
||||
result.put(new ByteArrayKey(nodeEntry.getKey()), nodeEntry);
|
||||
}
|
||||
for (final NodeEntry nodeEntry : nodeEntries) {
|
||||
result.put(new ByteArrayKey(nodeEntry.getKey()), nodeEntry);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
public static PersistentMapDiskNode emptyRootNode(final long nodeOffset) {
|
||||
return new PersistentMapDiskNode(nodeOffset, Collections.emptyList(), null);
|
||||
}
|
||||
public static PersistentMapDiskNode emptyRootNode(final long nodeOffset) {
|
||||
return new PersistentMapDiskNode(nodeOffset, Collections.emptyList(), null);
|
||||
}
|
||||
|
||||
public static PersistentMapDiskNode parse(final long nodeOffset, final DiskBlock diskBlock) {
|
||||
final byte[] data = diskBlock.getBuffer();
|
||||
if (data.length != PersistentMap.BLOCK_SIZE) {
|
||||
throw new IllegalStateException(
|
||||
"block size must be " + PersistentMap.BLOCK_SIZE + " but was " + data.length);
|
||||
}
|
||||
final LongList longs = VariableByteEncoder.decode(data);
|
||||
public static PersistentMapDiskNode parse(final long nodeOffset, final DiskBlock diskBlock) {
|
||||
final byte[] data = diskBlock.getBuffer();
|
||||
if (data.length != PersistentMap.BLOCK_SIZE) {
|
||||
throw new IllegalStateException(
|
||||
"block size must be " + PersistentMap.BLOCK_SIZE + " but was " + data.length);
|
||||
}
|
||||
final LongList longs = VariableByteEncoder.decode(data);
|
||||
|
||||
final List<NodeEntry> entries = deserialize(longs, data);
|
||||
return new PersistentMapDiskNode(nodeOffset, entries, diskBlock);
|
||||
}
|
||||
final List<NodeEntry> entries = deserialize(longs, data);
|
||||
return new PersistentMapDiskNode(nodeOffset, entries, diskBlock);
|
||||
}
|
||||
|
||||
public static List<NodeEntry> deserialize(final LongList keyLengths, final byte[] buffer) {
|
||||
final List<NodeEntry> entries = new ArrayList<>();
|
||||
public static List<NodeEntry> deserialize(final LongList keyLengths, final byte[] buffer) {
|
||||
final List<NodeEntry> entries = new ArrayList<>();
|
||||
|
||||
if (keyLengths.isEmpty() || keyLengths.get(0) == 0) {
|
||||
// node is empty -> should only happen for the root node
|
||||
} else {
|
||||
final int numEntries = (int) keyLengths.get(0);
|
||||
if (keyLengths.isEmpty() || keyLengths.get(0) == 0) {
|
||||
// node is empty -> should only happen for the root node
|
||||
} else {
|
||||
final int numEntries = (int) keyLengths.get(0);
|
||||
|
||||
int offset = PersistentMap.BLOCK_SIZE;
|
||||
for (int i = 0; i < numEntries; i++) {
|
||||
final int keyLength = (int) keyLengths.get(i * 2 + 1);
|
||||
final int valueLength = (int) keyLengths.get(i * 2 + 2);
|
||||
int offset = PersistentMap.BLOCK_SIZE;
|
||||
for (int i = 0; i < numEntries; i++) {
|
||||
final int keyLength = (int) keyLengths.get(i * 2 + 1);
|
||||
final int valueLength = (int) keyLengths.get(i * 2 + 2);
|
||||
|
||||
final int valueOffset = offset - valueLength;
|
||||
final int keyOffset = valueOffset - keyLength;
|
||||
final int typeOffset = keyOffset - 1;
|
||||
final int valueOffset = offset - valueLength;
|
||||
final int keyOffset = valueOffset - keyLength;
|
||||
final int typeOffset = keyOffset - 1;
|
||||
|
||||
final byte typeByte = buffer[typeOffset];
|
||||
final byte[] key = Arrays.copyOfRange(buffer, keyOffset, keyOffset + keyLength);
|
||||
final byte[] value = Arrays.copyOfRange(buffer, valueOffset, valueOffset + valueLength);
|
||||
final byte typeByte = buffer[typeOffset];
|
||||
final byte[] key = Arrays.copyOfRange(buffer, keyOffset, keyOffset + keyLength);
|
||||
final byte[] value = Arrays.copyOfRange(buffer, valueOffset, valueOffset + valueLength);
|
||||
|
||||
final NodeEntry entry = new NodeEntry(ValueType.fromByte(typeByte), key, value);
|
||||
final NodeEntry entry = new NodeEntry(ValueType.fromByte(typeByte), key, value);
|
||||
|
||||
entries.add(entry);
|
||||
entries.add(entry);
|
||||
|
||||
offset = typeOffset;
|
||||
}
|
||||
}
|
||||
return entries;
|
||||
}
|
||||
offset = typeOffset;
|
||||
}
|
||||
}
|
||||
return entries;
|
||||
}
|
||||
|
||||
public byte[] serialize() {
|
||||
public byte[] serialize() {
|
||||
|
||||
return serialize(entries);
|
||||
}
|
||||
return serialize(entries);
|
||||
}
|
||||
|
||||
public DiskBlock getDiskBlock() {
|
||||
return diskBlock;
|
||||
}
|
||||
public DiskBlock getDiskBlock() {
|
||||
return diskBlock;
|
||||
}
|
||||
|
||||
public long getNodeOffset() {
|
||||
return nodeOffset;
|
||||
}
|
||||
public long getNodeOffset() {
|
||||
return nodeOffset;
|
||||
}
|
||||
|
||||
public NodeEntry getNodeEntryTo(final byte[] key) {
|
||||
public NodeEntry getNodeEntryTo(final byte[] key) {
|
||||
|
||||
final Entry<ByteArrayKey, NodeEntry> ceilingEntry = entries.ceilingEntry(new ByteArrayKey(key));
|
||||
return ceilingEntry != null ? ceilingEntry.getValue() : null;
|
||||
}
|
||||
final Entry<ByteArrayKey, NodeEntry> ceilingEntry = entries.ceilingEntry(new ByteArrayKey(key));
|
||||
return ceilingEntry != null ? ceilingEntry.getValue() : null;
|
||||
}

    public List<NodeEntry> getNodesByPrefix(final byte[] keyPrefix) {
        final List<NodeEntry> result = new ArrayList<>();

        for (final NodeEntry nodeEntry : entries.values()) {
            final int prefixCompareResult = nodeEntry.compareKeyPrefix(keyPrefix);
            if (prefixCompareResult == 0) {
                // add all entries where keyPrefix is a prefix of the key
                result.add(nodeEntry);
            } else if (prefixCompareResult > 0) {
                // Only add the first entry where the keyPrefix is smaller (as defined by
                // compareKeyPrefix) than the key.
                // These are entries that might contain keys with the keyPrefix. But only the
                // first of those can really have such keys.
                result.add(nodeEntry);
                break;
            }
        }

        return result;
    }
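A sketch of why the scan includes one extra entry: assuming inner-node keys act as upper bounds for their child subtrees, the first key that compares greater than the prefix can still point at a child holding prefixed keys. Illustrative names only:

import java.util.List;

public class PrefixScanSketch {
    // Keys of an inner node, sorted; each is the largest key of its child subtree.
    static final List<String> NODE_KEYS = List.of("apple", "banana", "cherry");

    public static void main(String[] args) {
        String prefix = "bac";
        for (String key : NODE_KEYS) {
            int cmp = comparePrefix(key, prefix);
            if (cmp == 0) {
                System.out.println("descend into child of " + key); // prefix matches
            } else if (cmp > 0) {
                // "banana" > "bac", but its child may still hold keys like "bacon"
                System.out.println("descend into first greater child " + key);
                break;
            }
        }
    }

    // Compare only the first prefix.length() characters, like compareKeyPrefix.
    static int comparePrefix(String key, String prefix) {
        String head = key.substring(0, Math.min(key.length(), prefix.length()));
        return head.compareTo(prefix);
    }
}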

    public void addKeyValue(final byte[] key, final byte[] value) {
        addNode(ValueType.VALUE_INLINE, key, value);
    }

    public void addKeyNodePointer(final byte[] key, final long nodePointer) {
        final byte[] value = VariableByteEncoder.encode(nodePointer);
        addNode(ValueType.NODE_POINTER, key, value);
    }

    public void addNode(final ValueType valueType, final byte[] key, final byte[] value) {

        final NodeEntry entry = new NodeEntry(valueType, key, value);
        entries.put(new ByteArrayKey(key), entry);
    }

    public boolean canAdd(final byte[] key, final long nodeOffset, final int maxEntriesInNode) {
        return canAdd(key, VariableByteEncoder.encode(nodeOffset), maxEntriesInNode);
    }

    public boolean canAdd(final byte[] key, final byte[] value, final int maxEntriesInNode) {

        if (entries.size() > maxEntriesInNode) {
            return false;
        } else {
            final NodeEntry entry = new NodeEntry(ValueType.VALUE_INLINE, key, value);
            final List<NodeEntry> tmp = new ArrayList<>(entries.size() + 1);
            tmp.addAll(entries.values());
            tmp.add(entry);

            // the +1 is for the null-byte terminator of the prefix
            return neededBytesTotal(tmp) + 1 <= PersistentMap.BLOCK_SIZE;
        }
    }
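A usage sketch of the two admission checks above (entry count first, then encoded size including the prefix terminator); the node variable, the limit of 16 entries, and the imported StandardCharsets are assumed for illustration:

byte[] key = "key42".getBytes(StandardCharsets.UTF_8);
byte[] value = "value42".getBytes(StandardCharsets.UTF_8);

if (node.canAdd(key, value, 16)) {
    node.addKeyValue(key, value); // fits both limits: entry count and encoded size
} else {
    // the node is full; the caller has to split it first (see split below)
}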

    public void removeKey(final byte[] key) {
        entries.remove(new ByteArrayKey(key));
    }

    public List<NodeEntry> getEntries() {
        return new ArrayList<>(entries.values());
    }

    public void clear() {
        entries.clear();
    }

    @Override
    public String toString() {
        return "@" + nodeOffset + ": "
                + String.join("\n", entries.values().stream().map(NodeEntry::toString).collect(Collectors.toList()));
    }

    public <K, V> String toString(Function<byte[], K> keyDecoder, Function<byte[], V> valueDecoder) {
        StringBuilder result = new StringBuilder();
        result.append("@");
        result.append(nodeOffset);
        result.append(": ");
        for (NodeEntry e : entries.values()) {
            String s = e.toString(keyDecoder, valueDecoder);
            result.append("\n");
            result.append(s);
        }

        return result.toString();
    }

    public NodeEntry getTopNodeEntry() {
        return entries.lastEntry().getValue();
    }
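For debugging dumps, the decoder overload can be driven with plain UTF-8 decoders; a small usage sketch (the node variable is assumed):

Function<byte[], String> utf8 = bytes -> new String(bytes, StandardCharsets.UTF_8);
System.out.println(node.toString(utf8, utf8));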

    public PersistentMapDiskNode split(final long newBlockOffset) {

        final List<NodeEntry> entriesAsCollection = new ArrayList<>(entries.values());

        final var leftEntries = new ArrayList<>(entriesAsCollection.subList(0, entriesAsCollection.size() / 2));
        final var rightEntries = new ArrayList<>(
                entriesAsCollection.subList(entriesAsCollection.size() / 2, entriesAsCollection.size()));

        entries.clear();
        entries.putAll(toMap(rightEntries));

        return new PersistentMapDiskNode(newBlockOffset, leftEntries, null);
    }
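A sketch of the split contract as implemented above: the lower half of the sorted entries moves to a freshly allocated block, the upper half stays in place. Block allocation via DiskStorage and the parent-update step are assumptions about the calling code:

if (!node.canAdd(key, value, maxEntriesInNode)) {
    final long newOffset = diskStorage.allocateBlock(PersistentMap.BLOCK_SIZE);
    final PersistentMapDiskNode left = node.split(newOffset);
    // 'node' now holds the upper half; 'left' holds the lower half at newOffset.
    // The parent must be updated: left's largest key now routes to newOffset.
}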

    public static int neededBytesTotal(final List<NodeEntry> entries) {
        final byte[] buffer = new byte[PersistentMap.BLOCK_SIZE];

        final int usedBytes = serializePrefix(entries, buffer);

        return usedBytes + NodeEntry.neededBytes(entries);
    }

    private static byte[] serialize(final Map<ByteArrayKey, NodeEntry> entries) {
        final byte[] buffer = new byte[PersistentMap.BLOCK_SIZE];
        final Collection<NodeEntry> entriesAsCollection = entries.values();
        final int usedBytes = serializePrefix(entriesAsCollection, buffer);

        // the +1 is for the null-byte terminator of the prefix
        Preconditions.checkGreaterOrEqual(PersistentMap.BLOCK_SIZE,
                usedBytes + 1 + NodeEntry.neededBytes(entriesAsCollection),
                "The node is too big. It cannot be encoded into " + PersistentMap.BLOCK_SIZE + " bytes.");

        serializeIntoFromTail(entriesAsCollection, buffer);
        return buffer;
    }

    private static int serializePrefix(final Collection<NodeEntry> entries, final byte[] buffer) {
        final LongList longs = serializeKeyLengths(entries);

        final int usedBytes = VariableByteEncoder.encodeInto(longs, buffer, 0);
        return usedBytes;
    }

    private static LongList serializeKeyLengths(final Collection<NodeEntry> entries) {
        final var keyLengths = new LongList();
        keyLengths.add(entries.size());
        for (final NodeEntry nodeEntry : entries) {
            keyLengths.add(nodeEntry.getKey().length);
            keyLengths.add(nodeEntry.getValue().length);
        }

        return keyLengths;
    }

    private static void serializeIntoFromTail(final Collection<NodeEntry> entries, final byte[] buffer) {

        int offset = buffer.length;

        for (final var entry : entries) {
            final byte[] valueBytes = entry.getValue();
            final byte[] keyBytes = entry.getKey();

            final int offsetValue = offset - valueBytes.length;
            final int offsetKey = offsetValue - keyBytes.length;
            final int offsetType = offsetKey - 1;

            System.arraycopy(valueBytes, 0, buffer, offsetValue, valueBytes.length);
            System.arraycopy(keyBytes, 0, buffer, offsetKey, keyBytes.length);
            buffer[offsetType] = entry.getType().asByte();

            offset = offsetType;
        }
    }

}
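VariableByteEncoder is project-specific; as a sketch of the general technique it names (assuming an LEB128-style encoding: 7 payload bits per byte, high bit set while more bytes follow), encoding a single long could look like this. The return value is the offset just past the last written byte, which equals the byte count when starting at 0:

// Illustrative only; the real VariableByteEncoder may use a different scheme.
static int encodeInto(long value, byte[] buffer, int offset) {
    while ((value & ~0x7FL) != 0) {
        buffer[offset++] = (byte) ((value & 0x7F) | 0x80); // more bytes follow
        value >>>= 7;
    }
    buffer[offset++] = (byte) value; // last byte, high bit clear
    return offset;
}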

@@ -1,5 +1,5 @@
package org.lucares.pdb.map;

public interface Visitor<K, V> {
    void visit(K key, V value);
}
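Since Visitor has a single abstract method, callers can pass a lambda; a usage sketch (the traversal method that accepts the visitor is assumed to exist elsewhere in the map API):

Visitor<String, String> printer = (key, value) -> System.out.println(key + " = " + value);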

@@ -25,110 +25,110 @@ import org.testng.annotations.Test;

@Test
public class BSFileTest {

    private Path dataDirectory;

    @BeforeMethod
    public void beforeMethod() throws IOException {
        dataDirectory = Files.createTempDirectory("pdb");
    }

    @AfterMethod
    public void afterMethod() throws IOException {
        FileUtils.delete(dataDirectory);
    }

    public void testBlockStorage() throws Exception {
        final Path file = dataDirectory.resolve("data.int.db");
        final int numLongs = 1000;
        long blockOffset = -1;

        long start = System.nanoTime();

        try (final DiskStorage ds = new DiskStorage(file, dataDirectory)) {

            try (final BSFile bsFile = BSFile.newFile(ds, NullCustomizer.INSTANCE)) {

                blockOffset = bsFile.getRootBlockOffset();

                for (long i = 0; i < numLongs / 2; i++) {
                    bsFile.append(i);
                }
            }
            try (final BSFile bsFile = BSFile.existingFile(blockOffset, ds, NullCustomizer.INSTANCE)) {

                for (long i = numLongs / 2; i < numLongs; i++) {
                    bsFile.append(i);
                }
            }
        }
        System.out.println("duration write: " + (System.nanoTime() - start) / 1_000_000.0 + "ms");

        start = System.nanoTime();
        try (final DiskStorage ds = new DiskStorage(file, dataDirectory)) {
            final BSFile bsFile = BSFile.existingFile(blockOffset, ds, NullCustomizer.INSTANCE);
            final LongList actualLongs = bsFile.asLongList();
            final LongList expectedLongs = LongList.rangeClosed(0, numLongs - 1);
            Assert.assertEquals(actualLongs, expectedLongs);
        }
        System.out.println("duration read: " + (System.nanoTime() - start) / 1_000_000.0 + "ms");
    }

    public void testBlockStorageMultithreading() throws Exception {
        final ExecutorService pool = Executors.newCachedThreadPool();

        final Path file = dataDirectory.resolve("data.int.db");

        final int threads = 50;
        final int values = 10000;
        // note: 'expected' is written from the worker threads and read afterwards;
        // a ConcurrentHashMap would make that handover safer
        final Map<Long, LongList> expected = new HashMap<>();
        final List<Future<Void>> futures = new ArrayList<>();
        final long start = System.nanoTime();
        try (final DiskStorage ds = new DiskStorage(file, dataDirectory)) {

            for (int i = 0; i < threads; i++) {
                final Future<Void> future = pool.submit(() -> {
                    final ThreadLocalRandom random = ThreadLocalRandom.current();
                    final LongList listOfValues = new LongList();

                    try (BSFile bsFile = BSFile.newFile(ds, NullCustomizer.INSTANCE)) {

                        for (int j = 0; j < values; j++) {

                            // will produce 1, 2 and 3 byte sequences when encoded
                            final long value = random.nextLong(32768);
                            listOfValues.add(value);
                            bsFile.append(value);
                        }
                        expected.put(bsFile.getRootBlockOffset(), listOfValues);
                    }

                    return null;
                });
                futures.add(future);
            }

            for (final Future<Void> future : futures) {
                future.get();
            }

            pool.shutdown();
            pool.awaitTermination(5, TimeUnit.MINUTES);
        }
        System.out.println("duration write: " + (System.nanoTime() - start) / 1_000_000.0 + "ms");

        // verification
        try (final DiskStorage ds = new DiskStorage(file, dataDirectory)) {
            for (final Entry<Long, LongList> entry : expected.entrySet()) {
                final long rootBlockNumber = entry.getKey();
                final LongList expectedValues = entry.getValue();

                try (BSFile bsFile = BSFile.existingFile(rootBlockNumber, ds, NullCustomizer.INSTANCE)) {
                    final LongList actualLongs = bsFile.asLongList();
                    final LongList expectedLongs = expectedValues;
                    Assert.assertEquals(actualLongs, expectedLongs, "for rootBlockNumber=" + rootBlockNumber);
                }
            }
        }
    }

}
@@ -15,70 +15,70 @@ import org.testng.annotations.BeforeMethod;

public class TimeSeriesFileTest {

    private Path dataDirectory;

    @BeforeMethod
    public void beforeMethod() throws IOException {
        dataDirectory = Files.createTempDirectory("pdb");
    }

    @AfterMethod
    public void afterMethod() throws IOException {
        FileUtils.delete(dataDirectory);
    }

    public void testBlockStorageTimeValue() throws Exception {
        final Path file = dataDirectory.resolve("data.int.db");
        final Random random = ThreadLocalRandom.current();
        final int numTimeValuePairs = 1000;
        long blockNumber = -1;
        final LongList expectedLongs = new LongList();

        long start = System.nanoTime();
        long lastEpochMilli = 0;
        try (final DiskStorage ds = new DiskStorage(file, dataDirectory)) {

            try (final TimeSeriesFile bsFile = TimeSeriesFile.newFile(ds)) {

                blockNumber = bsFile.getRootBlockOffset();

                for (long i = 0; i < numTimeValuePairs / 2; i++) {

                    final long epochMilli = lastEpochMilli + random.nextInt(1000);
                    final long value = random.nextInt(10000);

                    lastEpochMilli = epochMilli;

                    bsFile.appendTimeValue(epochMilli, value);
                    expectedLongs.add(epochMilli);
                    expectedLongs.add(value);
                }
            }
            try (final TimeSeriesFile bsFile = TimeSeriesFile.existingFile(blockNumber, ds)) {

                for (long i = numTimeValuePairs / 2; i < numTimeValuePairs; i++) {
                    final long epochMilli = lastEpochMilli + random.nextInt(100);
                    final long value = random.nextInt(10000);

                    lastEpochMilli = epochMilli;

                    bsFile.appendTimeValue(epochMilli, value);
                    expectedLongs.add(epochMilli);
                    expectedLongs.add(value);
                }
            }
        }
        System.out.println("duration write: " + (System.nanoTime() - start) / 1_000_000.0 + "ms");

        start = System.nanoTime();
        try (final DiskStorage ds = new DiskStorage(file, dataDirectory)) {
            final TimeSeriesFile bsFile = TimeSeriesFile.existingFile(blockNumber, ds);
            final LongList actualLongs = bsFile.asTimeValueLongList();

            Assert.assertEquals(actualLongs, expectedLongs);
        }
        System.out.println("duration read: " + (System.nanoTime() - start) / 1_000_000.0 + "ms");
    }

}
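The test keeps epochMilli monotonically non-decreasing, which suggests TimeSeriesFile stores the delta between consecutive timestamps so the variable-byte encoding stays small (an assumption about its internals, not confirmed by this diff). A sketch of the idea:

long lastEpochMilli = 0;
for (long epochMilli : new long[] { 1_000, 1_250, 1_260 }) {
    long delta = epochMilli - lastEpochMilli; // small, non-negative deltas
    lastEpochMilli = epochMilli;
    // encode 'delta' with the variable-byte scheme instead of the absolute value
}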
@@ -18,289 +18,289 @@ import org.testng.annotations.Test;

@Test
public class DiskStorageTest {
    private static final int BLOCK_SIZE = 512;

    private Path dataDirectory;

    @BeforeMethod
    public void beforeMethod() throws IOException {
        dataDirectory = Files.createTempDirectory("pdb");
    }

    @AfterMethod
    public void afterMethod() throws IOException {
        FileUtils.delete(dataDirectory);
    }

    /**
     * File systems work with 4096 byte blocks, but we want to work with 512 bytes
     * per block. Does flushing a 512 byte block flush the full 4096 byte block?
     *
     * @throws Exception
     */
    @Test(enabled = false)
    public void testFlushingASectorOrABlock() throws Exception {
        final Path databaseFile = dataDirectory.resolve("db.ds");
        Files.deleteIfExists(databaseFile);

        try (DiskStorage ds = new DiskStorage(databaseFile, dataDirectory)) {
            final int numBlocks = 10;

            allocateBlocks(ds, numBlocks, BLOCK_SIZE);
            final List<DiskBlock> blocks = new ArrayList<>();

            // fill the first 10 512-byte blocks
            // that is more than one 4096 byte block
            for (int i = 0; i < numBlocks; i++) {
                final DiskBlock diskBlock = ds.getDiskBlock(i, BLOCK_SIZE);
                assertAllValuesAreEqual(diskBlock);
                fill(diskBlock, (byte) i);
                diskBlock.writeAsync();
                blocks.add(diskBlock);
            }

            // now force (aka flush) a block in the middle of the first 4096 byte block
            blocks.get(3).writeAsync();
            blocks.get(3).force();

            System.exit(0);

            // read all blocks again and check what they contain

            // 1. we do this with the existing file channel
            // this one should see every change, because we wrote them to the file channel
            for (int i = 0; i < numBlocks; i++) {
                final DiskBlock diskBlock = ds.getDiskBlock(i, BLOCK_SIZE);
                assertAllValuesAreEqual(diskBlock, (byte) i);
                fill(diskBlock, (byte) i);
                blocks.add(diskBlock);
            }

            // 2. we read the file from another file channel
            // this one might not see changes made to the first file channel
            //
            // But it does see the changes. Most likely, because both channels
            // use the same buffers from the operating system.
            try (DiskStorage ds2 = new DiskStorage(databaseFile, dataDirectory)) {
                for (int i = 0; i < numBlocks; i++) {
                    final DiskBlock diskBlock = ds2.getDiskBlock(i, BLOCK_SIZE);
                    assertAllValuesAreEqual(diskBlock, (byte) i);
                    fill(diskBlock, (byte) i);
                    blocks.add(diskBlock);
                }
            }
        }
    }
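The question this disabled test probes can be reproduced with plain NIO, independent of DiskStorage; a minimal sketch that writes one 512-byte slice and forces it to disk (the path is illustrative):

import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class ForceSketch {
    public static void main(String[] args) throws Exception {
        Path file = Path.of("db.ds"); // illustrative path
        try (FileChannel ch = FileChannel.open(file,
                StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            ByteBuffer block = ByteBuffer.allocate(512);
            while (block.hasRemaining()) {
                block.put((byte) 3);
            }
            block.flip();
            ch.write(block, 3 * 512L); // write the 4th 512-byte block
            ch.force(false);           // the OS typically flushes at least the containing 4096-byte page
        }
    }
}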
    @Test(enabled = true)
    public void testDiskStorage() throws Exception {
        final Path databaseFile = dataDirectory.resolve("db.ds");

        final ExecutorService pool = Executors.newCachedThreadPool();

        try (DiskStorage ds = new DiskStorage(databaseFile, dataDirectory)) {
            final int numBlocks = 10;

            final long[] blockOffsets = allocateBlocks(ds, numBlocks, BLOCK_SIZE);

            for (final long blockOffset : blockOffsets) {

                final long block = blockOffset;
                pool.submit(() -> {
                    final ThreadLocalRandom random = ThreadLocalRandom.current();
                    try {
                        // now read/write random blocks
                        for (int j = 0; j < 10; j++) {
                            final DiskBlock diskBlock = ds.getDiskBlock(block, BLOCK_SIZE);

                            assertAllValuesAreEqual(diskBlock);
                            fill(diskBlock, (byte) random.nextInt(127));

                            if (random.nextBoolean()) {
                                diskBlock.writeAsync();
                            } else {
                                diskBlock.writeAsync();
                                diskBlock.force();
                            }
                        }

                    } catch (final Exception e) {
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                });
            }

            pool.shutdown();
            pool.awaitTermination(1, TimeUnit.MINUTES);
        }
    }

    @Test(enabled = true, expectedExceptions = IllegalArgumentException.class)
    public void testAllocationSmallerThanMinimalBlockSize() throws Exception {
        final Path databaseFile = dataDirectory.resolve("db.ds");

        try (DiskStorage ds = new DiskStorage(databaseFile, dataDirectory)) {

            final int blockSize = 31; // minimal block size is 32
            ds.allocateBlock(blockSize);
        }
    }

    @Test(enabled = true)
    public void testAllocateAndFreeSingleBlockInFreeList() throws Exception {
        final Path databaseFile = dataDirectory.resolve("db.ds");

        try (DiskStorage ds = new DiskStorage(databaseFile, dataDirectory)) {

            final int blockSize = 32;
            final long block_8_39 = ds.allocateBlock(blockSize);
            final long block_40_71 = ds.allocateBlock(blockSize);
            final long block_72_103 = ds.allocateBlock(blockSize);

            Assert.assertEquals(block_8_39, 8);
            Assert.assertEquals(block_40_71, 40);
            Assert.assertEquals(block_72_103, 72);

            ds.free(block_40_71, blockSize);

            // should reuse the block we just freed
            final long actual_block_40_71 = ds.allocateBlock(blockSize);

            Assert.assertEquals(actual_block_40_71, 40);
        }
    }
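A minimal in-memory model of the free-list behaviour these tests exercise; a sketch only, since the real DiskStorage keeps its free list on disk as linked nodes rather than in a TreeSet:

import java.util.TreeMap;
import java.util.TreeSet;

// Allocates fixed-size blocks from a bump pointer, preferring freed offsets.
class FreeListModel {
    private final TreeMap<Integer, TreeSet<Long>> freeBySize = new TreeMap<>();
    private long nextOffset = 8; // the first 8 bytes are reserved, as the offsets in the tests imply

    long allocate(int blockSize) {
        TreeSet<Long> free = freeBySize.get(blockSize);
        if (free != null && !free.isEmpty()) {
            return free.pollFirst(); // reuse the lowest freed offset first
        }
        long offset = nextOffset;
        nextOffset += blockSize;
        return offset;
    }

    void free(long offset, int blockSize) {
        freeBySize.computeIfAbsent(blockSize, k -> new TreeSet<>()).add(offset);
    }
}

Run against the test above, this model yields the same offsets: three allocations return 8, 40 and 72; after free(40, 32) the next allocation returns 40 again.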

    @Test(enabled = true)
    public void testAllocateAndFreeMultipleBlocksInFreeList() throws Exception {
        final Path databaseFile = dataDirectory.resolve("db.ds");

        try (DiskStorage ds = new DiskStorage(databaseFile, dataDirectory)) {

            final int blockSize = 32;
            ds.allocateBlock(blockSize);
            final long block_40_71 = ds.allocateBlock(blockSize);
            final long block_72_103 = ds.allocateBlock(blockSize);
            final long block_104_135 = ds.allocateBlock(blockSize);
            ds.allocateBlock(blockSize);

            ds.free(block_72_103, blockSize);
            ds.free(block_104_135, blockSize);
            ds.free(block_40_71, blockSize); // the block with the smaller index is freed last, this increases line
                                             // coverage, because there is a branch for prepending the root node

            // should reuse the first block we just freed
            // this removes the root node of the free list
            final long actual_block_40_71 = ds.allocateBlock(blockSize);
            Assert.assertEquals(actual_block_40_71, 40);

            // should reuse the second block we just freed
            final long actual_block_72_103 = ds.allocateBlock(blockSize);
            Assert.assertEquals(actual_block_72_103, 72);

            // should reuse the third block we just freed
            // this removes the last node of the free list
            final long actual_block_104_135 = ds.allocateBlock(blockSize);
            Assert.assertEquals(actual_block_104_135, 104);

            final long block_168_199 = ds.allocateBlock(blockSize);
            Assert.assertEquals(block_168_199, 168);
        }
    }

    @Test(enabled = true)
    public void testAllocateAndFreeInsertFreeNodeInTheMiddleOfTheFreeList() throws Exception {
        final Path databaseFile = dataDirectory.resolve("db.ds");

        try (DiskStorage ds = new DiskStorage(databaseFile, dataDirectory)) {

            final int blockSize = 32;
            ds.allocateBlock(blockSize);
            ds.allocateBlock(blockSize);
            final long block_72_103 = ds.allocateBlock(blockSize);
            final long block_104_135 = ds.allocateBlock(blockSize);
            final long block_136_167 = ds.allocateBlock(blockSize);

            // free the last block first, to increase code coverage
            ds.free(block_136_167, blockSize);
            ds.free(block_72_103, blockSize);
            ds.free(block_104_135, blockSize);

            // the first free block is re-used
            final long actual_block_72_103 = ds.allocateBlock(blockSize);
            Assert.assertEquals(actual_block_72_103, block_72_103);

            final long actual_block_104_135 = ds.allocateBlock(blockSize);
            Assert.assertEquals(actual_block_104_135, block_104_135);

            final long actual_block_136_167 = ds.allocateBlock(blockSize);
            Assert.assertEquals(actual_block_136_167, block_136_167);
        }
    }

    @Test(enabled = true)
    public void testAllocateAndFreeMultipleBlocksWithDifferentSizes() throws Exception {
        final Path databaseFile = dataDirectory.resolve("db.ds");

        try (DiskStorage ds = new DiskStorage(databaseFile, dataDirectory)) {

            final int blockSizeSmall = 32;
            final int blockSizeBig = 64;
            ds.allocateBlock(blockSizeSmall);
            ds.allocateBlock(blockSizeSmall);
            final long big_block_72_103 = ds.allocateBlock(blockSizeBig);
            final long small_block_136_167 = ds.allocateBlock(blockSizeSmall);
            ds.allocateBlock(blockSizeSmall);

            ds.free(big_block_72_103, blockSizeBig);
            ds.free(small_block_136_167, blockSizeSmall);

            final long actual_small_block_136_167 = ds.allocateBlock(blockSizeSmall);
            Assert.assertEquals(actual_small_block_136_167, small_block_136_167);
        }
    }

    // despite their names, these helpers only log mismatches to stderr; they never fail the test
    private void assertAllValuesAreEqual(final DiskBlock diskBlock, final byte expectedVal) {
        final byte[] buffer = diskBlock.getBuffer();
        for (int i = 0; i < buffer.length; i++) {
            if (expectedVal != buffer[i]) {
                System.err.println(
                        "block " + diskBlock.getBlockOffset() + " " + buffer[i] + " != " + expectedVal + " at " + i);
                break;
            }
        }
    }

    private void assertAllValuesAreEqual(final DiskBlock diskBlock) {

        final byte[] buffer = diskBlock.getBuffer();
        final byte expected = buffer[0];
        for (int i = 0; i < buffer.length; i++) {
            if (expected != buffer[i]) {
                System.err.println(
                        "block " + diskBlock.getBlockOffset() + " " + buffer[i] + " != " + expected + " at " + i);
                break;
            }
        }
    }

    private void fill(final DiskBlock diskBlock, final byte val) {
        final byte[] buffer = diskBlock.getBuffer();

        for (int i = 0; i < buffer.length; i++) {
            buffer[i] = val;
        }
    }

    private long[] allocateBlocks(final DiskStorage ds, final int numNewBlocks, final int blockSize)
            throws IOException {

        final long[] result = new long[numNewBlocks];
        for (int i = 0; i < numNewBlocks; i++) {
            final long blockOffset = ds.allocateBlock(blockSize);
            result[i] = blockOffset;
        }
        return result;
    }
}

@@ -15,79 +15,79 @@ import java.util.concurrent.ThreadLocalRandom;

public class CsvTestDataCreator {

    private static final List<String> PODS = Arrays.asList("vapbrewe01", "vapfinra01", "vapondem01", "vapondem02",
            "vapondem03", "vapondem04", "vapnyse01", "vapnorto01", "vapfackb01", "vaprjrey01", "vadtrans01",
            "vadaxcel09", "vadaxcel66");
    private static final List<String> HOSTS = new ArrayList<>();
    private static final List<String> CLASSES = Arrays.asList("AuditLog", "Brava", "Collection", "Folder", "Field",
            "Tagging", "Arrangment", "Review", "Production", "ProductionExport", "View", "Jobs", "Navigation",
            "RecentNavigation", "Entity", "Search", "Tasks", "PcWorkflow", "Batch", "Matter");
    private static final List<String> ENDPOINTS = Arrays.asList("create", "remove", "update", "delete", "createBulk",
            "removeBulk", "deleteBulk", "list", "index", "listing", "all");
    private static final List<String> METHODS = new ArrayList<>();
    private static final List<String> PROJECTS = new ArrayList<>();
    private static final List<String> SOURCE = Arrays.asList("web", "service", "metrics");
    private static final List<String> BUILDS = new ArrayList<>();

    static {
        for (int i = 0; i < 500; i++) {
            BUILDS.add("AXC_5.15_" + i);
        }

        for (int i = 0; i < 500; i++) {
            HOSTS.add(UUID.randomUUID().toString().substring(1, 16));
            PROJECTS.add(UUID.randomUUID().toString().substring(1, 16) + "_Review");
        }

        for (final String clazz : CLASSES) {
            for (final String endpoint : ENDPOINTS) {
                METHODS.add(clazz + "Service." + endpoint);
                METHODS.add(clazz + "Controller." + endpoint);
            }
        }
    }

    public static void main(final String[] args) throws IOException {
        final Path testdataFile = Files.createTempFile("testData", ".csv");

        final ThreadLocalRandom r = ThreadLocalRandom.current();
        int lines = 0;

        try (FileWriter writer = new FileWriter(testdataFile.toFile())) {
            writer.append("@timestamp,duration,pod,host,method,project,source,build\n");

            for (lines = 0; lines < 1_000_000; lines++) {
                final String timestamp = Instant.ofEpochMilli(r.nextLong(1234567890L, 12345678901L))
                        .atOffset(ZoneOffset.UTC).format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
                final String duration = String.valueOf(r.nextInt(10000));
                final String pod = PODS.get(r.nextInt(PODS.size()));
                final String host = HOSTS.get(r.nextInt(HOSTS.size()));
                final String method = METHODS.get(r.nextInt(METHODS.size()));
                final String project = PROJECTS.get(r.nextInt(PROJECTS.size()));
                final String source = SOURCE.get(r.nextInt(SOURCE.size()));
                final String build = BUILDS.get(r.nextInt(BUILDS.size()));

                writer.append(timestamp);
                writer.append(",");
                writer.append(duration);
                writer.append(",");
                writer.append(pod);
                writer.append(",");
                writer.append(host);
                writer.append(",");
                writer.append(method);
                writer.append(",");
                writer.append(project);
                writer.append(",");
                writer.append(source);
                writer.append(",");
                writer.append(build);
                writer.append("\n");

                if (lines % 1000 == 0) {
                    System.out.println("lines: " + lines);
                }
            }
        }
    }
}

@@ -11,27 +11,27 @@ import org.testng.annotations.Test;

@Test
public class NodeEntryTest {
    @DataProvider
    public Object[][] providerPrefixCompare() {
        final List<Object[]> result = new ArrayList<>();

        result.add(new Object[] { "ab", "abc", -1 });
        result.add(new Object[] { "abb", "abc", -1 });
        result.add(new Object[] { "abc", "abc", 0 });
        result.add(new Object[] { "abcd", "abc", 0 });
        result.add(new Object[] { "abd", "abc", 1 });
        result.add(new Object[] { "abz", "abc", 23 });

        return result.toArray(Object[][]::new);
    }

    @Test(dataProvider = "providerPrefixCompare")
    public void testPrefixCompare(final String key, final String prefix, final int expected) {

        final NodeEntry nodeEntry = new NodeEntry(ValueType.NODE_POINTER, key.getBytes(StandardCharsets.UTF_8),
                new byte[0]);

        final int actual = nodeEntry.compareKeyPrefix(prefix.getBytes(StandardCharsets.UTF_8));
        Assert.assertEquals(actual, expected, key + " ? " + prefix);
    }
}
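The expected values imply that compareKeyPrefix compares at most prefix-length bytes and returns the raw byte difference at the first mismatch ('z' - 'c' = 23). A sketch consistent with every row of the data provider (illustrative, not the actual NodeEntry code):

// Compares key against prefix over at most prefix.length bytes.
static int compareKeyPrefix(byte[] key, byte[] prefix) {
    int n = Math.min(key.length, prefix.length);
    for (int i = 0; i < n; i++) {
        int diff = key[i] - prefix[i];
        if (diff != 0) {
            return diff; // e.g. 'z' - 'c' == 23
        }
    }
    // key shorter than prefix -> -1; key at least as long -> 0 ("abcd" vs "abc")
    return key.length < prefix.length ? -1 : 0;
}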

@@ -14,29 +14,29 @@ import org.testng.annotations.Test;

@Test
public class PersistentMapDiskNodeTest {

    public void serializeDeserialize() throws Exception {

        final List<NodeEntry> entries = new ArrayList<>();
        entries.add(newNode(ValueType.NODE_POINTER, "key1", "value1"));
        entries.add(newNode(ValueType.VALUE_INLINE, "key2_", "value2--"));
        entries.add(newNode(ValueType.NODE_POINTER, "key3__", "value3---"));
        entries.add(newNode(ValueType.VALUE_INLINE, "key4___", "value4----"));

        final long nodeOffset = ThreadLocalRandom.current().nextInt();
        final PersistentMapDiskNode node = new PersistentMapDiskNode(nodeOffset, entries, null);

        final byte[] buffer = node.serialize();

        final ByteBuffer byteBuffer = ByteBuffer.wrap(buffer);
        final PersistentMapDiskNode actualNode = PersistentMapDiskNode.parse(nodeOffset,
                new DiskBlock(nodeOffset, byteBuffer));

        Assert.assertEquals(actualNode.getEntries(), entries);
    }

    private static NodeEntry newNode(final ValueType type, final String key, final String value) {
        // use the given type; the previous version ignored it and always created VALUE_INLINE entries
        return new NodeEntry(type, key.getBytes(StandardCharsets.UTF_8),
                value.getBytes(StandardCharsets.UTF_8));
    }

}
@@ -24,368 +24,369 @@ import org.testng.annotations.Test;

@Test
public class PersistentMapTest {

    private Path dataDirectory;

    @BeforeMethod
    public void beforeMethod() throws IOException {
        dataDirectory = Files.createTempDirectory("pdb");
    }

    @AfterMethod
    public void afterMethod() throws IOException {
        FileUtils.delete(dataDirectory);
    }

    public void testSingleValue() throws Exception {
        final Path file = dataDirectory.resolve("map.db");
        final String value = "value1";
        final String key = "key1";

        try (final PersistentMap<String, String> map = new PersistentMap<>(file, dataDirectory,
                PersistentMap.STRING_CODER, PersistentMap.STRING_CODER)) {

            Assert.assertNull(map.getValue(key));

            Assert.assertNull(map.putValue(key, value));

            Assert.assertEquals(map.getValue(key), value);
        }

        try (final PersistentMap<String, String> map = new PersistentMap<>(file, dataDirectory,
                PersistentMap.STRING_CODER, PersistentMap.STRING_CODER)) {

            Assert.assertEquals(map.getValue(key), value);
        }
    }

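// Editor's note: the second try-with-resources block reopens the same file and
// is what actually checks durability: the value written before close() must
// still be readable from a fresh PersistentMap instance.
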
    @Test(invocationCount = 1)
    public void testManyValues() throws Exception {
        final Path file = dataDirectory.resolve("map.db");
        final var insertedValues = new HashMap<String, String>();

        final Random rnd = new Random(1);

        try (final PersistentMap<String, String> map = new PersistentMap<>(file, dataDirectory,
                PersistentMap.STRING_CODER, PersistentMap.STRING_CODER)) {
            map.setMaxEntriesInNode(2);

            for (int i = 0; i < 100; i++) {
                // System.out.println("\n\ninserting: " + i);
                final UUID nextUUID = new UUID(rnd.nextLong(), rnd.nextLong());
                final String key = nextUUID.toString() + "__" + i;
                final String value = "long value to waste some bytes " + i + "__"
                        + UUID.randomUUID().toString().repeat(1);
                Assert.assertNull(map.getValue(key));

                Assert.assertNull(map.putValue(key, value));

                insertedValues.put(key, value);

                // map.print(PersistentMap.STRING_DECODER, PersistentMap.STRING_DECODER);

                final boolean failEarly = false;
                if (failEarly) {
                    for (final var entry : insertedValues.entrySet()) {
                        final String actualValue = map.getValue(entry.getKey());

                        if (!Objects.equals(actualValue, entry.getValue())) {
                            map.print();
                        }

                        Assert.assertEquals(actualValue, entry.getValue(),
                                "value for key " + entry.getKey() + " in the " + i + "th iteration");
                    }
                }
            }
        }

        try (final PersistentMap<String, String> map = new PersistentMap<>(file, dataDirectory,
                PersistentMap.STRING_CODER, PersistentMap.STRING_CODER)) {
            // map.print(PersistentMap.STRING_DECODER, PersistentMap.STRING_DECODER);
            final AtomicInteger maxDepth = new AtomicInteger();
            map.visitNodeEntriesPreOrder(
                    (node, parentNode, nodeEntry, depth) -> maxDepth.set(Math.max(depth, maxDepth.get())));

            Assert.assertTrue(maxDepth.get() >= 4,
                    "The tree in this test must reach at least depth 4, "
                            + "so that we can be sure that splitting parent nodes works recursively, but was "
                            + maxDepth.get());

            for (final var entry : insertedValues.entrySet()) {
                final String actualValue = map.getValue(entry.getKey());
                Assert.assertEquals(actualValue, entry.getValue(),
                        "value for key " + entry.getKey() + " after all iterations");
            }
        }
    }

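// Editor's note: setMaxEntriesInNode(2) shrinks the node capacity so that the
// 100 insertions force repeated node splits; requiring depth >= 4 (rather than
// just depth >= 2) is what proves that splits propagate recursively into
// parent nodes instead of happening only at the leaves.
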
    @Test(invocationCount = 1)
    public void testManySmallValues() throws Exception {
        final Path file = dataDirectory.resolve("map.db");
        final var insertedValues = new HashMap<Long, Long>();

        final SecureRandom rnd = new SecureRandom();
        rnd.setSeed(1);

        try (final PersistentMap<Long, Long> map = new PersistentMap<>(file, dataDirectory, PersistentMap.LONG_CODER,
                PersistentMap.LONG_CODER)) {

            for (int i = 0; i < 1000; i++) {
                // System.out.println("\n\ninserting: " + i);

                final Long key = (long) (rnd.nextGaussian() * Integer.MAX_VALUE);
                final Long value = (long) (rnd.nextGaussian() * Integer.MAX_VALUE);
                Assert.assertNull(map.getValue(key));

                Assert.assertNull(map.putValue(key, value));

                insertedValues.put(key, value);

                // map.print();

                final boolean failEarly = false;
                if (failEarly) {
                    for (final var entry : insertedValues.entrySet()) {
                        final Long actualValue = map.getValue(entry.getKey());

                        if (!Objects.equals(actualValue, entry.getValue())) {
                            map.print();
                        }

                        Assert.assertEquals(actualValue, entry.getValue(),
                                "value for key " + entry.getKey() + " in the " + i + "th iteration");
                    }
                }
            }
        }

        try (final PersistentMap<Long, Long> map = new PersistentMap<>(file, dataDirectory, PersistentMap.LONG_CODER,
                PersistentMap.LONG_CODER)) {
            // map.print(PersistentMap.LONG_DECODER, PersistentMap.LONG_DECODER);
            final AtomicInteger counter = new AtomicInteger();
            map.visitNodeEntriesPreOrder(
                    (node, parentNode, nodeEntry, depth) -> counter.addAndGet(nodeEntry.isInnerNode() ? 1 : 0));

            Assert.assertEquals(counter.get(), 4,
                    "number of nodes should be small. Any number larger than 4 indicates "
                            + "that new inner nodes are created even though the existing inner "
                            + "nodes could hold the values");

            for (final var entry : insertedValues.entrySet()) {
                final Long actualValue = map.getValue(entry.getKey());
                Assert.assertEquals(actualValue, entry.getValue(),
                        "value for key " + entry.getKey() + " after all iterations");
            }
        }
    }

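// Editor's note (assumption: LONG_CODER encodes longs at a fixed width): with
// fixed-size keys and values, 1000 pairs occupy a predictable number of leaf
// blocks, so the inner-node count can be pinned to exactly 4; a higher count
// would mean new inner nodes are created before the existing ones are full.
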
    @Test(invocationCount = 1)
    public void testManyEmptyValues() throws Exception {
        final Path file = dataDirectory.resolve("map.db");
        final var insertedValues = new HashMap<Long, Empty>();

        final SecureRandom rnd = new SecureRandom();
        rnd.setSeed(1);

        try (final PersistentMap<Long, Empty> map = new PersistentMap<>(file, dataDirectory, PersistentMap.LONG_CODER,
                PersistentMap.EMPTY_ENCODER)) {

            for (int i = 0; i < 1500; i++) {
                // System.out.println("\n\ninserting: " + i);

                final Long key = (long) (rnd.nextGaussian() * Integer.MAX_VALUE);
                final Empty value = Empty.INSTANCE;
                Assert.assertNull(map.getValue(key));

                Assert.assertNull(map.putValue(key, value));

                insertedValues.put(key, value);

                // map.print();

                final boolean failEarly = false;
                if (failEarly) {
                    for (final var entry : insertedValues.entrySet()) {
                        final Empty actualValue = map.getValue(entry.getKey());

                        if (!Objects.equals(actualValue, entry.getValue())) {
                            map.print();
                        }

                        Assert.assertEquals(actualValue, entry.getValue(),
                                "value for key " + entry.getKey() + " in the " + i + "th iteration");
                    }
                }
            }
        }

        try (final PersistentMap<Long, Empty> map = new PersistentMap<>(file, dataDirectory, PersistentMap.LONG_CODER,
                PersistentMap.EMPTY_ENCODER)) {
            map.print();
            final AtomicInteger counter = new AtomicInteger();
            map.visitNodeEntriesPreOrder(
                    (node, parentNode, nodeEntry, depth) -> counter.addAndGet(nodeEntry.isInnerNode() ? 1 : 0));

            Assert.assertEquals(counter.get(), 4,
                    "number of nodes should be small. Any number larger than 4 indicates "
                            + "that new inner nodes are created even though the existing inner "
                            + "nodes could hold the values");

            for (final var entry : insertedValues.entrySet()) {
                final Empty actualValue = map.getValue(entry.getKey());
                Assert.assertEquals(actualValue, entry.getValue(),
                        "value for key " + entry.getKey() + " after all iterations");
            }
        }
    }

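// Editor's note (assumption): Empty.INSTANCE is a singleton marker value and
// EMPTY_ENCODER stores it without payload bytes, which effectively turns the
// map into a persistent set keyed by long; even so, the 1500 entries must stay
// within the same 4-inner-node bound as the test above.
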
    @Test(invocationCount = 1)
    public void testEasyValues() throws Exception {
        final Path file = dataDirectory.resolve("map.db");
        final var insertedValues = new HashMap<String, String>();

        final Queue<Integer> numbers = new LinkedList<>(Arrays.asList(1, 15, 11, 4, 16, 3, 13));

        try (final PersistentMap<String, String> map = new PersistentMap<>(file, dataDirectory,
                PersistentMap.STRING_CODER, PersistentMap.STRING_CODER)) {

            final int numbersSize = numbers.size();
            for (int i = 0; i < numbersSize; i++) {

                final Integer keyNumber = numbers.poll();
                // System.out.println("\n\ninserting: " + keyNumber);

                final String key = "" + keyNumber;
                final String value = "value";
                Assert.assertNull(map.getValue(key));

                Assert.assertNull(map.putValue(key, value));

                insertedValues.put(key, value);

                // map.print(PersistentMap.STRING_DECODER, PersistentMap.STRING_DECODER);

                for (final var entry : insertedValues.entrySet()) {
                    final String actualValue = map.getValue(entry.getKey());

                    Assert.assertEquals(actualValue, entry.getValue(),
                            "value for key " + entry.getKey() + " in the " + i + "th iteration");
                }
            }
        }

        try (final PersistentMap<String, String> map = new PersistentMap<>(file, dataDirectory,
                PersistentMap.STRING_CODER, PersistentMap.STRING_CODER)) {
            // map.print(PersistentMap.STRING_DECODER, PersistentMap.STRING_DECODER);

            final AtomicInteger counter = new AtomicInteger();
            map.visitNodeEntriesPreOrder(
                    (node, parentNode, nodeEntry, depth) -> counter.addAndGet(nodeEntry.isInnerNode() ? 1 : 0));

            for (final var entry : insertedValues.entrySet()) {
                final String actualValue = map.getValue(entry.getKey());
                Assert.assertEquals(actualValue, entry.getValue(),
                        "value for key " + entry.getKey() + " after all iterations");
            }
        }
    }

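// Editor's note: the keys above are inserted as strings, so they order
// lexicographically ("11" sorts before "4"); the hand-picked sequence
// 1, 15, 11, 4, 16, 3, 13 deterministically drives a specific series of node
// splits, and the whole map is re-verified after every single insertion.
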
    @Test
    public void testFindAllByPrefix() throws Exception {
        final Path file = dataDirectory.resolve("map.db");

        final Map<String, String> expectedBar = new HashMap<>();
        for (int i = 0; i < 100; i++) {
            // the value is a little bit longer to make sure that the values don't fit into
            // a single leaf node
            expectedBar.put("bar:" + i, "bar:" + i + "__##################################");
        }

        final Map<String, String> input = new HashMap<>();
        input.putAll(expectedBar);
        for (int i = 0; i < 500; i++) {
            input.put(UUID.randomUUID().toString(), UUID.randomUUID().toString());
        }

        try (final PersistentMap<String, String> map = new PersistentMap<>(file, dataDirectory,
                PersistentMap.STRING_CODER, PersistentMap.STRING_CODER)) {

            map.putAllValues(input);
        }

        try (final PersistentMap<String, String> map = new PersistentMap<>(file, dataDirectory,
                PersistentMap.STRING_CODER, PersistentMap.STRING_CODER)) {

            {
                final LinkedHashMap<String, String> actualBar = new LinkedHashMap<>();
                final Visitor<String, String> visitor = (key, value) -> actualBar.put(key, value);
                map.visitValues("bar:", visitor);

                Assert.assertEquals(actualBar, expectedBar);
            }
        }
    }

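// Editor's note: the 500 random UUID entries act as noise around the 100
// "bar:" entries, so the map-equality check verifies both that visitValues
// visits every key with the requested prefix and that it visits nothing else.
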
    @Test(invocationCount = 1)
    public void testLotsOfValues() throws Exception {
        final Path file = dataDirectory.resolve("map.db");
        final var insertedValues = new HashMap<Long, Long>();

        final SecureRandom rnd = new SecureRandom();
        rnd.setSeed(1);

        try (final PersistentMap<Long, Long> map = new PersistentMap<>(file, dataDirectory, PersistentMap.LONG_CODER,
                PersistentMap.LONG_CODER)) {

            for (int i = 0; i < 1_000; i++) {

                final Long key = (long) (rnd.nextGaussian() * Integer.MAX_VALUE);
                final Long value = (long) (rnd.nextGaussian() * Integer.MAX_VALUE);

                if (insertedValues.containsKey(key)) {
                    continue;
                }

                Assert.assertNull(map.putValue(key, value));

                insertedValues.put(key, value);

                final boolean failEarly = false;
                if (failEarly) {
                    for (final var entry : insertedValues.entrySet()) {
                        final Long actualValue = map.getValue(entry.getKey());

                        if (!Objects.equals(actualValue, entry.getValue())) {
                            map.print();
                        }

                        Assert.assertEquals(actualValue, entry.getValue(),
                                "value for key " + entry.getKey() + " in the " + i + "th iteration");
                    }
                }
            }
        }

        try (final PersistentMap<Long, Long> map = new PersistentMap<>(file, dataDirectory, PersistentMap.LONG_CODER,
                PersistentMap.LONG_CODER)) {
            final AtomicInteger counter = new AtomicInteger();
            final AtomicInteger maxDepth = new AtomicInteger();
            map.visitNodeEntriesPreOrder((node, parentNode, nodeEntry, depth) -> {
                counter.addAndGet(nodeEntry.isInnerNode() ? 1 : 0);
                maxDepth.set(Math.max(maxDepth.get(), depth));
            });

            final long start = System.nanoTime();
            for (final var entry : insertedValues.entrySet()) {
                final Long actualValue = map.getValue(entry.getKey());
                Assert.assertEquals(actualValue, entry.getValue(),
                        "value for key " + entry.getKey() + " after all iterations");
            }
            System.out.println("nodes=" + counter.get() + ", depth=" + maxDepth.get() + ": "
                    + (System.nanoTime() - start) / 1_000_000.0 + "ms");
        }
    }
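
// Editor's note: the println above only reports the inner-node count, the tree
// depth, and the total lookup time for all keys; it is informational output,
// not a performance assertion.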
}
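
// ---------------------------------------------------------------------------
// Editor's note: a minimal usage sketch of the PersistentMap API as exercised
// by the tests above (constructor, putValue, getValue, visitValues). It is not
// part of the commit; the class name PersistentMapUsageSketch is invented for
// illustration, and close() flushing data to disk is inferred from the
// close-and-reopen pattern the tests rely on.

import java.nio.file.Files;
import java.nio.file.Path;

public class PersistentMapUsageSketch {

    public static void main(final String[] args) throws Exception {
        // a scratch directory for the map file, mirroring the tests' setup
        final Path dataDirectory = Files.createTempDirectory("pdb");
        final Path file = dataDirectory.resolve("map.db");

        // create the map, store a few values, and close it to persist them
        try (final PersistentMap<String, String> map = new PersistentMap<>(file, dataDirectory,
                PersistentMap.STRING_CODER, PersistentMap.STRING_CODER)) {
            map.putValue("bar:1", "one");
            map.putValue("bar:2", "two");
            map.putValue("baz:1", "other");
        }

        // reopen the same file, read a single key, then scan by prefix
        try (final PersistentMap<String, String> map = new PersistentMap<>(file, dataDirectory,
                PersistentMap.STRING_CODER, PersistentMap.STRING_CODER)) {
            System.out.println(map.getValue("baz:1")); // -> "other"
            map.visitValues("bar:", (key, value) -> System.out.println(key + " -> " + value));
        }
    }
}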