cache disk blocks in an LRU cache
Improves read access by a factor of 4 for small trees.
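The LRUCache referenced below (org.lucares.utils.cache.LRUCache, sized at 10_000 entries) is a project-internal utility whose implementation is not part of this diff. As a rough sketch of what such a cache can look like, assuming an access-ordered LinkedHashMap with least-recently-used eviction is an acceptable stand-in (the class name LRUCacheSketch is made up for illustration):

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical stand-in for org.lucares.utils.cache.LRUCache;
// the real implementation is not shown in this commit.
public class LRUCacheSketch<K, V> extends LinkedHashMap<K, V> {

    private final int capacity;

    public LRUCacheSketch(final int capacity) {
        // accessOrder = true: get() moves an entry to the end, so the
        // head of the map is always the least recently used entry
        super(16, 0.75f, true);
        this.capacity = capacity;
    }

    @Override
    protected boolean removeEldestEntry(final Map.Entry<K, V> eldest) {
        // evict the least recently used entry once the cache exceeds capacity
        return size() > capacity;
    }
}

With a cache like this in front of the block storage, getNode() becomes a read-through lookup: a cache hit returns the already parsed PersistentMapDiskNode without re-reading and re-parsing the disk block, which is where the roughly fourfold read improvement for small trees comes from.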
@@ -1,6 +1,6 @@
 package org.lucares.pdb.blockstorage;
 
-import java.nio.MappedByteBuffer;
+import java.nio.ByteBuffer;
 
 import org.lucares.collections.LongList;
 import org.lucares.pdb.diskstorage.DiskBlock;
@@ -28,7 +28,7 @@ public class BSFileDiskBlock {
     public byte[] getBuffer() {
 
         if (buffer == null) {
-            final MappedByteBuffer byteBuffer = diskBlock.getByteBuffer();
+            final ByteBuffer byteBuffer = diskBlock.getByteBuffer();
             this.buffer = new byte[byteBuffer.capacity() - INT_SEQUENCE_OFFSET];
             byteBuffer.position(INT_SEQUENCE_OFFSET);
             byteBuffer.get(buffer);
@@ -86,7 +86,7 @@ public class BSFileDiskBlock {
     }
 
     public void force() {
-        diskBlock.getByteBuffer().force();
+        diskBlock.force();
     }
 
     @Override
@@ -1,5 +1,6 @@
 package org.lucares.pdb.diskstorage;
 
+import java.nio.ByteBuffer;
 import java.nio.MappedByteBuffer;
 
 public class DiskBlock {
@@ -7,9 +8,9 @@ public class DiskBlock {
     private byte[] buffer = null;
     private final long blockOffset;
 
-    private final MappedByteBuffer byteBuffer;
+    private final ByteBuffer byteBuffer;
 
-    public DiskBlock(final long blockOffset, final MappedByteBuffer byteBuffer) {
+    public DiskBlock(final long blockOffset, final ByteBuffer byteBuffer) {
         this.blockOffset = blockOffset;
         this.byteBuffer = byteBuffer;
     }
@@ -24,7 +25,7 @@ public class DiskBlock {
         return buffer;
     }
 
-    public MappedByteBuffer getByteBuffer() {
+    public ByteBuffer getByteBuffer() {
         return byteBuffer;
     }
 
@@ -42,7 +43,10 @@ public class DiskBlock {
     }
 
     public void force() {
-        byteBuffer.force();
+        // some tests use HeapByteBuffer and don't support force
+        if (byteBuffer instanceof MappedByteBuffer) {
+            ((MappedByteBuffer) byteBuffer).force();
+        }
     }
 
     @Override
@@ -16,6 +16,7 @@ import org.lucares.pdb.diskstorage.DiskBlock;
 import org.lucares.pdb.diskstorage.DiskStorage;
 import org.lucares.utils.Preconditions;
 import org.lucares.utils.byteencoder.VariableByteEncoder;
+import org.lucares.utils.cache.LRUCache;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -101,6 +102,8 @@ public class PersistentMap<K, V> implements AutoCloseable {
 
     private final EncoderDecoder<V> valueEncoder;
 
+    private final LRUCache<Long, PersistentMapDiskNode> nodeCache = new LRUCache<>(10_000);
+
     public PersistentMap(final Path path, final EncoderDecoder<K> keyEncoder, final EncoderDecoder<V> valueEncoder)
             throws IOException {
         this.diskStore = new DiskStorage(path);
@@ -294,16 +297,27 @@ public class PersistentMap<K, V> implements AutoCloseable {
     }
 
     private PersistentMapDiskNode getNode(final long nodeOffset) throws IOException {
-        final DiskBlock diskBlock = diskStore.getDiskBlock(nodeOffset, BLOCK_SIZE);
-        final byte[] buffer = diskBlock.getBuffer();
-        final PersistentMapDiskNode node = PersistentMapDiskNode.parse(nodeOffset, buffer);
+
+        PersistentMapDiskNode node = nodeCache.get(nodeOffset);
+        if (node == null) {
+
+            final DiskBlock diskBlock = diskStore.getDiskBlock(nodeOffset, BLOCK_SIZE);
+
+            node = PersistentMapDiskNode.parse(nodeOffset, diskBlock);
+            nodeCache.put(nodeOffset, node);
+        }
+
         return node;
     }
 
     private void writeNode(final PersistentMapDiskNode node) throws IOException {
         LOGGER.trace("writing node {}", node);
         final long nodeOffest = node.getNodeOffset();
-        final DiskBlock diskBlock = diskStore.getDiskBlock(nodeOffest, BLOCK_SIZE);
+        // final DiskBlock diskBlock = diskStore.getDiskBlock(nodeOffest, BLOCK_SIZE);
+        DiskBlock diskBlock = node.getDiskBlock();
+        if (diskBlock == null) {
+            diskBlock = diskStore.getDiskBlock(nodeOffest, BLOCK_SIZE);
+        }
         final byte[] buffer = diskBlock.getBuffer();
         final byte[] newBuffer = node.serialize();
         System.arraycopy(newBuffer, 0, buffer, 0, buffer.length);
@@ -7,6 +7,7 @@ import java.util.List;
 import java.util.stream.Collectors;
 
 import org.lucares.collections.LongList;
+import org.lucares.pdb.diskstorage.DiskBlock;
 import org.lucares.pdb.map.NodeEntry.ValueType;
 import org.lucares.utils.Preconditions;
 import org.lucares.utils.byteencoder.VariableByteEncoder;
@@ -32,19 +33,23 @@ import org.lucares.utils.byteencoder.VariableByteEncoder;
  */
 public class PersistentMapDiskNode {
 
     // TODO use map instead of list
     private final List<NodeEntry> entries;
     private final long nodeOffset;
+    private final DiskBlock diskBlock;
 
-    public PersistentMapDiskNode(final long nodeOffset, final List<NodeEntry> entries) {
+    public PersistentMapDiskNode(final long nodeOffset, final List<NodeEntry> entries, final DiskBlock diskBlock) {
         this.nodeOffset = nodeOffset;
+        this.diskBlock = diskBlock;
         this.entries = new ArrayList<>(entries);
     }
 
     public static PersistentMapDiskNode emptyRootNode(final long nodeOffset) {
-        return new PersistentMapDiskNode(nodeOffset, Collections.emptyList());
+        return new PersistentMapDiskNode(nodeOffset, Collections.emptyList(), null);
     }
 
-    public static PersistentMapDiskNode parse(final long nodeOffset, final byte[] data) {
+    public static PersistentMapDiskNode parse(final long nodeOffset, final DiskBlock diskBlock) {
+        final byte[] data = diskBlock.getBuffer();
         if (data.length != PersistentMap.BLOCK_SIZE) {
             throw new IllegalStateException(
                     "block size must be " + PersistentMap.BLOCK_SIZE + " but was " + data.length);
@@ -52,7 +57,7 @@ public class PersistentMapDiskNode {
         final LongList longs = VariableByteEncoder.decode(data);
 
         final List<NodeEntry> entries = deserialize(longs, data);
-        return new PersistentMapDiskNode(nodeOffset, entries);
+        return new PersistentMapDiskNode(nodeOffset, entries, diskBlock);
     }
 
     public static List<NodeEntry> deserialize(final LongList keyLengths, final byte[] buffer) {
@@ -91,6 +96,10 @@ public class PersistentMapDiskNode {
         return serialize(entries);
     }
 
+    public DiskBlock getDiskBlock() {
+        return diskBlock;
+    }
+
     public long getNodeOffset() {
         return nodeOffset;
     }
@@ -195,7 +204,7 @@ public class PersistentMapDiskNode {
         entries.clear();
         entries.addAll(rightEntries);
 
-        return new PersistentMapDiskNode(newBlockOffset, leftEntries);
+        return new PersistentMapDiskNode(newBlockOffset, leftEntries, null);
     }
 
     public static int neededBytesTotal(final List<NodeEntry> entries) {