adding the blocksize to the metadata section
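Judging from the hunks below, the header block at byte offset 8 (OFFSET_META_DATA) is treated as a small metadata section holding three consecutive longs, written by initMetaDataSection: the offset of the root node, the map version (CURRENT_VERSION = 1), and the block size (BLOCK_SIZE = 4096). The following sketch only illustrates that assumed layout with a plain ByteBuffer; the real code goes through DiskStorage/DiskBlock, and the sample root-node offset is made up.

import java.nio.ByteBuffer;

// Illustration of the assumed metadata layout at OFFSET_META_DATA (byte offset 8):
// three big-endian longs, in the order written by initMetaDataSection.
public class MetaDataLayoutSketch {
    static final long CURRENT_VERSION = 1; // from the diff
    static final int BLOCK_SIZE = 4096;    // from the diff

    public static void main(String[] args) {
        final ByteBuffer metaData = ByteBuffer.allocate(3 * Long.BYTES);
        metaData.putLong(12345L);          // hypothetical offset of the root node
        metaData.putLong(CURRENT_VERSION); // map format version
        metaData.putLong(BLOCK_SIZE);      // block size, newly recorded by this commit
        metaData.flip();

        System.out.println("root node offset = " + metaData.getLong());
        System.out.println("version          = " + metaData.getLong());
        System.out.println("block size       = " + metaData.getLong());
    }
}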
@@ -2,6 +2,7 @@ package org.lucares.pdb.map;

import java.io.IOException;
import java.io.PrintStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
@@ -31,6 +32,8 @@ public class PersistentMap<K, V> implements AutoCloseable {

private static final Logger LOGGER = LoggerFactory.getLogger(PersistentMap.class);

private static final long CURRENT_VERSION = 1;

// the maximum key
static final byte[] MAX_KEY;
static {
@@ -151,7 +154,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
public static final EncoderDecoder<Empty> EMPTY_ENCODER = new EmptyCoder();

static final int BLOCK_SIZE = 4096;
static final long NODE_OFFSET_TO_ROOT_NODE = 8;
static final long OFFSET_META_DATA = 8;

private DiskStorage diskStore;

@@ -170,6 +173,8 @@ public class PersistentMap<K, V> implements AutoCloseable {

private final Path path;

private long version;

public PersistentMap(final Path path, final Path storageBasePath, final EncoderDecoder<K> keyEncoder,
final EncoderDecoder<V> valueEncoder) {
this.path = path;
@@ -178,12 +183,8 @@ public class PersistentMap<K, V> implements AutoCloseable {
this.valueEncoder = valueEncoder;
initIfNew();

readOffsetOfRootNode();
}

private void readOffsetOfRootNode() {
final DiskBlock diskBlock = diskStore.getDiskBlock(NODE_OFFSET_TO_ROOT_NODE, diskStore.minAllocationSize());
nodeOffsetOfRootNode = diskBlock.getByteBuffer().getLong(0);
readMetaData();
updateIfNecessary();
}

@Override
@@ -198,7 +199,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
private void initIfNew() {
if (diskStore.size() < BLOCK_SIZE) {
final long nodeOffsetToRootNode = diskStore.allocateBlock(diskStore.minAllocationSize());
Preconditions.checkEqual(nodeOffsetToRootNode, NODE_OFFSET_TO_ROOT_NODE,
Preconditions.checkEqual(nodeOffsetToRootNode, OFFSET_META_DATA,
"The offset of the pointer to the root node must be at a well known location. "
+ "Otherwise we would not be able to find it in an already existing file.");

@@ -210,14 +211,40 @@ public class PersistentMap<K, V> implements AutoCloseable {
final var rootNode = PersistentMapDiskNode.emptyRootNode(blockOffset);
writeNode(rootNode);

// 4. update pointer to root node
writeNodeOffsetOfRootNode(blockOffset);
// 4. meta data section with pointer to root node and version
initMetaDataSection(blockOffset);

// 5. insert a dummy entry with a 'maximum' key
putValue(MAX_KEY, valueEncoder.getEmptyValue());
}
}

private void initMetaDataSection(final long newNodeOffsetToRootNode) {

final DiskBlock diskBlock = diskStore.getDiskBlock(OFFSET_META_DATA, diskStore.minAllocationSize());
final ByteBuffer byteBuffer = diskBlock.getByteBuffer();
byteBuffer.putLong(newNodeOffsetToRootNode);
byteBuffer.putLong(CURRENT_VERSION);
byteBuffer.putLong(BLOCK_SIZE);
diskBlock.force();
nodeOffsetOfRootNode = newNodeOffsetToRootNode;
version = CURRENT_VERSION;
}

private void readMetaData() {
final DiskBlock diskBlock = diskStore.getDiskBlock(OFFSET_META_DATA, diskStore.minAllocationSize());
final ByteBuffer byteBuffer = diskBlock.getByteBuffer();
nodeOffsetOfRootNode = byteBuffer.getLong();
version = byteBuffer.getLong();
}

private void writeNodeOffsetOfRootNode(final long newNodeOffsetToRootNode) {
final DiskBlock diskBlock = diskStore.getDiskBlock(OFFSET_META_DATA, diskStore.minAllocationSize());
diskBlock.getByteBuffer().putLong(0, newNodeOffsetToRootNode);
diskBlock.force();
nodeOffsetOfRootNode = newNodeOffsetToRootNode;
}

public synchronized void putAllValues(final Map<K, V> map) {
for (final Entry<K, V> e : map.entrySet()) {
putValue(e.getKey(), e.getValue());
@@ -434,7 +461,9 @@ public class PersistentMap<K, V> implements AutoCloseable {
// diskBlock.force(); // makes writing nodes slower by factor 800 (sic!)
}

public synchronized void print() {
public synchronized void print(final boolean printValues) {

System.out.println("printing nodes:");

visitNodeEntriesPreOrder((node, parentNode, nodeEntry, depth) -> {

@@ -442,8 +471,11 @@ public class PersistentMap<K, V> implements AutoCloseable {

final String children = "#" + node.getEntries().size();

writer.println(" ".repeat(depth) + "@" + node.getNodeOffset() + " " + children + " " + nodeEntry
.toString(b -> String.valueOf(keyEncoder.decode(b)), b -> String.valueOf(valueEncoder.decode(b))));
if (printValues || nodeEntry.isInnerNode()) {
writer.println(" ".repeat(depth) + "@" + node.getNodeOffset() + " " + children + " "
+ nodeEntry.toString(b -> String.valueOf(keyEncoder.decode(b)),
b -> String.valueOf(valueEncoder.decode(b))));
}
});
}

@@ -507,7 +539,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
public synchronized void reindex() throws IOException {
final long start = System.nanoTime();
final AtomicLong countValues = new AtomicLong();
LOGGER.info("start reindexing file: {}", path);
LOGGER.info("start reindexing file: {}, version: {}, stats before:\n{}", path, version, stats());
final Path newFile = path.getParent().resolve(path.getFileName() + ".tmp");

try (PersistentMap<K, V> newMap = new PersistentMap<>(newFile, null, keyEncoder, valueEncoder)) {
@@ -527,11 +559,12 @@ public class PersistentMap<K, V> implements AutoCloseable {
swapFiles(newFile);

diskStore = new DiskStorage(path, null);
readOffsetOfRootNode();
readMetaData();
version = CURRENT_VERSION;
final double durationInMs = (System.nanoTime() - start) / 1_000_000.0;
final double valuesPerSecond = countValues.get() / (durationInMs / 1000);
LOGGER.info("done reindexing, took {} ms, {} values, {} values/s", (int) Math.ceil(durationInMs),
countValues.get(), valuesPerSecond);
LOGGER.info("done reindexing, took {} ms, {} values, {} values/s, stats after:\n{}",
(int) Math.ceil(durationInMs), countValues.get(), valuesPerSecond, stats());
}

public synchronized PersistentMapStats stats() {
@@ -596,11 +629,15 @@ public class PersistentMap<K, V> implements AutoCloseable {
return nodeOffsetOfRootNode;
}

private void writeNodeOffsetOfRootNode(final long newNodeOffsetToRootNode) {
final DiskBlock diskBlock = diskStore.getDiskBlock(NODE_OFFSET_TO_ROOT_NODE, diskStore.minAllocationSize());
diskBlock.getByteBuffer().putLong(0, newNodeOffsetToRootNode);
diskBlock.force();
nodeOffsetOfRootNode = newNodeOffsetToRootNode;
private void updateIfNecessary() {
try {
if (version < 1) {
reindex();
}
} catch (final IOException e) {
throw new IllegalStateException(
"failed to update " + path + " from version " + version + " to current version", e);
}
}

}
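One consequence visible in the hunks above: a file written before this metadata layout existed will not report CURRENT_VERSION, so updateIfNecessary() falls back to reindex(), which rebuilds the map into a ".tmp" file, swaps it in, and re-reads the metadata. The snippet below is a standalone sketch of that version gate, under the assumption that such older files yield a version value below 1 when the second long is read; the class and method names are illustrative, not part of the project.

import java.nio.ByteBuffer;

// Sketch of the version gate from updateIfNecessary(), applied to a metadata
// buffer laid out as in initMetaDataSection (root node offset, version, block size).
public class VersionGateSketch {
    static final long CURRENT_VERSION = 1; // from the diff

    static boolean needsReindex(final ByteBuffer metaData) {
        metaData.getLong();                      // root node offset, irrelevant for the check
        final long version = metaData.getLong(); // assumed to be below 1 for pre-metadata files
        return version < CURRENT_VERSION;        // the diff checks 'version < 1' and then calls reindex()
    }

    public static void main(String[] args) {
        final ByteBuffer oldFormat = ByteBuffer.allocate(3 * Long.BYTES);
        oldFormat.putLong(4096L); // hypothetical root node offset
        oldFormat.putLong(0L);    // no version recorded yet
        oldFormat.putLong(0L);    // no block size recorded yet
        oldFormat.flip();
        System.out.println("needs reindex: " + needsReindex(oldFormat)); // prints: needs reindex: true
    }
}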