add a pointer to the root node

Before, the offset of the root node was hard-coded.
Now, the offset of the pointer to the root node is hard-coded.
That allows us to replace the root node.
2018-10-27 08:55:15 +02:00
parent 8bb98deb1e
commit 8b48b8c3e7
4 changed files with 67 additions and 12 deletions
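
A minimal sketch of the indirection this commit introduces may help: the file no longer assumes the root node itself sits at a fixed offset; only an 8-byte pointer slot does, so a new root can be installed by overwriting that slot. The names below (RootPointerSketch, POINTER_OFFSET, readRootNodeOffset, replaceRootNode, and the ByteBuffer standing in for the on-disk storage) are illustrative assumptions, not the repository's actual API.

import java.nio.ByteBuffer;

final class RootPointerSketch {
    // The 8-byte pointer slot lives at a fixed, well-known offset in the file.
    static final long POINTER_OFFSET = 8;

    private final ByteBuffer file; // stands in for the on-disk storage

    RootPointerSketch(final ByteBuffer file) {
        this.file = file;
    }

    long readRootNodeOffset() {
        // Follow the indirection: the slot holds the offset of the current root node.
        return file.getLong((int) POINTER_OFFSET);
    }

    void replaceRootNode(final long newRootNodeOffset) {
        // Installing a new root is a single 8-byte write to the pointer slot;
        // the previous root block can afterwards be freed or reused.
        file.putLong((int) POINTER_OFFSET, newRootNodeOffset);
    }
}

Under the old scheme the root node had to stay at offset 4096 forever; with the pointer slot, put/get/visit first read the slot and then operate on whichever block it currently names.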

View File

@@ -250,4 +250,8 @@ public class DiskStorage implements AutoCloseable {
public long size() throws IOException {
return fileChannel.size();
}
public int minAllocationSize() {
return FREE_LIST_NODE_SIZE;
}
}

View File

@@ -18,7 +18,7 @@ public class PersistentMap {
private static final Charset UTF8 = StandardCharsets.UTF_8;
static final int BLOCK_SIZE = 4096;
private static final int ROOT_NODE_OFFEST = 4096;
static final long NODE_OFFSET_TO_ROOT_NODE = 8;
private final DiskStorage diskStore;
@@ -29,14 +29,20 @@ public class PersistentMap {
private void initIfNew() throws IOException {
if (diskStore.size() < BLOCK_SIZE) {
// this map is new:
// 1. make sure that new blocks are aligned to the block size (for faster disk
final long nodeOffsetToRootNode = diskStore.allocateBlock(diskStore.minAllocationSize());
Preconditions.checkEqual(nodeOffsetToRootNode, NODE_OFFSET_TO_ROOT_NODE,
"The offset of the pointer to the root node must be at a well known location. "
+ "Otherwise we would not be able to find it in an already existing file.");
// 2. make sure that new blocks are aligned to the block size (for faster disk
// IO)
diskStore.ensureAlignmentForNewBlocks(BLOCK_SIZE);
// 2. initialize an empty root node
// 3. initialize an empty root node
final long blockOffset = diskStore.allocateBlock(BLOCK_SIZE);
assert blockOffset == ROOT_NODE_OFFEST : "offset was: " + blockOffset;
// 4. update pointer to root node
writeNodeOffsetOfRootNode(blockOffset);
}
}
@@ -62,11 +68,13 @@ public class PersistentMap {
}
public byte[] put(final byte[] key, final byte[] value) throws IOException {
return insert(ROOT_NODE_OFFEST, key, value);
final long rootNodeOffset = readNodeOffsetOfRootNode();
return insert(rootNodeOffset, key, value);
}
public byte[] get(final byte[] key) throws IOException {
final NodeEntry entry = findNodeEntry(ROOT_NODE_OFFEST, key);
final long rootNodeOffset = readNodeOffsetOfRootNode();
final NodeEntry entry = findNodeEntry(rootNodeOffset, key);
return entry == null ? null : entry.getValue();
}
@@ -180,7 +188,8 @@ public class PersistentMap {
}
public void visitPreOrder(final VisitorCallback visitor) throws IOException {
visitPreOrderRecursively(ROOT_NODE_OFFEST, visitor, 0);
final long rootNodeOffset = readNodeOffsetOfRootNode();
visitPreOrderRecursively(rootNodeOffset, visitor, 0);
}
private void visitPreOrderRecursively(final long nodeOffset, final VisitorCallback visitor, final int depth)
@@ -196,4 +205,16 @@ public class PersistentMap {
}
}
}
private long readNodeOffsetOfRootNode() throws IOException {
final DiskBlock diskBlock = diskStore.getDiskBlock(NODE_OFFSET_TO_ROOT_NODE, diskStore.minAllocationSize());
return diskBlock.getByteBuffer().getLong(0);
}
private void writeNodeOffsetOfRootNode(final long newNodeOffsetToRootNode) throws IOException {
final DiskBlock diskBlock = diskStore.getDiskBlock(NODE_OFFSET_TO_ROOT_NODE, diskStore.minAllocationSize());
diskBlock.getByteBuffer().putLong(0, newNodeOffsetToRootNode);
diskBlock.force();
}
}

View File

@@ -5,6 +5,7 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
import org.lucares.pdb.diskstorage.DiskStorage;
import org.lucares.utils.file.FileUtils;
@@ -30,17 +31,21 @@ public class PersistentMapTest {
public void testSingleValue() throws Exception {
final Path file = dataDirectory.resolve("map.db");
final String value = "value1";
final String key = "key1";
try (final DiskStorage ds = new DiskStorage(file)) {
final PersistentMap map = new PersistentMap(ds);
final String value = "value1";
final String key = "key1";
Assert.assertNull(map.getAsString(key));
Assert.assertNull(map.put(key, value));
Assert.assertEquals(map.getAsString(key), value);
}
try (final DiskStorage ds = new DiskStorage(file)) {
final PersistentMap map = new PersistentMap(ds);
Assert.assertEquals(map.getAsString(key), value);
}
}
@@ -52,7 +57,7 @@ public class PersistentMapTest {
try (final DiskStorage ds = new DiskStorage(file)) {
final PersistentMap map = new PersistentMap(ds);
for (int i = 0; i < 100; i++) {
for (int i = 0; i < 200; i++) {
final String key = UUID.randomUUID().toString() + "__" + i;
final String value = "long value to waste some bytes " + i;
@@ -72,11 +77,19 @@ public class PersistentMapTest {
try (final DiskStorage ds = new DiskStorage(file)) {
final PersistentMap map = new PersistentMap(ds);
map.visitPreOrder((nodeEntry, depth) -> {
if (nodeEntry.isInnerNode()) {
System.out.println(" ".repeat(depth) + nodeEntry);
}
});
final AtomicInteger counter = new AtomicInteger();
map.visitPreOrder((nodeEntry, depth) -> counter.addAndGet(nodeEntry.isInnerNode() ? 1 : 0));
// Assert.assertEquals(counter.get(), 3,
// "number of nodes should be small. Any number larger than 3 indicates, "
// + "that new inner nodes are created even though the existing inner "
// + "nodes could hold the values");
for (final var entry : insertedValues.entrySet()) {
final String actualValue = map.getAsString(entry.getKey());

View File

@@ -29,4 +29,21 @@ public class Preconditions {
throw new IllegalStateException();
}
}
/**
* Check that the given values are equal. The check is done with
* {@link Objects#equals(Object, Object)}.
*
* @param actual the actual value
* @param expected the expected value
* @param message formatted with {@link MessageFormat}
* @param args arguments for the message
*/
public static void checkEqual(final Object actual, final Object expected, final String message,
final Object... args) {
if (!Objects.equals(actual, expected)) {
throw new IllegalStateException(
MessageFormat.format(message, args) + " Expected: " + actual + " equals " + expected);
}
}
}
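
A brief usage note on checkEqual: because the message is passed through MessageFormat, placeholders use the {0}, {1} style rather than printf-style %s. The values below are made up for illustration.

final long nodeOffsetToRootNode = 8L; // made-up value for illustration
// MessageFormat-style placeholders ({0}, {1}) are filled from the varargs
Preconditions.checkEqual(nodeOffsetToRootNode, 8L,
        "Expected the pointer slot at offset {0}, but got {1}.",
        8L, nodeOffsetToRootNode);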