handle IOExceptions earlier
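The change follows one pattern throughout: catch the IOException at the DiskStorage layer and rethrow it wrapped in the new unchecked DiskStorageException, so the "throws IOException" clauses can be dropped from BSFile, TimeSeriesFile, PersistentMap and the cluster stores, and the remaining catch blocks higher up only need to handle RuntimeException. A minimal sketch of that pattern follows (the "*Sketch" class names and doIo() are stand-ins for illustration, not code from this commit):

import java.io.IOException;

// Sketch of the wrapping pattern applied in this commit. Assumption: the real
// DiskStorageException lives in org.lucares.pdb.diskstorage and the real I/O
// calls are FileChannel operations; doIo() is a hypothetical stand-in.
class DiskStorageExceptionSketch extends RuntimeException {
    DiskStorageExceptionSketch(final Throwable cause) {
        super(cause);
    }
}

class DiskStorageSketch {

    // Before: the checked exception leaks into every caller's signature.
    long sizeBefore() throws IOException {
        return doIo();
    }

    // After: the IOException is handled where it occurs and rethrown as an
    // unchecked exception, so callers can drop their "throws IOException".
    long sizeAfter() {
        try {
            return doIo();
        } catch (final IOException e) {
            throw new DiskStorageExceptionSketch(e);
        }
    }

    private long doIo() throws IOException {
        return 0L; // stand-in for fileChannel.size()
    }
}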
@@ -1,6 +1,5 @@
 package org.lucares.pdb.blockstorage;
 
-import java.io.IOException;
 import java.util.Iterator;
 import java.util.NoSuchElementException;
 import java.util.Optional;
@@ -60,14 +59,12 @@ public class BSFile implements AutoCloseable {
 
     private final BSFileCustomizer customizer;
 
-    BSFile(final long rootBlockOffset, final DiskStorage diskStorage, final BSFileCustomizer customizer)
-            throws IOException {
+    BSFile(final long rootBlockOffset, final DiskStorage diskStorage, final BSFileCustomizer customizer) {
 
         this(new BSFileDiskBlock(diskStorage.getDiskBlock(rootBlockOffset, BLOCK_SIZE)), diskStorage, customizer);
     }
 
-    BSFile(final BSFileDiskBlock rootDiskBlock, final DiskStorage diskStorage, final BSFileCustomizer customizer)
-            throws IOException {
+    BSFile(final BSFileDiskBlock rootDiskBlock, final DiskStorage diskStorage, final BSFileCustomizer customizer) {
 
         this.rootDiskBlock = rootDiskBlock;
         this.customizer = customizer;
@@ -98,17 +95,17 @@ public class BSFile implements AutoCloseable {
     }
 
     public static BSFile existingFile(final long blockNumber, final DiskStorage diskStorage,
-            final BSFileCustomizer customizer) throws IOException {
+            final BSFileCustomizer customizer) {
         return new BSFile(blockNumber, diskStorage, customizer);
     }
 
-    public static BSFile newFile(final DiskStorage diskStorage, final BSFileCustomizer customizer) throws IOException {
+    public static BSFile newFile(final DiskStorage diskStorage, final BSFileCustomizer customizer) {
         final long rootBlockOffset = diskStorage.allocateBlock(BLOCK_SIZE);
         LOGGER.trace("create new bsFile={}", rootBlockOffset);
         return new BSFile(rootBlockOffset, diskStorage, customizer);
     }
 
-    public void append(final long value1, final long value2) throws IOException {
+    public void append(final long value1, final long value2) {
         final long val1 = customizer.preProcessWriteValue1(value1);
         final long val2 = customizer.preProcessWriteValue2(value2);
 
@@ -124,7 +121,7 @@ public class BSFile implements AutoCloseable {
         dirty = true;
     }
 
-    public void append(final long value) throws IOException {
+    public void append(final long value) {
         int bytesWritten = VariableByteEncoder.encodeInto(value, buffer.getBuffer(), offsetInBuffer);
 
         if (bytesWritten == 0) {
@@ -136,7 +133,7 @@ public class BSFile implements AutoCloseable {
         dirty = true;
     }
 
-    private void flushFullBufferAndCreateNew() throws IOException {
+    private void flushFullBufferAndCreateNew() {
 
         final long newBlockOffset = diskStorage.allocateBlock(BLOCK_SIZE);
 
@@ -214,23 +211,19 @@ public class BSFile implements AutoCloseable {
 
         @Override
         public LongList next() {
-            try {
-                if (nextBlockOffset == BSFileDiskBlock.NO_NEXT_POINTER) {
-                    throw new NoSuchElementException();
-                }
-
-                final BSFileDiskBlock diskBlock = getDiskBlock(nextBlockOffset);
-                nextBlockOffset = diskBlock.getNextBlockNumber();
-
-                final byte[] buf = diskBlock.getBuffer();
-                next = VariableByteEncoder.decode(buf);
-                return next;
-            } catch (final IOException e) {
-                throw new RuntimeException(e);
+            if (nextBlockOffset == BSFileDiskBlock.NO_NEXT_POINTER) {
+                throw new NoSuchElementException();
             }
+
+            final BSFileDiskBlock diskBlock = getDiskBlock(nextBlockOffset);
+            nextBlockOffset = diskBlock.getNextBlockNumber();
+
+            final byte[] buf = diskBlock.getBuffer();
+            next = VariableByteEncoder.decode(buf);
+            return next;
         }
 
-        private BSFileDiskBlock getDiskBlock(final long blockOffset) throws IOException {
+        private BSFileDiskBlock getDiskBlock(final long blockOffset) {
             final DiskBlock diskBlock = diskStorage.getDiskBlock(blockOffset, BLOCK_SIZE);
             return new BSFileDiskBlock(diskBlock);
         }
@@ -1,6 +1,5 @@
 package org.lucares.pdb.blockstorage;
 
-import java.io.IOException;
 import java.util.Optional;
 import java.util.stream.Stream;
 
@@ -11,22 +10,21 @@ public class TimeSeriesFile implements AutoCloseable {
 
     private final BSFile bsFile;
 
-    private TimeSeriesFile(final BSFile bsFile) throws IOException {
+    private TimeSeriesFile(final BSFile bsFile) {
         this.bsFile = bsFile;
     }
 
-    public static TimeSeriesFile existingFile(final long blockNumber, final DiskStorage diskStorage)
-            throws IOException {
+    public static TimeSeriesFile existingFile(final long blockNumber, final DiskStorage diskStorage) {
         final BSFile bsFile = BSFile.existingFile(blockNumber, diskStorage, new TimeSeriesCustomizer());
         return new TimeSeriesFile(bsFile);
     }
 
-    public static TimeSeriesFile newFile(final DiskStorage diskStorage) throws IOException {
+    public static TimeSeriesFile newFile(final DiskStorage diskStorage) {
         final BSFile bsFile = BSFile.newFile(diskStorage, new TimeSeriesCustomizer());
         return new TimeSeriesFile(bsFile);
     }
 
-    public void appendTimeValue(final long epochMilli, final long value) throws IOException {
+    public void appendTimeValue(final long epochMilli, final long value) {
 
         bsFile.append(epochMilli, value);
     }
@@ -25,14 +25,17 @@ public class DiskStorage implements AutoCloseable {
 
     private final FileChannel fileChannel;
 
-    public DiskStorage(final Path databaseFile) throws IOException {
-
-        Files.createDirectories(databaseFile.getParent());
-
-        fileChannel = FileChannel.open(databaseFile, StandardOpenOption.READ, StandardOpenOption.WRITE,
-                StandardOpenOption.CREATE);
-
-        initIfNew();
+    public DiskStorage(final Path databaseFile) {
+        try {
+            Files.createDirectories(databaseFile.getParent());
+
+            fileChannel = FileChannel.open(databaseFile, StandardOpenOption.READ, StandardOpenOption.WRITE,
+                    StandardOpenOption.CREATE);
+
+            initIfNew();
+        } catch (final IOException e) {
+            throw new DiskStorageException(e);
+        }
     }
 
     private void initIfNew() throws IOException {
@@ -42,35 +45,45 @@ public class DiskStorage implements AutoCloseable {
         }
     }
 
-    public DiskBlock getDiskBlock(final long blockOffset, final int blockSize) throws IOException {
-
-        LOGGER.trace("read block={}", blockOffset);
+    public DiskBlock getDiskBlock(final long blockOffset, final int blockSize) {
+        try {
+            LOGGER.trace("read block={}", blockOffset);
 
-        final var byteBuffer = fileChannel.map(MapMode.READ_WRITE, blockOffset, blockSize);
+            final var byteBuffer = fileChannel.map(MapMode.READ_WRITE, blockOffset, blockSize);
 
-        return new DiskBlock(blockOffset, byteBuffer);
+            return new DiskBlock(blockOffset, byteBuffer);
+        } catch (final IOException e) {
+            throw new DiskStorageException(e);
+        }
     }
 
     @Override
-    public void close() throws IOException {
-        fileChannel.force(true);
-        fileChannel.close();
+    public void close() {
+        try {
+            fileChannel.force(true);
+            fileChannel.close();
+        } catch (final IOException e) {
+            throw new DiskStorageException(e);
+        }
     }
 
-    public synchronized long allocateBlock(final int blockSize) throws IOException {
-
+    public synchronized long allocateBlock(final int blockSize) {
         if (blockSize < FREE_LIST_NODE_SIZE) {
             throw new IllegalArgumentException("The minimal allocation size is 32 byte.");
         }
 
-        final var optionalFreeBlock = findFreeBlockWithSize(blockSize);
-        if (optionalFreeBlock.isPresent()) {
-            final FreeListNode freeBlock = optionalFreeBlock.get();
-            removeBlockFromFreeList(freeBlock);
-            clearBlock(freeBlock);
-            return freeBlock.getOffset();
-        } else {
-            return allocateNewBlock(blockSize);
+        try {
+            final var optionalFreeBlock = findFreeBlockWithSize(blockSize);
+            if (optionalFreeBlock.isPresent()) {
+                final FreeListNode freeBlock = optionalFreeBlock.get();
+                removeBlockFromFreeList(freeBlock);
+                clearBlock(freeBlock);
+                return freeBlock.getOffset();
+            } else {
+                return allocateNewBlock(blockSize);
+            }
+        } catch (final IOException e) {
+            throw new DiskStorageException(e);
         }
     }
 
@@ -237,18 +250,26 @@ public class DiskStorage implements AutoCloseable {
         fileChannel.write(freeListFirstBlock, FREE_LIST_ROOT_OFFSET);
     }
 
-    public synchronized void ensureAlignmentForNewBlocks(final int alignment) throws IOException {
-        final long size = fileChannel.size();
-        final int alignmentMismatch = Math.floorMod(size, alignment);
-        if (alignmentMismatch != 0) {
-            // The next allocated block would not be aligned. Therefore we allocate a
-            // throw-away block.
-            allocateNewBlock(alignment - alignmentMismatch);
+    public synchronized void ensureAlignmentForNewBlocks(final int alignment) {
+        try {
+            final long size = fileChannel.size();
+            final int alignmentMismatch = Math.floorMod(size, alignment);
+            if (alignmentMismatch != 0) {
+                // The next allocated block would not be aligned. Therefore we allocate a
+                // throw-away block.
+                allocateNewBlock(alignment - alignmentMismatch);
+            }
+        } catch (final IOException e) {
+            throw new DiskStorageException(e);
         }
     }
 
-    public long size() throws IOException {
-        return fileChannel.size();
+    public long size() {
+        try {
+            return fileChannel.size();
+        } catch (final IOException e) {
+            throw new DiskStorageException(e);
+        }
     }
 
     public int minAllocationSize() {
@@ -0,0 +1,19 @@
+package org.lucares.pdb.diskstorage;
+
+public class DiskStorageException extends RuntimeException {
+
+    private static final long serialVersionUID = 1683775743640383633L;
+
+    public DiskStorageException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+
+    public DiskStorageException(final String message) {
+        super(message);
+    }
+
+    public DiskStorageException(final Throwable cause) {
+        super(cause);
+    }
+
+}
@@ -1,6 +1,5 @@
 package org.lucares.pdb.map;
 
-import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Path;
@@ -126,8 +125,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
 
     private final LRUCache<K, V> valueCache = new LRUCache<>(1_000);
 
-    public PersistentMap(final Path path, final EncoderDecoder<K> keyEncoder, final EncoderDecoder<V> valueEncoder)
-            throws IOException {
+    public PersistentMap(final Path path, final EncoderDecoder<K> keyEncoder, final EncoderDecoder<V> valueEncoder) {
         this.diskStore = new DiskStorage(path);
         this.keyEncoder = keyEncoder;
         this.valueEncoder = valueEncoder;
@@ -135,7 +133,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
     }
 
     @Override
-    public void close() throws IOException {
+    public void close() {
         diskStore.close();
     }
 
@@ -143,7 +141,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
         this.maxEntriesInNode = maxEntriesInNode;
     }
 
-    private void initIfNew() throws IOException {
+    private void initIfNew() {
         if (diskStore.size() < BLOCK_SIZE) {
             final long nodeOffsetToRootNode = diskStore.allocateBlock(diskStore.minAllocationSize());
             Preconditions.checkEqual(nodeOffsetToRootNode, NODE_OFFSET_TO_ROOT_NODE,
@@ -166,13 +164,13 @@ public class PersistentMap<K, V> implements AutoCloseable {
         }
     }
 
-    public synchronized void putAllValues(final Map<K, V> map) throws IOException {
+    public synchronized void putAllValues(final Map<K, V> map) {
         for (final Entry<K, V> e : map.entrySet()) {
             putValue(e.getKey(), e.getValue());
         }
     }
 
-    public synchronized V putValue(final K key, final V value) throws IOException {
+    public synchronized V putValue(final K key, final V value) {
 
         final V cachedValue = valueCache.get(key);
         if (cachedValue != null && cachedValue == value) {
@@ -187,7 +185,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
         return oldValue;
     }
 
-    public synchronized V getValue(final K key) throws IOException {
+    public synchronized V getValue(final K key) {
 
         final V cachedValue = valueCache.get(key);
         if (cachedValue != null) {
@@ -201,13 +199,13 @@ public class PersistentMap<K, V> implements AutoCloseable {
         return result;
     }
 
-    private byte[] putValue(final byte[] key, final byte[] value) throws IOException {
+    private byte[] putValue(final byte[] key, final byte[] value) {
         final long rootNodeOffset = readNodeOffsetOfRootNode();
         final Stack<PersistentMapDiskNode> parents = new Stack<>();
         return insert(parents, rootNodeOffset, key, value);
     }
 
-    private byte[] getValue(final byte[] key) throws IOException {
+    private byte[] getValue(final byte[] key) {
         final long rootNodeOffset = readNodeOffsetOfRootNode();
         final NodeEntry entry = findNodeEntry(rootNodeOffset, key);
 
@@ -215,7 +213,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
     }
 
     private byte[] insert(final Stack<PersistentMapDiskNode> parents, final long nodeOffest, final byte[] key,
-            final byte[] value) throws IOException {
+            final byte[] value) {
         final PersistentMapDiskNode node = getNode(nodeOffest);
 
         final NodeEntry entry = node.getNodeEntryTo(key);
@@ -266,7 +264,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
     }
 
     private PersistentMapDiskNode splitNode(final Stack<PersistentMapDiskNode> parents,
-            final PersistentMapDiskNode node) throws IOException {
+            final PersistentMapDiskNode node) {
 
         // System.out.println("\n\npre split node: " + node + "\n");
 
@@ -321,7 +319,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
         }
     }
 
-    private NodeEntry findNodeEntry(final long nodeOffest, final byte[] key) throws IOException {
+    private NodeEntry findNodeEntry(final long nodeOffest, final byte[] key) {
         final PersistentMapDiskNode node = getNode(nodeOffest);
 
         final var entry = node.getNodeEntryTo(key);
@@ -344,7 +342,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
         return VariableByteEncoder.decodeFirstValue(entry.getValue());
     }
 
-    private PersistentMapDiskNode getNode(final long nodeOffset) throws IOException {
+    private PersistentMapDiskNode getNode(final long nodeOffset) {
 
         PersistentMapDiskNode node = nodeCache.get(nodeOffset);
         if (node == null) {
@@ -358,7 +356,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
         return node;
     }
 
-    private void writeNode(final PersistentMapDiskNode node) throws IOException {
+    private void writeNode(final PersistentMapDiskNode node) {
         LOGGER.trace("writing node {}", node);
         final long nodeOffest = node.getNodeOffset();
         // final DiskBlock diskBlock = diskStore.getDiskBlock(nodeOffest, BLOCK_SIZE);
@@ -373,7 +371,7 @@ public class PersistentMap<K, V> implements AutoCloseable {
         // diskBlock.force(); // makes writing nodes slower by factor 800 (sic!)
     }
 
-    public synchronized void print() throws IOException {
+    public synchronized void print() {
 
         visitNodeEntriesPreOrder((node, parentNode, nodeEntry, depth) -> {
 
@@ -386,13 +384,13 @@ public class PersistentMap<K, V> implements AutoCloseable {
         });
     }
 
-    public synchronized void visitNodeEntriesPreOrder(final VisitorCallback visitor) throws IOException {
+    public synchronized void visitNodeEntriesPreOrder(final VisitorCallback visitor) {
         final long rootNodeOffset = readNodeOffsetOfRootNode();
         visitNodeEntriesPreOrderRecursively(rootNodeOffset, null, visitor, 0);
     }
 
     private void visitNodeEntriesPreOrderRecursively(final long nodeOffset, final PersistentMapDiskNode parentNode,
-            final VisitorCallback visitor, final int depth) throws IOException {
+            final VisitorCallback visitor, final int depth) {
         final PersistentMapDiskNode node = getNode(nodeOffset);
 
         for (final NodeEntry child : node.getEntries()) {
@@ -409,15 +407,14 @@ public class PersistentMap<K, V> implements AutoCloseable {
         FIND, ITERATE
     }
 
-    public synchronized void visitValues(final K keyPrefix, final Visitor<K, V> visitor) throws IOException {
+    public synchronized void visitValues(final K keyPrefix, final Visitor<K, V> visitor) {
         final byte[] encodedKeyPrefix = keyEncoder.encode(keyPrefix);
 
         final long rootNodeOffset = readNodeOffsetOfRootNode();
         iterateNodeEntryByPrefix(rootNodeOffset, encodedKeyPrefix, visitor);
     }
 
-    private void iterateNodeEntryByPrefix(final long nodeOffest, final byte[] keyPrefix, final Visitor<K, V> visitor)
-            throws IOException {
+    private void iterateNodeEntryByPrefix(final long nodeOffest, final byte[] keyPrefix, final Visitor<K, V> visitor) {
         final PersistentMapDiskNode node = getNode(nodeOffest);
 
         // list of children that might contain a key with the keyPrefix
@@ -447,13 +444,13 @@ public class PersistentMap<K, V> implements AutoCloseable {
         }
     }
 
-    private long readNodeOffsetOfRootNode() throws IOException {
+    private long readNodeOffsetOfRootNode() {
         final DiskBlock diskBlock = diskStore.getDiskBlock(NODE_OFFSET_TO_ROOT_NODE, diskStore.minAllocationSize());
 
         return diskBlock.getByteBuffer().getLong(0);
     }
 
-    private void writeNodeOffsetOfRootNode(final long newNodeOffsetToRootNode) throws IOException {
+    private void writeNodeOffsetOfRootNode(final long newNodeOffsetToRootNode) {
         final DiskBlock diskBlock = diskStore.getDiskBlock(NODE_OFFSET_TO_ROOT_NODE, diskStore.minAllocationSize());
         diskBlock.getByteBuffer().putLong(0, newNodeOffsetToRootNode);
         diskBlock.force();
@@ -1,12 +1,10 @@
 package org.lucares.pdb.datastore;
 
-import java.io.IOException;
 import java.util.List;
 import java.util.function.Function;
 import java.util.stream.Stream;
 
 import org.lucares.collections.LongList;
-import org.lucares.pdb.api.RuntimeIOException;
 import org.lucares.pdb.api.Tags;
 import org.lucares.pdb.blockstorage.BSFile;
 import org.lucares.pdb.blockstorage.TimeSeriesFile;
@@ -26,13 +24,9 @@ public class PdbFile {
 
         @Override
         public Stream<LongList> apply(final PdbFile pdbFile) {
-            try {
-                final DiskStorage diskStorage = clusteredDiskStorage.getExisting(pdbFile.getClusterId());
-                final TimeSeriesFile bsFile = TimeSeriesFile.existingFile(pdbFile.getRootBlockNumber(), diskStorage);
-                return bsFile.streamOfLongLists();
-            } catch (final IOException e) {
-                throw new RuntimeIOException(e);
-            }
+            final DiskStorage diskStorage = clusteredDiskStorage.getExisting(pdbFile.getClusterId());
+            final TimeSeriesFile bsFile = TimeSeriesFile.existingFile(pdbFile.getRootBlockNumber(), diskStorage);
+            return bsFile.streamOfLongLists();
         }
     }
 
@@ -1,12 +1,10 @@
 package org.lucares.pdb.datastore;
 
-import java.io.IOException;
-
 public class ReadException extends RuntimeException {
 
     private static final long serialVersionUID = 1L;
 
-    public ReadException(final IOException e) {
+    public ReadException(final RuntimeException e) {
         super(e);
     }
 }
@@ -22,28 +22,20 @@ public class ClusteredDiskStore {
     public ClusteredDiskStore(final Path storageBasePath, final String filename) {
 
         creator = clusterId -> {
-            try {
-                final Path file = storageBasePath.resolve(clusterId.getClusterId()).resolve(filename);
-                final boolean isNew = !Files.exists(file);
-                final DiskStorage diskStorage = new DiskStorage(file);
-                if (isNew) {
-                    diskStorage.ensureAlignmentForNewBlocks(BSFile.BLOCK_SIZE);
-                }
-                return diskStorage;
-            } catch (final IOException e) {
-                throw new RuntimeIOException(e);
+            final Path file = storageBasePath.resolve(clusterId.getClusterId()).resolve(filename);
+            final boolean isNew = !Files.exists(file);
+            final DiskStorage diskStorage = new DiskStorage(file);
+            if (isNew) {
+                diskStorage.ensureAlignmentForNewBlocks(BSFile.BLOCK_SIZE);
             }
+            return diskStorage;
         };
         supplier = clusterId -> {
-            try {
-                final Path file = storageBasePath.resolve(clusterId.getClusterId()).resolve(filename);
-                if (Files.exists(file)) {
-                    return new DiskStorage(file);
-                }
-                return null;
-            } catch (final IOException e) {
-                throw new RuntimeIOException(e);
+            final Path file = storageBasePath.resolve(clusterId.getClusterId()).resolve(filename);
+            if (Files.exists(file)) {
+                return new DiskStorage(file);
             }
+            return null;
         };
     }
 
@@ -56,12 +48,8 @@ public class ClusteredDiskStore {
     }
 
     public long allocateBlock(final ClusterId clusterId, final int blockSize) {
-        try {
-            final DiskStorage diskStorage = getCreateIfNotExists(clusterId);
-            return diskStorage.allocateBlock(blockSize);
-        } catch (final IOException e) {
-            throw new RuntimeIOException(e);
-        }
+        final DiskStorage diskStorage = getCreateIfNotExists(clusterId);
+        return diskStorage.allocateBlock(blockSize);
     }
 
     public LongStreamFile streamExistingFile(final Long diskStoreOffsetForDocIdsOfTag, final ClusterId clusterId) {
@@ -79,7 +67,7 @@ public class ClusteredDiskStore {
         for (final DiskStorage diskStorage : diskStorages.values()) {
             try {
                 diskStorage.close();
-            } catch (final IOException e) {
+            } catch (final RuntimeException e) {
                 throwables.add(e);
            }
        }
@@ -1,6 +1,5 @@
 package org.lucares.pdb.datastore.internal;
 
-import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.ArrayList;
@@ -8,8 +7,6 @@ import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Function;
 
-import org.lucares.pdb.api.RuntimeIOException;
-import org.lucares.pdb.datastore.ReadRuntimeException;
 import org.lucares.pdb.map.PersistentMap;
 import org.lucares.pdb.map.PersistentMap.EncoderDecoder;
 import org.lucares.pdb.map.Visitor;
@@ -37,23 +34,15 @@ public class ClusteredPersistentMap<K, V, P> implements AutoCloseable {
 
         this.valueEncoder = valueEncoder;
         creator = clusterId -> {
-            try {
-                final Path file = storageBasePath.resolve(clusterId.getClusterId()).resolve(filename);
-                return new PersistentMap<>(file, keyEncoder, valueEncoder);
-            } catch (final IOException e) {
-                throw new RuntimeIOException(e);
-            }
+            final Path file = storageBasePath.resolve(clusterId.getClusterId()).resolve(filename);
+            return new PersistentMap<>(file, keyEncoder, valueEncoder);
         };
         supplier = clusterId -> {
-            try {
-                final Path file = storageBasePath.resolve(clusterId.getClusterId()).resolve(filename);
-                if (Files.exists(file)) {
-                    return new PersistentMap<>(file, keyEncoder, valueEncoder);
-                }
-                return null;
-            } catch (final IOException e) {
-                throw new RuntimeIOException(e);
+            final Path file = storageBasePath.resolve(clusterId.getClusterId()).resolve(filename);
+            if (Files.exists(file)) {
+                return new PersistentMap<>(file, keyEncoder, valueEncoder);
             }
+            return null;
         };
     }
 
@@ -66,51 +55,49 @@ public class ClusteredPersistentMap<K, V, P> implements AutoCloseable {
     }
 
    public V getValue(final ClusterId clusterId, final K key) {
-        try {
-
-            final PersistentMap<K, P> map = getExistingPersistentMap(clusterId);
-            final P persistedValue = map != null ? map.getValue(key) : null;
-            return valueEncoder.decodeValue(clusterId, persistedValue);
-        } catch (final IOException e) {
-            throw new ReadRuntimeException(e);
-        }
+        final PersistentMap<K, P> map = getExistingPersistentMap(clusterId);
+        final P persistedValue = map != null ? map.getValue(key) : null;
+        return valueEncoder.decodeValue(clusterId, persistedValue);
     }
 
     public List<V> getValues(final ClusterIdSource clusterIdSource, final K key) {
-        try {
-            final List<V> result = new ArrayList<>();
-            final List<ClusterId> clusterIds = clusterIdSource.toClusterIds();
-
-            for (final ClusterId clusterId : clusterIds) {
-                final PersistentMap<K, P> map = getPersistentMapCreateIfNotExists(clusterId);
-                if (map != null) {
-                    final V value = valueEncoder.decodeValue(clusterId, map.getValue(key));
-                    if (value != null) {
-                        result.add(value);
-                    }
-                }
-            }
-
-            return result;
-        } catch (final IOException e) {
-            throw new ReadRuntimeException(e);
-        }
+        final List<V> result = new ArrayList<>();
+        final List<ClusterId> clusterIds = clusterIdSource.toClusterIds();
+
+        for (final ClusterId clusterId : clusterIds) {
+            final PersistentMap<K, P> map = getPersistentMapCreateIfNotExists(clusterId);
+            if (map != null) {
+                final V value = valueEncoder.decodeValue(clusterId, map.getValue(key));
+                if (value != null) {
+                    result.add(value);
+                }
+            }
+        }
+
+        return result;
     }
 
     public V putValue(final ClusterId clusterId, final K key, final V value) {
-        try {
-
-            final PersistentMap<K, P> map = getPersistentMapCreateIfNotExists(clusterId);
-            final P persistedValue = valueEncoder.encodeValue(value);
-            final P previousPersistedValue = map.putValue(key, persistedValue);
-            return valueEncoder.decodeValue(clusterId, previousPersistedValue);
-        } catch (final IOException e) {
-            throw new ReadRuntimeException(e);
-        }
+        final PersistentMap<K, P> map = getPersistentMapCreateIfNotExists(clusterId);
+        final P persistedValue = valueEncoder.encodeValue(value);
+        final P previousPersistedValue = map.putValue(key, persistedValue);
+        return valueEncoder.decodeValue(clusterId, previousPersistedValue);
    }
 
     public void visitValues(final ClusterId clusterId, final K keyPrefix, final Visitor<K, V> visitor) {
-        try {
+        final PersistentMap<K, P> map = getExistingPersistentMap(clusterId);
+        if (map != null) {
+            map.visitValues(keyPrefix, (k, p) -> {
+                final V value = valueEncoder.decodeValue(clusterId, p);
+                visitor.visit(k, value);
+            });
+        }
+    }
+
+    public void visitValues(final ClusterIdSource clusterIdSource, final K keyPrefix, final Visitor<K, V> visitor) {
+        final List<ClusterId> clusterIds = clusterIdSource.toClusterIds();
+
+        for (final ClusterId clusterId : clusterIds) {
             final PersistentMap<K, P> map = getExistingPersistentMap(clusterId);
             if (map != null) {
                 map.visitValues(keyPrefix, (k, p) -> {
@@ -118,26 +105,6 @@ public class ClusteredPersistentMap<K, V, P> implements AutoCloseable {
                     visitor.visit(k, value);
                 });
             }
-        } catch (final IOException e) {
-            throw new ReadRuntimeException(e);
-        }
-    }
-
-    public void visitValues(final ClusterIdSource clusterIdSource, final K keyPrefix, final Visitor<K, V> visitor) {
-        try {
-            final List<ClusterId> clusterIds = clusterIdSource.toClusterIds();
-
-            for (final ClusterId clusterId : clusterIds) {
-                final PersistentMap<K, P> map = getExistingPersistentMap(clusterId);
-                if (map != null) {
-                    map.visitValues(keyPrefix, (k, p) -> {
-                        final V value = valueEncoder.decodeValue(clusterId, p);
-                        visitor.visit(k, value);
-                    });
-                }
-            }
-        } catch (final IOException e) {
-            throw new ReadRuntimeException(e);
         }
     }
 
@@ -148,7 +115,7 @@ public class ClusteredPersistentMap<K, V, P> implements AutoCloseable {
         for (final PersistentMap<K, P> map : maps.values()) {
             try {
                 map.close();
-            } catch (final IOException e) {
+            } catch (final RuntimeException e) {
                 throwables.add(e);
             }
         }
@@ -371,7 +371,7 @@ public class DataStore implements AutoCloseable {
                 final Doc doc = docsForTags.get();
                 final PdbFile pdbFile = new PdbFile(clusterId, doc.getRootBlockNumber(), tags);
                 writer = new PdbWriter(pdbFile, diskStorage.getExisting(clusterId));
-            } catch (final IOException e) {
+            } catch (final RuntimeException e) {
                 throw new ReadException(e);
            }
        } else {
@@ -389,12 +389,12 @@ public class DataStore implements AutoCloseable {
             METRICS_LOGGER_NEW_WRITER.debug("newPdbWriter took {}ms tags: {}",
                     (System.nanoTime() - start) / 1_000_000.0, tags);
             return result;
-        } catch (final IOException e) {
+        } catch (final RuntimeException e) {
             throw new WriteException(e);
         }
     }
 
-    private PdbFile createNewPdbFile(final ClusterId clusterId, final Tags tags) throws IOException {
+    private PdbFile createNewPdbFile(final ClusterId clusterId, final Tags tags) {
 
         final long rootBlockNumber = createNewFile(clusterId, tags);
 
@@ -1,7 +1,6 @@
 package org.lucares.pdb.datastore.internal;
 
 import java.io.Flushable;
-import java.io.IOException;
 import java.util.Optional;
 
 import org.lucares.pdb.api.Entry;
@@ -25,7 +24,7 @@ class PdbWriter implements AutoCloseable, Flushable {
 
     private final TimeSeriesFile bsFile;
 
-    public PdbWriter(final PdbFile pdbFile, final DiskStorage diskStorage) throws IOException {
+    public PdbWriter(final PdbFile pdbFile, final DiskStorage diskStorage) {
         this.pdbFile = pdbFile;
 
         bsFile = TimeSeriesFile.existingFile(pdbFile.getRootBlockNumber(), diskStorage);
@@ -47,7 +46,7 @@ class PdbWriter implements AutoCloseable, Flushable {
             bsFile.appendTimeValue(epochMilli, value);
 
             lastEpochMilli = epochMilli;
-        } catch (final IOException e) {
+        } catch (final RuntimeException e) {
             throw new WriteException(e);
         }
     }
@@ -64,8 +63,7 @@ class PdbWriter implements AutoCloseable, Flushable {
         bsFile.flush();
     }
 
-    public static void writeEntry(final PdbFile pdbFile, final DiskStorage diskStorage, final Entry... entries)
-            throws IOException {
+    public static void writeEntry(final PdbFile pdbFile, final DiskStorage diskStorage, final Entry... entries) {
         try (PdbWriter writer = new PdbWriter(pdbFile, diskStorage)) {
             for (final Entry entry : entries) {
                 writer.write(entry.getEpochMilli(), entry.getValue());