switch back to my own HotEntryCache implementation

Guava's cache does not evict entries reliably by
time: if you configure a cache with a lifetime of n
seconds, you cannot rely on an entry actually being
evicted n seconds later.
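This matches Guava's documented cleanup strategy: expired entries are only evicted lazily, during later cache operations or an explicit cleanUp() call, and the removal listener fires only then. A minimal sketch of how that shows up (class name, keys, and timings are illustrative, not taken from this repository):

```java
import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class GuavaLazyEvictionDemo {
    public static void main(final String[] args) throws InterruptedException {
        final Cache<String, String> cache = CacheBuilder.newBuilder()
                .expireAfterWrite(1, TimeUnit.SECONDS)
                .removalListener(notification ->
                        System.out.println("removed: " + notification.getKey()))
                .build();

        cache.put("writer", "an open resource that should be closed on eviction");
        Thread.sleep(2_000);

        // The entry is expired, but nothing has been printed yet: Guava does
        // not run a background cleanup thread, so the removal listener has
        // not fired.
        System.out.println("size after expiry: " + cache.size()); // typically still 1

        // Eviction (and the listener) only happens during other cache
        // operations or an explicit cleanUp().
        cache.cleanUp();
        System.out.println("size after cleanUp: " + cache.size()); // 0
    }
}
```

For a cache like the writerCache below, whose listener calls writer.close(), this presumably means a writer could stay open indefinitely on an idle cache, which is what the listener-based HotEntryCache avoids.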
2019-08-18 20:14:14 +02:00
parent 0dc908c79c
commit 4d9ea6d2a8
3 changed files with 815 additions and 72 deletions


@@ -37,6 +37,7 @@ import org.lucares.pdb.datastore.lang.QueryLanguageParser;
import org.lucares.pdb.map.PersistentMap;
import org.lucares.utils.Preconditions;
import org.lucares.utils.cache.HotEntryCache;
import org.lucares.utils.cache.HotEntryCache.EventType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -73,7 +74,7 @@ public class DataStore implements AutoCloseable {
// A Doc will never be changed once it is created. Therefore we can cache them
// easily.
private final HotEntryCache<Long, Doc> docIdToDocCache = new HotEntryCache<>(Duration.ofMinutes(30), 100_000);
private final HotEntryCache<Long, Doc> docIdToDocCache = new HotEntryCache<>(Duration.ofMinutes(30)/* , 100_000 */);
private final HotEntryCache<Tags, PdbWriter> writerCache;
@@ -102,8 +103,9 @@ public class DataStore implements AutoCloseable {
queryCompletionIndex = new QueryCompletionIndex(storageBasePath);
writerCache = new HotEntryCache<>(Duration.ofSeconds(10), 1000);
writerCache.addListener((tags, writer) -> writer.close());
writerCache = new HotEntryCache<>(Duration.ofSeconds(10)/* , 1000 */);
// writerCache.addListener((tags, writer) -> writer.close());
writerCache.addListener(event -> event.getValue().close(), EventType.EVICTED, EventType.REMOVED);
}
private Path keyCompressionFile(final Path dataDirectory) throws IOException {
@@ -234,7 +236,8 @@ public class DataStore implements AutoCloseable {
final SortedSet<String> result = new TreeSet<>();
if (query.getQuery().isEmpty()) {
final PartitionIdSource partitionIdSource = new DatePartitioner(query.getDateRange());
tagToDocsId.visitValues(partitionIdSource, new Tag(key, ""), (tag, __) -> result.add(tag.getValueAsString()));
tagToDocsId.visitValues(partitionIdSource, new Tag(key, ""),
(tag, __) -> result.add(tag.getValueAsString()));
} else {
final List<Doc> docs = search(query);
for (final Doc doc : docs) {
@@ -317,23 +320,23 @@ public class DataStore implements AutoCloseable {
}
private Doc getDocByDocId(final ParititionId partitionId, final Long docId) {
return docIdToDocCache.putIfAbsent(docId, () -> {
return docIdToDoc.getValue(partitionId, docId);
return docIdToDocCache.putIfAbsent(docId, documentId -> {
return docIdToDoc.getValue(partitionId, documentId);
});
}
private Doc getDocByDocId(final DateTimeRange dateRange, final Long docId) {
return docIdToDocCache.putIfAbsent(docId, () -> {
return docIdToDocCache.putIfAbsent(docId, documentId -> {
final DatePartitioner datePartitioner = new DatePartitioner(dateRange);
final List<Doc> docIds = docIdToDoc.getValues(datePartitioner, docId);
final List<Doc> docIds = docIdToDoc.getValues(datePartitioner, documentId);
if (docIds.size() == 1) {
return docIds.get(0);
} else if (docIds.size() > 1) {
throw new IllegalStateException(
"Found multiple documents for " + dateRange + " and docId " + docId + ": " + docIds);
"Found multiple documents for " + dateRange + " and docId " + documentId + ": " + docIds);
}
throw new IllegalStateException("Found no documents for " + dateRange + " and docId " + docId);
throw new IllegalStateException("Found no documents for " + dateRange + " and docId " + documentId);
});
}
@@ -351,7 +354,7 @@ public class DataStore implements AutoCloseable {
private PdbWriter getWriter(final ParititionId partitionId, final Tags tags) throws ReadException, WriteException {
return writerCache.putIfAbsent(tags, () -> getWriterInternal(partitionId, tags));
return writerCache.putIfAbsent(tags, t -> getWriterInternal(partitionId, tags));
}
// visible for test