cluster the indices

2019-03-31 09:01:55 +02:00
parent 95f2f26966
commit 2a1885a77f
11 changed files with 262 additions and 99 deletions

View File

@@ -1,6 +1,6 @@
package org.lucares.pdb.datastore.internal;
-public class ClusterId {
+public class ClusterId implements Comparable&lt;ClusterId&gt; {
private final String clusterId;
/**
@@ -18,6 +18,11 @@ public class ClusterId {
return new ClusterId(clusterId);
}
@Override
public int compareTo(final ClusterId other) {
return clusterId.compareTo(other.getClusterId());
}
/**
* @return the id, e.g. a time like 201902 (cluster for entries of February
* 2019)

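Delegating compareTo to the underlying string is safe here because cluster ids are fixed-width yyyyMM values, so lexicographic order coincides with chronological order. A minimal sketch (illustrative, not part of this commit):

final ClusterId nov2018 = ClusterId.of("201811");
final ClusterId feb2019 = ClusterId.of("201902");
// "201811" < "201902" as strings, and November 2018 precedes February 2019.
// This would break for ids of varying width; the fixed format keeps it correct.
assert nov2018.compareTo(feb2019) < 0;
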
View File

@@ -1,7 +1,7 @@
package org.lucares.pdb.datastore.internal;
-import java.util.List;
+import java.util.Set;
public interface ClusterIdSource {
-List&lt;ClusterId&gt; toClusterIds();
+Set&lt;ClusterId&gt; toClusterIds(Set&lt;? extends ClusterId&gt; availableClusters);
}

View File

@@ -0,0 +1,95 @@
package org.lucares.pdb.datastore.internal;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import org.lucares.collections.LongList;
public class ClusteredLongList implements Iterable<ClusterId> {
private final Map<ClusterId, LongList> lists = new HashMap<>();
public LongList put(final ClusterId clusterId, final LongList longList) {
return lists.put(clusterId, longList);
}
public LongList get(final ClusterId clusterId) {
return lists.get(clusterId);
}
@Override
public Iterator<ClusterId> iterator() {
return lists.keySet().iterator();
}
public static ClusteredLongList intersection(final ClusteredLongList a, final ClusteredLongList b) {
final ClusteredLongList result = new ClusteredLongList();
final Set<ClusterId> clusterIds = new HashSet<>();
clusterIds.addAll(a.lists.keySet());
clusterIds.addAll(b.lists.keySet());
for (final ClusterId clusterId : clusterIds) {
final LongList x = a.get(clusterId);
final LongList y = b.get(clusterId);
if (x != null && y != null) {
final LongList intersection = LongList.intersection(x, y);
result.put(clusterId, intersection);
} else {
// one list is empty => the intersection is empty
}
}
return result;
}
public static ClusteredLongList union(final ClusteredLongList a, final ClusteredLongList b) {
final ClusteredLongList result = new ClusteredLongList();
final Set<ClusterId> clusterIds = new HashSet<>();
clusterIds.addAll(a.lists.keySet());
clusterIds.addAll(b.lists.keySet());
for (final ClusterId clusterId : clusterIds) {
final LongList x = a.get(clusterId);
final LongList y = b.get(clusterId);
if (x != null && y != null) {
final LongList union = LongList.union(x, y);
result.put(clusterId, union);
} else if (x != null) {
result.put(clusterId, x.clone());
} else if (y != null) {
result.put(clusterId, y.clone());
}
}
return result;
}
public int size() {
int size = 0;
for (final LongList longList : lists.values()) {
size += longList.size();
}
return size;
}
public boolean isSorted() {
for (final LongList longList : lists.values()) {
if (!longList.isSorted()) {
return false;
}
}
return true;
}
public void removeAll(final ClusteredLongList remove) {
for (final ClusterId clusterId : lists.keySet()) {
final LongList removeLongList = remove.get(clusterId);
if (removeLongList != null) {
lists.get(clusterId).removeAll(removeLongList);
}
}
}
}

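A usage sketch for the new per-cluster container (illustrative, not part of this commit; it assumes a LongList.add(long) method, which this diff does not show):

final ClusterId feb = ClusterId.of("201902");
final ClusterId mar = ClusterId.of("201903");

final ClusteredLongList a = new ClusteredLongList();
final LongList aFeb = new LongList();
aFeb.add(1);
aFeb.add(3);
a.put(feb, aFeb);

final ClusteredLongList b = new ClusteredLongList();
final LongList bFeb = new LongList();
bFeb.add(3);
bFeb.add(5);
b.put(feb, bFeb);
final LongList bMar = new LongList();
bMar.add(7);
b.put(mar, bMar);

// intersection works cluster by cluster: 201902 -> [3]; 201903 exists only in b,
// so it is dropped entirely (see the empty else branch above).
final ClusteredLongList both = ClusteredLongList.intersection(a, b);

// union keeps one-sided clusters: 201902 -> [1, 3, 5]; 201903 -> [7] (cloned).
final ClusteredLongList either = ClusteredLongList.union(a, b);
// both.size() == 1 and either.size() == 4, summed across clusters.
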
View File

@@ -1,12 +1,15 @@
package org.lucares.pdb.datastore.internal;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import org.lucares.pdb.api.RuntimeIOException;
import org.lucares.pdb.map.PersistentMap;
import org.lucares.pdb.map.PersistentMap.EncoderDecoder;
import org.lucares.pdb.map.Visitor;
@@ -17,7 +20,7 @@ import org.lucares.pdb.map.Visitor;
*
* @param <K> the key
* @param <V> the value used by the consumer of this
* {@link ClusteredPersistentMap}
* @param <P> the value that is stored
*/
public class ClusteredPersistentMap<K, V, P> implements AutoCloseable {
@@ -44,6 +47,28 @@ public class ClusteredPersistentMap<K, V, P> implements AutoCloseable {
}
return null;
};
preload(storageBasePath);
}
private void preload(final Path storageBasePath) {
try {
Files.list(storageBasePath)//
.filter(Files::isDirectory)//
.map(Path::getFileName)//
.map(Path::toString)//
.map(ClusterId::of)//
.forEach(clusterId -> maps.computeIfAbsent(clusterId, supplier));
} catch (final IOException e) {
throw new RuntimeIOException(e);
}
}
private Set<ClusterId> getAllClusterIds() {
return maps.keySet();
}
public Set<ClusterId> getAvailableClusterIds(final ClusterIdSource clusterIdSource) {
return clusterIdSource.toClusterIds(getAllClusterIds());
}
private PersistentMap<K, P> getExistingPersistentMap(final ClusterId clusterId) {
@@ -62,7 +87,7 @@ public class ClusteredPersistentMap<K, V, P> implements AutoCloseable {
public List<V> getValues(final ClusterIdSource clusterIdSource, final K key) {
final List<V> result = new ArrayList<>();
-final List&lt;ClusterId&gt; clusterIds = clusterIdSource.toClusterIds();
+final Set&lt;ClusterId&gt; clusterIds = clusterIdSource.toClusterIds(getAllClusterIds());
for (final ClusterId clusterId : clusterIds) {
final PersistentMap<K, P> map = getPersistentMapCreateIfNotExists(clusterId);
@@ -95,7 +120,7 @@ public class ClusteredPersistentMap<K, V, P> implements AutoCloseable {
}
public void visitValues(final ClusterIdSource clusterIdSource, final K keyPrefix, final Visitor<K, V> visitor) {
-final List&lt;ClusterId&gt; clusterIds = clusterIdSource.toClusterIds();
+final Set&lt;ClusterId&gt; clusterIds = clusterIdSource.toClusterIds(getAllClusterIds());
for (final ClusterId clusterId : clusterIds) {
final PersistentMap<K, P> map = getExistingPersistentMap(clusterId);

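Two assumptions worth spelling out (illustrative, not part of the diff): preload expects one subdirectory per cluster under storageBasePath, named by the cluster id (e.g. storageBasePath/201902), and ClusterIdSource is now a single-method interface, so a source that accepts every cluster present on disk can be a lambda:

// Hypothetical match-all source. The contract is inverted: the source no longer
// enumerates a date range by itself, it filters the clusters that actually exist.
final ClusterIdSource allClusters = availableClusters -> new LinkedHashSet<>(availableClusters);
final Set<ClusterId> ids = map.getAvailableClusterIds(allClusters); // map: an existing ClusteredPersistentMap
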
View File

@@ -73,7 +73,7 @@ public class DataStore implements AutoCloseable {
// A Doc will never be changed once it is created. Therefore we can cache them
// easily.
-private final HotEntryCache&lt;Long, Doc&gt; docIdToDocCache = new HotEntryCache&lt;&gt;(Duration.ofMillis(30), 100_000);
+private final HotEntryCache&lt;Long, Doc&gt; docIdToDocCache = new HotEntryCache&lt;&gt;(Duration.ofMinutes(30), 100_000);
private final HotEntryCache<Tags, PdbWriter> writerCache;
@@ -103,7 +103,7 @@ public class DataStore implements AutoCloseable {
queryCompletionIndex = new QueryCompletionIndex(storageBasePath);
writerCache = new HotEntryCache<>(Duration.ofSeconds(10), 1000);
-writerCache.addListener((k, v) -> v.close());
+writerCache.addListener((tags, writer) -> writer.close());
}
private Path keyCompressionFile(final Path dataDirectory) throws IOException {
@@ -196,15 +196,11 @@ public class DataStore implements AutoCloseable {
public List<Doc> search(final Query query) {
try {
final List<Doc> result = new ArrayList<>();
-final List&lt;ClusterId&gt; clusterIds = DateIndexExtension.toClusterIds(query.getDateRange());
-for (final ClusterId clusterId : clusterIds) {
-final LongList docIdsList = executeQuery(clusterId, query.getQuery());
-LOGGER.trace("query {} found {} docs", query, docIdsList.size());
-final List&lt;Doc&gt; docs = mapDocIdsToDocs(clusterId, docIdsList);
-result.addAll(docs);
-}
+final ClusteredLongList docIdsList = executeQuery(query);
+LOGGER.trace("query {} found {} docs", query, docIdsList.size());
+final List&lt;Doc&gt; docs = mapDocIdsToDocs(docIdsList);
+result.addAll(docs);
return result;
} catch (final IOException e) {
@@ -213,15 +209,8 @@ public class DataStore implements AutoCloseable {
}
public int count(final Query query) {
-int count = 0;
-final List&lt;ClusterId&gt; clusterIds = DateIndexExtension.toClusterIds(query.getDateRange());
-for (final ClusterId clusterId : clusterIds) {
-final LongList docIdsList = executeQuery(clusterId, query.getQuery());
-count += docIdsList.size();
-}
-return count;
+final ClusteredLongList docIdsList = executeQuery(query);
+return docIdsList.size();
}
public List<String> getAvailableFields(final DateTimeRange dateRange) {
@@ -261,31 +250,38 @@ public class DataStore implements AutoCloseable {
}
-private LongList executeQuery(final ClusterId clusterId, final String query) {
+private ClusteredLongList executeQuery(final Query query) {
final long start = System.nanoTime();
synchronized (docIdToDoc) {
-final Expression expression = QueryLanguageParser.parse(query);
-final ExpressionToDocIdVisitor visitor = new ExpressionToDocIdVisitor(clusterId, tagToDocsId, diskStorage);
-final LongList docIdsList = expression.visit(visitor);
+final Expression expression = QueryLanguageParser.parse(query.getQuery());
+final ExpressionToDocIdVisitor visitor = new ExpressionToDocIdVisitor(query.getDateRange(), tagToDocsId,
+diskStorage);
+final ClusteredLongList docIdsList = expression.visit(visitor);
EXECUTE_QUERY_LOGGER.debug("executeQuery({}) took {}ms returned {} results ", query,
(System.nanoTime() - start) / 1_000_000.0, docIdsList.size());
return docIdsList;
}
}
-private List&lt;Doc&gt; mapDocIdsToDocs(final ClusterId clusterId, final LongList docIdsList) throws IOException {
+private List&lt;Doc&gt; mapDocIdsToDocs(final ClusteredLongList docIdsList) throws IOException {
final List&lt;Doc&gt; result = new ArrayList&lt;&gt;(docIdsList.size());
synchronized (docIdToDoc) {
final long start = System.nanoTime();
-for (int i = 0; i &lt; docIdsList.size(); i++) {
-final long docId = docIdsList.get(i);
-final Doc doc = getDocByDocId(clusterId, docId);
-Objects.requireNonNull(doc, "Doc with id " + docId + " did not exist.");
-result.add(doc);
-}
+for (final ClusterId clusterId : docIdsList) {
+final LongList docIds = docIdsList.get(clusterId);
+for (int i = 0; i &lt; docIds.size(); i++) {
+final long docId = docIds.get(i);
+final Doc doc = getDocByDocId(clusterId, docId);
+Objects.requireNonNull(doc, "Doc with id " + docId + " did not exist.");
+result.add(doc);
+}
+}
MAP_DOCS_TO_DOCID.debug("mapDocIdsToDocs({}): {}ms", docIdsList.size(),
(System.nanoTime() - start) / 1_000_000.0);
}

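The public read path keeps its signature; only the per-cluster fan-out moved inside (sketch; the query string is hypothetical, Query(queryString, dateRange) is the constructor used in the test change below):

final DateTimeRange lastYear = DateTimeRange.relativeYears(1);
final Query query = new Query("host=web*", lastYear);
final List<Doc> docs = dataStore.search(query); // builds one ClusteredLongList internally
final int hits = dataStore.count(query); // same query path, but no Doc materialization
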
View File

@@ -1,6 +1,6 @@
package org.lucares.pdb.datastore.internal;
-import java.util.List;
+import java.util.Set;
import org.lucares.pdb.api.DateTimeRange;
@@ -13,7 +13,7 @@ public class DateCluster implements ClusterIdSource {
}
@Override
-public List&lt;ClusterId&gt; toClusterIds() {
-return DateIndexExtension.toClusterIds(dateRange);
+public Set&lt;ClusterId&gt; toClusterIds(final Set&lt;? extends ClusterId&gt; availableClusters) {
+return DateIndexExtension.toClusterIds(dateRange, availableClusters);
}
}

View File

@@ -5,6 +5,8 @@ import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map.Entry;
import java.util.Set;
@@ -75,7 +77,14 @@ public class DateIndexExtension {
return result;
}
-public static List&lt;ClusterId&gt; toClusterIds(final DateTimeRange dateRange) {
+/**
+ * Only for tests. Use toClusterIds(final DateTimeRange dateRange, final
+ * Collection&lt;? extends ClusterId&gt; availableClusterIds) instead.
+ *
+ * @param dateRange the range to enumerate
+ * @return one cluster id per month covered by the range
+ */
+static List&lt;ClusterId&gt; toClusterIds(final DateTimeRange dateRange) {
final List<ClusterId> result = new ArrayList<>();
OffsetDateTime current = dateRange.getStart();
@@ -93,6 +102,22 @@ public class DateIndexExtension {
return result;
}
public static Set<ClusterId> toClusterIds(final DateTimeRange dateRange,
final Collection<? extends ClusterId> availableClusterIds) {
final Set<ClusterId> result = new LinkedHashSet<>();
final ClusterId start = toClusterId(dateRange.getStart().toInstant().toEpochMilli());
final ClusterId end = toClusterId(dateRange.getEnd().toInstant().toEpochMilli());
for (final ClusterId clusterId : availableClusterIds) {
if (start.compareTo(clusterId) <= 0 && end.compareTo(clusterId) >= 0) {
result.add(clusterId);
}
}
return result;
}
public static DatePrefixAndRange toDatePrefixAndRange(final long epochMilli) {
final OffsetDateTime date = Instant.ofEpochMilli(epochMilli).atOffset(ZoneOffset.UTC);
final OffsetDateTime beginOfMonth = date.withDayOfMonth(1).withHour(0).withMinute(0).withSecond(0).withNano(0);

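A worked example of the new overload (hypothetical cluster ids; imports omitted; assumes the DateTimeRange(start, end) constructor is accessible, as it is for MAX in the DateTimeRange diff further down):

// Clusters on disk: 201811, 201902, 201903; queried range: 2019-02-01 to 2019-03-15.
final Set<ClusterId> available = new LinkedHashSet<>(Arrays.asList(
ClusterId.of("201811"), ClusterId.of("201902"), ClusterId.of("201903")));
final DateTimeRange febToMar = new DateTimeRange(
OffsetDateTime.of(2019, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC),
OffsetDateTime.of(2019, 3, 15, 0, 0, 0, 0, ZoneOffset.UTC));
// The range bounds map to 201902 and 201903, so 201811 fails the compareTo
// bounds check and is never touched on disk.
final Set<ClusterId> hit = DateIndexExtension.toClusterIds(febToMar, available);
// hit contains exactly 201902 and 201903, in the iteration order of 'available'.
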
View File

@@ -3,16 +3,20 @@ package org.lucares.pdb.datastore.lang;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.lucares.collections.LongList;
import org.lucares.pdb.api.DateTimeRange;
import org.lucares.pdb.api.Tag;
import org.lucares.pdb.blockstorage.LongStreamFile;
import org.lucares.pdb.datastore.internal.ClusterId;
import org.lucares.pdb.datastore.internal.ClusteredDiskStore;
import org.lucares.pdb.datastore.internal.ClusteredLongList;
import org.lucares.pdb.datastore.internal.ClusteredPersistentMap;
import org.lucares.pdb.datastore.internal.DataStore;
import org.lucares.pdb.datastore.internal.DateCluster;
import org.lucares.pdb.datastore.lang.Expression.And;
import org.lucares.pdb.datastore.lang.Expression.Not;
import org.lucares.pdb.datastore.lang.Expression.Or;
@@ -21,31 +25,31 @@ import org.lucares.utils.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class ExpressionToDocIdVisitor extends ExpressionVisitor&lt;LongList&gt; {
+public class ExpressionToDocIdVisitor extends ExpressionVisitor&lt;ClusteredLongList&gt; {
private static final Logger LOGGER = LoggerFactory.getLogger(ExpressionToDocIdVisitor.class);
private final ClusteredPersistentMap<Tag, Long, Long> keyToValueToDocId;
private final ClusteredDiskStore diskStorage;
-private final ClusterId clusterId;
+private final DateCluster dateCluster;
-public ExpressionToDocIdVisitor(final ClusterId clusterId,
+public ExpressionToDocIdVisitor(final DateTimeRange dateRange,
final ClusteredPersistentMap&lt;Tag, Long, Long&gt; keyToValueToDocsId, final ClusteredDiskStore diskStorage) {
-this.clusterId = clusterId;
+this.dateCluster = new DateCluster(dateRange);
this.keyToValueToDocId = keyToValueToDocsId;
this.diskStorage = diskStorage;
}
@Override
-public LongList visit(final And expression) {
+public ClusteredLongList visit(final And expression) {
final Expression left = expression.getLeft();
final Expression right = expression.getRight();
-final LongList leftFiles = left.visit(this);
-final LongList rightFiles = right.visit(this);
+final ClusteredLongList leftFiles = left.visit(this);
+final ClusteredLongList rightFiles = right.visit(this);
final long start = System.nanoTime();
-final LongList result = LongList.intersection(leftFiles, rightFiles);
+final ClusteredLongList result = ClusteredLongList.intersection(leftFiles, rightFiles);
LOGGER.trace("and: {} took {} ms results={}", expression, (System.nanoTime() - start) / 1_000_000.0,
result.size());
assert result.isSorted();
@@ -54,14 +58,14 @@ public class ExpressionToDocIdVisitor extends ExpressionVisitor<LongList> {
}
@Override
-public LongList visit(final Or expression) {
+public ClusteredLongList visit(final Or expression) {
final Expression left = expression.getLeft();
final Expression right = expression.getRight();
-final LongList leftFiles = left.visit(this);
-final LongList rightFiles = right.visit(this);
+final ClusteredLongList leftFiles = left.visit(this);
+final ClusteredLongList rightFiles = right.visit(this);
final long start = System.nanoTime();
-final LongList result = LongList.union(leftFiles, rightFiles);
+final ClusteredLongList result = ClusteredLongList.union(leftFiles, rightFiles);
LOGGER.trace("or: {} took {} ms results={}", expression, (System.nanoTime() - start) / 1_000_000.0,
result.size());
assert result.isSorted();
@@ -70,13 +74,13 @@ public class ExpressionToDocIdVisitor extends ExpressionVisitor<LongList> {
}
@Override
-public LongList visit(final Not expression) {
+public ClusteredLongList visit(final Not expression) {
final Expression negatedExpression = expression.getExpression();
-final LongList docIdsToBeNegated = negatedExpression.visit(this);
+final ClusteredLongList docIdsToBeNegated = negatedExpression.visit(this);
final long start = System.nanoTime();
-final LongList result = getAllDocIds().clone();
+final ClusteredLongList result = getAllDocIds();
result.removeAll(docIdsToBeNegated);
LOGGER.trace("not: {} took {} ms results={}", expression, (System.nanoTime() - start) / 1_000_000.0,
@@ -86,35 +90,34 @@ public class ExpressionToDocIdVisitor extends ExpressionVisitor<LongList> {
}
@Override
-public LongList visit(final Parentheses parentheses) {
+public ClusteredLongList visit(final Parentheses parentheses) {
throw new UnsupportedOperationException(
"Parenthesis not supported. The correct order should come from the parser.");
}
@Override
-public LongList visit(final Expression.MatchAll expression) {
+public ClusteredLongList visit(final Expression.MatchAll expression) {
final long start = System.nanoTime();
-final LongList result = getAllDocIds();
+final ClusteredLongList result = getAllDocIds();
LOGGER.trace("matchAll: {} took {} ms results={}", expression, (System.nanoTime() - start) / 1_000_000.0,
result.size());
return result;
}
@Override
-public LongList visit(final Expression.InExpression expression) {
+public ClusteredLongList visit(final Expression.InExpression expression) {
final long start = System.nanoTime();
final String propertyName = expression.getProperty();
final List<String> values = expression.getValues();
-LongList result = new LongList();
+ClusteredLongList result = new ClusteredLongList();
for (final String value : values) {
-final Collection&lt;LongList&gt; docIds = filterByWildcard(propertyName, GloblikePattern.globlikeToRegex(value));
-final LongList mergedDocIds = merge(docIds);
-result = LongList.union(result, mergedDocIds);
+final ClusteredLongList docIds = filterByWildcard(propertyName, GloblikePattern.globlikeToRegex(value));
+result = ClusteredLongList.union(result, docIds);
}
LOGGER.trace("in: {} took {} ms results={}", expression, (System.nanoTime() - start) / 1_000_000.0,
@@ -122,44 +125,53 @@ public class ExpressionToDocIdVisitor extends ExpressionVisitor<LongList> {
return result;
}
-private LongList getAllDocIds() {
-final Long blockOffset = keyToValueToDocId.getValue(clusterId, DataStore.TAG_ALL_DOCS);
-if (blockOffset != null) {
-final LongStreamFile bsFile = diskStorage.streamExistingFile(blockOffset, clusterId);
-final LongList longList = bsFile.asLongList();
-return longList;
-} else {
-return new LongList(0);
-}
-}
+private ClusteredLongList getAllDocIds() {
+final ClusteredLongList result = new ClusteredLongList();
+final Set&lt;ClusterId&gt; availableClusterIds = keyToValueToDocId.getAvailableClusterIds(dateCluster);
+for (final ClusterId clusterId : availableClusterIds) {
+final Long blockOffset = keyToValueToDocId.getValue(clusterId, DataStore.TAG_ALL_DOCS);
+if (blockOffset != null) {
+final LongStreamFile bsFile = diskStorage.streamExistingFile(blockOffset, clusterId);
+final LongList tmp = bsFile.asLongList();
+result.put(clusterId, tmp);
+}
+}
+return result;
+}
-private List&lt;LongList&gt; filterByWildcard(final String propertyName, final Pattern valuePattern) {
-final List&lt;LongList&gt; result = new ArrayList&lt;&gt;();
-final long start = System.nanoTime();
-keyToValueToDocId.visitValues(clusterId, new Tag(propertyName, ""), (tags, blockOffsetToDocIds) -> {
-if (valuePattern.matcher(tags.getValueAsString()).matches()) {
-try (final LongStreamFile bsFile = diskStorage.streamExistingFile(blockOffsetToDocIds, clusterId)) {
-// We know that all LongLists coming from a BSFile are sorted, non-overlapping
-// and increasing, that means we can just concatenate them and get a sorted
-// list.
-final List&lt;LongList&gt; longLists = bsFile.streamOfLongLists().collect(Collectors.toList());
-final LongList concatenatedLists = concatenateLists(longLists);
-Preconditions.checkTrue(concatenatedLists.isSorted(),
-"The LongLists containing document ids must be sorted, "
-+ "non-overlapping and increasing, so that the concatenation "
-+ "is sorted. This is guaranteed by the fact that document ids "
-+ "are generated in monotonically increasing order.");
-result.add(concatenatedLists);
-}
-}
-});
+private ClusteredLongList filterByWildcard(final String propertyName, final Pattern valuePattern) {
+final ClusteredLongList result = new ClusteredLongList();
+final long start = System.nanoTime();
+final Set&lt;ClusterId&gt; availableClusterIds = keyToValueToDocId.getAvailableClusterIds(dateCluster);
+for (final ClusterId clusterId : availableClusterIds) {
+final List&lt;LongList&gt; docIdsForCluster = new ArrayList&lt;&gt;();
+keyToValueToDocId.visitValues(clusterId, new Tag(propertyName, ""), (tags, blockOffsetToDocIds) -> {
+if (valuePattern.matcher(tags.getValueAsString()).matches()) {
+try (final LongStreamFile bsFile = diskStorage.streamExistingFile(blockOffsetToDocIds, clusterId)) {
+// We know that all LongLists coming from a BSFile are sorted, non-overlapping
+// and increasing, that means we can just concatenate them and get a sorted
+// list.
+final List&lt;LongList&gt; longLists = bsFile.streamOfLongLists().collect(Collectors.toList());
+final LongList concatenatedLists = concatenateLists(longLists);
+Preconditions.checkTrue(concatenatedLists.isSorted(),
+"The LongLists containing document ids must be sorted, "
++ "non-overlapping and increasing, so that the concatenation "
++ "is sorted. This is guaranteed by the fact that document ids "
++ "are generated in monotonically increasing order.");
+docIdsForCluster.add(concatenatedLists);
+}
+}
+});
+final LongList mergedDocsIdsForCluster = merge(docIdsForCluster);
+result.put(clusterId, mergedDocsIdsForCluster);
+}
LOGGER.trace("filterByWildcard: for key {} took {}ms", propertyName, (System.nanoTime() - start) / 1_000_000.0);

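Taken together, an expression tree now evaluates straight to per-cluster doc-id lists in one pass (sketch; dateRange, keyToValueToDocId and diskStorage as in the constructor above, query string hypothetical):

final Expression expression = QueryLanguageParser.parse("host=web* or host=db*");
final ExpressionToDocIdVisitor visitor = new ExpressionToDocIdVisitor(dateRange, keyToValueToDocId, diskStorage);
final ClusteredLongList docIds = expression.visit(visitor);
// 'or' unions per cluster, 'and' intersects per cluster, 'not' subtracts from
// getAllDocIds(); clusters outside dateRange are never consulted at all.
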
View File

@@ -312,13 +312,14 @@ public class DataStoreTest {
Assert.assertFalse(result.isEmpty(), "The query '" + query + "' must return a result, but didn't.");
}
-private void assertSearch(final DateTimeRange dateRange, final String query, final Tags... tags) {
-final List&lt;Doc&gt; actualDocs = dataStore.search(new Query(query, dateRange));
+private void assertSearch(final DateTimeRange dateRange, final String queryString, final Tags... tags) {
+final Query query = new Query(queryString, dateRange);
+final List&lt;Doc&gt; actualDocs = dataStore.search(query);
final List<Long> actual = CollectionUtils.map(actualDocs, Doc::getRootBlockNumber);
final List<Long> expectedPaths = CollectionUtils.map(tags, tagsToBlockStorageRootBlockNumber::get);
Assert.assertEquals(actual, expectedPaths, "Query: " + query + " Found: " + actual);
Assert.assertEquals(actual, expectedPaths, "Query: " + queryString + " Found: " + actual);
}
}

View File

@@ -2,11 +2,16 @@ package org.lucares.pdb.api;
import java.time.Duration;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.time.temporal.TemporalUnit;
public class DateTimeRange {
private static final DateTimeRange MAX = new DateTimeRange(
OffsetDateTime.of(1900, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC),
OffsetDateTime.of(2100, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC));
private final OffsetDateTime start;
private final OffsetDateTime end;
@@ -15,6 +20,10 @@ public class DateTimeRange {
this.end = end;
}
public static DateTimeRange max() {
return MAX;
}
public static DateTimeRange now() {
return relativeMillis(0);
}
@@ -101,5 +110,4 @@ public class DateTimeRange {
|| timeRange.inRange(start)//
|| timeRange.inRange(end);
}
}

View File

@@ -236,9 +236,7 @@ public class PdbController implements HardcodedValues, PropertyKeys {
)
@ResponseBody
List<String> fields() {
-// TODO get date range from UI
-// TODO time range must not be static
-final DateTimeRange dateTimeRange = DateTimeRange.relativeYears(5);
+final DateTimeRange dateTimeRange = DateTimeRange.max();
final List<String> fields = db.getFields(dateTimeRange);
fields.sort(Collator.getInstance(Locale.ENGLISH));
@@ -255,9 +253,7 @@ public class PdbController implements HardcodedValues, PropertyKeys {
SortedSet<String> fields(@PathVariable(name = "fieldName") final String fieldName,
@RequestParam(name = "query") final String query) {
-// TODO get date range from UI
-// TODO time range must not be static
-final DateTimeRange dateRange = DateTimeRange.relativeYears(5);
+final DateTimeRange dateRange = DateTimeRange.max();
final Query q = new Query(query, dateRange);
final SortedSet<String> fields = db.getFieldsValues(q, fieldName);