add flag to make CSV upload wait until entries are flushed
To make it easier to write stable unit tests, the CSV upload can now optionally wait until all entries have been flushed to disk. This is necessary for tests that ingest data and then immediately read it back.
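The wait is requested through a query parameter on the upload endpoint (the test further down posts to /data?waitUntilFinished=true and expects HTTP 201 CREATED). A minimal sketch of how such a flag could be wired into the upload controller; the /data path and the parameter name come from this diff, while CsvUploadController, CsvImporter, the "file" part name, awaitFlush(...) and the 30-second timeout are hypothetical stand-ins for the real ingest code:

import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;

// Sketch only: CsvImporter and awaitFlush(...) are illustrative, not the project's actual API.
interface CsvImporter {
    void ingest(InputStream csv) throws IOException;
    void awaitFlush(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException;
}

@RestController
class CsvUploadController {

    private final CsvImporter importer;

    CsvUploadController(final CsvImporter importer) {
        this.importer = importer;
    }

    @PostMapping("/data")
    public ResponseEntity<String> upload(
            @RequestParam("file") final MultipartFile file,
            @RequestParam(name = "waitUntilFinished", defaultValue = "false") final boolean waitUntilFinished)
            throws IOException, InterruptedException, TimeoutException {

        // Hand the CSV to the (asynchronous) ingest pipeline.
        importer.ingest(file.getInputStream());

        if (waitUntilFinished) {
            // Optionally block until every entry has been flushed to disk,
            // so a test can read the data back immediately after the upload.
            importer.awaitFlush(30, TimeUnit.SECONDS);
        }
        return ResponseEntity.status(HttpStatus.CREATED).build();
    }
}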
@@ -10,8 +10,10 @@ import java.time.format.DateTimeFormatter;
 import java.time.temporal.ChronoUnit;
 import java.util.List;
 import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.TimeoutException;
 
 import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.lucares.collections.LongList;
@@ -19,7 +21,6 @@ import org.lucares.pdb.api.DateTimeRange;
 import org.lucares.pdb.api.Entries;
 import org.lucares.pdb.api.Query;
 import org.lucares.performance.db.PerformanceDb;
-import org.junit.jupiter.api.Assertions;
 import org.lucares.utils.file.FileUtils;
 
 public class CsvToEntryTransformerTest {
@@ -37,7 +38,7 @@ public class CsvToEntryTransformerTest {
     }
 
     @Test
-    public void testIngest() throws IOException, InterruptedException {
+    public void testIngest() throws IOException, InterruptedException, TimeoutException {
         final OffsetDateTime dateA = OffsetDateTime.now();
         final OffsetDateTime dateB = OffsetDateTime.now();
 
@@ -75,9 +76,10 @@ public class CsvToEntryTransformerTest {
     *
     * @throws IOException
     * @throws InterruptedException
+    * @throws TimeoutException
     */
    @Test
-   public void testIgnoreColumns() throws IOException, InterruptedException {
+   public void testIgnoreColumns() throws IOException, InterruptedException, TimeoutException {
 
        try (final PerformanceDb db = new PerformanceDb(dataDirectory)) {
 
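The widened throws clauses above, together with the ArrayBlockingQueue import, suggest the tests now block on a bounded wait instead of sleeping for a fixed interval (the one-second sleep removed from PdbControllerTest below). A rough illustration of that pattern; registerFlushListener and ingest are hypothetical hooks, not the project's real API:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;

// Sketch only: the project's actual flush-notification mechanism is not visible in this diff.
final class FlushAwaitExample {

    static void ingestAndWait(final Runnable ingest, final Consumer<Runnable> registerFlushListener)
            throws InterruptedException, TimeoutException {
        final ArrayBlockingQueue<Boolean> flushed = new ArrayBlockingQueue<>(1);

        // Signal the queue once all entries have been written to disk.
        registerFlushListener.accept(() -> flushed.offer(Boolean.TRUE));

        // Trigger the CSV ingest.
        ingest.run();

        // Bounded, deterministic wait instead of a fixed sleep.
        if (flushed.poll(30, TimeUnit.SECONDS) == null) {
            throw new TimeoutException("entries were not flushed within 30 seconds");
        }
    }
}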
@@ -4,10 +4,10 @@ import java.time.OffsetDateTime;
 import java.time.format.DateTimeFormatter;
 import java.time.temporal.ChronoUnit;
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.core.config.Configurator;
+import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
 import org.lucares.collections.LongList;
@@ -15,7 +15,6 @@ import org.lucares.pdb.api.DateTimeRange;
 import org.lucares.pdb.api.GroupResult;
 import org.lucares.pdb.api.Query;
 import org.lucares.performance.db.PerformanceDb;
-import org.junit.jupiter.api.Assertions;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.boot.test.context.SpringBootTest;
 import org.springframework.boot.test.context.SpringBootTest.WebEnvironment;
@@ -60,7 +59,6 @@ public class PdbControllerTest {
 
         final CsvReaderSettings settings = CsvReaderSettings.create(timeColumn, ',', ignoredColumn);
         uploadCsv(settings, csv);
-        TimeUnit.SECONDS.sleep(1);
         {
             final GroupResult groupResult = performanceDb.get(new Query("tag=tagValue", DateTimeRange.ofDay(dateA)))
                     .singleGroup();
@@ -96,7 +94,8 @@ public class PdbControllerTest {
         final HttpEntity<MultiValueMap<String, Object>> entity = new HttpEntity<MultiValueMap<String, Object>>(
                 parameters, headers);
 
-        final ResponseEntity<String> response = rest.exchange("/data", HttpMethod.POST, entity, String.class);
+        final ResponseEntity<String> response = rest.exchange("/data?waitUntilFinished=true", HttpMethod.POST, entity,
+                String.class);
 
         Assertions.assertEquals(response.getStatusCode(), HttpStatus.CREATED, "response status");
     }