Handle corrupt JSON

Entries must be separated by a newline. This allows
us to handle corrupt JSON entries, because we know
that entries always start at the beginning of a line.
This commit is contained in:
ahr
2018-03-03 09:58:50 +01:00
parent 9d4eb660a5
commit 5a9aae70af
3 changed files with 137 additions and 109 deletions

View File

@@ -34,8 +34,8 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
@@ -56,8 +56,6 @@ public class TcpIngestor implements Ingestor, AutoCloseable, DisposableBean {
public final static class Handler implements Callable<Void> {
private final ObjectMapper objectMapper = new ObjectMapper();
private final TypeReference<Map<String, Object>> typeReferenceForMap = new TypeReference<Map<String, Object>>() {
};
@@ -76,41 +74,41 @@ public class TcpIngestor implements Ingestor, AutoCloseable, DisposableBean {
LOGGER.debug("opening streams to client");
try (PrintWriter out = new PrintWriter(clientSocket.getOutputStream(), true);
BufferedReader in = new BufferedReader(new InputStreamReader(clientSocket.getInputStream()));
) {
final ObjectMapper objectMapper = new ObjectMapper();
final ObjectReader objectReader = objectMapper.readerFor(typeReferenceForMap);
final MappingIterator<Object> iterator = objectReader.readValues(in);
double duration = 0.0;
int count = 0;
LOGGER.debug("reading from stream");
while (iterator.hasNext()) {
String line;
while ((line = in.readLine()) != null) {
final long start = System.nanoTime();
@SuppressWarnings("unchecked")
final Map<String, Object> object = (Map<String, Object>) iterator.next();
try {
final Map<String, Object> object = objectReader.readValue(line);
final Optional<Entry> entry = createEntry(object);
final long end = System.nanoTime();
duration += (end - start) / 1_000_000.0;
final Optional<Entry> entry = createEntry(object);
final long end = System.nanoTime();
duration += (end - start) / 1_000_000.0;
count++;
if (count == 100000) {
METRICS_LOGGER.debug("reading {} took {} ms", count, duration);
duration = 0.0;
count = 0;
count++;
if (count == 100000) {
METRICS_LOGGER.debug("reading {} took {} ms", count, duration);
duration = 0.0;
count = 0;
}
if (entry.isPresent()) {
LOGGER.debug("adding entry to queue: {}", entry);
queue.put(entry.get());
}
} catch (JsonParseException e) {
LOGGER.info("json parse error in line '" + line + "'", e);
}
if (entry.isPresent()) {
LOGGER.debug("adding entry to queue: {}", entry);
queue.put(entry.get());
}
}
LOGGER.debug("connection closed: " + clientAddress);
}
catch (Exception e)
{
} catch (Exception e) {
LOGGER.warn("Stream handling failed", e);
throw e;
}
@@ -121,8 +119,7 @@ public class TcpIngestor implements Ingestor, AutoCloseable, DisposableBean {
public Optional<Entry> createEntry(final Map<String, Object> map) {
try {
if (map.containsKey("duration")
&& map.containsKey("@timestamp")) {
if (map.containsKey("duration") && map.containsKey("@timestamp")) {
final OffsetDateTime date = getDate(map);
final long duration = (int) map.get("duration");