move TcpIngestor to pdb-ui
and start it in the web application. Also use the Spring way of handling property files.
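
A minimal sketch of what that could look like in pdb-ui, assuming a pdb.data-dir entry in application.properties and a hypothetical TcpIngestorLifecycle bean (all names are illustrative, not taken from this commit; the import still uses the package of the file deleted below):

    // application.properties (assumed property name):
    // pdb.data-dir=/var/lib/pdb

    package org.lucares.pdb.ui; // hypothetical pdb-ui package

    import java.nio.file.Paths;

    import javax.annotation.PostConstruct;
    import javax.annotation.PreDestroy;

    import org.lucares.recommind.logs.TcpIngestor;
    import org.springframework.beans.factory.annotation.Value;
    import org.springframework.stereotype.Component;

    @Component
    public class TcpIngestorLifecycle {

        // resolved by Spring from application.properties
        @Value("${pdb.data-dir}")
        private String dataDir;

        private TcpIngestor ingestor;

        @PostConstruct
        public void start() throws Exception {
            ingestor = new TcpIngestor(Paths.get(dataDir));
            ingestor.start();
        }

        @PreDestroy
        public void stop() {
            if (ingestor != null) {
                ingestor.close();
            }
        }
    }
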
This commit is contained in:
@@ -1,258 +0,0 @@
package org.lucares.recommind.logs;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import java.nio.file.Path;
import java.time.OffsetDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import org.lucares.pdb.api.Entry;
import org.lucares.pdb.api.Tags;
import org.lucares.performance.db.BlockingQueueIterator;
import org.lucares.performance.db.PerformanceDb;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;

/**
 * Accepts TCP connections on {@link #PORT}, reads a stream of JSON objects
 * from each connection, converts them to {@link Entry} instances and hands
 * them to the {@link PerformanceDb} through a blocking queue.
 */
public class TcpIngestor implements AutoCloseable {

    private static final Logger LOGGER = LoggerFactory.getLogger(TcpIngestor.class);
    private static final Logger METRICS_LOGGER = LoggerFactory.getLogger("org.lucares.metrics.tcpIngestor");

    public static final int PORT = 17347;

    private final AtomicBoolean acceptNewConnections = new AtomicBoolean(true);

    private final ExecutorService serverThreadPool = Executors.newFixedThreadPool(2);

    private final ExecutorService workerThreadPool = Executors.newCachedThreadPool();

    private final PerformanceDb db;

    public final static class Handler implements Callable<Void> {

        private final ObjectMapper objectMapper = new ObjectMapper();

        private final TypeReference<Map<String, Object>> typeReferenceForMap = new TypeReference<Map<String, Object>>() {
        };

        final Socket clientSocket;
        private final ArrayBlockingQueue<Entry> queue;

        public Handler(final Socket clientSocket, final ArrayBlockingQueue<Entry> queue) {
            this.clientSocket = clientSocket;
            this.queue = queue;
        }

        @Override
        public Void call() throws Exception {
            final SocketAddress clientAddress = clientSocket.getRemoteSocketAddress();
            Thread.currentThread().setName("worker-" + clientAddress);
            LOGGER.debug("opening streams to client");
            try (PrintWriter out = new PrintWriter(clientSocket.getOutputStream(), true);
                    BufferedReader in = new BufferedReader(new InputStreamReader(clientSocket.getInputStream()))) {

                final ObjectReader objectReader = objectMapper.readerFor(typeReferenceForMap);
                final MappingIterator<Object> iterator = objectReader.readValues(in);

                double duration = 0.0;
                int count = 0;
                LOGGER.debug("reading from stream");
                while (iterator.hasNext()) {

                    final long start = System.nanoTime();
                    @SuppressWarnings("unchecked")
                    final Map<String, Object> object = (Map<String, Object>) iterator.next();

                    final Optional<Entry> entry = createEntry(object);
                    final long end = System.nanoTime();
                    duration += (end - start) / 1_000_000.0;

                    count++;
                    if (count == 100000) {
                        METRICS_LOGGER.debug("reading {} took {} ms", count, duration);
                        duration = 0.0;
                        count = 0;
                    }

                    if (entry.isPresent()) {
                        LOGGER.trace("adding entry to queue: {}", entry);
                        queue.put(entry.get());
                    }
                }
                LOGGER.debug("connection closed: " + clientAddress);
            }

            return null;
        }

        public Optional<Entry> createEntry(final Map<String, Object> map) {
            try {
                final OffsetDateTime date = getDate(map);
                // Jackson binds JSON numbers to Integer or Long depending on magnitude,
                // so go through Number instead of casting to int
                final long duration = ((Number) map.get("duration")).longValue();

                final Tags tags = createTags(map);

                final Entry entry = new Entry(date, duration, tags);
                return Optional.of(entry);
            } catch (final Exception e) {
                return Optional.empty();
            }
        }

        private Tags createTags(final Map<String, Object> map) {
            Tags tags = Tags.create();
            for (final java.util.Map.Entry<String, Object> e : map.entrySet()) {

                final String key = e.getKey();
                final Object value = e.getValue();

                switch (key) {
                case "@timestamp":
                case "duration":
                    // these fields are not tags
                    break;
                case "tags":
                    // TODO @ahr add support for simple tags, currently we
                    // only support key/value tags
                    break;
                default:
                    tags = tags.copyAddIfNotNull(key, String.valueOf(value));
                    break;
                }
            }
            return tags;
        }

        private OffsetDateTime getDate(final Map<String, Object> map) {
            final String timestamp = (String) map.get("@timestamp");

            final OffsetDateTime date = OffsetDateTime.parse(timestamp, DateTimeFormatter.ISO_ZONED_DATE_TIME);
            return date;
        }
    }

    public TcpIngestor(final Path dataDirectory) {
        LOGGER.info("opening performance db: " + dataDirectory);
        db = new PerformanceDb(dataDirectory);
        LOGGER.debug("performance db open");
    }

    public void start() throws Exception {

        final ArrayBlockingQueue<Entry> queue = new ArrayBlockingQueue<>(1);

        serverThreadPool.submit(() -> {
            Thread.currentThread().setName("db-ingestion");

            boolean finished = false;
            while (!finished) {
                try {
                    db.put(new BlockingQueueIterator<>(queue, Entry.POISON));
                    finished = true;
                } catch (final Exception e) {
                    LOGGER.warn("Write to database failed. Will retry with the next element.", e);
                }
            }
            return null;
        });

        serverThreadPool.submit(() -> listen(queue));
    }

    private Void listen(final ArrayBlockingQueue<Entry> queue) throws IOException {
        Thread.currentThread().setName("socket-listener");
        try (ServerSocket serverSocket = new ServerSocket(
                PORT /* , 10, InetAddress.getLocalHost() */)) {
            LOGGER.info("listening on port " + PORT);

            serverSocket.setSoTimeout((int) TimeUnit.MILLISECONDS.toMillis(100));

            while (acceptNewConnections.get()) {
                try {
                    final Socket clientSocket = serverSocket.accept();
                    LOGGER.debug("accepted connection: " + clientSocket.getRemoteSocketAddress());

                    workerThreadPool.submit(new Handler(clientSocket, queue));
                    LOGGER.debug("handler submitted");
                } catch (final SocketTimeoutException e) {
                    // expected every 100ms
                    // needed to be able to stop the server
                } catch (final Exception e) {
                    LOGGER.warn("Exception caught while waiting for a new connection. "
                            + "We'll ignore this error and keep going.", e);
                }
            }
            LOGGER.info("not accepting new connections.");

            LOGGER.info("stopping worker pool");
            workerThreadPool.shutdown();
            try {
                workerThreadPool.awaitTermination(10, TimeUnit.MINUTES);
                LOGGER.debug("workers stopped");
            } catch (final InterruptedException e) {
                // restore the interrupt flag instead of clearing it
                Thread.currentThread().interrupt();
            }
LOGGER.debug("adding poison");
|
||||
queue.put(Entry.POISON);
|
||||
        } catch (final InterruptedException e) {
            LOGGER.info("Listener thread interrupted. Likely while adding the poison. "
                    + "That would mean that the db-ingestion thread will not terminate.");
            // restore the interrupt flag instead of clearing it
            Thread.currentThread().interrupt();
        } catch (final Exception e) {
            LOGGER.error("", e);
            throw e;
        }
        return null;
    }

    @Override
    public void close() {
        LOGGER.debug("stopping accept thread");
        acceptNewConnections.set(false);
        serverThreadPool.shutdown();
        try {
            serverThreadPool.awaitTermination(10, TimeUnit.MINUTES);
        } catch (final InterruptedException e) {
            // restore the interrupt flag instead of clearing it
            Thread.currentThread().interrupt();
        }
LOGGER.debug("closing database");
|
||||
db.close();
|
||||
LOGGER.debug("close done");
|
||||
}
|
||||
|
||||
public static void main(final String[] args) throws Exception {
|
||||
|
||||
Runtime.getRuntime().addShutdownHook(new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
LOGGER.info("shutdown hook");
|
||||
}
|
||||
});
|
||||
|
||||
try (final TcpIngestor ingestor = new TcpIngestor(Config.DATA_DIR)) {
|
||||
ingestor.start();
|
||||
TimeUnit.MILLISECONDS.sleep(Long.MAX_VALUE);
|
||||
}
|
||||
}
|
||||
}
|
||||
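
For reference, the handler above consumes a stream of concatenated JSON objects per connection; everything except @timestamp, duration and tags becomes a key/value tag. A hypothetical smoke-test client (class name, host and payload values are illustrative, not part of this commit) could look like this:

    import java.io.OutputStreamWriter;
    import java.io.Writer;
    import java.net.Socket;
    import java.nio.charset.StandardCharsets;

    public class TcpIngestorSmokeTest {
        public static void main(final String[] args) throws Exception {
            // connect to a locally running TcpIngestor (port 17347) and send a single record
            try (Socket socket = new Socket("localhost", 17347);
                    Writer out = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)) {
                // "host" ends up as a key/value tag, "@timestamp" and "duration" feed the Entry itself
                out.write("{\"@timestamp\":\"2019-03-07T10:15:30+01:00\",\"duration\":42,\"host\":\"web-01\"}");
                out.flush();
            }
        }
    }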