diff --git a/docs/DataDogAdapter.md b/docs/DataDogAdapter.md new file mode 100644 index 000000000..41c5fc2ba --- /dev/null +++ b/docs/DataDogAdapter.md @@ -0,0 +1,66 @@ +# Overview +The Wavefront proxy accepts data in a number of formats. One of those formats is the JSON format created by the DataDog agent as well as the DogStatsD format. This document outlines the how the Wavefront proxy can be configured to accept metrics coming from the DataDog agent or DogStatsD programs. + +# Configuration Options +## Send metrics to Wavefront only + +![Metrics sent directly to Wavefront](direct_to_wf.png) + +## Send metrics to both DataDog and Wavefront + +![Duplicate traffic to both DataDog and Wavefront](with_duplicating_proxy.png) + +# Install and Run Wavefront Proxy +1. Install the Wavefront proxy with this command line: + +```$ sudo bash -c "$(curl -sL https://goo.gl/c70QCx)"``` +Follow the prompts to install the proxy on the box you are setting up. Your Wavefront URL is in the form of: https://{instance name}.wavefront.com/api/. The API token can be obtained from the bottom of this page: https://{instance name}.wavefront.com/settings/profile. + +2. (Optional) If you are running the Wavefront proxy on the same machine as you are running the DataDog agent, then you’ll need to add/update 2 configuration options. +``` +$ sudo vi /opt/wavefront/wavefront-proxy/conf/wavefront.conf +``` +Add (to the bottom of the file): +``` +dogstatsdPorts=9125 +datadogPorts=9126 +``` + +3. (Re)start the Wavefront Proxy +``` +$ sudo service wavefront-proxy restart +``` + +# (Optional) Install a HTTP duplicator (to send to both DataDog and to Wavefront). +*If you would like to send your data to both DataDog and Wavefront during your PoC, please complete the steps in this section. If you are not sending your metrics to both servers, then you can skip this section.* + +1. We’ve tested internally with teeproxy (an open source Go script), but there are other solutions out there. 
teeproxy is available from https://github.com/chrislusf/teeproxy. + +``` +$ git clone https://github.com/chrislusf/teeproxy.git +$ cd teeproxy; go build +``` +The teeproxy has this usage: +``` +./teeproxy -debug=true -l :<port to listen on> -a <host:port of server a> -b <host:port of server b> +``` +An example: +``` +$ ./teeproxy -l :<port to listen on> -a <host:port of DataDog> -b <wavefront proxy host>:8126 +(Example: ./teeproxy -l :8090 -a localhost:8087 -b localhost:8126) +``` + +The results from server “b” are ignored and the results from server “a” will be returned to the DD agent. The Wavefront proxy is listening on port 8126 for DataDog agent requests. +***NOTE:** If you added the 2 configuration items in step #2 of the previous section, then you’ll need to change this to port 9126 (or whatever port you entered in the configuration file).* + + +# Update DataDog Agent Configuration + +1. Update the **dd_url** in the **datadog.conf** configuration file to point to one of: +* the teeproxy listener (in the example above it’s http://localhost:{PORT TO LISTEN ON}) +* the Wavefront proxy (the default is http://localhost:8126) + +2. Restart the DataDog supervisor/agent process. +3. Test to make sure everything is working. 
+ + diff --git a/docs/direct_to_wf.png b/docs/direct_to_wf.png new file mode 100644 index 000000000..07a568c09 Binary files /dev/null and b/docs/direct_to_wf.png differ diff --git a/docs/with_duplicating_proxy.png b/docs/with_duplicating_proxy.png new file mode 100644 index 000000000..e9d32359b Binary files /dev/null and b/docs/with_duplicating_proxy.png differ diff --git a/java-lib/pom.xml b/java-lib/pom.xml index 1255155ae..1931e057a 100644 --- a/java-lib/pom.xml +++ b/java-lib/pom.xml @@ -84,6 +84,11 @@ io.netty netty-handler + + io.netty + netty-codec-http + 4.0.10.Final + org.antlr antlr4-runtime @@ -135,4 +140,4 @@ - \ No newline at end of file + diff --git a/java-lib/src/main/java/com/wavefront/ingester/DogStatsDDecoder.java b/java-lib/src/main/java/com/wavefront/ingester/DogStatsDDecoder.java new file mode 100644 index 000000000..ffc96986b --- /dev/null +++ b/java-lib/src/main/java/com/wavefront/ingester/DogStatsDDecoder.java @@ -0,0 +1,81 @@ +package com.wavefront.ingester; + +import java.util.logging.Logger; +import java.util.logging.Level; +import java.util.Iterator; +import java.util.List; +import java.util.HashMap; +import java.util.Map; + +import sunnylabs.report.ReportPoint; + +/** + * DogStatsD decoder that takes a string in this format: + * metric.name:value|type|@sample_rate|#tag1:value,tag2 + */ +public class DogStatsDDecoder implements Decoder { + private static final Logger LOG = Logger.getLogger( + DogStatsDDecoder.class.getCanonicalName()); + + /** + * {@inheritDoc} + */ + @Override + public void decodeReportPoints(String msg, List out, String customerId) { + final Map annotations = new HashMap<>(); + final String[] name_metadata = msg.split(":", 2); + if (name_metadata.length != 2) { + // not a valid message + LOG.warning("Unsupported DogStatsD format: '" + msg + "'"); + return; + } + // NOTE: split() takes a regex; '|' must be escaped or it splits on every character + final String[] parts = name_metadata[1].split("\\|"); + if (parts.length <= 1) { + LOG.warning("Unsupported DogStatsD message: '" + msg + "'"); + return; + } + 
if (parts[1].charAt(0) != 'g' && parts[1].charAt(0) != 'c') { + LOG.warning("Skipping DogStatsD metric type: '" + parts[1] + "' (" + msg + ")"); + return; + } + + // tags (if present) are the 4th pipe-delimited field; require length > 3 before indexing parts[3] + if (parts.length > 3 && parts[3].charAt(0) == '#') { + for (int i = 3; i < parts.length; i++) { + final String[] tag = parts[i].split(":"); + if (tag.length == 2) { + annotations.put(tag[0], tag[1]); + } + } + } + + out.add(ReportPoint.newBuilder() + .setAnnotations(annotations) + .setMetric(name_metadata[0]) + .setValue(parts[0]) + .setTable("datadog") // TODO: what is table? + .setHost(getHostName()).build()); + LOG.warning(out.get(0).toString()); + } + + /** + * {@inheritDoc} + */ + @Override + public void decodeReportPoints(String msg, List out) { + throw new IllegalStateException("No customer ID set for dogstatsd format"); + } + + /** + * Gets the hostname (assumes windows or unix). This code was lifted from + * this SO question: + * http://stackoverflow.com/a/17958246 + */ + private String getHostName() { + if (System.getProperty("os.name").startsWith("Windows")) { + // Windows will always set the 'COMPUTERNAME' variable + return System.getenv("COMPUTERNAME"); + } else { + return System.getenv("HOSTNAME"); + } + } +} diff --git a/java-lib/src/main/java/com/wavefront/ingester/Ingester.java b/java-lib/src/main/java/com/wavefront/ingester/Ingester.java index d9ac413d9..5669da1b4 100644 --- a/java-lib/src/main/java/com/wavefront/ingester/Ingester.java +++ b/java-lib/src/main/java/com/wavefront/ingester/Ingester.java @@ -9,7 +9,10 @@ import javax.annotation.Nullable; +import io.netty.bootstrap.AbstractBootstrap; import io.netty.bootstrap.ServerBootstrap; + +import io.netty.channel.Channel; import io.netty.channel.ChannelDuplexHandler; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandler; @@ -18,8 +21,6 @@ import io.netty.channel.ChannelOption; import io.netty.channel.ChannelPipeline; import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.SocketChannel; 
-import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.handler.codec.LineBasedFrameDecoder; import io.netty.handler.codec.string.StringDecoder; import io.netty.handler.timeout.IdleState; @@ -31,18 +32,34 @@ * * @author Clement Pang (clement@wavefront.com). */ -public class Ingester implements Runnable { - - private static final Logger logger = Logger.getLogger(Ingester.class.getCanonicalName()); +public abstract class Ingester implements Runnable { + private static final Logger logger = + Logger.getLogger(Ingester.class.getCanonicalName()); - private static final int CHANNEL_IDLE_TIMEOUT_IN_SECS = (int) TimeUnit.DAYS.toSeconds(1); + /** + * Default number of seconds before the channel idle timeout handler + * closes the connection. + */ + private static final int CHANNEL_IDLE_TIMEOUT_IN_SECS_DEFAULT = + (int)TimeUnit.DAYS.toSeconds(1); + /** + * Additional decoders to add to the pipeline + */ @Nullable - private final List> decoders; - private final ChannelHandler commandHandler; - private final int listeningPort; + private final List> decoders; + + /** + * The ChannelHandler that is handling the message + */ + protected final ChannelHandler commandHandler; + + /** + * The port that this ingester should be listening on + */ + protected final int listeningPort; - public Ingester(List> decoders, + public Ingester(List> decoders, ChannelHandler commandHandler, int port) { this.listeningPort = port; this.commandHandler = commandHandler; @@ -55,52 +72,41 @@ public Ingester(ChannelHandler commandHandler, int port) { this.decoders = null; } - public void run() { - // Configure the server. 
- ServerBootstrap b = new ServerBootstrap(); - try { - b.group(new NioEventLoopGroup(), new NioEventLoopGroup()) - .channel(NioServerSocketChannel.class) - .option(ChannelOption.SO_BACKLOG, 100) - .localAddress(listeningPort) - .childHandler(new ChannelInitializer() { - @Override - public void initChannel(SocketChannel ch) throws Exception { - ChannelPipeline pipeline = ch.pipeline(); - pipeline.addLast(new LineBasedFrameDecoder(4096, true, true)); - pipeline.addLast(new StringDecoder(Charsets.UTF_8)); - if (decoders != null) { - for (Function handler : decoders) { - pipeline.addLast(handler.apply(ch)); - } - } - // Shared across all reports for proper batching - pipeline.addLast("idleStateHandler", new IdleStateHandler(CHANNEL_IDLE_TIMEOUT_IN_SECS, - 0, 0)); - pipeline.addLast("idleChannelTerminator", new ChannelDuplexHandler() { - @Override - public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { - if (evt instanceof IdleStateEvent) { - if (((IdleStateEvent) evt).state() == IdleState.READER_IDLE) { - logger.warning("terminating connection to graphite client due to inactivity after " + - CHANNEL_IDLE_TIMEOUT_IN_SECS + "s: " + ctx.channel()); - ctx.close(); - } - } - } - }); - pipeline.addLast(commandHandler); + /** + * Adds an idle timeout handler to the given pipeline + * @param pipeline the pipeline to add the idle timeout handler + */ + protected void addIdleTimeoutHandler(final ChannelPipeline pipeline) { + // Shared across all reports for proper batching + pipeline.addLast("idleStateHandler", + new IdleStateHandler(CHANNEL_IDLE_TIMEOUT_IN_SECS_DEFAULT, + 0, 0)); + pipeline.addLast("idleChannelTerminator", new ChannelDuplexHandler() { + @Override + public void userEventTriggered(ChannelHandlerContext ctx, + Object evt) throws Exception { + if (evt instanceof IdleStateEvent) { + if (((IdleStateEvent) evt).state() == IdleState.READER_IDLE) { + logger.warning("terminating connection to graphite client due to inactivity after " + 
CHANNEL_IDLE_TIMEOUT_IN_SECS_DEFAULT + "s: " + ctx.channel()); + ctx.close(); } - }); - - // Start the server. - ChannelFuture f = b.bind().sync(); + } + } + }); + } - // Wait until the server socket is closed. - f.channel().closeFuture().sync(); - } catch (InterruptedException e) { - // Server was interrupted - e.printStackTrace(); + /** + * Adds additional decoders passed in during construction of this object + * (if not null). + * @param ch the channel and pipeline to add these decoders to + */ + protected void addDecoders(final Channel ch) { + if (decoders != null) { + ChannelPipeline pipeline = ch.pipeline(); + for (Function handler : decoders) { + pipeline.addLast(handler.apply(ch)); + } } } + } diff --git a/java-lib/src/main/java/com/wavefront/ingester/OpenTSDBDecoder.java b/java-lib/src/main/java/com/wavefront/ingester/OpenTSDBDecoder.java index eab2f734e..ad8aff5c6 100644 --- a/java-lib/src/main/java/com/wavefront/ingester/OpenTSDBDecoder.java +++ b/java-lib/src/main/java/com/wavefront/ingester/OpenTSDBDecoder.java @@ -3,6 +3,8 @@ import com.google.common.base.Preconditions; import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; import sunnylabs.report.ReportPoint; @@ -14,6 +16,7 @@ * @author Clement Pang (clement@wavefront.com). 
*/ public class OpenTSDBDecoder implements Decoder { + protected static final Logger logger = Logger.getLogger("OpenTSDBDecoder"); private final String hostName; private static final IngesterFormatter FORMAT = IngesterFormatter.newBuilder().whiteSpace() @@ -39,6 +42,7 @@ public OpenTSDBDecoder(String hostName, List customSourceTags) { @Override public void decodeReportPoints(String msg, List out, String customerId) { + logger.fine("Decoding OpenTSDB point " + msg); ReportPoint point = FORMAT.drive(msg, hostName, customerId, customSourceTags); if (out != null) { out.add(point); @@ -47,9 +51,15 @@ public void decodeReportPoints(String msg, List out, String custome @Override public void decodeReportPoints(String msg, List out) { + logger.fine("Decoding OpenTSDB point " + msg); ReportPoint point = FORMAT.drive(msg, hostName, "dummy", customSourceTags); if (out != null) { out.add(point); } } + + @Override + public String toString() { + return "Open TSDB Decoder"; + } } diff --git a/java-lib/src/main/java/com/wavefront/ingester/StringLineIngester.java b/java-lib/src/main/java/com/wavefront/ingester/StringLineIngester.java new file mode 100644 index 000000000..b93c9e846 --- /dev/null +++ b/java-lib/src/main/java/com/wavefront/ingester/StringLineIngester.java @@ -0,0 +1,58 @@ +package com.wavefront.ingester; + +import com.google.common.base.Charsets; +import com.google.common.base.Function; + +import java.util.ArrayList; +import java.util.List; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.handler.codec.LineBasedFrameDecoder; +import io.netty.handler.codec.string.StringDecoder; + +/** + * Default Ingester thread that sets up decoders and a command handler to listen for metrics that are string formatted lines on a port. + * + * @author Clement Pang (clement@wavefront.com). 
+ */ +public class StringLineIngester extends TcpIngester { + + public StringLineIngester(List> decoders, + ChannelHandler commandHandler, int port) { + super(createDecoderList(decoders), commandHandler, port); + } + + public StringLineIngester(ChannelHandler commandHandler, int port) { + super(createDecoderList(null), commandHandler, port); + } + + /** + * Returns a copy of the given list plus inserts the 2 decoders needed for + * this specific ingester (LineBasedFrameDecoder and StringDecoder) + * @param decoders the starting list + * @return copy of the provided list with additional decodiers prepended + */ + private static List> createDecoderList(final List> decoders) { + final List> copy; + if (decoders == null) { + copy = new ArrayList<>(); + } else { + copy = new ArrayList<>(decoders); + } + copy.add(0, new Function() { + @Override + public ChannelHandler apply(Channel input) { + return new LineBasedFrameDecoder(4096, true, true); + } + }); + copy.add(1, new Function() { + @Override + public ChannelHandler apply(Channel input) { + return new StringDecoder(Charsets.UTF_8); + } + }); + + return copy; + } +} diff --git a/java-lib/src/main/java/com/wavefront/ingester/TcpIngester.java b/java-lib/src/main/java/com/wavefront/ingester/TcpIngester.java new file mode 100644 index 000000000..b931eff8d --- /dev/null +++ b/java-lib/src/main/java/com/wavefront/ingester/TcpIngester.java @@ -0,0 +1,70 @@ +package com.wavefront.ingester; + +import com.google.common.base.Charsets; +import com.google.common.base.Function; + +import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.Channel; +import 
io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; + +/** + * Ingester thread that sets up decoders and a command handler to listen for metrics on a port. + * + * @author Clement Pang (clement@wavefront.com). + */ +public class TcpIngester extends Ingester { + + private static final Logger logger = + Logger.getLogger(TcpIngester.class.getCanonicalName()); + + public TcpIngester(List> decoders, + ChannelHandler commandHandler, int port) { + super(decoders, commandHandler, port); + } + + public TcpIngester(ChannelHandler commandHandler, int port) { + super(commandHandler, port); + } + + public void run() { + // Configure the server. + ServerBootstrap b = new ServerBootstrap(); + try { + b.group(new NioEventLoopGroup(), new NioEventLoopGroup()) + .channel(NioServerSocketChannel.class) + .option(ChannelOption.SO_BACKLOG, 100) + .localAddress(listeningPort) + .childHandler(new ChannelInitializer() { + @Override + public void initChannel(SocketChannel ch) throws Exception { + ChannelPipeline pipeline = ch.pipeline(); + addDecoders(ch); + addIdleTimeoutHandler(pipeline); + pipeline.addLast(commandHandler); + } + }); + + // Start the server. + ChannelFuture f = b.bind().sync(); + + // Wait until the server socket is closed. 
+ f.channel().closeFuture().sync(); + } catch (InterruptedException e) { + logger.log(Level.WARNING, "Interrupted", e); + + // Server was interrupted + e.printStackTrace(); + } + } +} diff --git a/java-lib/src/main/java/com/wavefront/ingester/UdpIngester.java b/java-lib/src/main/java/com/wavefront/ingester/UdpIngester.java new file mode 100644 index 000000000..2114694d7 --- /dev/null +++ b/java-lib/src/main/java/com/wavefront/ingester/UdpIngester.java @@ -0,0 +1,62 @@ +package com.wavefront.ingester; + +import com.google.common.base.Charsets; +import com.google.common.base.Function; + +import java.util.ArrayList; +import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.net.InetSocketAddress; +import io.netty.bootstrap.Bootstrap; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioDatagramChannel; +import io.netty.channel.socket.DatagramChannel; +import io.netty.channel.Channel; + +/** + * Ingester thread that sets up decoders and a command handler to listen for metrics on a port. + * + * @author Clement Pang (clement@wavefront.com). + */ +public class UdpIngester extends Ingester { + + private static final Logger logger = + Logger.getLogger(UdpIngester.class.getCanonicalName()); + + public UdpIngester(List> decoders, + ChannelHandler commandHandler, int port) { + super(decoders, commandHandler, port); + } + + public UdpIngester(ChannelHandler commandHandler, int port) { + super(commandHandler, port); + } + + public void run() { + // Configure the server. 
+ final NioEventLoopGroup group = new NioEventLoopGroup(); + try { + final Bootstrap b = new Bootstrap(); + b.group(group) + .channel(NioDatagramChannel.class) + .option(ChannelOption.SO_BROADCAST, true) + .handler(commandHandler); + + // Start the server. + b.bind(listeningPort).sync().channel().closeFuture().await(); + } catch (InterruptedException e) { + logger.log(Level.WARNING, "Interrupted", e); + + // Server was interrupted + e.printStackTrace(); + } finally { + group.shutdownGracefully(); + } + } +} diff --git a/proxy/pom.xml b/proxy/pom.xml index 77597b70f..c4523ebe2 100644 --- a/proxy/pom.xml +++ b/proxy/pom.xml @@ -102,6 +102,21 @@ com.google.code.findbugs jsr305 + + org.slf4j + slf4j-api + 1.7.5 + + + org.slf4j + slf4j-simple + 1.6.4 + + + org.slf4j + slf4j-log4j12 + 1.7.5 + @@ -190,4 +205,4 @@ - \ No newline at end of file + diff --git a/proxy/src/main/java/com/wavefront/agent/AbstractAgent.java b/proxy/src/main/java/com/wavefront/agent/AbstractAgent.java index 2895dc1b7..d22ea3f5f 100644 --- a/proxy/src/main/java/com/wavefront/agent/AbstractAgent.java +++ b/proxy/src/main/java/com/wavefront/agent/AbstractAgent.java @@ -64,6 +64,8 @@ public abstract class AbstractAgent { private static final int GRAPHITE_LISTENING_PORT = 2878; private static final int OPENTSDB_LISTENING_PORT = 4242; private static final int HTTP_JSON_LISTENING_PORT = 3878; + private static final int DOGSTATSD_LISTENING_PORT = 8125; + private static final int DATADOG_HTTP_LISTENING_PORT = 8126; @Parameter(names = {"-f", "--file"}, description = "Proxy configuration file") @@ -164,6 +166,14 @@ public abstract class AbstractAgent { @Parameter(names = {"--opentsdbBlacklistRegex"}, description = "Regex pattern (java.util.regex) that opentsdb input lines must NOT match to be accepted") protected String opentsdbBlacklistRegex; + @Parameter(names = {"--datadogPorts"}, description = "Comma-separated list of ports to listen on for DataDog agent " + + "data. 
Defaults to: " + DATADOG_HTTP_LISTENING_PORT) + protected String datadogAgentPorts = "" + DATADOG_HTTP_LISTENING_PORT; + + @Parameter(names = {"--dogstatsdPorts"}, description = "Comma-separated list of ports to listen on for DataDog DogStatsD " + + "data. Defaults to: " + DOGSTATSD_LISTENING_PORT) + protected String dogstatsdPorts = "" + DOGSTATSD_LISTENING_PORT; + @Parameter(names = {"--splitPushWhenRateLimited"}, description = "Whether to split the push batch size when the push is rejected by Wavefront due to rate limit. Default false.") protected boolean splitPushWhenRateLimited = false; @@ -260,6 +270,8 @@ private void loadListenerConfigurationFile() throws IOException { opentsdbBlacklistRegex = prop.getProperty("opentsdbBlacklistRegex", opentsdbBlacklistRegex); splitPushWhenRateLimited = Boolean.parseBoolean(prop.getProperty("splitPushWhenRateLimited", String.valueOf(splitPushWhenRateLimited))); + datadogAgentPorts = prop.getProperty("datadogPorts", datadogAgentPorts); + dogstatsdPorts = prop.getProperty("dogstatsdPorts", dogstatsdPorts); retryBackoffBaseSeconds = Double.parseDouble(prop.getProperty("retryBackoffBaseSeconds", String.valueOf(retryBackoffBaseSeconds))); customSourceTagsProperty = prop.getProperty("customSourceTags", customSourceTagsProperty); diff --git a/proxy/src/main/java/com/wavefront/agent/ChannelStringHandler.java b/proxy/src/main/java/com/wavefront/agent/ChannelStringHandler.java index 10b39121a..8bc67d1a9 100644 --- a/proxy/src/main/java/com/wavefront/agent/ChannelStringHandler.java +++ b/proxy/src/main/java/com/wavefront/agent/ChannelStringHandler.java @@ -15,6 +15,7 @@ import java.util.List; import java.util.UUID; import java.util.regex.Pattern; +import java.util.logging.Logger; import javax.annotation.Nullable; @@ -30,6 +31,8 @@ */ @ChannelHandler.Sharable public class ChannelStringHandler extends SimpleChannelInboundHandler { + private static final Logger logger = + Logger.getLogger(ChannelStringHandler.class.getCanonicalName()); 
private final Decoder decoder; private final List validatedPoints = new ArrayList<>(); @@ -100,6 +103,7 @@ protected boolean passesWhiteAndBlackLists(String pointLine) { @Override protected void channelRead0(ChannelHandlerContext ctx, String msg) throws Exception { + logger.finer("Processing message " + msg); // ignore empty lines. if (msg == null || msg.trim().length() == 0) return; if (transformer != null) { @@ -140,4 +144,13 @@ private void handleBlockedPoint(String pointLine) { public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { // ignore. } + + @Override + public String toString() { + if (decoder == null) { + return "Channel String Handler (no decoder)"; + } else { + return "Channel String Handler (" + decoder.toString() + ")"; + } + } } diff --git a/proxy/src/main/java/com/wavefront/agent/DataDogAgentHandler.java b/proxy/src/main/java/com/wavefront/agent/DataDogAgentHandler.java new file mode 100644 index 000000000..263df370f --- /dev/null +++ b/proxy/src/main/java/com/wavefront/agent/DataDogAgentHandler.java @@ -0,0 +1,409 @@ +package com.wavefront.agent; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpObject; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http.LastHttpContent; +import io.netty.util.CharsetUtil; + +import static io.netty.handler.codec.http.HttpHeaders.Names.*; + +import java.nio.ByteBuffer; +import java.util.zip.Inflater; +import java.util.HashMap; +import 
java.util.Iterator; +import java.util.List; +import java.util.logging.Logger; +import java.util.logging.Level; +import java.util.Map; +import java.util.Map.Entry; +import java.util.UUID; + +import com.wavefront.agent.api.ForceQueueEnabledAgentAPI; +import sunnylabs.report.ReportPoint; + +import org.codehaus.jackson.map.ObjectMapper; +import org.codehaus.jackson.JsonNode; + +/** + * This class is a netty channel handler for metrics arriving from + * a DataDog agent. This handler operates as a mini HTTP server and returns + * a 200 status code for all requests (unless an exception occurs). + * To use this, change datadog agent configuration(s) to point the dd_url + * value to the Wavefront proxy where this handler is running. + * To use this to send data to both datadog and WF, be sure to put a proxy + * in front of this handler (teeproxy, or similar) and duplicate requests to + * this handler. + * This is based off the netty example HttpSnoopServer found here: + * https://github.com/netty/netty/tree/4.1/example/src/main/java/io/netty/example/http/snoop + * This class was created from the example provided in HttpSnoopServerHandler + * class in the above directory. 
+ */ +@ChannelHandler.Sharable +public class DataDogAgentHandler extends SimpleChannelInboundHandler { + private static final Logger LOG = Logger.getLogger( + DataDogAgentHandler.class.getCanonicalName()); + + /** + * The HTTP request object passed to channelRead0() + */ + private HttpRequest request; + + /** + * The point handler that takes report metrics one data point at a time + * and handles batching and retries, etc + */ + private final PointHandler pointHandler; + + public DataDogAgentHandler(final ForceQueueEnabledAgentAPI agentAPI, + final UUID daemonId, + final int port, + final String prefix, + final String logLevel, + final String validationLevel, + final long millisecondsPerBatch, + final int blockedPointsPerBatch) { + this.pointHandler = new PointHandler(agentAPI, daemonId, port, logLevel, validationLevel, millisecondsPerBatch, blockedPointsPerBatch); + } + + @Override + public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { + ctx.flush(); + } + + @Override + protected void channelRead0(io.netty.channel.ChannelHandlerContext ctx, + Object msg) { + if (msg instanceof HttpRequest) { + this.request = (HttpRequest) msg; + LOG.info(String.format("%s %s", request.getMethod(), request.getUri())); + // TODO: anything to do here? 
+ } + + // NOTE: this requires the use of the HttpObjectAggregator in the netty + // pipeline + if (msg instanceof LastHttpContent) { + final HttpContent httpContent = (HttpContent) msg; + LOG.fine(String.format("[%s] CONTENT\n%s", request.getUri(), request)); + final ByteBuf content = httpContent.content(); + final String header = request.headers().get("Content-Encoding"); + final boolean compressed = (header != null && header.equalsIgnoreCase("deflate")); + if (request.getUri().startsWith("/intake")) { + final JsonNode root = parseJson(content, compressed); + handleIntakeRequest(root); + } else if (request.getUri().startsWith("/api/v1/series")) { + final JsonNode root = parseJson(content, compressed); + handleApiSeries(root); + } else if (request.getUri().equals("/")) { // assume point series + final JsonNode root = parseJson(content, compressed); + handleApiSeries(root); + } else { + LOG.warning(String.format("Ignoring %s %s:\n%s", request.getMethod(), request.getUri(), content.toString(CharsetUtil.UTF_8))); + } + + writeResponse(httpContent, ctx); + } + } + + /** + * Decompress the provided string and parse the resulting JSON string + * @param content the HttpContent (compressed JSON) + * @param isCompressed is the contents compressed? + * @return the parsed root node + */ + private JsonNode parseJson(final ByteBuf content, + final boolean isCompressed) { + if (!content.isReadable()) { + LOG.warning(String.format("[%s] Unable to read content. 
Ignoring", + request.getUri())); + throw new IllegalArgumentException("Unable to read content"); + } + + // get the contents of the HTTP message body as a byte array + final ByteBuffer data = ByteBuffer.allocate(content.readableBytes()); + content.getBytes(0, data); + byte[] jsonBytes = null; + if (isCompressed) { + final byte[] compressed = data.array(); + jsonBytes = new byte[compressed.length * 100]; + if (isCompressed) { + try { + // decompress the message + final Inflater decompressor = new Inflater(); + decompressor.setInput(compressed); + decompressor.inflate(jsonBytes); + } catch (final java.util.zip.DataFormatException e) { + LOG.log(Level.WARNING, "Failed to decompress message", e); + throw new IllegalArgumentException("Unable to decompress message", e); + } + } + } else { + jsonBytes = data.array(); + } + + // decompressed - now parse JSON + final ObjectMapper jsonTree = new ObjectMapper(); + try { + return jsonTree.readTree(jsonBytes); + } catch (final java.io.IOException e) { + LOG.log(Level.WARNING, + String.format("Unable to parse JSON\n%s", jsonBytes), + e); + throw new IllegalArgumentException("Unable to parse JSON", e); + } + } + + /** + * Handles the HTTP request from the datadog agent with the URI: + * /intake/?api_key= + * ASSUMPTION: the content body is zip'd using ZLib and the value is + * a JSON object. + * Given a json message, this will parse and find the metrics and post those + * to the WF server. The JSON is expected to look like this: + * { + * "metrics": [ + * [ + * "system.disk.total", + * 1451409097, + * 497448.0, + * { + * "device_name": "udev", + * "hostname": "mike-ubuntu14", + * "type": "gauge" + * } + * ], + * ... + * } + * Each metric in the metrics array is consider a report point and is + * sent to the WF server using the PointHandler object. 
The metric array + * element is made up of: + * (0): metric name + * (1): timestamp (epoch seconds) + * (2): value (assuming float for all values) + * (3): tags (including host); all tags are converted to tags except + * hostname which is sent on its own as the source for the point. + * + * In addition to the metric array elements, all top level elements that + * begin with : + * cpu* + * mem* + * are captured and the value is sent. These items are in the form of: + * { + * ... + * "collection_timestamp": 1451409092.995346, + * "cpuGuest": 0.0, + * "cpuIdle": 99.33, + * "cpuStolen": 0.0, + * ... + * "internalHostname": "mike-ubuntu14", + * ... + * } + * The names are retrieved from the JSON key name splitting the key + * on upper case letters and adding a dot between to form a metric name + * like this example: + * "cpuGuest" => "cpu.guest" + * The value comes from the JSON key's value. + * + * @param root root node of the parsed HttpContent body + */ + private void handleIntakeRequest(final JsonNode root) { + // get the hostname used by all of the top level metrics + final String hostName = root.findPath("internalHostname").asText(); + // get the collection timestamp for all the top level metrics + final double ts = root.findPath("collection_timestamp").asDouble(); + + // iterator over all the top level fields and pull out the name/value + // pairs of all items we care about + // { + // "cpuIdle": 0.00, + // "cpuUser": 0.00, + // .... + // } + Iterator> fields = root.getFields(); + while (fields.hasNext()) { + Map.Entry field = fields.next(); + if (field.getKey().startsWith("cpu") || + field.getKey().startsWith("mem")) + { + final ReportPoint point = ReportPoint.newBuilder() + .setMetric(covertKeyToDottedName(field.getKey())) + .setTimestamp((long)ts * 1000) // convert to ms + .setHost(hostName) + .setValue(field.getValue().asDouble()) + .setTable("datadog") // TODO: what is table? 
+ .build(); + LOG.finer("reporting point: " + point); + pointHandler.reportPoint(point, root.toString()); + } + } + + // metrics array items + final JsonNode metrics = root.findPath("metrics"); + for (final JsonNode metric : metrics) { + // pull out the tags and then search for the hostname + // we won't send the hostname as a tag, we'll send that as "source" + // to WF point handler + final JsonNode tags = metric.get(3); + final Map annotations = new HashMap<>(); + JsonNode host = null; + fields = tags.getFields(); + while (fields.hasNext()) { + Map.Entry field = fields.next(); + if (field.getKey().equals("hostname")) { + host = field.getValue(); + } else { + annotations.put(field.getKey(), field.getValue().asText()); + } + } + + // assuming we found a host, then send the details to WF + if (host != null) { + final ReportPoint point = ReportPoint.newBuilder() + .setAnnotations(annotations) + .setMetric(metric.get(0).asText()) + .setTimestamp(metric.get(1).asLong() * 1000) // convert to ms + .setHost(host.asText()) + .setValue(metric.get(2).asDouble()) + .setTable("datadog") // TODO: what is table? + .build(); + LOG.finer("reporting point: " + point); + pointHandler.reportPoint(point, root.toString()); + } + } + } + + /** + * Handles the HTTP request from the datadog agent with the URI: + * /api/v1/series/?api_key= + * JSON is expected to look like : + * { + * "series": [ + * { + * "device_name" : null, + * "host": "", + * "interval": 10.0, + * "metric": "", + * "points": [ + * [ + * 1451950930.0, + * 0 + * ] + * ], + * "tags": null, + * "type": "gauge" + * }, + * ... 
+ * ] + * } + * The point element is made up of: + * (0): timestamp (epoch seconds) + * (1): value (numeric) + * @param root root node of the parsed HttpContent JSON body + */ + private void handleApiSeries(final JsonNode root) { + // ignore everything else and get the "series" array + final JsonNode metrics = root.findPath("series"); + for (final JsonNode metric : metrics) { + String metricName = metric.findPath("metric").asText(); + + // grab the tags + final Map annotations = new HashMap<>(); + final JsonNode tags = metric.findPath("tags"); + if (tags.isArray()) { + // assumption: must be an array, values are strings; format: + // name:value + for (final JsonNode tag : tags) { + final String namevalue = tag.asText(); + final String[] parts = namevalue.split(":"); + if (parts.length != 2) { + LOG.warning(String.format("Expected tag to be in format : but got '%s'. Ignoring this tag.", namevalue)); + continue; + } + annotations.put(parts[0], parts[1]); + } + } + + final JsonNode points = metric.findPath("points"); + for (final JsonNode pt : points) { + final ReportPoint point = ReportPoint.newBuilder() + .setAnnotations(annotations) + .setMetric(metricName) + .setTimestamp(pt.get(0).asLong() * 1000) // convert to ms + .setHost(metric.findPath("host").asText()) + .setValue(pt.get(1).asDouble()) + .setTable("datadog") // TODO: what is table? + .build(); + LOG.finer("reporting point: " + point); + pointHandler.reportPoint(point, root.toString()); + } + } + } + + /** + * Writes the response - 200 if everything went ok. + * This is mostly the same as what was found in the snoop example referenced + * above in the class details. + */ + private void writeResponse(final HttpObject current, + final ChannelHandlerContext ctx) { + // Decide whether to close the connection or not. + final boolean keepAlive = HttpHeaders.isKeepAlive(request); + // Build the response object. + final HttpResponseStatus status = current.getDecoderResult().isSuccess() ? 
HttpResponseStatus.OK : HttpResponseStatus.BAD_REQUEST; + final FullHttpResponse response = new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, status, Unpooled.copiedBuffer("", CharsetUtil.UTF_8)); + + response.headers().set(CONTENT_TYPE, "text/plain; charset=UTF-8"); + + if (keepAlive) { + // Add 'Content-Length' header only for a keep-alive connection. + response.headers().set(CONTENT_LENGTH, response.content().readableBytes()); + // Add keep alive header as per: + // - http://www.w3.org/Protocols/HTTP/1.1/draft-ietf-http-v11-spec-01.html#Connection + response.headers().set(CONNECTION, HttpHeaders.Values.KEEP_ALIVE); + } + + // Write the response. + LOG.fine("response: " + response.toString()); + ctx.write(response); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + LOG.log(Level.WARNING, "Failed", cause); + // TODO: write 500 response + ctx.close(); + } + + /** + * Convert a key that is camel-case notation to a dotted equivalent. 
This + * is best described with an example: + * key = "memPhysFree" + * returns "mem.phys.free" + * @param key a camel-case string value + * @return dotted notation with each uppercase containing a dot before + */ + private String covertKeyToDottedName(final String key) { + final StringBuilder sb = new StringBuilder(); + for (int i = 0; i < key.length(); i++) { + final char c = key.charAt(i); + if (Character.isUpperCase(c)) { + sb.append("."); + sb.append(Character.toLowerCase(c)); + } else { + sb.append(c); + } + } + return sb.toString(); + } +} diff --git a/proxy/src/main/java/com/wavefront/agent/DogStatsDUDPHandler.java b/proxy/src/main/java/com/wavefront/agent/DogStatsDUDPHandler.java new file mode 100644 index 000000000..eccd0deca --- /dev/null +++ b/proxy/src/main/java/com/wavefront/agent/DogStatsDUDPHandler.java @@ -0,0 +1,134 @@ +package com.wavefront.agent; + +import java.util.logging.Logger; +import java.util.logging.Level; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.socket.DatagramPacket; +import io.netty.util.CharsetUtil; +import sunnylabs.report.ReportPoint; +import com.wavefront.agent.api.ForceQueueEnabledAgentAPI; + +/** + * DogStatsD handler that takes a string in this format: + * metric.name:value|type|@sample_rate|#tag1:value,tag2 + * parses and then sends the metric to the wavefront server. + * Currently only 'g' and 'c' metric types are supported (others are ignored) + */ +public class DogStatsDUDPHandler extends SimpleChannelInboundHandler { + private static final Logger LOG = Logger.getLogger( + DogStatsDUDPHandler.class.getCanonicalName()); + + /** + * The point handler that takes report metrics one data point at a time + * and handles batching and retries, etc + */ + private final PointHandler pointHandler; + + /** + * Constructor (matches the other constructors). 
+ */ + public DogStatsDUDPHandler(final ForceQueueEnabledAgentAPI agentAPI, + final UUID daemonId, + final int port, + final String prefix, + final String logLevel, + final String validationLevel, + final long millisecondsPerBatch, + final int blockedPointsPerBatch) { + this.pointHandler = new PointHandler(agentAPI, daemonId, port, logLevel, validationLevel, millisecondsPerBatch, blockedPointsPerBatch); + } + + /** + * {@inheritDoc} + */ + @Override + public void channelRead0(ChannelHandlerContext ctx, DatagramPacket packet) throws Exception { + final String msg = packet.content().toString(CharsetUtil.UTF_8); + LOG.info("Received message '" + msg + "'"); + final ReportPoint point = + decodeMessage(msg, packet.sender().getHostName()); + if (point != null) { + LOG.fine("Sending point : " + point.toString()); + pointHandler.reportPoint(point, msg); + } + } + + /** + * {@inheritDoc} + */ + @Override + public void channelReadComplete(ChannelHandlerContext ctx) { + ctx.flush(); + } + + /** + * {@inheritDoc} + */ + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + LOG.log(Level.WARNING, "Caught exception in dogstatsd handler", cause); + // Don't close the channel because we can keep serving requests. + } + + /** + * Decodes a message received. The expected format is: + * metric.name:value|type|@sample_rate|#tag1:value,tag2 + * @param msg the incoming message + * @param source the source/host + * @return the report point generated or null if the message could not be + * parsed or is invalid. 
+ */ + public ReportPoint decodeMessage(final String msg, final String source) { + final Map annotations = new HashMap<>(); + + // split into name and value + metadata and check the message format + final String[] name_metadata = msg.split(":", 2); + if (name_metadata.length != 2) { + // not a valid message + LOG.warning("Unsupported DogStatsD format: '" + msg + "'"); + return null; + } + final String[] parts = name_metadata[1].split("\\|"); + if (parts.length <= 1) { + LOG.warning("Unsupported DogStatsD message: '" + msg + "'"); + return null; + } + + // check the metric type is supported + if (parts[1].charAt(0) != 'g' && parts[1].charAt(0) != 'c') { + LOG.warning("Skipping DogStatsD metric type: '" + parts[1] + "' (" + msg + ")"); + return null; + } + + // skip over the sample rate and find tags + int loc = 1; + if (parts.length > loc+1) { + if (parts[loc].charAt(0) == '@') { + loc++; + } + if (parts.length > loc+1) { + if (parts[2].charAt(0) == '#') { + for (int i = 3; i < parts.length; i++) { + final String[] tag = parts[i].split(":"); + if (tag.length == 2) { + annotations.put(tag[0], tag[1]); + } + } + } + } + } + + return ReportPoint.newBuilder() + .setHost(source) + .setAnnotations(annotations) + .setMetric(name_metadata[0]) + .setValue(Double.parseDouble(parts[0])) + .setTimestamp(System.currentTimeMillis()) + .setTable("datadog") // TODO: what is table? 
+ .build(); + } +} diff --git a/proxy/src/main/java/com/wavefront/agent/PointHandler.java b/proxy/src/main/java/com/wavefront/agent/PointHandler.java index efea32b39..c9c390a7d 100644 --- a/proxy/src/main/java/com/wavefront/agent/PointHandler.java +++ b/proxy/src/main/java/com/wavefront/agent/PointHandler.java @@ -14,6 +14,7 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; +import java.util.logging.Level; /** * Adds all graphite strings to a working list, and batches them up on a set schedule (100ms) to be @@ -72,20 +73,17 @@ public void reportPoint(ReportPoint point, String debugLine) { if (!charactersAreValid(point.getMetric())) { illegalCharacterPoints.inc(); String errorMessage = port + ": Point metric has illegal character (" + debugLine + ")"; - logger.warning(errorMessage); throw new RuntimeException(errorMessage); } if (!annotationKeysAreValid(point)) { String errorMessage = port + ": Point annotation key has illegal character (" + debugLine + ")"; - logger.warning(errorMessage); throw new RuntimeException(errorMessage); } if (!pointInRange(point)) { outOfRangePointTimes.inc(); String errorMessage = port + ": Point outside of reasonable time frame (" + debugLine + ")"; - logger.warning(errorMessage); throw new RuntimeException(errorMessage); } @@ -94,7 +92,8 @@ public void reportPoint(ReportPoint point, String debugLine) { switch (validationLevel) { case VALIDATION_NUMERIC_ONLY: if (!(pointValue instanceof Long) && !(pointValue instanceof Double)) { - throw new RuntimeException(port + ": Was not long/double object"); + String errorMessage = port + ": Was not long/double object (" + debugLine + ")"; + throw new RuntimeException(errorMessage); } break; } @@ -105,6 +104,7 @@ public void reportPoint(ReportPoint point, String debugLine) { } } catch (Exception e) { + logger.log(Level.WARNING, "Failed to add point", e); if (this.sendDataTask.getBlockedSampleSize() < 
this.blockedPointsPerBatch) { this.sendDataTask.addBlockedSample(debugLine); } diff --git a/proxy/src/main/java/com/wavefront/agent/PushAgent.java b/proxy/src/main/java/com/wavefront/agent/PushAgent.java index 09d4c99c1..0820e531d 100644 --- a/proxy/src/main/java/com/wavefront/agent/PushAgent.java +++ b/proxy/src/main/java/com/wavefront/agent/PushAgent.java @@ -8,9 +8,12 @@ import com.wavefront.ingester.GraphiteDecoder; import com.wavefront.ingester.GraphiteHostAnnotator; import com.wavefront.ingester.Ingester; +import com.wavefront.ingester.StringLineIngester; +import com.wavefront.ingester.TcpIngester; +import com.wavefront.ingester.UdpIngester; import com.wavefront.ingester.OpenTSDBDecoder; -import io.netty.channel.ChannelHandler; -import io.netty.channel.socket.SocketChannel; +import com.wavefront.agent.DataDogAgentHandler; +import com.wavefront.agent.DogStatsDUDPHandler; import org.glassfish.jersey.jackson.JacksonFeature; import org.glassfish.jersey.jetty.JettyHttpContainerFactory; import org.glassfish.jersey.server.ResourceConfig; @@ -19,8 +22,16 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import java.util.ArrayList; import java.util.List; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.socket.SocketChannel; +import io.netty.handler.codec.http.HttpRequestDecoder; +import io.netty.handler.codec.http.HttpResponseEncoder; +import io.netty.handler.codec.http.HttpObjectAggregator; + /** * Push-only Agent. 
* @@ -65,6 +76,22 @@ protected void startListeners() { } } } + if (datadogAgentPorts != null) { + for (final String strPort : datadogAgentPorts.split(",")) { + if (strPort.trim().length() > 0) { + startDataDogAgentListener(strPort); + logger.info("listening on port: " + strPort + " for DataDog agent metrics"); + } + } + } + if (dogstatsdPorts != null) { + for (final String strPort : dogstatsdPorts.split(",")) { + if (strPort.trim().length() > 0) { + startDogStatsDListener(strPort); + logger.info("listening on port: " + strPort + " for DogStatsD metrics"); + } + } + } if (httpJsonPorts != null) { for (String strPort : httpJsonPorts.split(",")) { if (strPort.trim().length() > 0) { @@ -95,7 +122,42 @@ protected void startOpenTsdbListener(String strPort) { agentAPI, agentId, port, prefix, pushLogLevel, pushValidationLevel, pushFlushInterval, pushBlockedSamples, null, opentsdbWhitelistRegex, opentsdbBlacklistRegex); - new Thread(new Ingester(graphiteHandler, port)).start(); + new Thread(new StringLineIngester(graphiteHandler, port)).start(); + } + + protected void startDogStatsDListener(String strPort) { + int port = Integer.parseInt(strPort); + + // Set up a custom graphite handler, with no formatter + ChannelHandler handler = new DogStatsDUDPHandler(agentAPI, agentId, port, prefix, pushLogLevel, pushValidationLevel, pushFlushInterval, pushBlockedSamples); + new Thread(new UdpIngester(handler, port)).start(); + } + + protected void startDataDogAgentListener(String strPort) { + int port = Integer.parseInt(strPort); + // decoders + List> decoders = new ArrayList<>(); + decoders.add(new Function() { + @Override + public ChannelHandler apply(Channel input) { + return new HttpRequestDecoder(); + } + }); + decoders.add(new Function() { + @Override + public ChannelHandler apply(Channel input) { + return new HttpResponseEncoder(); + } + }); + decoders.add(new Function() { + @Override + public ChannelHandler apply(Channel input) { + return new HttpObjectAggregator(1048576); + } 
+ }); + + ChannelHandler handler = new DataDogAgentHandler(agentAPI, agentId, port, prefix, pushLogLevel, pushValidationLevel, pushFlushInterval, pushBlockedSamples); + new Thread(new TcpIngester(decoders, handler, port)).start(); } protected void startGraphiteListener(String strPort, @@ -108,16 +170,17 @@ protected void startGraphiteListener(String strPort, pushBlockedSamples, formatter, whitelistRegex, blacklistRegex); if (formatter == null) { - List> handler = Lists.newArrayList(1); - handler.add(new Function() { + List> handler = Lists.newArrayList(1); + handler.add(new Function() { @Override - public ChannelHandler apply(SocketChannel input) { - return new GraphiteHostAnnotator(input.remoteAddress().getHostName(), customSourceTags); + public ChannelHandler apply(Channel input) { + SocketChannel ch = (SocketChannel)input; + return new GraphiteHostAnnotator(ch.remoteAddress().getHostName(), customSourceTags); } }); - new Thread(new Ingester(handler, graphiteHandler, port)).start(); + new Thread(new StringLineIngester(handler, graphiteHandler, port)).start(); } else { - new Thread(new Ingester(graphiteHandler, port)).start(); + new Thread(new StringLineIngester(graphiteHandler, port)).start(); } }