From 7036e2307de9e6f0a14b9367b1d3b70c49f40edc Mon Sep 17 00:00:00 2001
From: Chris Larsen
Date: Fri, 16 Feb 2024 05:12:02 -0800
Subject: [PATCH] jmh: update publish payload benchmarks (#1610)

Add benchmark to show benefit of skipping the time series ID.
---
 .../atlas/webapi/PublishPayloadsBench.scala   | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/atlas-jmh/src/main/scala/com/netflix/atlas/webapi/PublishPayloadsBench.scala b/atlas-jmh/src/main/scala/com/netflix/atlas/webapi/PublishPayloadsBench.scala
index 8e76a4e33..bfe5b8a09 100644
--- a/atlas-jmh/src/main/scala/com/netflix/atlas/webapi/PublishPayloadsBench.scala
+++ b/atlas-jmh/src/main/scala/com/netflix/atlas/webapi/PublishPayloadsBench.scala
@@ -35,6 +35,7 @@ import scala.util.Using
  * ```
  * Benchmark              Mode  Cnt    Score    Error  Units
  * decodeBatch           thrpt    5   23.787 ±  1.148  ops/s
+ * decodeBatchDatapoints thrpt    5  129.900 ±  3.767  ops/s
  * decodeCompactBatch    thrpt    5  173.148 ±  1.835  ops/s
  * decodeList            thrpt    5   25.277 ±  0.254  ops/s
  * encodeBatch           thrpt    5  179.382 ± 39.696  ops/s
@@ -76,6 +77,13 @@ class PublishPayloadsBench {
     }
   }
 
+  // Skips the ID calculation.
+  private def decodeBatchDatapoints(data: Array[Byte]): List[Datapoint] = {
+    Using.resource(Json.newSmileParser(new ByteArrayInputStream(data))) { parser =>
+      PublishPayloads.decodeBatchDatapoints(parser)
+    }
+  }
+
   private def encodeBatch(values: List[DatapointTuple]): Array[Byte] = {
     Streams.byteArray { out =>
       Using.resource(Json.newSmileGenerator(out)) { gen =>
@@ -126,6 +134,14 @@ class PublishPayloadsBench {
     }
   }
 
+  @Benchmark
+  def decodeBatchDatapoints(bh: Blackhole): Unit = {
+    val consumer = new BlackholePublishConsumer(bh)
+    decodeBatchDatapoints(encodedBatch).foreach { d =>
+      consumer.consume(null, d.tags, d.timestamp, d.value)
+    }
+  }
+
   @Benchmark
   def decodeCompactBatch(bh: Blackhole): Unit = {
     val consumer = new BlackholePublishConsumer(bh)
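
For context on the numbers above: the roughly 5x throughput gap between decodeBatch and decodeBatchDatapoints (23.787 vs 129.900 ops/s) is the per-datapoint cost of computing the time series ID, which the new decodeBatchDatapoints path skips. Below is a minimal sketch of that skipped work, assuming Atlas's TaggedItem.computeId helper; the tag values are made up for illustration and this sketch is not part of the patch.

```scala
import com.netflix.atlas.core.model.TaggedItem

object IdCostSketch {

  def main(args: Array[String]): Unit = {
    // Hypothetical tag map for a single datapoint.
    val tags = Map("name" -> "requests", "app" -> "www", "node" -> "i-12345")

    // computeId hashes the tag map into an ItemId. Paying this cost for
    // every datapoint is what the decodeBatch path does and what the new
    // decodeBatchDatapoints path avoids by leaving the ID out.
    val id = TaggedItem.computeId(tags)
    println(id)
  }
}
```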