finagle-core: Track queue delay and send latency in pipelining client
Problem
We need to measure the delay between a task's submission to the Netty epoll
event-loop queue and its execution, as well as the latency of sending a
message, including the socket write.

Solution
Add epoll_queue_delay_ns and message_send_latency_ns stats to
PipeliningClientPushSession.

Differential Revision: https://phabricator.twitter.biz/D1185421
Ivan Gorbachev authored and jenkins committed Nov 27, 2024
1 parent dc5f647 commit e4a911e
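
Before the diff, here is a minimal, self-contained sketch of the timing pattern the commit message describes. It uses Finagle's InMemoryStatsReceiver and a plain single-threaded executor as stand-ins for the session's stats receiver and the Netty epoll event loop; the object name and the fakeSend helper are illustrative only, not part of the change.

import java.util.concurrent.{Executors, TimeUnit}

import com.twitter.finagle.stats.InMemoryStatsReceiver

object QueueDelaySketch {
  def main(args: Array[String]): Unit = {
    val inMemory = new InMemoryStatsReceiver()
    val scoped = inMemory.scope("pipelining_client")
    val queueDelay = scoped.stat("epoll_queue_delay_ns")
    val sendLatency = scoped.stat("message_send_latency_ns")

    // Stand-in for the event loop behind handle.serialExecutor.
    val executor = Executors.newSingleThreadExecutor()

    val requestStartTime = System.nanoTime() // captured before submission
    executor.execute(new Runnable {
      def run(): Unit = {
        // Delay spent queued behind the event loop before the task ran.
        val handleStartTime = System.nanoTime()
        queueDelay.add(handleStartTime - requestStartTime)

        // Stand-in for an asynchronous send that signals write completion.
        fakeSend(() => sendLatency.add(System.nanoTime() - handleStartTime))
      }
    })

    executor.shutdown()
    executor.awaitTermination(1, TimeUnit.SECONDS)

    // Both histograms are now visible in the in-memory receiver.
    inMemory.stats.keys.foreach(println)
  }

  // Hypothetical helper: invokes the callback once the "write" has completed.
  private def fakeSend(onWriteComplete: () => Unit): Unit = onWriteComplete()
}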
Showing 3 changed files with 37 additions and 6 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.rst
@@ -21,6 +21,8 @@ Runtime Behavior Changes
Previously it was `[0, dur)`, which could result in `next.duration < duration`
for arbitrary long invocation chains. ``PHAB_ID=D1182252``
* finagle-core: `Backoff.equalJittered` is now deprecated and falls back to `exponentialJittered`. ``PHAB_ID=D1182535``
+* finagle-core: `PipeliningClientPushSession` now collects stats `epoll_queue_delay_ns` and `message_send_latency_ns`.
+``PHAB_ID=D1185421``

New Features
~~~~~~~~~~~~
38 changes: 33 additions & 5 deletions finagle-core/src/main/scala/com/twitter/finagle/pushsession/PipeliningClientPushSession.scala
@@ -5,6 +5,12 @@ import com.twitter.finagle.Failure
import com.twitter.finagle.FailureFlags
import com.twitter.finagle.Service
import com.twitter.finagle.Status
+import com.twitter.finagle.stats.DefaultStatsReceiver
+import com.twitter.finagle.stats.HistogramFormat
+import com.twitter.finagle.stats.MetricBuilder
+import com.twitter.finagle.stats.MetricBuilder.HistogramType
+import com.twitter.finagle.stats.MetricUsageHint
+import com.twitter.finagle.stats.StatsReceiver
import com.twitter.logging.Logger
import com.twitter.util._

@@ -22,13 +28,29 @@ import com.twitter.util._
* result of another request unless the connection is stuck, and does not
* look like it will make progress. Use `stallTimeout` to configure this timeout.
*/
-final class PipeliningClientPushSession[In, Out](
+class PipeliningClientPushSession[In, Out](
handle: PushChannelHandle[In, Out],
stallTimeout: Duration,
-timer: Timer)
+timer: Timer,
+statsReceiver: StatsReceiver = DefaultStatsReceiver)
extends PushSession[In, Out](handle) { self =>

private[this] val logger = Logger.get
+private[this] val scopedStatsReceived = statsReceiver.scope("pipelining_client")
+private[this] val epollQueueDelay = scopedStatsReceived.stat(
+MetricBuilder(metricType = HistogramType)
+.withHistogramFormat(HistogramFormat.FullSummary)
+.withPercentiles(0.5, 0.9, 0.99, 0.999, 0.9999)
+.withMetricUsageHints(Set(MetricUsageHint.HighContention))
+.withName("epoll_queue_delay_ns")
+)
+private[this] val messageSendLatency = scopedStatsReceived.stat(
+MetricBuilder(metricType = HistogramType)
+.withHistogramFormat(HistogramFormat.FullSummary)
+.withPercentiles(0.5, 0.9, 0.99, 0.999, 0.9999)
+.withMetricUsageHints(Set(MetricUsageHint.HighContention))
+.withName("message_send_latency_ns")
+)

// used only within SerialExecutor
private[this] val h_queue = new java.util.ArrayDeque[Promise[In]]()
@@ -93,7 +115,9 @@ final class PipeliningClientPushSession[In, Out](
})
}
}
-handle.serialExecutor.execute(new Runnable { def run(): Unit = handleDispatch(request, p) })
+
+val requestStartTime = System.nanoTime()
+handle.serialExecutor.execute(() => handleDispatch(request, p, requestStartTime))
p
}

@@ -123,12 +147,16 @@
}
}

-private[this] def handleDispatch(request: Out, p: Promise[In]): Unit = {
+private[this] def handleDispatch(request: Out, p: Promise[In], requestStartTime: Long): Unit = {
+val handleStartTime = System.nanoTime()
+epollQueueDelay.add(handleStartTime - requestStartTime)
if (!h_running) p.setException(new ChannelClosedException(handle.remoteAddress))
else {
h_queue.offer(p)
h_queueSize += 1
-handle.sendAndForget(request)
+handle.send(request) { _ =>
+messageSendLatency.add(System.nanoTime() - handleStartTime)
+}
}
}
}
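
For reference, the MetricBuilder configuration above can be exercised on its own against Finagle's InMemoryStatsReceiver to see where the histogram is registered; this is only a sketch that mirrors the builder used for epoll_queue_delay_ns, and the object name and sample value are illustrative.

import com.twitter.finagle.stats.HistogramFormat
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.finagle.stats.MetricBuilder
import com.twitter.finagle.stats.MetricBuilder.HistogramType
import com.twitter.finagle.stats.MetricUsageHint

object MetricBuilderSketch {
  def main(args: Array[String]): Unit = {
    val inMemory = new InMemoryStatsReceiver()
    val scoped = inMemory.scope("pipelining_client")

    // Same builder shape as the session uses for its queue-delay histogram.
    val queueDelay = scoped.stat(
      MetricBuilder(metricType = HistogramType)
        .withHistogramFormat(HistogramFormat.FullSummary)
        .withPercentiles(0.5, 0.9, 0.99, 0.999, 0.9999)
        .withMetricUsageHints(Set(MetricUsageHint.HighContention))
        .withName("epoll_queue_delay_ns")
    )

    queueDelay.add(42000f) // one sample, in nanoseconds

    // The stat is recorded under its scoped name.
    inMemory.stats.keys.foreach(println)
  }
}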
3 changes: 2 additions & 1 deletion finagle-memcached/src/main/scala/com/twitter/finagle/Memcached.scala
@@ -238,7 +238,8 @@ object Memcached extends finagle.Client[Command, Response] with finagle.Server[C
new PipeliningClientPushSession[Response, Command](
handle,
params[StalledPipelineTimeout].timeout,
-params[finagle.param.Timer].timer
+params[finagle.param.Timer].timer,
+statsReceiver = params[Stats].statsReceiver,
)
)
}
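
From an application's point of view, the Memcached change above means the new histograms flow to whatever stats receiver the client is configured with. A hedged sketch of wiring that up follows; the destination address is a placeholder, and the exact scoping of the exported metric names depends on how the client is labeled.

import com.twitter.finagle.Memcached
import com.twitter.finagle.stats.InMemoryStatsReceiver

object MemcachedStatsSketch {
  def main(args: Array[String]): Unit = {
    val stats = new InMemoryStatsReceiver()

    // A Memcached client that reports its metrics, including the new
    // pipelining histograms, to the in-memory receiver.
    val service = Memcached.client
      .withStatsReceiver(stats)
      .newService("localhost:11211") // placeholder destination

    // Once the client has served traffic, look for keys ending in
    // "epoll_queue_delay_ns" and "message_send_latency_ns", e.g.:
    //   stats.stats.keys.filter(_.contains("epoll_queue_delay_ns"))

    service.close()
  }
}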
