Mirror of https://github.com/status-im/nim-codex.git (synced 2025-02-19 14:14:40 +00:00)

commit 96156b16c3 (parent 8c84d8b743)
half-baked integration of new profiler branch
.gitmodules (vendored, 2 lines changed)
@@ -77,7 +77,7 @@
   path = vendor/nim-chronos
   url = https://github.com/codex-storage/nim-chronos.git
   ignore = untracked
-  branch = future-instrumentation-codex
+  branch = feat/profiling-hooks
 [submodule "vendor/nim-faststreams"]
   path = vendor/nim-faststreams
   url = https://github.com/status-im/nim-faststreams.git
@@ -57,9 +57,9 @@ when isMainModule:

   # TODO this should not be here, but currently can't have it in setupMetrics
   # or we get a circular import.
-  when chronosFuturesInstrumentation:
+  when chronosProfiling:
     enableProfiling()
-    AsyncProfilerInfo.initDefault(k = config.profilerMaxMetrics)
+    enableChronosProfiling()

   case config.cmd:
   of StartUpCommand.noCommand:
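A minimal startup sketch of the new gating (illustrative only, built from the identifiers in the hunk above; the old asyncprofiler's docs note that profiling must be switched on before the chronos event loop starts running futures, which is why the call sits ahead of the command dispatch):

    when chronosProfiling:
      enableProfiling()         # assumed to install chronos' own profiling hooks
      enableChronosProfiling()  # assumed to gate collection for this event loop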
@@ -46,7 +46,7 @@ const
   codex_enable_api_debug_peers* {.booldefine.} = false
   codex_enable_proof_failures* {.booldefine.} = false
   codex_enable_log_counter* {.booldefine.} = false
-  chronosFuturesInstrumentation* {.booldefine.} = false
+  chronosProfiling* {.booldefine.} = false

 type
   StartUpCommand* {.pure.} = enum
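Because chronosProfiling is a {.booldefine.}, it is expected to be switched on from the build configuration rather than at runtime. A sketch of the corresponding config.nims lines (these mirror the config.nims and tests/config.nims hunks further down in this diff):

    --d:chronosFutureId
    --d:chronosProfiling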
@@ -460,14 +460,14 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
         trace "Excepting processing request", exc = exc.msg
         return RestApiResponse.error(Http500)

-  when chronosFuturesInstrumentation:
+  when chronosProfiling:
     router.api(
       MethodGet,
       "/api/codex/v1/debug/performance") do () -> RestApiResponse:
-        # Returns profiling information, highest totalExecTime first
+        # Returns profiling information, highest execTime first

-        without metrics =? sortBy(%(profiler.getFutureSummaryMetrics()),
-          "totalExecTime").catch, error:
+        without metrics =? sortBy(%(getMetrics().totals),
+          "execTime").catch, error:
           return RestApiResponse.error(Http500, error.msg)

         RestApiResponse.response($(metrics), contentType="application/json")
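For reference, a sketch of what a single element of the /api/codex/v1/debug/performance response is expected to look like under the new code path, with field names taken from the AggregateFutureMetrics serialization tests further down in this diff (values illustrative only):

    import std/json

    let sampleRow = %*{
      "location": {
        "procedure": "foo",
        "file": "foo.nim",
        "line": 1
      },
      "execTime": 2000000000,
      "execTimeMax": 1500000000,
      "childrenExecTime": 10000000000,
      "wallClockTime": 2000000000,
      "callCount": 10
    }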
@@ -1,8 +1,9 @@
 import ../conf

-when chronosFuturesInstrumentation:
-  import ./asyncprofiler/asyncprofiler
+when chronosProfiling:
+  import chronos/profiler
+
   import ./asyncprofiler/serialization
   import ./asyncprofiler/metricscollector

-  export asyncprofiler, serialization, metricscollector
+  export profiler, serialization, metricscollector
@@ -1,197 +0,0 @@
-import std/[tables, macros, options, hashes]
-import pkg/chronos
-import pkg/chronos/timer
-
-import ../json
-
-export tables, options, hashes, timer, chronos, SrcLoc
-
-type
-  AsyncProfiler*[TFutureBase] = object
-    perFutureMetrics: PartialMetrics
-    futureSummaryMetrics: MetricsSummary
-    onChange: ChangeListener
-
-  FutureMetrics* = object
-    ## Tracks timing information for a single future (typically an async
-    ## proc). Created when a future starts, and discarded when a future ends.
-    closureLoc*: ptr SrcLoc
-    created*: Moment
-    start*: Option[Moment]
-    duration*: Duration
-    blocks*: int
-    initDuration*: Duration
-    durationChildren*: Duration
-
-  OverallMetrics* = object
-    ## Holds overall execution statistics for all runs of an async proc
-    totalExecTime* {.serialize.}: Duration
-    totalRunTime* {.serialize.}: Duration
-    totalWallTime* {.serialize.}: Duration
-    minSingleTime* {.serialize.}: Duration
-    maxSingleTime* {.serialize.}: Duration
-    count* {.serialize.}: int64
-
-  PartialMetrics* = Table[uint, FutureMetrics]
-
-  MetricsSummary* = Table[ptr SrcLoc, OverallMetrics]
-
-  ChangeListener* = proc (): void {.raises: [].}
-
-var
-  profiler* {.threadvar.}: AsyncProfiler[FutureBase]
-  enabled {.threadvar.}: bool
-
-proc getPerFutureMetrics*(self: AsyncProfiler): PartialMetrics {.gcsafe.} =
-  {.cast(gcsafe).}:
-    self.perFutureMetrics
-
-proc getFutureSummaryMetrics*(self: AsyncProfiler): MetricsSummary {.gcsafe.} =
-  ## get a copy of the table of summary metrics for all futures.
-  {.cast(gcsafe).}:
-    self.futureSummaryMetrics
-
-proc setChangeCallback*(self: var AsyncProfiler, callback: ChangeListener): void =
-  ## Allows registration of a single callback which gets called whenever
-  ## a the summary metric table changes.
-  self.onChange = callback
-
-proc addRun(self: var OverallMetrics, run: FutureMetrics) =
-  ## Adds metrics for a single run of a given async proc to its OverallMetrics.
-  self.totalExecTime += run.duration
-  self.totalWallTime += Moment.now() - run.created
-  self.totalRunTime += run.duration + run.durationChildren
-  self.count.inc
-  self.minSingleTime = min(self.minSingleTime, run.duration)
-  self.maxSingleTime = max(self.maxSingleTime, run.duration)
-
-proc handleFutureCreate*[TFutureBase](self: var AsyncProfiler[TFutureBase],
-    fut: TFutureBase) {.raises: [].} =
-  {.cast(gcsafe).}:
-    self.perFutureMetrics[fut.id] = FutureMetrics()
-    self.perFutureMetrics.withValue(fut.id, metric):
-      metric.created = Moment.now()
-
-proc handleFutureStart*[TFutureBase](self: var AsyncProfiler[TFutureBase],
-    fut: TFutureBase) {.raises: [].} =
-  {.cast(gcsafe).}:
-    assert self.perFutureMetrics.hasKey(fut.id)
-    self.perFutureMetrics.withValue(fut.id, metric):
-      let ts = Moment.now()
-      metric.start = some ts
-      metric.blocks.inc()
-
-proc handleFuturePause*[TFutureBase](self: var AsyncProfiler[TFutureBase],
-    fut, child: TFutureBase) {.raises: [].} =
-  {.cast(gcsafe).}:
-    let childLoc = if child.isNil: nil else: child.internalLocation[Create]
-    var durationChildren = ZeroDuration
-    var initDurationChildren = ZeroDuration
-    if childLoc != nil:
-      self.perFutureMetrics.withValue(child.id, metric):
-        durationChildren = metric.duration
-        initDurationChildren = metric.initDuration
-
-    assert self.perFutureMetrics.hasKey(fut.id)
-    self.perFutureMetrics.withValue(fut.id, metric):
-      if metric.start.isSome:
-        let ts = Moment.now()
-        metric.duration += ts - metric.start.get()
-        metric.duration -= initDurationChildren
-        if metric.blocks == 1:
-          metric.initDuration = ts - metric.created # tricky,
-          # the first block of a child iterator also
-          # runs on the parents clock, so we track our first block
-          # time so any parents can get it
-
-        metric.durationChildren += durationChildren
-        metric.start = none Moment
-
-proc handleFutureComplete*[TFutureBase](self: var AsyncProfiler[TFutureBase],
-    fut: TFutureBase) {.raises: [].} =
-  {.cast(gcsafe).}:
-    ## used for setting the duration
-    let loc = fut.internalLocation[Create]
-    # assert "set duration: " & $loc
-    var runMetrics: FutureMetrics
-    self.perFutureMetrics.withValue(fut.id, metric):
-      runMetrics = metric[]
-
-    discard self.futureSummaryMetrics.hasKeyOrPut(loc,
-      OverallMetrics(minSingleTime: InfiniteDuration))
-
-    self.futureSummaryMetrics.withValue(loc, metric):
-      metric[].addRun(runMetrics)
-
-    if not isNil(self.onChange):
-      self.onChange()
-
-onFutureCreate =
-  proc (f: FutureBase) {.nimcall, gcsafe, raises: [].} =
-    if enabled:
-      profiler.handleFutureCreate(f)
-
-onFutureRunning =
-  proc (f: FutureBase) {.nimcall, gcsafe, raises: [].} =
-    if enabled:
-      profiler.handleFutureStart(f)
-
-onFuturePause =
-  proc (f, child: FutureBase) {.nimcall, gcsafe, raises: [].} =
-    if enabled:
-      profiler.handleFuturePause(f, child)
-
-onFutureStop =
-  proc (f: FutureBase) {.nimcall, gcsafe, raises: [].} =
-    if enabled:
-      profiler.handleFuturePause(f, nil)
-      profiler.handleFutureComplete(f)
-
-proc enableChronosProfiling* (): void =
-  ## This gates for which chronos event loops we want to collect metrics. This
-  ## method should be called for each event loop, before chronos is started.
-  enabled = true
-
-when isMainModule:
-  import std/unittest
-  import std/os
-
-  suite "async profiling":
-    test "basic profiling":
-      proc simpleAsyncChild() {.async.} =
-        echo "child sleep..."
-        os.sleep(25)
-
-      proc simpleAsync1() {.async.} =
-        for i in 0..1:
-          await sleepAsync(40.milliseconds)
-          await simpleAsyncChild()
-          echo "sleep..."
-          os.sleep(50)
-
-      waitFor(simpleAsync1())
-
-      let metrics = futureSummaryMetrics
-      echo "\n=== metrics ==="
-      echo "execTime:\ttime to execute non-async portions of async proc"
-      echo "runTime:\texecution time + execution time of children"
-      echo "wallTime:\twall time elapsed for future's lifetime"
-      for (k,v) in metrics.pairs():
-        let count = v.count
-        if count > 0:
-          echo ""
-          echo "metric: ", $k
-          echo "count: ", count
-          echo "avg execTime:\t", v.totalExecTime div count, "\ttotal: ", v.totalExecTime
-          echo "avg wallTime:\t", v.totalWallTime div count, "\ttotal: ", v.totalWallTime
-          echo "avg runTime:\t", v.totalRunTime div count, "\ttotal: ", v.totalRunTime
-        if k.procedure == "simpleAsync1":
-          echo "v: ", v
-          check v.totalExecTime >= 100.milliseconds()
-          check v.totalExecTime <= 180.milliseconds()
-
-          check v.totalRunTime >= 150.milliseconds()
-          check v.totalRunTime <= 240.milliseconds()
-        discard
-      echo ""
@@ -1,10 +1,12 @@
 import std/algorithm
 import std/enumerate
 import std/sequtils
 import std/tables
 import std/times

-import asyncprofiler
-import metrics
+import pkg/chronos
+import pkg/chronos/profiler
+import pkg/metrics
+
 when defined(metrics):
   type
@@ -17,11 +19,11 @@ when defined(metrics):
       lastSample: Time
      collections*: uint

-    PerfSampler = proc (): MetricsSummary {.raises: [].}
+    PerfSampler = proc (): MetricsTotals {.raises: [].}

    Clock = proc (): Time {.raises: [].}

-    ProfilerMetric = (SrcLoc, OverallMetrics)
+    ProfilerMetric = (SrcLoc, AggregateFutureMetrics)

  const locationLabels = ["proc", "file", "line"]
@@ -33,8 +35,8 @@ when defined(metrics):
  )

  declarePublicGauge(
-    chronos_run_time_total,
-    "chronos_exec_time_total of this proc plus of all its children (procs" &
+    chronos_exec_time_with_children_total,
+    "chronos_exec_time_with_children_total of this proc plus of all its children (procs" &
    "that this proc called and awaited for)",
    labels = locationLabels,
  )
@@ -59,17 +61,6 @@ when defined(metrics):
    labels = locationLabels,
  )

-  # Global Statistics
-  declarePublicGauge(
-    chronos_largest_exec_time_total,
-    "the largest chronos_exec_time_total of all procs",
-  )
-
-  declarePublicGauge(
-    chronos_largest_exec_time_max,
-    "the largest chronos_single_exec_time_max of all procs",
-  )
-
  # Keeps track of the thread initializing the module. This is the only thread
  # that will be allowed to interact with the metrics collector.
  let moduleInitThread = getThreadId()
@@ -108,41 +99,23 @@ when defined(metrics):
        $(location.line),
      ]

-      chronos_exec_time_total.set(metrics.totalExecTime.nanoseconds,
+      chronos_exec_time_total.set(metrics.execTime.nanoseconds,
        labelValues = locationLabels)

-      chronos_run_time_total.set(metrics.totalRunTime.nanoseconds,
-        labelValues = locationLabels)
-
-      chronos_wall_time_total.set(metrics.totalWallTime.nanoseconds,
-        labelValues = locationLabels)
-
-      chronos_single_exec_time_max.set(metrics.maxSingleTime.nanoseconds,
-        labelValues = locationLabels)
-
-      chronos_call_count_total.set(metrics.count, labelValues = locationLabels)
-
-  proc collectOutlierMetrics(
-    self: AsyncProfilerInfo,
-    profilerMetrics: seq[ProfilerMetric],
-    timestampMillis: int64,
-  ): void =
-    ## Adds summary metrics for the procs that have the highest exec time
-    ## (which stops the async loop) and the highest max exec time. This can
-    ## help spot outliers.
-
-    var largestExecTime = low(timer.Duration)
-    var largestMaxExecTime = low(timer.Duration)
-
-    for (_, metric) in profilerMetrics:
-      if metric.maxSingleTime > largestMaxExecTime:
-        largestMaxExecTime = metric.maxSingleTime
-      if metric.totalExecTime > largestExecTime:
-        largestExecTime = metric.totalExecTime
-
-    chronos_largest_exec_time_total.set(largestExecTime.nanoseconds)
-    chronos_largest_exec_time_max.set(largestMaxExecTime.nanoseconds)
+      chronos_exec_time_with_children_total.set(
+        metrics.execTimeWithChildren.nanoseconds,
+        labelValues = locationLabels
+      )
+
+      chronos_wall_time_total.set(metrics.wallClockTime.nanoseconds,
+        labelValues = locationLabels)
+
+      chronos_single_exec_time_max.set(metrics.execTimeMax.nanoseconds,
+        labelValues = locationLabels)
+
+      chronos_call_count_total.set(metrics.callCount.int64,
+        labelValues = locationLabels)

  proc collect*(self: AsyncProfilerInfo, force: bool = false): void =
    # Calling this method from the wrong thread has happened a lot in the past,
    # so this makes sure we're not doing anything funny.
@@ -158,24 +131,19 @@ when defined(metrics):
        perfSampler().
        pairs.
        toSeq.
-        map(
-          proc (pair: (ptr SrcLoc, OverallMetrics)): ProfilerMetric =
-            (pair[0][], pair[1])
-        ).
        # We don't scoop metrics with 0 exec time as we have a limited number of
        # prometheus slots, and those are less likely to be useful in debugging
        # Chronos performance issues.
        filter(
          proc (pair: ProfilerMetric): bool =
-            pair[1].totalExecTime.nanoseconds > 0
+            pair[1].execTimeWithChildren.nanoseconds > 0
        ).
        sorted(
          proc (a, b: ProfilerMetric): int =
-            cmp(a[1].totalExecTime, b[1].totalExecTime),
+            cmp(a[1].execTimeWithChildren, b[1].execTimeWithChildren),
          order = SortOrder.Descending
        )

-    self.collectOutlierMetrics(currentMetrics, now.toMilliseconds())
    self.collectSlowestProcs(currentMetrics, now.toMilliseconds(), self.k)

    self.lastSample = now
@@ -188,14 +156,13 @@ when defined(metrics):

  proc reset*(self: AsyncProfilerInfo): void =
    resetMetric(chronos_exec_time_total)
-    resetMetric(chronos_run_time_total)
+    resetMetric(chronos_exec_time_with_children_total)
    resetMetric(chronos_wall_time_total)
    resetMetric(chronos_call_count_total)
    resetMetric(chronos_single_exec_time_max)
-    resetMetric(chronos_largest_exec_time_total)
-    resetMetric(chronos_largest_exec_time_max)

  var asyncProfilerInfo* {.global.}: AsyncProfilerInfo
+  var wrappedEventHandler {.global.}: proc (e: Event) {.nimcall, gcsafe, raises: [].}

  proc initDefault*(AsyncProfilerInfo: typedesc, k: int) =
    assert getThreadId() == moduleInitThread, "You cannot call " &
@@ -203,11 +170,18 @@ when defined(metrics):
      "metricscolletor module."

    asyncProfilerInfo = AsyncProfilerInfo.newCollector(
-      perfSampler = proc (): MetricsSummary = profiler.getFutureSummaryMetrics(),
+      perfSampler = proc (): MetricsTotals = getMetrics().totals,
      k = k,
      # We want to collect metrics every 5 seconds.
      sampleInterval = initDuration(seconds = 5),
      clock = proc (): Time = getTime(),
    )

-    profiler.setChangeCallback(proc (): void = asyncProfilerInfo.collect())
+    wrappedEventHandler = handleFutureEvent
+    handleFutureEvent = proc (e: Event) {.nimcall, gcsafe.} =
+      {.cast(gcsafe).}:
+        wrappedEventHandler(e)
+
+        if e.newState == ExtendedFutureState.Completed:
+          asyncProfilerInfo.collect()
+
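A wiring sketch for the collector as changed above (illustrative, not part of the commit): initDefault now samples chronos' own getMetrics().totals and chains onto the profiler's handleFutureEvent hook, so a collection runs whenever a future reaches the Completed state. Assumes the chronos feat/profiling-hooks API used throughout this diff.

    import pkg/chronos/profiler
    import codex/utils/asyncprofiler/metricscollector

    when defined(metrics):
      # keep labeled series for at most the 10 slowest locations
      AsyncProfilerInfo.initDefault(k = 10)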
@@ -2,7 +2,7 @@
 import std/algorithm
 import std/json

-import asyncprofiler
+import chronos/profiler

 proc `%`*(o: Duration): JsonNode =
   %(o.nanoseconds)
@@ -10,16 +10,16 @@ proc `%`*(o: Duration): JsonNode =
 proc `%`*(o: cstring): JsonNode =
   %($(o))

-proc toJson*(o: MetricsSummary): JsonNode =
+proc toJson*(o: Table[SrcLoc, AggregateFutureMetrics]): JsonNode =
   var rows = newJArray()
   for (location, metric) in o.pairs:
     var row = %(metric)
-    row["location"] = %(location[])
+    row["location"] = %(location)
     rows.add(row)

   rows

-proc `%`*(o: MetricsSummary): JsonNode = o.toJson()
+proc `%`*(o: Table[SrcLoc, AggregateFutureMetrics]): JsonNode = o.toJson()

 proc sortBy*(jArray: JsonNode, metric: string): JsonNode {.raises: [ref KeyError].} =
   %(jArray.getElems.sorted(
@ -4,7 +4,8 @@ include "build.nims"
|
||||
import std/os
|
||||
const currentDir = currentSourcePath()[0 .. ^(len("config.nims") + 1)]
|
||||
|
||||
--d:chronosFuturesInstrumentation
|
||||
--d:chronosFutureId
|
||||
--d:chronosProfiling
|
||||
|
||||
when getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and
|
||||
# BEWARE
|
||||
|
@@ -1,31 +0,0 @@
-# import pkg/asynctest
-# import pkg/chronos
-
-# import pkg/codex/utils/asyncprofiler
-
-# type
-#   FakeFuture = object
-#     id: uint
-#     internalLocation*: array[LocationKind, ptr SrcLoc]
-
-
-# suite "asyncprofiler":
-
-#   test "should not keep metrics for a pending future in memory after it completes":
-
-#     var fakeLoc = SrcLoc(procedure: "foo", file: "foo.nim", line: 1)
-#     let future = FakeFuture(
-#       id: 1,
-#       internalLocation: [
-#         LocationKind.Create: addr fakeLoc,
-#         LocationKind.Finish: addr fakeLoc,
-#     ])
-
-#     var profiler = AsyncProfiler[FakeFuture]()
-
-#     profiler.handleFutureCreate(future)
-#     profiler.handleFutureComplete(future)
-
-#     check len(profiler.getPerFutureMetrics()) == 0
@@ -1,10 +1,10 @@
 import std/times
 import std/unittest

+import pkg/chronos/profiler
 import pkg/metrics

-import codex/utils/asyncprofiler
 import codex/utils/asyncprofiler/metricscollector
 import ../../helpers

 suite "asyncprofiler metrics collector":
@@ -18,45 +18,40 @@ suite "asyncprofiler metrics collector":
   ]

   let sample = {
-    (addr locations[0]): OverallMetrics(
-      totalExecTime: timer.nanoseconds(90062),
-      totalRunTime: timer.nanoseconds(113553),
-      totalWallTime: timer.nanoseconds(174567),
-      minSingleTime: timer.nanoseconds(80062),
-      maxSingleTime: timer.nanoseconds(80062),
-      count: 1
+    locations[0]: AggregateFutureMetrics(
+      execTime: timer.nanoseconds(90062),
+      execTimeMax: timer.nanoseconds(80062),
+      childrenExecTime: timer.nanoseconds(52044),
+      wallClockTime: timer.nanoseconds(174567),
+      callCount: 1
     ),
-    (addr locations[1]): OverallMetrics(
-      totalExecTime: timer.nanoseconds(91660),
-      totalRunTime: timer.nanoseconds(71660),
-      totalWallTime: timer.nanoseconds(72941),
-      minSingleTime: timer.nanoseconds(71660),
-      maxSingleTime: timer.nanoseconds(81660),
-      count: 1
+    locations[1]: AggregateFutureMetrics(
+      execTime: timer.nanoseconds(91660),
+      execTimeMax: timer.nanoseconds(81660),
+      childrenExecTime: timer.nanoseconds(52495),
+      wallClockTime: timer.nanoseconds(72941),
+      callCount: 1
     ),
-    (addr locations[2]): OverallMetrics(
-      totalExecTime: timer.nanoseconds(60529),
-      totalRunTime: timer.nanoseconds(60529),
-      totalWallTime: timer.nanoseconds(60784),
-      minSingleTime: timer.nanoseconds(60529),
-      maxSingleTime: timer.nanoseconds(60529),
-      count: 1
+    locations[2]: AggregateFutureMetrics(
+      execTime: timer.nanoseconds(60529),
+      execTimeMax: timer.nanoseconds(60529),
+      childrenExecTime: timer.nanoseconds(9689),
+      wallClockTime: timer.nanoseconds(60784),
+      callCount: 1
     ),
-    (addr locations[3]): OverallMetrics(
-      totalExecTime: timer.nanoseconds(60645),
-      totalRunTime: timer.nanoseconds(156214),
-      totalWallTime: timer.nanoseconds(60813),
-      minSingleTime: timer.nanoseconds(5333),
-      maxSingleTime: timer.nanoseconds(41257),
-      count: 3
+    locations[3]: AggregateFutureMetrics(
+      execTime: timer.nanoseconds(60645),
+      execTimeMax: timer.nanoseconds(41257),
+      childrenExecTime: timer.nanoseconds(72934),
+      wallClockTime: timer.nanoseconds(60813),
+      callCount: 3
     ),
-    (addr locations[4]): OverallMetrics(
-      totalExecTime: timer.nanoseconds(0),
-      totalRunTime: timer.nanoseconds(156214),
-      totalWallTime: timer.nanoseconds(60813),
-      minSingleTime: timer.nanoseconds(0),
-      maxSingleTime: timer.nanoseconds(0),
-      count: 3
+    locations[4]: AggregateFutureMetrics(
+      execTime: timer.nanoseconds(0),
+      execTimeMax: timer.nanoseconds(0),
+      childrenExecTime: timer.nanoseconds(0),
+      wallClockTime: timer.nanoseconds(60813),
+      callCount: 3
     )
   }.toTable

@@ -66,7 +61,7 @@

   proc setupCollector(k: int = high(int)): void =
     collector = AsyncProfilerInfo.newCollector(
-      perfSampler = proc (): MetricsSummary = sample,
+      perfSampler = proc (): MetricsTotals = sample,
       clock = proc (): Time = wallTime,
       sampleInterval = times.initDuration(minutes = 5),
       k = k,
@@ -75,25 +70,19 @@
     collector.reset()
     collector.collect()

-  test "should keep track of basic worst-case exec time stats":
+  test "should create labeled series for the k slowest procs in terms of execTime":
     setupCollector(k = 3)

-    check chronos_largest_exec_time_total.value == 91660
-    check chronos_largest_exec_time_max.value == 81660
-
-  test "should create labeled series for the k slowest procs in terms of totalExecTime":
-    setupCollector(k = 3)
-
-    check chronos_exec_time_total.value(
-      labelValues = @["start", "discovery.nim", "192"]) == 91660
-    check chronos_exec_time_total.value(
-      labelValues = @["start", "discovery.nim", "174"]) == 90062
-    check chronos_exec_time_total.value(
-      labelValues = @["update", "sqliteds.nim", "107"]) == 60645
+    check chronos_exec_time_with_children_total.value(
+      labelValues = @["start", "discovery.nim", "192"]) == 144155
+    check chronos_exec_time_with_children_total.value(
+      labelValues = @["start", "discovery.nim", "174"]) == 142106
+    check chronos_exec_time_with_children_total.value(
+      labelValues = @["update", "sqliteds.nim", "107"]) == 133579

     # This is out of the top-k slowest, so should not have been recorded.
     expect system.KeyError:
-      discard chronos_exec_time_total.value(
+      discard chronos_exec_time_with_children_total.value(
         labelValues = @["query", "manager.nim", "323"])

   test "should not collect metrics again unless enough time has elapsed from last collection":
@@ -3,35 +3,35 @@ import std/tables
 import std/unittest
 import std/json

-import codex/utils/asyncprofiler
+import chronos/profiler
+
+import codex/utils/asyncprofiler/serialization

 import ../../helpers

 checksuite "asyncprofiler metrics serializer":

-  var fooLoc = SrcLoc(
+  let fooLoc = SrcLoc(
     procedure: "foo",
     file: "foo.nim",
     line: 1
   )

-  let fooMetric = OverallMetrics(
-    totalExecTime: 2.seconds,
-    totalRunTime: 2.seconds,
-    totalWallTime: 2.seconds,
-    minSingleTime: 100.nanoseconds,
-    maxSingleTime: 1500.milliseconds,
-    count: 10
+  let fooMetric = AggregateFutureMetrics(
+    execTime: 2.seconds,
+    wallClockTime: 2.seconds,
+    childrenExecTime: 10.seconds,
+    execTimeMax: 1500.milliseconds,
+    callCount: 10
   )

   test "should serialize OverallMetrics":
     check %fooMetric == %*{
-      "totalExecTime": 2000000000,
-      "totalRunTime": 2000000000,
-      "totalWallTime": 2000000000,
-      "minSingleTime": 100,
-      "maxSingleTime": 1500000000,
-      "count": 10
+      "execTime": 2000000000,
+      "wallClockTime": 2000000000,
+      "childrenExecTime": 10000000000,
+      "execTimeMax": 1500000000,
+      "callCount": 10
     }

   test "should serialize SrcLoc":
@@ -41,9 +41,9 @@ checksuite "asyncprofiler metrics serializer":
       "line": 1
     }

-  test "should serialize MetricsSummary":
-    var summary: MetricsSummary = {
-      (addr fooLoc): fooMetric
+  test "should serialize metrics totals":
+    var summary: MetricsTotals = {
+      fooLoc: fooMetric
     }.toTable

     check %summary == %*[%*{
@@ -52,37 +52,35 @@ checksuite "asyncprofiler metrics serializer":
         "file": "foo.nim",
         "line": 1,
       },
-      "totalExecTime": 2000000000,
-      "totalRunTime": 2000000000,
-      "totalWallTime": 2000000000,
-      "minSingleTime": 100,
-      "maxSingleTime": 1500000000,
-      "count": 10
+      "execTime": 2000000000,
+      "wallClockTime": 2000000000,
+      "childrenExecTime": 10000000000,
+      "execTimeMax": 1500000000,
+      "callCount": 10
     }]

   test "should sort MetricsSummary by the required key":
-    var barLoc = SrcLoc(
+    let barLoc = SrcLoc(
       procedure: "bar",
       file: "bar.nim",
       line: 1
     )

-    var barMetrics = OverallMetrics(
-      totalExecTime: 3.seconds,
-      totalRunTime: 1.seconds,
-      totalWallTime: 1.seconds,
-      minSingleTime: 100.nanoseconds,
-      maxSingleTime: 1500.milliseconds,
-      count: 5
+    var barMetrics = AggregateFutureMetrics(
+      execTime: 3.seconds,
+      wallClockTime: 1.seconds,
+      execTimeMax: 1500.milliseconds,
+      childrenExecTime: 1.seconds,
+      callCount: 5
     )

-    var summary: MetricsSummary = {
-      (addr fooLoc): fooMetric,
-      (addr barLoc): barMetrics
+    var summary: Table[SrcLoc, AggregateFutureMetrics] = {
+      fooLoc: fooMetric,
+      barLoc: barMetrics
     }.toTable

-    check (%summary).sortBy("totalExecTime").getElems.map(
+    check (%summary).sortBy("execTime").getElems.map(
       proc (x: JsonNode): string = x["location"]["procedure"].getStr) == @["bar", "foo"]

-    check (%summary).sortBy("count").getElems.map(
+    check (%summary).sortBy("callCount").getElems.map(
       proc (x: JsonNode): string = x["location"]["procedure"].getStr) == @["foo", "bar"]
@@ -2,7 +2,8 @@
 --threads:on
 --tlsEmulation:off

---d:chronosFuturesInstrumentation
+--d:chronosFutureId
+--d:chronosProfiling

 when not defined(chronicles_log_level):
   --define:"chronicles_log_level:NONE" # compile all log statements
vendor/nim-chronos (vendored submodule, 2 lines changed)
@@ -1 +1 @@
-Subproject commit a492d0838efde4718c972545da0afddf6b36011b
+Subproject commit 128531cd830feb1473286654b4d975c5dc36d824