# Nimbus
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Sync worker peers scheduler template
## ====================================
##
## Virtual method/interface functions to be provided as `mixin`:
##
## *runSetup(ctx: CtxRef[S]): bool*
##   Global set up. This function will be called before any worker peer is
##   started. If that function returns `false`, no worker peers will be run.
##
##   Also, this function should decide whether the `runDaemon()` job will be
##   started next by controlling the `ctx.daemon` flag (default is `false`.)
##
## *runRelease(ctx: CtxRef[S])*
##   Global clean up, done with all the worker peers.
##
## *runDaemon(ctx: CtxRef[S]) {.async.}*
##   Global background job that will be re-started as long as the variable
##   `ctx.daemon` is set `true`. If that job was stopped due to re-setting
##   `ctx.daemon` to `false`, it will only be restarted after `ctx.daemon`
##   has been set `true` again and there was some activity on the
##   `runPool()`, `runSingle()`, or `runMulti()` functions.
##
##
## *runStart(buddy: BuddyRef[S,W]): bool*
##   Initialise a new worker peer.
##
## *runStop(buddy: BuddyRef[S,W])*
##   Clean up this worker peer.
##
##
## *runPool(buddy: BuddyRef[S,W], last: bool; laps: int): bool*
##   Once started, the function `runPool()` is called for all worker peers in
##   sequence as the body of an iteration as long as the function returns
##   `false`. There will be no other worker peer functions activated
##   simultaneously.
##
##   This procedure is started if the global flag `buddy.ctx.poolMode` is set
##   `true` (default is `false`.) It will be automatically reset before the
##   loop starts. Re-setting it again results in repeating the loop. The
##   argument `laps` (starting with `0`) indicates the current lap of the
##   repeated loops. To avoid continuous looping, the number of `laps` is
##   limited (see `execPoolModeLoopMax`, below.)
##
##   The argument `last` is set `true` if the last entry of the current loop
##   has been reached.
##
##   Note:
##   + This function does *not* run in `async` mode.
##   + The flag `buddy.ctx.poolMode` has priority over the flag
##     `buddy.ctrl.multiOk` which controls `runSingle()` and `runMulti()`.
##
##
## *runSingle(buddy: BuddyRef[S,W]) {.async.}*
##   This worker peer method is invoked if the peer-local flag
##   `buddy.ctrl.multiOk` is set `false` which is the default mode. This flag
##   is updated by the worker peer when deemed appropriate.
##   + For all worker peers, there can be only one `runSingle()` function
##     active simultaneously.
##   + There will be no `runMulti()` function active for the very same worker
##     peer that runs the `runSingle()` function.
##   + There will be no `runPool()` iterator active.
##
##   Note that this function runs in `async` mode.
##
##
## *runMulti(buddy: BuddyRef[S,W]) {.async.}*
##   This worker peer method is invoked if the `buddy.ctrl.multiOk` flag is
##   set `true` which is typically done after finishing `runSingle()`. This
##   instance can be simultaneously active for all worker peers.
##
##   Note that this function runs in `async` mode.
##
##
## Additional import files needed when using this template:
## * eth/[common, p2p]
## * chronicles
## * chronos
## * stew/[interval_set, sorted_set]
## * "."/[sync_desc, sync_sched, protocol]
##
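## A minimal mixin sketch for orientation (the names `MyCtxData`, `MyBuddyData`
## and `MySyncRef` are placeholders assumed by this example, not definitions
## made in this module):
##
##   type
##     MyCtxData = object                               # shared state `S`
##     MyBuddyData = object                             # per-peer state `W`
##     MySyncRef = RunnerSyncRef[MyCtxData,MyBuddyData]
##
##   proc runSetup(ctx: CtxRef[MyCtxData]): bool = true
##   proc runRelease(ctx: CtxRef[MyCtxData]) = discard
##   proc runDaemon(ctx: CtxRef[MyCtxData]) {.async.} = discard
##   proc runStart(buddy: BuddyRef[MyCtxData,MyBuddyData]): bool = true
##   proc runStop(buddy: BuddyRef[MyCtxData,MyBuddyData]) = discard
##   proc runPool(buddy: BuddyRef[MyCtxData,MyBuddyData];
##                last: bool; laps: int): bool = true
##   proc runSingle(buddy: BuddyRef[MyCtxData,MyBuddyData]) {.async.} =
##     buddy.ctrl.multiOk = true                        # hand over to multi mode
##   proc runMulti(buddy: BuddyRef[MyCtxData,MyBuddyData]) {.async.} = discard
##
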
{.push raises: [].}

import
  std/hashes,
  chronos,
  eth/[keys, p2p, p2p/peer_pool],
  stew/keyed_queue,
  "."/[handlers, sync_desc]

static:
  # type `EthWireRef` is needed in `initSync()`
  type silenceUnusedhandlerComplaint {.used.} = EthWireRef # dummy directive

type
  ActiveBuddies[S,W] = ##\
    ## List of active workers, using the peer's `ENode` rather than `Peer` as key
    KeyedQueue[ENode,RunnerBuddyRef[S,W]]

  RunnerSyncRef*[S,W] = ref object
    ## Module descriptor
    ctx*: CtxRef[S]             ## Shared data
    pool: PeerPool              ## For starting the system
    buddies: ActiveBuddies[S,W] ## LRU cache with worker descriptors
    daemonRunning: bool         ## Run global background job
    singleRunLock: bool         ## Some single mode runner is activated
    monitorLock: bool           ## Monitor mode is activated
    activeMulti: int            ## Number of activated runners in multi-mode
    shutdown: bool              ## Internal shut down flag

  RunnerBuddyRef[S,W] = ref object
    ## Per worker peer descriptor
    dsc: RunnerSyncRef[S,W]     ## Scheduler descriptor
    worker: BuddyRef[S,W]       ## Worker peer data
    zombified: Moment           ## When it became undead (if any)

const
  zombieTimeToLinger = 20.seconds
    ## Maximum time a zombie is kept in the database.

  execLoopTimeElapsedMin = 50.milliseconds
    ## Minimum elapsed time an exec loop needs for a single lap. If a lap
    ## finishes faster, an asynchronous sleep is added in order to avoid
    ## CPU overload.

  execLoopTaskSwitcher = 1.nanoseconds
    ## Asynchronous waiting time at the end of an exec loop unless some sleep
    ## time was added as described by `execLoopTimeElapsedMin`, above.

  execLoopPollingTime = 50.milliseconds
    ## Single asynchronous time interval wait state for event polling

  execPoolModeLoopMax = 100
    ## Avoids continuous looping

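# Worked example of the suspend calculation used in the loops below (derived
# from the constants above, not additional logic): a lap that took 20ms is
# followed by `sleepAsync(30ms)` to fill up `execLoopTimeElapsedMin`, whereas
# a lap that already took 60ms merely yields for `execLoopTaskSwitcher` (1ns)
# so that other async tasks get a chance to run.
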
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc hash*(key: ENode): Hash =
  ## Mixin, needed for `buddies` table key comparison. Needs to be a public
  ## function technically although it should be seen logically as a private
  ## one.
  var h: Hash = 0
  h = h !& hashes.hash(key.pubkey.toRaw)
  h = h !& hashes.hash(key.address)
  !$h

proc key(peer: Peer): ENode =
  ## Map a peer to the key used by the `buddies` table methods below.
  peer.remote.node

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc daemonLoop[S,W](dsc: RunnerSyncRef[S,W]) {.async.} =
  mixin runDaemon

  if dsc.ctx.daemon and not dsc.shutdown:
    dsc.daemonRunning = true

    # Continue until stopped
    while true:
      # Enforce minimum time spent on this loop
      let startMoment = Moment.now()

      await dsc.ctx.runDaemon()

      if not dsc.ctx.daemon:
        break

      # Enforce minimum time spent on this loop so we never reach 100% cpu load
      # caused by some empty sub-tasks which are out of this scheduler control.
      let
        elapsed = Moment.now() - startMoment
        suspend = if execLoopTimeElapsedMin <= elapsed: execLoopTaskSwitcher
                  else: execLoopTimeElapsedMin - elapsed
      await sleepAsync suspend
      # End while

  dsc.daemonRunning = false

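# Reading aid for the worker loop below (summary only, no extra logic):
# `ctx.poolMode` makes one buddy grab `monitorLock` and run `runPool()`
# exclusively over all buddies; otherwise each buddy either joins the shared
# `runMulti()` phase (counted via `activeMulti`) or grabs `singleRunLock`
# for an exclusive `runSingle()` run.
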
proc workerLoop[S,W](buddy: RunnerBuddyRef[S,W]) {.async.} =
  mixin runMulti, runSingle, runPool, runStop
  let
    dsc = buddy.dsc
    ctx = dsc.ctx
    worker = buddy.worker
    peer = worker.peer

  # Continue until stopped
  block taskExecLoop:
    while worker.ctrl.running and not dsc.shutdown:
      # Enforce minimum time spent on this loop
      let startMoment = Moment.now()

      if dsc.monitorLock:
        discard # suspend some time at the end of loop body

      # Invoke `runPool()` over all buddies if requested
      elif ctx.poolMode:
        # Grab `monitorLock` (was `false` as checked above) and wait until
        # clear to run as the only activated instance.
        dsc.monitorLock = true
        while 0 < dsc.activeMulti or dsc.singleRunLock:
          await sleepAsync execLoopPollingTime
          if worker.ctrl.stopped:
            dsc.monitorLock = false
            break taskExecLoop

        var count = 0
        while count < execPoolModeLoopMax:
          ctx.poolMode = false
          # Pool mode: a round stops when `runPool()` returns `true`; the
          # last invocation of a round is called with the `last=true` argument
          var delayed = BuddyRef[S,W](nil)
          for w in dsc.buddies.nextValues:
            # Execute previous (aka delayed) item (unless first)
            if delayed.isNil or not delayed.runPool(last=false, laps=count):
              delayed = w.worker
            else:
              delayed = nil # not executing any final item
              break # `true` => stop
          if not delayed.isNil:
            discard delayed.runPool(last=true, laps=count) # final item
          if not ctx.poolMode:
            break
          count.inc
        dsc.monitorLock = false

      else:
        # Rotate connection table so the most used entry is at the top/right
        # end. So zombies will end up leftish.
        discard dsc.buddies.lruFetch peer.key

        # Multi mode
        if worker.ctrl.multiOk:
          if not dsc.singleRunLock:
            dsc.activeMulti.inc
            # Continue doing something, work a bit
            await worker.runMulti()
            dsc.activeMulti.dec

        elif dsc.singleRunLock:
          # Some other process is running single mode
          discard # suspend some time at the end of loop body

        else:
          # Start single instance mode by grabbing `singleRunLock` (was
          # `false` as checked above).
          dsc.singleRunLock = true
          await worker.runSingle()
          dsc.singleRunLock = false

      # Dispatch daemon service if needed
      if not dsc.daemonRunning and dsc.ctx.daemon:
        asyncSpawn dsc.daemonLoop()

      # Check for termination
      if worker.ctrl.stopped:
        break taskExecLoop

      # Enforce minimum time spent on this loop so we never reach 100% cpu load
      # caused by some empty sub-tasks which are out of this scheduler control.
      let
        elapsed = Moment.now() - startMoment
        suspend = if execLoopTimeElapsedMin <= elapsed: execLoopTaskSwitcher
                  else: execLoopTimeElapsedMin - elapsed
      await sleepAsync suspend
      # End while

  # Note that `runStart()` was dispatched in `onPeerConnected()`
  worker.ctrl.stopped = true
  worker.runStop()

proc onPeerConnected[S,W](dsc: RunnerSyncRef[S,W]; peer: Peer) =
  mixin runStart, runStop
  # Check for known entry (which should not exist.)
  let
    maxWorkers {.used.} = dsc.ctx.buddiesMax
    nPeers {.used.} = dsc.pool.len
    zombie = dsc.buddies.eq peer.key
  if zombie.isOk:
    let
      now = Moment.now()
      ttz = zombie.value.zombified + zombieTimeToLinger
    if now < ttz:
      # Zombie entry is still lingering, do not reconnect yet
      trace "Reconnecting zombie peer ignored", peer,
        nPeers, nWorkers=dsc.buddies.len, maxWorkers, canRequeue=(ttz-now)
      return
    # Zombie can be removed from the database
    dsc.buddies.del peer.key
    trace "Zombie peer timeout, ready for requeuing", peer,
      nPeers, nWorkers=dsc.buddies.len, maxWorkers

  # Initialise worker for this peer
  let buddy = RunnerBuddyRef[S,W](
    dsc:    dsc,
    worker: BuddyRef[S,W](
      ctx:  dsc.ctx,
      ctrl: BuddyCtrlRef(),
      peer: peer))
  if not buddy.worker.runStart():
    trace "Ignoring useless peer", peer, nPeers,
      nWorkers=dsc.buddies.len, maxWorkers
    buddy.worker.ctrl.zombie = true
    return

  # Check for table overflow. An overflow might happen if there are zombies
  # in the table (though preventing them from re-connecting for a while.)
  if dsc.ctx.buddiesMax <= dsc.buddies.len:
    let
      leastVal = dsc.buddies.shift.value # unqueue first/least item
      oldest = leastVal.data.worker
    if oldest.isNil:
      trace "Dequeuing zombie peer",
        # Fake `Peer` pretty print for `oldest`
        oldest=("Node[" & $leastVal.key.address & "]"),
        since=leastVal.data.zombified, nPeers, nWorkers=dsc.buddies.len,
        maxWorkers
      discard
    else:
      # This could happen if there are idle entries in the table, i.e.
      # somehow hanging runners.
      trace "Peer table full! Dequeuing least used entry", oldest,
        nPeers, nWorkers=dsc.buddies.len, maxWorkers
      oldest.ctrl.zombie = true
      oldest.runStop()

  # Add peer entry
  discard dsc.buddies.lruAppend(peer.key, buddy, dsc.ctx.buddiesMax)

  trace "Running peer worker", peer, nPeers,
    nWorkers=dsc.buddies.len, maxWorkers

  asyncSpawn buddy.workerLoop()

proc onPeerDisconnected[S,W](dsc: RunnerSyncRef[S,W], peer: Peer) =
  let
    nPeers = dsc.pool.len
    maxWorkers = dsc.ctx.buddiesMax
    nWorkers = dsc.buddies.len
    rc = dsc.buddies.eq peer.key
  if rc.isErr:
    debug "Disconnected, unregistered peer", peer, nPeers, nWorkers, maxWorkers
    discard
  elif rc.value.worker.isNil:
    # Re-visiting zombie
    trace "Ignore zombie", peer, nPeers, nWorkers, maxWorkers
    discard
  elif rc.value.worker.ctrl.zombie:
    # Don't disconnect, rather let the entry fall out of the LRU cache. The
    # effect is that reconnecting might be blocked for a while. For cases with
    # few peers, the start of zombification is registered so that a zombie can
    # eventually be let die and buried.
    rc.value.worker = nil
    rc.value.dsc = nil
    rc.value.zombified = Moment.now()
    trace "Disconnected, zombie", peer, nPeers, nWorkers, maxWorkers
  else:
    rc.value.worker.ctrl.stopped = true # in case it is hanging somewhere
    dsc.buddies.del peer.key
    trace "Disconnected buddy", peer, nPeers,
      nWorkers=dsc.buddies.len, maxWorkers

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

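# Typical wiring from a sync implementation module, as a sketch only (the
# names `MySyncRef`, `node` and `chain` are placeholders, see the mixin
# sketch in the module comment above):
#
#   let dsc = MySyncRef()
#   dsc.initSync(node, chain, slots = 10)
#   if not dsc.startSync():
#     debug "Sync scheduler refused to start"
#   ...
#   dsc.stopSync()
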
proc initSync*[S,W](
    dsc: RunnerSyncRef[S,W];
    node: EthereumNode;
    chain: ChainRef;
    slots: int;
    exCtrlFile = none(string);
      ) =
  ## Constructor
  # Leave one extra slot so that it can hold a *zombie* even if all slots
  # are full. The effect is that a re-connect on the latest zombie will be
  # rejected as long as its worker descriptor is registered.
  dsc.ctx = CtxRef[S](
    ethWireCtx: cast[EthWireRef](node.protocolState protocol.eth),
    buddiesMax: max(1, slots + 1),
    exCtrlFile: exCtrlFile,
    chain:      chain)
  dsc.pool = node.peerPool
  dsc.buddies.init(dsc.ctx.buddiesMax)

proc startSync*[S,W](dsc: RunnerSyncRef[S,W]): bool =
  ## Set up `PeerObserver` handlers and start syncing.
  mixin runSetup
  # Initialise sub-systems
  if dsc.ctx.runSetup():
    var po = PeerObserver(
      onPeerConnected:
        proc(p: Peer) {.gcsafe.} =
          dsc.onPeerConnected(p),
      onPeerDisconnected:
        proc(p: Peer) {.gcsafe.} =
          dsc.onPeerDisconnected(p))

    po.setProtocol eth
    dsc.pool.addObserver(dsc, po)
    if dsc.ctx.daemon:
      asyncSpawn dsc.daemonLoop()
    return true

proc stopSync*[S,W](dsc: RunnerSyncRef[S,W]) =
  ## Stop syncing and free peer handlers.
  mixin runRelease
  dsc.pool.delObserver(dsc)

  # Gracefully shut down async services
  dsc.shutdown = true
  for buddy in dsc.buddies.nextValues:
    buddy.worker.ctrl.stopped = true
  dsc.ctx.daemon = false

  # Final shutdown (note that some workers might still linger on)
  dsc.ctx.runRelease()

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------