# Nimbus
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at
#     https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at
#     https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Fetch and store blocks
## ======================
##
## Worker items state diagram and sketch of sync algorithm:
## ::
##    set of unprocessed  |   peer workers   |  list of work items ready
##    block ranges        |                  |  for persistent database
##   ==================================================================
##
##        +---------------------------------------------+
##        |                                             |
##        |  +--------------------------------+         |
##        |  |                                |         |
##        V  v                                |         |
##   <unprocessed> ---+-----> <worker-0> -----+---> <staged> ---> block chain
##                    |                       |
##                    +-----> <worker-1> -----+
##                    |                       |
##                    +-----> <worker-2> -----+
##                    :                       :
##
## A work item is created from a range of block numbers extracted from the
## `<unprocessed>` set of block ranges.
##
## A work item consists of a
## * current state `<worker-#>` or `<staged>`
## * given range of consecutive block numbers `[from..to]`
## * sequence of block headers relating to `[from..to]` (to be completed)
## * sequence of block bodies relating to `[from..to]` (to be completed)
##
## Block ranges *may* be recycled back into the `<unprocessed>` set when a
## work item is destroyed. This is supposed to be an exceptional case.
## Typically, a `<staged>` work item is added to the persistent block chain
## database and destroyed without block range recycling.
##
## Beware of `<staged>` overflow
## -----------------------------
## When the `<staged>` queue gets too long in non-backtrack/re-org mode, this
## may be caused by a gap between the least `<unprocessed>` block number and
## the least `<staged>` block number. Then a mechanism is invoked where the
## `<unprocessed>` block range is updated.
##
## For backtrack/re-org the system runs in single instance mode, tracing
## backwards parent hash references. Updating the `<unprocessed>` block
## numbers would have no effect there. In that case, the record with the
## largest block numbers is deleted from the `<staged>` list.
##
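## Illustrative work item walk-through
## -----------------------------------
## As a minimal sketch (documentation only, not part of the scheduler), a
## single work item travels through the procs defined further down in this
## module roughly like the hypothetical driver below; error handling and
## peer management are omitted:
## ::
##   proc driveOneItem(buddy: FullBuddyRef) {.async.} =
##     let rc = buddy.newWorkItem()       # grab range from <unprocessed>
##     if rc.isOk:
##       let wi = rc.value
##       if (await buddy.fetchHeaders(wi)) and
##          (await buddy.fetchBodies(wi)):
##         buddy.stageItem(wi)            # move work item to <staged>
##         discard buddy.processStaged()  # persist in block number order
##       else:
##         # recycle block range, the exceptional case mentioned above
##         discard buddy.ctx.data.unprocessed.merge(wi)
##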

import
  std/[algorithm, hashes, options, random, sequtils, sets, strutils],
  chronicles,
  chronos,
  eth/[common/eth_types, p2p],
  stew/[byteutils, interval_set, sorted_set],
  "../.."/[db/db_chain, utils],
  ".."/[protocol, sync_desc],
  ./ticker

{.push raises:[Defect].}

logScope:
  topics = "full-sync"

const
  minPeersToStartSync = ##\
    ## Wait for consensus of at least this number of peers before syncing.
    2

  maxStagedWorkItems = ##\
    ## Maximal items in the `staged` list.
    70

  stagedWorkItemsTrigger = ##\
    ## Turn on the global `poolMode` if there are more than this many items
    ## staged.
    50
type
  BlockRangeSetRef = ##\
    ## Disjunct sets of block number intervals
    IntervalSetRef[BlockNumber,UInt256]

  BlockRange = ##\
    ## Block number interval
    Interval[BlockNumber,UInt256]

  WorkItemQueue = ##\
    ## Block intervals sorted by least block number
    SortedSet[BlockNumber,WorkItemRef]

  WorkItemWalkRef = ##\
    ## Fast traversal descriptor for `WorkItemQueue`
    SortedSetWalkRef[BlockNumber,WorkItemRef]

  WorkItemRef = ref object
    ## Block worker item wrapper for downloading a block range
    blocks: BlockRange            ## Block numbers to fetch
    topHash: Option[Hash256]      ## Fetch by top hash rather than blocks
    headers: seq[BlockHeader]     ## Block headers received
    hashes: seq[Hash256]          ## Hashed from `headers[]` for convenience
    bodies: seq[BlockBody]        ## Block bodies received

  BuddyData* = object
    ## Local descriptor data extension
    bestNumber: Option[BlockNumber]  ## Largest block number reported

  CtxData* = object
    ## Globally shared data extension
    backtrack: Option[Hash256]    ## Find reverse block after re-org
    unprocessed: BlockRangeSetRef ## Block ranges to fetch
    staged: WorkItemQueue         ## Blocks fetched but not stored yet
    untrusted: seq[Peer]          ## Clean up list
    trusted: HashSet[Peer]        ## Peers ready for delivery
    topPersistent: BlockNumber    ## Up to this block number stored OK
    ticker: TickerRef             ## Logger ticker

  FullBuddyRef* = ##\
    ## Extended worker peer descriptor
    BuddyRef[CtxData,BuddyData]

  FullCtxRef* = ##\
    ## Extended global descriptor
    CtxRef[CtxData]
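
# For orientation: a minimal sketch (documentation only) of the
# `stew/interval_set` semantics relied upon below, assuming `merge` returns
# the number of block numbers actually added, `reduce` the number removed,
# and `ge()` the interval with the least block number:
#
#   let ivSet = BlockRangeSetRef.init()
#   discard ivSet.merge(1.toBlockNumber, 9.toBlockNumber)  # {[1,9]}
#   discard ivSet.reduce(BlockRange.new(
#     4.toBlockNumber, 6.toBlockNumber))                   # {[1,3],[7,9]}
#   doAssert ivSet.ge().value.minPt == 1.toBlockNumber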

let
  highBlockNumber = high(BlockNumber)
  highBlockRange = BlockRange.new(highBlockNumber,highBlockNumber)

static:
  doAssert stagedWorkItemsTrigger < maxStagedWorkItems

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc hash(peer: Peer): Hash =
  ## Mixin `HashSet[Peer]` handler
  hash(cast[pointer](peer))

proc `+`(n: BlockNumber; delta: static[int]): BlockNumber =
  ## Syntactic sugar for expressions like `xxx.toBlockNumber + 1`
  n + delta.toBlockNumber

proc `-`(n: BlockNumber; delta: static[int]): BlockNumber =
  ## Syntactic sugar for expressions like `xxx.toBlockNumber - 1`
  n - delta.toBlockNumber

proc merge(ivSet: BlockRangeSetRef; wi: WorkItemRef): UInt256 =
  ## Syntactic sugar
  ivSet.merge(wi.blocks)

proc reduce(ivSet: BlockRangeSetRef; wi: WorkItemRef): UInt256 =
  ## Syntactic sugar
  ivSet.reduce(wi.blocks)

proc pp(n: BlockNumber): string =
  ## Dedicated pretty printer (`$` is defined elsewhere using `UInt256`)
  if n == highBlockNumber: "high" else: "#" & $n

proc `$`(iv: BlockRange): string =
  ## Needed for macro generated DSL files like `snap.nim` because the
  ## `distinct` flavour of `NodeTag` is discarded there.
  result = "[" & iv.minPt.pp
  if iv.minPt != iv.maxPt:
    result &= "," & iv.maxPt.pp
  result &= "]"

proc `$`(n: Option[BlockRange]): string =
  if n.isNone: "n/a" else: $n.get

proc `$`(n: Option[BlockNumber]): string =
  if n.isNone: "n/a" else: n.get.pp

proc `$`(brs: BlockRangeSetRef): string =
  "{" & toSeq(brs.increasing).mapIt($it).join(",") & "}"

# ------------------------------------------------------------------------------
# Private getters
# ------------------------------------------------------------------------------

proc nextUnprocessed(desc: var CtxData): Option[BlockNumber] =
  ## Pseudo getter
  let rc = desc.unprocessed.ge()
  if rc.isOK:
    result = some(rc.value.minPt)

proc nextStaged(desc: var CtxData): Option[BlockRange] =
  ## Pseudo getter
  let rc = desc.staged.ge(low(BlockNumber))
  if rc.isOK:
    result = some(rc.value.data.blocks)

# ------------------------------------------------------------------------------
# Private functions affecting all shared data
# ------------------------------------------------------------------------------

proc globalReset(ctx: FullCtxRef; backBlocks = maxHeadersFetch): bool =
  ## Globally flush `pending` and `staged` items, update the `unprocessed`
  ## ranges, and set `unprocessed` back before the best block number.
  var topPersistent: BlockNumber
  try:
    let
      bestNumber = ctx.chain.getBestBlockHeader.blockNumber
      nBackBlocks = backBlocks.toBlockNumber
    # Initialise before best block number
    topPersistent =
      if nBackBlocks < bestNumber: bestNumber - nBackBlocks
      else: 0.toBlockNumber
  except CatchableError as e:
    error "Best block header problem", backBlocks, error=($e.name), msg=e.msg
    return false

  ctx.data.unprocessed.clear()
  ctx.data.staged.clear()
  ctx.data.trusted.clear()
  ctx.data.topPersistent = topPersistent
  discard ctx.data.unprocessed.merge(topPersistent + 1, highBlockNumber)

  true

proc tickerUpdater(ctx: FullCtxRef): TickerStatsUpdater =
  result = proc: TickerStats =
    let
      stagedRange = ctx.data.nextStaged
      nextStaged = if stagedRange.isSome: some(stagedRange.get.minPt)
                   else: none(BlockNumber)
    TickerStats(
      topPersistent:   ctx.data.topPersistent,
      nextStaged:      nextStaged,
      nextUnprocessed: ctx.data.nextUnprocessed,
      nStagedQueue:    ctx.data.staged.len,
      reOrg:           ctx.data.backtrack.isSome)

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

template safeTransport(
    buddy: FullBuddyRef;
    info: static[string];
    code: untyped) =
  try:
    code
  except TransportError as e:
    error info & ", stop", error=($e.name), msg=e.msg
    buddy.ctrl.stopped = true
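
# Typical use of `safeTransport()` (see e.g. `getBestNumber()` below): wrap
# a network call so that a `TransportError` stops this worker peer instead
# of propagating, e.g.
#
#   buddy.safeTransport("Error fetching block header"):
#     hdrResp = await peer.getBlockHeaders(hdrReq)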

proc getRandomTrustedPeer(buddy: FullBuddyRef): Result[Peer,void] =
  ## Return a random entry from the `trusted` peer set, different from this
  ## peer, if there are enough entries.
  ##
  ## Ackn: nim-eth/eth/p2p/blockchain_sync.nim: `randomTrustedPeer()`
  let
    ctx = buddy.ctx
    nPeers = ctx.data.trusted.len
    offInx = if buddy.peer in ctx.data.trusted: 2 else: 1
  if 0 < nPeers:
    var (walkInx, stopInx) = (0, rand(nPeers - offInx))
    for p in ctx.data.trusted:
      if p == buddy.peer:
        continue
      if walkInx == stopInx:
        return ok(p)
      walkInx.inc
  err()

proc newWorkItem(buddy: FullBuddyRef): Result[WorkItemRef,void] =
  ## Fetch the next unprocessed block range and register it as a work item.
  ##
  ## This function will grab a block range from the `unprocessed` range set,
  ## remove it, and return it as a `WorkItemRef`. The returned range is
  ## registered in the `pending` list.
  let
    ctx = buddy.ctx
    peer = buddy.peer
    rc = ctx.data.unprocessed.ge()
  if rc.isErr:
    return err() # no more data for this peer

  # Check whether there is something to do at all
  if buddy.data.bestNumber.isNone or
     buddy.data.bestNumber.get < rc.value.minPt:
    return err() # no more data for this peer

  # Compute interval
  let iv = BlockRange.new(
    rc.value.minPt,
    min(rc.value.maxPt,
        min(rc.value.minPt + maxHeadersFetch - 1,
            buddy.data.bestNumber.get)))

  discard ctx.data.unprocessed.reduce(iv)
  return ok(WorkItemRef(blocks: iv))

proc recycleStaged(buddy: FullBuddyRef) =
  ## Flush the list of staged items and store the block ranges
  ## back into the `unprocessed` ranges set
  ##
  # using fast traversal
  let
    ctx = buddy.ctx
    walk = WorkItemWalkRef.init(ctx.data.staged)
  var
    rc = walk.first()
  while rc.isOk:
    # Store back into `unprocessed` ranges set
    discard ctx.data.unprocessed.merge(rc.value.data)
    rc = walk.next()
  # optional clean up, see comments on the destroy() directive
  walk.destroy()
  ctx.data.staged.clear()

# ------------------------------------------------------------------------------
# Private `Future` helpers
# ------------------------------------------------------------------------------

proc getBestNumber(
    buddy: FullBuddyRef
      ): Future[Result[BlockNumber,void]] {.async.} =
  ## Get the best block number from the best block hash.
  ##
  ## Ackn: nim-eth/eth/p2p/blockchain_sync.nim: `getBestBlockNumber()`
  let
    peer = buddy.peer
    startHash = peer.state(eth).bestBlockHash
    reqLen = 1u
    hdrReq = BlocksRequest(
      startBlock: HashOrNum(
        isHash: true,
        hash:   startHash),
      maxResults: reqLen,
      skip:       0,
      reverse:    true)

  trace trEthSendSendingGetBlockHeaders, peer,
    startBlock=startHash.data.toHex, reqLen

  var hdrResp: Option[blockHeadersObj]
  buddy.safeTransport("Error fetching block header"):
    hdrResp = await peer.getBlockHeaders(hdrReq)
  if buddy.ctrl.stopped:
    return err()

  if hdrResp.isNone:
    trace trEthRecvReceivedBlockHeaders, peer, reqLen, response="n/a"
    return err()

  let hdrRespLen = hdrResp.get.headers.len
  if hdrRespLen == 1:
    let blockNumber = hdrResp.get.headers[0].blockNumber
    trace trEthRecvReceivedBlockHeaders, peer, hdrRespLen, blockNumber
    return ok(blockNumber)

  trace trEthRecvReceivedBlockHeaders, peer, reqLen, hdrRespLen
  return err()

proc agreesOnChain(buddy: FullBuddyRef; other: Peer): Future[bool] {.async.} =
  ## Returns `true` if one of the peers `buddy.peer` or `other` acknowledges
  ## the existence of the best block of the other peer.
  ##
  ## Ackn: nim-eth/eth/p2p/blockchain_sync.nim: `peersAgreeOnChain()`
  var
    peer = buddy.peer
    start = peer
    fetch = other
  # Make sure that `fetch` does not have the smaller difficulty.
  if fetch.state(eth).bestDifficulty < start.state(eth).bestDifficulty:
    swap(fetch, start)

  let
    startHash = start.state(eth).bestBlockHash
    hdrReq = BlocksRequest(
      startBlock: HashOrNum(
        isHash: true,
        hash:   startHash),
      maxResults: 1,
      skip:       0,
      reverse:    true)

  trace trEthSendSendingGetBlockHeaders, peer, start, fetch,
    startBlock=startHash.data.toHex, hdrReqLen=1

  var hdrResp: Option[blockHeadersObj]
  buddy.safeTransport("Error fetching block header"):
    hdrResp = await fetch.getBlockHeaders(hdrReq)
  if buddy.ctrl.stopped:
    return false

  if hdrResp.isSome:
    let hdrRespLen = hdrResp.get.headers.len
    if 0 < hdrRespLen:
      let blockNumber = hdrResp.get.headers[0].blockNumber
      trace trEthRecvReceivedBlockHeaders, peer, start, fetch,
        hdrRespLen, blockNumber
      return true

  trace trEthRecvReceivedBlockHeaders, peer, start, fetch,
    blockNumber="n/a"

# ------------------------------------------------------------------------------
# Private functions, worker sub-tasks
# ------------------------------------------------------------------------------

proc initialiseWorker(buddy: FullBuddyRef): Future[bool] {.async.} =
  ## Initialise the worker. This function must be run in single mode at the
  ## beginning of running a worker peer.
  ##
  ## Ackn: nim-eth/eth/p2p/blockchain_sync.nim: `startSyncWithPeer()`
  ##
  let
    ctx = buddy.ctx
    peer = buddy.peer

  # Delayed clean up batch list
  if 0 < ctx.data.untrusted.len:
    trace "Removing untrusted peers", peer, count=ctx.data.untrusted.len
    for p in ctx.data.untrusted:
      ctx.data.trusted.excl p
    ctx.data.untrusted.setLen(0)

  if buddy.data.bestNumber.isNone:
    let rc = await buddy.getBestNumber()
    # Beware of peer terminating the session right after communicating
    if rc.isErr or buddy.ctrl.stopped:
      return false
    if rc.value <= ctx.data.topPersistent:
      buddy.ctrl.zombie = true
      trace "Useless peer, best number too low", peer,
        topPersistent=ctx.data.topPersistent, bestNumber=rc.value
    buddy.data.bestNumber = some(rc.value)

  if minPeersToStartSync <= ctx.data.trusted.len:
    # We have enough trusted peers. Validate the new peer against trusted
    let rc = buddy.getRandomTrustedPeer()
    if rc.isOK:
      if await buddy.agreesOnChain(rc.value):
        ctx.data.trusted.incl peer
        return true

  # If there are no trusted peers yet, assume this very peer is trusted,
  # but do not finish initialisation until there are more peers.
  elif ctx.data.trusted.len == 0:
    trace "Assume initial trusted peer", peer
    ctx.data.trusted.incl peer

  elif ctx.data.trusted.len == 1 and buddy.peer in ctx.data.trusted:
    # Ignore degenerate case, note that `trusted.len < minPeersToStartSync`
    discard

  else:
    # At this point we have some "trusted" candidates, but they are not
    # "trusted" enough. We evaluate `peer` against all other candidates. If
    # one of the candidates disagrees, we swap it for `peer`. If all
    # candidates agree, we add `peer` to the trusted set. The peers in the
    # set will become "fully trusted" (and sync will start) when the set is
    # big enough.
    var
      agreeScore = 0
      otherPeer: Peer
    for p in ctx.data.trusted:
      if peer == p:
        inc agreeScore
      elif await buddy.agreesOnChain(p):
        inc agreeScore
      elif buddy.ctrl.stopped:
        # Beware of peer terminating the session
        return false
      else:
        otherPeer = p

    # Check for the number of peers that disagree
    case ctx.data.trusted.len - agreeScore
    of 0:
      trace "Peer trusted by score", peer,
        trusted=ctx.data.trusted.len
      ctx.data.trusted.incl peer # best possible outcome
    of 1:
      trace "Other peer no longer trusted", peer,
        otherPeer, trusted=ctx.data.trusted.len
      ctx.data.trusted.excl otherPeer
      ctx.data.trusted.incl peer
    else:
      trace "Peer not trusted", peer,
        trusted=ctx.data.trusted.len
      discard

  if minPeersToStartSync <= ctx.data.trusted.len:
    return true

proc fetchHeaders(
    buddy: FullBuddyRef;
    wi: WorkItemRef
      ): Future[bool] {.async.} =
  ## Get the work item with the least interval and complete it. The function
  ## returns `true` if headers were fetched and there were no
  ## inconsistencies.
  let
    ctx = buddy.ctx
    peer = buddy.peer

  if 0 < wi.hashes.len:
    return true

  var hdrReq: BlocksRequest
  if wi.topHash.isNone:
    hdrReq = BlocksRequest(
      startBlock: HashOrNum(
        isHash: false,
        number: wi.blocks.minPt),
      maxResults: wi.blocks.len.truncate(uint),
      skip:       0,
      reverse:    false)
    trace trEthSendSendingGetBlockHeaders, peer,
      blocks=($wi.blocks)

  else:
    hdrReq = BlocksRequest(
      startBlock: HashOrNum(
        isHash: true,
        hash:   wi.topHash.get),
      maxResults: maxHeadersFetch,
      skip:       0,
      reverse:    true)
    trace trEthSendSendingGetBlockHeaders & " reverse", peer,
      topHash=hdrReq.startBlock.hash, reqLen=hdrReq.maxResults

  # Fetch headers from peer
  var hdrResp: Option[blockHeadersObj]
  block:
    let reqLen = hdrReq.maxResults
    buddy.safeTransport("Error fetching block headers"):
      hdrResp = await peer.getBlockHeaders(hdrReq)
    # Beware of peer terminating the session
    if buddy.ctrl.stopped:
      return false

    if hdrResp.isNone:
      trace trEthRecvReceivedBlockHeaders, peer, reqLen, response="n/a"
      return false

    let hdrRespLen = hdrResp.get.headers.len
    trace trEthRecvReceivedBlockHeaders, peer, reqLen, hdrRespLen

    if hdrRespLen == 0:
      buddy.ctrl.stopped = true
      return false

  # Update block range for reverse search
  if wi.topHash.isSome:
    # Headers are in reversed order
    wi.headers = hdrResp.get.headers.reversed
    wi.blocks = BlockRange.new(
      wi.headers[0].blockNumber, wi.headers[^1].blockNumber)
    discard ctx.data.unprocessed.reduce(wi)
    trace "Updated reverse header range", peer, range=($wi.blocks)

  # Verify start block number
  elif hdrResp.get.headers[0].blockNumber != wi.blocks.minPt:
    trace "Header range starts with wrong block number", peer,
      startBlock=hdrResp.get.headers[0].blockNumber,
      requestedBlock=wi.blocks.minPt
    buddy.ctrl.zombie = true
    return false

  # Import into `wi.headers`
  else:
    wi.headers.shallowCopy(hdrResp.get.headers)

  # Calculate block header hashes and verify them against parent links. If
  # necessary, cut off some offending block headers tail.
  wi.hashes.setLen(wi.headers.len)
  wi.hashes[0] = wi.headers[0].hash
  for n in 1 ..< wi.headers.len:
    if wi.headers[n-1].blockNumber + 1 != wi.headers[n].blockNumber:
      trace "Non-consecutive block numbers in header list response", peer
      buddy.ctrl.zombie = true
      return false
    if wi.hashes[n-1] != wi.headers[n].parentHash:
      # Oops, cul-de-sac after block chain re-org?
      trace "Dangling parent link in header list response. Re-org?", peer
      wi.headers.setLen(n)
      wi.hashes.setLen(n)
      break
    wi.hashes[n] = wi.headers[n].hash

  # Adjust range length if necessary
  if wi.headers[^1].blockNumber < wi.blocks.maxPt:
    let redRng = BlockRange.new(
      wi.headers[0].blockNumber, wi.headers[^1].blockNumber)
    trace "Adjusting block range", peer, range=($wi.blocks), reduced=($redRng)
    discard ctx.data.unprocessed.merge(redRng.maxPt + 1, wi.blocks.maxPt)
    wi.blocks = redRng

  return true

proc fetchBodies(buddy: FullBuddyRef; wi: WorkItemRef): Future[bool] {.async.} =
  ## Get the work item with the least interval and complete it. The function
  ## returns `true` if bodies were fetched and there were no inconsistencies.
  let peer = buddy.peer

  # Complete group of bodies
  buddy.safeTransport("Error fetching block bodies"):
    while wi.bodies.len < wi.hashes.len:
      let
        start = wi.bodies.len
        reqLen = min(wi.hashes.len - wi.bodies.len, maxBodiesFetch)
        top = start + reqLen
        hashes = wi.hashes[start ..< top]

      trace trEthSendSendingGetBlockBodies, peer, reqLen

      # Append bodies from peer to `wi.bodies`
      block:
        let bdyResp = await peer.getBlockBodies(hashes)
        # Beware of peer terminating the session
        if buddy.ctrl.stopped:
          return false

        if bdyResp.isNone:
          trace trEthRecvReceivedBlockBodies, peer, reqLen, response="n/a"
          buddy.ctrl.zombie = true
          return false

        let bdyRespLen = bdyResp.get.blocks.len
        trace trEthRecvReceivedBlockBodies, peer, reqLen, bdyRespLen

        if bdyRespLen == 0 or reqLen < bdyRespLen:
          buddy.ctrl.zombie = true
          return false

        wi.bodies.add bdyResp.get.blocks

  return true

proc stageItem(buddy: FullBuddyRef; wi: WorkItemRef) =
  ## Add the work item to the list of staged items
  let
    ctx = buddy.ctx
    peer = buddy.peer
    rc = ctx.data.staged.insert(wi.blocks.minPt)
  if rc.isOk:
    rc.value.data = wi

    # Turn on pool mode if there are too many staged work items queued.
    # This must only be done when the added work item is not backtracking.
    if stagedWorkItemsTrigger < ctx.data.staged.len and
       ctx.data.backtrack.isNone and
       wi.topHash.isNone:
      buddy.ctx.poolMode = true

    # The list size is limited. So cut if necessary and recycle back the
    # block range of the discarded item (tough luck if the current work item
    # is the one removed from the top.)
    while maxStagedWorkItems < ctx.data.staged.len:
      let topValue = ctx.data.staged.le(highBlockNumber).value
      discard ctx.data.unprocessed.merge(topValue.data)
      discard ctx.data.staged.delete(topValue.key)
    return

  # Oops, duplicates should not exist (but anyway ...)
  let wj = block:
    let rc = ctx.data.staged.eq(wi.blocks.minPt)
    doAssert rc.isOk
    # Store `wi` and return the offending entry
    let rcData = rc.value.data
    rc.value.data = wi
    rcData

  debug "Replacing dup item in staged list", peer,
    range=($wi.blocks), discarded=($wj.blocks)
  # Update `staged` list and `unprocessed` ranges
  block:
    let rc = wi.blocks - wj.blocks
    if rc.isOk:
      discard ctx.data.unprocessed.merge(rc.value)

proc processStaged(buddy: FullBuddyRef): bool =
  ## Fetch a work item from the `staged` queue and process it to be
  ## stored on the persistent block chain.
  let
    ctx = buddy.ctx
    peer = buddy.peer
    chainDb = buddy.ctx.chain
    rc = ctx.data.staged.ge(low(BlockNumber))

  if rc.isErr:
    # No more items in the database
    return false

  let
    wi = rc.value.data
    topPersistent = ctx.data.topPersistent
    startNumber = wi.headers[0].blockNumber
    stagedRecords = ctx.data.staged.len

  # Check whether this record of blocks can be stored, at all
  if topPersistent + 1 < startNumber:
    trace "Staged work item postponed", peer, topPersistent,
      range=($wi.blocks), stagedRecords
    return false

  # Ok, store into the block chain database
  trace "Processing staged work item", peer,
    topPersistent, range=($wi.blocks)

  # remove from staged DB
  discard ctx.data.staged.delete(wi.blocks.minPt)

  try:
    if chainDb.persistBlocks(wi.headers, wi.bodies) == ValidationResult.OK:
      ctx.data.topPersistent = wi.blocks.maxPt
      return true
  except CatchableError as e:
    error "Storing persistent blocks failed", peer, range=($wi.blocks),
      error = $e.name, msg = e.msg
  except Defect as e:
    # Pass through
    raise e
  except Exception as e:
    # Notorious case where the `Chain` reference applied to
    # `persistBlocks()` makes the compiler trace a possible `Exception`
    # (i.e. `ctx.chain` could be uninitialised.)
    error "Exception while storing persistent blocks", peer,
      range=($wi.blocks), error=($e.name), msg=e.msg
    raise (ref Defect)(msg: $e.name & ": " & e.msg)

  # Something went wrong. Recycle work item (needs to be re-fetched, anyway)
  let
    parentHash = wi.headers[0].parentHash
    parentHoN = HashOrNum(isHash: true, hash: parentHash)
  try:
    # Check whether the hash of the first block is consistent
    var parent: BlockHeader
    if chainDb.getBlockHeader(parentHoN, parent):
      # The first block parent is ok, so there might be other problems.
      # Re-fetch the blocks from another peer.
      trace "Storing persistent blocks failed", peer,
        range=($wi.blocks)
      discard ctx.data.unprocessed.merge(wi.blocks)
      buddy.ctrl.zombie = true
      return false
  except CatchableError as e:
    error "Failed to access parent blocks", peer,
      blockNumber=wi.headers[0].blockNumber.pp, error=($e.name), msg=e.msg

  # Parent block header problem, so we might be in the middle of a re-org.
  # Set single mode backtrack following the offending parent hash.
  ctx.data.backtrack = some(parentHash)
  buddy.ctrl.multiOk = false

  if wi.topHash.isNone:
    # Assuming that currently staged entries are on the wrong branch
    buddy.recycleStaged()
    notice "Starting chain re-org backtrack work item", peer,
      range=($wi.blocks)
  else:
    # Leave that block range in the staged list
    trace "Resuming chain re-org backtrack work item", peer,
      range=($wi.blocks)
    discard

  return false

# ------------------------------------------------------------------------------
# Public start/stop and admin functions
# ------------------------------------------------------------------------------

proc setup*(ctx: FullCtxRef; tickerOK: bool): bool =
  ## Global set up
  ctx.data.unprocessed = BlockRangeSetRef.init()
  ctx.data.staged.init()
  if tickerOK:
    ctx.data.ticker = TickerRef.init(ctx.tickerUpdater)
  else:
    debug "Ticker is disabled"
  return ctx.globalReset(0)

proc release*(ctx: FullCtxRef) =
  ## Global clean up
  if not ctx.data.ticker.isNil:
    ctx.data.ticker.stop()

proc start*(buddy: FullBuddyRef): bool =
  ## Initialise worker peer
  let ctx = buddy.ctx
  if buddy.peer.supports(protocol.eth) and
     buddy.peer.state(protocol.eth).initialized:
    if not ctx.data.ticker.isNil:
      ctx.data.ticker.startBuddy()
    return true

proc stop*(buddy: FullBuddyRef) =
  ## Clean up this peer
  let ctx = buddy.ctx
  buddy.ctrl.stopped = true
  ctx.data.untrusted.add buddy.peer
  if not ctx.data.ticker.isNil:
    ctx.data.ticker.stopBuddy()

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc runSingle*(buddy: FullBuddyRef) {.async.} =
  ## This peer worker is invoked if the peer-local flag `buddy.ctrl.multiOk`
  ## is set `false` which is the default mode. This flag is updated by the
  ## worker when deemed appropriate.
  ## * For all workers, there can be only one `runSingle()` function active
  ##   simultaneously for all worker peers.
  ## * There will be no `runMulti()` function active for the same worker peer
  ##   simultaneously.
  ## * There will be no `runPool()` iterator active simultaneously.
  ##
  ## Note that this function runs in `async` mode.
  ##
  let
    ctx = buddy.ctx
    peer = buddy.peer

  if ctx.data.backtrack.isSome:
    trace "Single run mode, re-org backtracking", peer
    let wi = WorkItemRef(
      # This dummy interval can safely be merged back without any effect
      blocks: highBlockRange,
      # Enable backtrack
      topHash: some(ctx.data.backtrack.get))

    # Fetch headers and bodies for the current work item
    if await buddy.fetchHeaders(wi):
      if await buddy.fetchBodies(wi):
        ctx.data.backtrack = none(Hash256)
        buddy.stageItem(wi)

        # Update persistent database (may reset `multiOk`)
        buddy.ctrl.multiOk = true
        while buddy.processStaged() and not buddy.ctrl.stopped:
          # Allow thread switch as `persistBlocks()` might be slow
          await sleepAsync(10.milliseconds)
        return

    # This work item failed, nothing to do anymore.
    discard ctx.data.unprocessed.merge(wi)
    buddy.ctrl.zombie = true

  else:
    if buddy.data.bestNumber.isNone:
      # Only log for the first time, or so
      trace "Single run mode, initialisation", peer,
        trusted=ctx.data.trusted.len
      discard

    # Initialise/re-initialise this worker
    if await buddy.initialiseWorker():
      buddy.ctrl.multiOk = true
    elif not buddy.ctrl.stopped:
      await sleepAsync(2.seconds)

proc runPool*(buddy: FullBuddyRef; last: bool) =
  ## Once started, the function `runPool()` is called for all worker peers in
  ## a row (as the body of an iteration.) There will be no other worker peer
  ## functions activated simultaneously.
  ##
  ## This procedure is started if the global flag `buddy.ctx.poolMode` is set
  ## `true` (default is `false`.) It is the responsibility of the `runPool()`
  ## instance to reset the flag `buddy.ctx.poolMode`, typically at the first
  ## peer instance.
  ##
  ## The argument `last` is set `true` if the last entry is reached.
  ##
  ## Note that this function does not run in `async` mode.
  ##
  let ctx = buddy.ctx
  if ctx.poolMode:
    # Mind the gap, fill in if necessary; see the worked example below
    let
      topPersistent = ctx.data.topPersistent
      covered = min(
        ctx.data.nextUnprocessed.get(highBlockNumber),
        ctx.data.nextStaged.get(highBlockRange).minPt)
    if topPersistent + 1 < covered:
      discard ctx.data.unprocessed.merge(topPersistent + 1, covered - 1)
    ctx.poolMode = false
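
# Worked example for the gap logic above (illustrative numbers only): with
# `topPersistent == #100`, `nextUnprocessed == #150`, and no staged item
# below #150, `covered` becomes #150 and the range [#101,#149] is merged
# back into `unprocessed` so that the gap gets fetched again.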

proc runMulti*(buddy: FullBuddyRef) {.async.} =
  ## This peer worker is invoked if the `buddy.ctrl.multiOk` flag is set
  ## `true` which is typically done after finishing `runSingle()`. This
  ## instance can be simultaneously active for all peer workers.
  ##
  # Fetch work item
  let
    ctx = buddy.ctx
    rc = buddy.newWorkItem()
  if rc.isErr:
    # No way, end of capacity for this peer => re-calibrate
    buddy.ctrl.multiOk = false
    buddy.data.bestNumber = none(BlockNumber)
    return
  let wi = rc.value

  # Fetch headers and bodies for the current work item
  if await buddy.fetchHeaders(wi):
    if await buddy.fetchBodies(wi):
      buddy.stageItem(wi)

      # Update persistent database
      while buddy.processStaged() and not buddy.ctrl.stopped:
        # Allow thread switch as `persistBlocks()` might be slow
        await sleepAsync(10.milliseconds)
      return

  # This work item failed
  discard ctx.data.unprocessed.merge(wi)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------