import options, sequtils, strutils
import chronos, chronicles
import ../spec/[datatypes, digest],
       ../networking/eth2_network,
       ../beacon_node_types,
       ../ssz/merkleization,
       ../gossip_processing/gossip_to_consensus,
       ./sync_protocol, ./sync_manager

export sync_manager

logScope:
  topics = "requman"

const
  SYNC_MAX_REQUESTED_BLOCKS* = 32 # Spec allows up to MAX_REQUEST_BLOCKS.
    ## Maximum number of blocks which will be requested in each
    ## `beaconBlocksByRoot` invocation.
  PARALLEL_REQUESTS* = 2
    ## Number of peers we use to resolve our request.

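# Sizing note (illustrative): with the defaults above, one tick of
# `requestManagerLoop` drains at most SYNC_MAX_REQUESTED_BLOCKS (32) roots
# from the queue, and every one of the PARALLEL_REQUESTS (2) workers asks a
# different peer for the same batch - so each root may be downloaded twice.
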
type
  RequestManager* = object
    network*: Eth2Node
    inpQueue*: AsyncQueue[FetchRecord]
    verifQueues: ref VerifQueueManager
    loopFuture: Future[void]

func shortLog*(x: seq[Eth2Digest]): string =
  "[" & x.mapIt(shortLog(it)).join(", ") & "]"

func shortLog*(x: seq[FetchRecord]): string =
  "[" & x.mapIt(shortLog(it.root)).join(", ") & "]"

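# Example (hypothetical digests): these helpers render an abbreviated form of
# each root, e.g. `shortLog(@[digestA, digestB])` might yield something like
# "[6a0f12cd, 99b3aa01]", keeping log lines compact.
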
proc init*(T: type RequestManager, network: Eth2Node,
           verifQueues: ref VerifQueueManager): RequestManager =
  RequestManager(
    network: network,
    inpQueue: newAsyncQueue[FetchRecord](),
    verifQueues: verifQueues
  )

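# Usage sketch (assumes `node.network` and a shared `verifQueues` have been
# constructed elsewhere in the beacon node):
#
#   var rman = RequestManager.init(node.network, verifQueues)
#   rman.start()
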
proc checkResponse(roots: openArray[Eth2Digest],
                   blocks: openArray[SignedBeaconBlock]): bool =
  ## Checks that the peer's response is no longer than the request and that
  ## every returned block matches a distinct requested root.
  var checks = @roots
  if len(blocks) > len(roots):
    return false
  for blk in blocks:
    let res = checks.find(blk.root)
    if res == -1:
      return false
    else:
      checks.del(res)
  return true

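# Behavioural sketch (`rootA`/`blkA` etc. are hypothetical values):
#
#   checkResponse([rootA, rootB], [blkB, blkA])  # true  - any order is fine
#   checkResponse([rootA, rootB], [blkA])        # true  - subsets are fine
#   checkResponse([rootA], [blkA, blkA])         # false - more than requested
#   checkResponse([rootA], [blkC])               # false - unrequested block
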
proc validate(rman: RequestManager,
              b: SignedBeaconBlock): Future[Result[void, BlockError]] {.async.} =
  ## Forward a downloaded block to the verification queue and wait for the
  ## consensus processing stage to accept or reject it.
  let sblock = SyncBlock(
    blk: b,
    resfut: newFuture[Result[void, BlockError]]("request.manager.validate")
  )
  rman.verifQueues[].addBlock(sblock)
  return await sblock.resfut

proc fetchAncestorBlocksFromNetwork(rman: RequestManager,
                                    items: seq[Eth2Digest]) {.async.} =
  var peer: Peer
  try:
    peer = await rman.network.peerPool.acquire()
    debug "Requesting blocks by root", peer = peer, blocks = shortLog(items),
                                       peer_score = peer.getScore()

    let blocks = await peer.beaconBlocksByRoot(BlockRootsList items)
    if blocks.isOk:
      let ublocks = blocks.get()
      if checkResponse(items, ublocks):
        var res: Result[void, BlockError]
        if len(ublocks) > 0:
          for b in ublocks:
            res = await rman.validate(b)
            # We are ignoring some errors:
            # `BlockError.MissingParent` - because the order of the blocks
            # that we requested may be different from the order in which we
            # need these blocks to apply.
            # `BlockError.Old`, `BlockError.Duplicate` and
            # `BlockError.Unviable` - because these errors can occur due to
            # the concurrent/parallel requests we have made.
            if res.isErr() and (res.error == BlockError.Invalid):
              # We stop processing blocks further to avoid a DoS attack with
              # a big chunk of incorrect blocks.
              break
        else:
          res = Result[void, BlockError].ok()

        if res.isOk():
          if len(ublocks) > 0:
            # We reward the peer only if it returned something.
            peer.updateScore(PeerScoreGoodBlocks)
        else:
          # We are not penalizing other errors, for the reasons described
          # above.
          if res.error == BlockError.Invalid:
            peer.updateScore(PeerScoreBadBlocks)
      else:
        peer.updateScore(PeerScoreBadResponse)
    else:
      peer.updateScore(PeerScoreNoBlocks)

  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    if not(isNil(peer)):
      # `peer` stays nil if the failure happened while acquiring it from the
      # pool, in which case there is nothing to penalize.
      peer.updateScore(PeerScoreNoBlocks)
      debug "Error while fetching ancestor blocks", exc = exc.msg,
            items = shortLog(items), peer = peer, peer_score = peer.getScore()
    else:
      debug "Error while fetching ancestor blocks", exc = exc.msg,
            items = shortLog(items)
    raise exc
  finally:
    if not(isNil(peer)):
      rman.network.peerPool.release(peer)

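# Peer-scoring summary for one `beaconBlocksByRoot` exchange, as implemented
# above:
#   non-empty, well-formed response, no `Invalid` block -> PeerScoreGoodBlocks
#   response containing an `Invalid` block              -> PeerScoreBadBlocks
#   response not matching the requested roots           -> PeerScoreBadResponse
#   error response or transport failure                 -> PeerScoreNoBlocks
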
proc requestManagerLoop(rman: RequestManager) {.async.} =
  var rootList = newSeq[Eth2Digest]()
  var workers = newSeq[Future[void]](PARALLEL_REQUESTS)
  while true:
    try:
      rootList.setLen(0)
      let req = await rman.inpQueue.popFirst()
      rootList.add(req.root)

      # Drain whatever else is already queued, up to the per-request limit.
      var count = min(SYNC_MAX_REQUESTED_BLOCKS - 1, len(rman.inpQueue))
      while count > 0:
        rootList.add(rman.inpQueue.popFirstNoWait().root)
        dec(count)

      let start = SyncMoment.now(Slot(0))

      # Every worker requests the same batch from a different peer, trading
      # bandwidth for resilience against unresponsive peers.
      for i in 0 ..< PARALLEL_REQUESTS:
        workers[i] = rman.fetchAncestorBlocksFromNetwork(rootList)

      # We do not care about the individual worker results here - per-worker
      # failures are tallied below.
      await allFutures(workers)

      let finish = SyncMoment.now(Slot(0) + uint64(len(rootList)))

      var succeed = 0
      for worker in workers:
        if worker.finished() and not(worker.failed()):
          inc(succeed)

      debug "Request manager tick", blocks_count = len(rootList),
                                    succeed = succeed,
                                    failed = (len(workers) - succeed),
                                    queue_size = len(rman.inpQueue),
                                    sync_speed = speed(start, finish)

    except CatchableError as exc:
      debug "Got a problem in request manager", exc = exc.msg

proc start*(rman: var RequestManager) =
  ## Start the Request Manager's loop.
  rman.loopFuture = rman.requestManagerLoop()

proc stop*(rman: RequestManager) =
  ## Stop the Request Manager's loop.
  if not(isNil(rman.loopFuture)):
    rman.loopFuture.cancel()

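# Lifecycle sketch (hypothetical `rman`):
#
#   rman.start()   # spawn the loop once networking is up
#   ...            # block processing enqueues missing roots while running
#   rman.stop()    # cancel the loop on shutdown
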
proc fetchAncestorBlocks*(rman: RequestManager, roots: seq[FetchRecord]) =
  ## Enqueue the missing block roots ``roots`` for download by the
  ## Request Manager ``rman``.
  for item in roots:
    rman.inpQueue.addLastNoWait(item)