2019-03-28 14:03:19 +00:00
|
|
|
import
|
2019-06-10 11:13:53 +00:00
|
|
|
options, random,
|
2019-03-28 14:03:19 +00:00
|
|
|
chronos, chronicles,
|
|
|
|
spec/datatypes,
|
2019-06-10 11:13:53 +00:00
|
|
|
eth2_network, beacon_node_types, sync_protocol,
|
|
|
|
eth/async_utils
|
2019-03-28 14:03:19 +00:00
|
|
|
|
2019-11-25 14:36:25 +00:00
|
|
|
type
  ## Thin wrapper holding the network handle used to issue
  ## ancestor-block requests to peers.
  RequestManager* = object
    network*: Eth2Node  # handle used to select peers and send requests
|
|
|
|
|
2019-05-22 07:13:15 +00:00
|
|
|
proc init*(T: type RequestManager, network: Eth2Node): T =
  ## Construct a `RequestManager` bound to the given network handle.
  result = T(network: network)
|
|
|
|
|
|
|
|
type
  ## Callback invoked once per block received from a peer in response
  ## to an ancestor-block request. Must be gcsafe since it is called
  ## from async request procs.
  FetchAncestorsResponseHandler = proc (b: BeaconBlock) {.gcsafe.}
|
|
|
|
|
2019-09-09 03:21:59 +00:00
|
|
|
proc fetchAncestorBlocksFromPeer(
    peer: Peer,
    rec: FetchRecord,
    responseHandler: FetchAncestorsResponseHandler) {.async.} =
  ## Ask one peer for the block identified by `rec.root` and forward every
  ## block in the response to `responseHandler`. Network failures are
  ## logged at debug level and otherwise swallowed (best-effort fetch).
  # TODO: It's not clear if this function follows the intention of the
  # FetchRecord data type. Perhaps it is supposed to get a range of blocks
  # instead. In order to do this, we'll need the slot number of the known
  # block to be stored in the FetchRecord, so we can ask for a range of
  # blocks starting N positions before this slot number.
  try:
    let fetched = await peer.beaconBlocksByRoot([rec.root])
    if fetched.isSome:
      for blk in fetched.get:
        responseHandler(blk)
  except CatchableError as err:
    debug "Error while fetching ancestor blocks",
          err = err.msg, root = rec.root, peer
|
2019-06-10 11:13:53 +00:00
|
|
|
|
2019-03-28 14:03:19 +00:00
|
|
|
proc fetchAncestorBlocks*(requestManager: RequestManager,
                          roots: seq[FetchRecord],
                          responseHandler: FetchAncestorsResponseHandler) =
  ## Fire off ancestor-block requests to up to `ParallelRequests` randomly
  ## chosen peers. Each peer is asked for one record sampled at random from
  ## `roots`; responses are delivered asynchronously via `responseHandler`.
  ## Does nothing when `roots` is empty.
  # TODO: we could have some fancier logic here:
  #
  # * Keeps track of what was requested
  #   (this would give a little bit of time for the asked peer to respond)
  #
  # * Keep track of the average latency of each peer
  #   (we can give priority to peers with better latency)
  #
  if roots.len == 0:
    # Guard: `roots.sample()` below raises on an empty sequence, and there
    # is nothing to request anyway.
    return

  const ParallelRequests = 2

  var fetchComplete = false  # NOTE(review): appears unused in this proc — confirm before removing

  for peer in requestManager.network.randomPeers(ParallelRequests, BeaconSync):
    traceAsyncErrors peer.fetchAncestorBlocksFromPeer(roots.sample(), responseHandler)
|