# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import std/[sequtils, strutils]
import chronos, chronicles
import
  ../spec/datatypes/[phase0, deneb],
  ../spec/[forks, network, eip7594_helpers],
  ../networking/eth2_network,
  ../consensus_object_pools/block_quarantine,
  ../consensus_object_pools/blob_quarantine,
  ../consensus_object_pools/data_column_quarantine,
  "."/sync_protocol, "."/sync_manager,
  ../gossip_processing/block_processor

from ../beacon_clock import GetBeaconTimeFn
export block_quarantine, sync_manager

logScope:
  topics = "requman"

const
  SYNC_MAX_REQUESTED_BLOCKS* = 32 # Spec allows up to MAX_REQUEST_BLOCKS.
    ## Maximum number of blocks which will be requested in each
    ## `beaconBlocksByRoot` invocation.
  PARALLEL_REQUESTS* = 2
    ## Number of peers we use to resolve each request.

  BLOB_GOSSIP_WAIT_TIME_NS* = 2 * 1_000_000_000
    ## How long to wait for blobs to arrive over gossip before fetching.

  DATA_COLUMN_GOSSIP_WAIT_TIME_NS* = 8 * 1_000_000_000
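    ## How long to wait for data columns to arrive over gossip before fetching.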

  POLL_INTERVAL = 1.seconds

type
  BlockVerifierFn* = proc(
    signedBlock: ForkedSignedBeaconBlock,
    maybeFinalized: bool
  ): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).}

  BlockLoaderFn* = proc(
    blockRoot: Eth2Digest
  ): Opt[ForkedTrustedSignedBeaconBlock] {.gcsafe, raises: [].}

  BlobLoaderFn* = proc(
    blobId: BlobIdentifier): Opt[ref BlobSidecar] {.gcsafe, raises: [].}

  DataColumnLoaderFn* = proc(
    columnId: DataColumnIdentifier
  ): Opt[ref DataColumnSidecar] {.gcsafe, raises: [].}

  InhibitFn* = proc: bool {.gcsafe, raises: [].}

  RequestManager* = object
    network*: Eth2Node
    getBeaconTime: GetBeaconTimeFn
    inhibit: InhibitFn
    quarantine: ref Quarantine
    blobQuarantine: ref BlobQuarantine
    dataColumnQuarantine: ref DataColumnQuarantine
    blockVerifier: BlockVerifierFn
    blockLoader: BlockLoaderFn
    blobLoader: BlobLoaderFn
    dataColumnLoader: DataColumnLoaderFn
    blockLoopFuture: Future[void].Raising([CancelledError])
    blobLoopFuture: Future[void].Raising([CancelledError])
    dataColumnLoopFuture: Future[void].Raising([CancelledError])

func shortLog*(x: seq[Eth2Digest]): string =
  "[" & x.mapIt(shortLog(it)).join(", ") & "]"

func shortLog*(x: seq[FetchRecord]): string =
  "[" & x.mapIt(shortLog(it.root)).join(", ") & "]"

proc init*(T: type RequestManager, network: Eth2Node,
           denebEpoch: Epoch,
           getBeaconTime: GetBeaconTimeFn,
           inhibit: InhibitFn,
           quarantine: ref Quarantine,
           blobQuarantine: ref BlobQuarantine,
           dataColumnQuarantine: ref DataColumnQuarantine,
           blockVerifier: BlockVerifierFn,
           blockLoader: BlockLoaderFn = nil,
           blobLoader: BlobLoaderFn = nil,
           dataColumnLoader: DataColumnLoaderFn = nil): RequestManager =
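  ## Construct a RequestManager wired up to the given network, quarantines,
  ## verifier callback and optional storage loaders.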
  RequestManager(
    network: network,
    getBeaconTime: getBeaconTime,
    inhibit: inhibit,
    quarantine: quarantine,
    blobQuarantine: blobQuarantine,
    dataColumnQuarantine: dataColumnQuarantine,
    blockVerifier: blockVerifier,
    blockLoader: blockLoader,
    blobLoader: blobLoader,
    dataColumnLoader: dataColumnLoader)

proc checkResponse(roots: openArray[Eth2Digest],
                   blocks: openArray[ref ForkedSignedBeaconBlock]): bool =
  ## Check that the peer's response contains only blocks that were requested
  ## and that no requested root is answered more than once.
  var checks = @roots
  if len(blocks) > len(roots):
    return false
  for blk in blocks:
    let res = checks.find(blk[].root)
    if res == -1:
      return false
    else:
      checks.del(res)
  true

proc checkResponse(idList: seq[BlobIdentifier],
                   blobs: openArray[ref BlobSidecar]): bool =
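  ## Check that every blob sidecar in the peer's response was actually
  ## requested and that its inclusion proof verifies.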
  if len(blobs) > len(idList):
    return false
  for blob in blobs:
    let block_root = hash_tree_root(blob.signed_block_header.message)
    var found = false
    for id in idList:
      if id.block_root == block_root and id.index == blob.index:
        found = true
        break
    if not found:
      return false
    blob[].verify_blob_sidecar_inclusion_proof().isOkOr:
      return false
  true

proc checkResponse(colIdList: seq[DataColumnIdentifier],
                   columns: openArray[ref DataColumnSidecar]): bool =
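  ## Check that every data column sidecar in the peer's response was actually
  ## requested and that its inclusion proof verifies.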
  if len(columns) > len(colIdList):
    return false
  for column in columns:
    let block_root = hash_tree_root(column.signed_block_header.message)
    var found = false
    for id in colIdList:
      if id.block_root == block_root and id.index == column.index:
        found = true
        break
    if not found:
      return false
    column[].verify_data_column_sidecar_inclusion_proof().isOkOr:
      return false
  true

proc requestBlocksByRoot(rman: RequestManager, items: seq[Eth2Digest])
    {.async: (raises: [CancelledError]).} =
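  ## Request the given block roots from a single peer taken from the peer
  ## pool, feed each returned block to the block verifier and update the
  ## peer's score based on the quality of its response.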
  var peer: Peer
  try:
    peer = await rman.network.peerPool.acquire()
    debug "Requesting blocks by root", peer = peer, blocks = shortLog(items),
                                       peer_score = peer.getScore()

    let blocks = (await beaconBlocksByRoot_v2(peer, BlockRootsList items))

    if blocks.isOk:
      let ublocks = blocks.get()
      if checkResponse(items, ublocks.asSeq()):
        var
          gotGoodBlock = false
          gotUnviableBlock = false

        for b in ublocks:
          let ver = await rman.blockVerifier(b[], false)
          if ver.isErr():
            case ver.error()
            of VerifierError.MissingParent:
              # Ignoring because the order of the blocks that
              # we requested may be different from the order in which we need
              # these blocks to apply.
              discard
            of VerifierError.Duplicate:
              # Ignoring because these errors could occur due to the
              # concurrent/parallel requests we made.
              discard
            of VerifierError.UnviableFork:
              # If they're working a different fork, we'll want to descore them
              # but also process the other blocks (in case we can register the
              # other blocks as unviable)
              gotUnviableBlock = true
            of VerifierError.Invalid:
              # We stop processing blocks because the peer is either sending us
              # junk or working a different fork
              notice "Received invalid block",
                peer = peer, blocks = shortLog(items),
                peer_score = peer.getScore()
              peer.updateScore(PeerScoreBadValues)

              return # Stop processing this junk...
          else:
            gotGoodBlock = true

        if gotUnviableBlock:
          notice "Received blocks from an unviable fork",
            peer = peer, blocks = shortLog(items),
            peer_score = peer.getScore()
          peer.updateScore(PeerScoreUnviableFork)
        elif gotGoodBlock:
          debug "Request manager got good block",
            peer = peer, blocks = shortLog(items), ublocks = len(ublocks)

          # We reward the peer only if it returns something.
          peer.updateScore(PeerScoreGoodValues)

      else:
        debug "Mismatching response to blocks by root",
          peer = peer, blocks = shortLog(items), ublocks = len(ublocks)
        peer.updateScore(PeerScoreBadResponse)
    else:
      debug "Blocks by root request failed",
        peer = peer, blocks = shortLog(items), err = blocks.error()
      peer.updateScore(PeerScoreNoValues)

  finally:
    if not(isNil(peer)):
      rman.network.peerPool.release(peer)

proc fetchBlobsFromNetwork(self: RequestManager,
                           idList: seq[BlobIdentifier])
                           {.async: (raises: [CancelledError]).} =
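  ## Request the given blob sidecars from a single peer, add responses that
  ## pass `checkResponse` to the blob quarantine and re-run block verification
  ## for quarantined blocks whose blobs have become available.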
  var peer: Peer

  try:
    peer = await self.network.peerPool.acquire()
    debug "Requesting blobs by root", peer = peer, blobs = shortLog(idList),
                                      peer_score = peer.getScore()

    let blobs = await blobSidecarsByRoot(peer, BlobIdentifierList idList)

    if blobs.isOk:
      let ublobs = blobs.get()
      if not checkResponse(idList, ublobs.asSeq()):
        debug "Mismatched response to blobs by root",
          peer = peer, blobs = shortLog(idList), ublobs = len(ublobs)
        peer.updateScore(PeerScoreBadResponse)
        return

      for b in ublobs:
        self.blobQuarantine[].put(b)
      var curRoot: Eth2Digest
      for b in ublobs:
        let block_root = hash_tree_root(b.signed_block_header.message)
        if block_root != curRoot:
          curRoot = block_root
          if (let o = self.quarantine[].popBlobless(curRoot); o.isSome):
            let b = o.unsafeGet()
            discard await self.blockVerifier(b, false)
            # TODO:
            # If appropriate, return a VerifierError.InvalidBlob from
            # verification, check for it here, and penalize the peer accordingly
    else:
      debug "Blobs by root request failed",
        peer = peer, blobs = shortLog(idList), err = blobs.error()
      peer.updateScore(PeerScoreNoValues)

  finally:
    if not(isNil(peer)):
      self.network.peerPool.release(peer)

proc fetchDataColumnsFromNetwork(rman: RequestManager,
                                 colIdList: seq[DataColumnIdentifier])
                                 {.async: (raises: [CancelledError]).} =
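  ## Request the given data column sidecars from a single peer, add responses
  ## that pass `checkResponse` to the data column quarantine and re-run block
  ## verification for quarantined blocks whose columns have become available.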
  var peer: Peer

  try:
    peer = await rman.network.peerPool.acquire()
    debug "Requesting data columns by root", peer = peer, columns = shortLog(colIdList),
                                             peer_score = peer.getScore()

    let columns = await dataColumnSidecarsByRoot(peer, DataColumnIdentifierList colIdList)

    if columns.isOk:
      let ucolumns = columns.get()
      if not checkResponse(colIdList, ucolumns.asSeq()):
        debug "Mismatched response to data columns by root",
          peer = peer, columns = shortLog(colIdList), ucolumns = len(ucolumns)
        # peer.updateScore(PeerScoreBadResponse)
        return

      for col in ucolumns:
        rman.dataColumnQuarantine[].put(col)
      var curRoot: Eth2Digest
      for col in ucolumns:
        let block_root = hash_tree_root(col.signed_block_header.message)
        if block_root != curRoot:
          curRoot = block_root
          if (let o = rman.quarantine[].popColumnless(curRoot); o.isSome):
            let col = o.unsafeGet()
            discard await rman.blockVerifier(col, false)
    else:
      debug "Data columns by root request failed",
        peer = peer, columns = shortLog(colIdList), err = columns.error()
      # peer.updateScore(PeerScoreNoValues)

  finally:
    if not(isNil(peer)):
      rman.network.peerPool.release(peer)

proc requestManagerBlockLoop(
    rman: RequestManager) {.async: (raises: [CancelledError]).} =
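  ## Poll the quarantine for blocks with missing parents, try to resolve them
  ## from local storage via `blockLoader` when available, and request the rest
  ## from the network in parallel batches.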
  while true:
    # TODO This polling could be replaced with an AsyncEvent that is fired
    # from the quarantine when there's work to do
    await sleepAsync(POLL_INTERVAL)

    if rman.inhibit():
      continue

    let missingBlockRoots =
      rman.quarantine[].checkMissing(SYNC_MAX_REQUESTED_BLOCKS).mapIt(it.root)
    if missingBlockRoots.len == 0:
      continue

    # TODO This logic can be removed if the database schema is extended
    # to store non-canonical heads on top of the canonical head!
    # If that is done, the database no longer contains extra blocks
    # that have not yet been assigned a `BlockRef`
    var blockRoots: seq[Eth2Digest]
    if rman.blockLoader == nil:
      blockRoots = missingBlockRoots
    else:
      var verifiers:
        seq[Future[Result[void, VerifierError]].Raising([CancelledError])]
      for blockRoot in missingBlockRoots:
        let blck = rman.blockLoader(blockRoot).valueOr:
          blockRoots.add blockRoot
          continue
        debug "Loaded orphaned block from storage", blockRoot
        verifiers.add rman.blockVerifier(
          blck.asSigned(), maybeFinalized = false)
      try:
        await allFutures(verifiers)
      except CancelledError as exc:
        var futs = newSeqOfCap[Future[void].Raising([])](verifiers.len)
        for verifier in verifiers:
          futs.add verifier.cancelAndWait()
        await noCancel allFutures(futs)
        raise exc

    if blockRoots.len == 0:
      continue

    debug "Requesting detected missing blocks", blocks = shortLog(blockRoots)
    let start = SyncMoment.now(0)

    var workers:
      array[PARALLEL_REQUESTS, Future[void].Raising([CancelledError])]

    for i in 0 ..< PARALLEL_REQUESTS:
      workers[i] = rman.requestBlocksByRoot(blockRoots)

    await allFutures(workers)

    let finish = SyncMoment.now(uint64(len(blockRoots)))

    debug "Request manager block tick", blocks = shortLog(blockRoots),
                                        sync_speed = speed(start, finish)

proc getMissingBlobs(rman: RequestManager): seq[BlobIdentifier] =
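  ## Scan the quarantine for blocks that are still waiting on blobs and return
  ## the blob identifiers that should be fetched, skipping blocks from the
  ## current slot that are still within the gossip wait window.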
  let
    wallTime = rman.getBeaconTime()
    wallSlot = wallTime.slotOrZero()
    delay = wallTime - wallSlot.start_beacon_time()
    waitDur = TimeDiff(nanoseconds: BLOB_GOSSIP_WAIT_TIME_NS)

  var
    fetches: seq[BlobIdentifier]
    ready: seq[Eth2Digest]
  for blobless in rman.quarantine[].peekBlobless():
    withBlck(blobless):
      when consensusFork >= ConsensusFork.Deneb:
        # give blobs a chance to arrive over gossip
        if forkyBlck.message.slot == wallSlot and delay < waitDur:
          debug "Not handling missing blobs early in slot"
          continue

        if not rman.blobQuarantine[].hasBlobs(forkyBlck):
          let missing = rman.blobQuarantine[].blobFetchRecord(forkyBlck)
          if len(missing.indices) == 0:
            warn "quarantine missing blobs, but missing indices are empty",
              blk = blobless.root,
              commitments = len(forkyBlck.message.body.blob_kzg_commitments)
          for idx in missing.indices:
            let id = BlobIdentifier(block_root: blobless.root, index: idx)
            if id notin fetches:
              fetches.add(id)
        else:
          # this is a programming error, should it occur.
          warn "missing blob handler found blobless block with all blobs",
            blk = blobless.root,
            commitments = len(forkyBlck.message.body.blob_kzg_commitments)
          ready.add(blobless.root)

  for root in ready:
    let blobless = rman.quarantine[].popBlobless(root).valueOr:
      continue
    discard rman.blockVerifier(blobless, false)
  fetches

proc requestManagerBlobLoop(
    rman: RequestManager) {.async: (raises: [CancelledError]).} =
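  ## Poll for blocks that are missing blobs, try to resolve the blobs from
  ## local storage via `blobLoader` when available, and request the rest from
  ## the network in parallel batches.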
  while true:
    # TODO This polling could be replaced with an AsyncEvent that is fired
    # from the quarantine when there's work to do
    await sleepAsync(POLL_INTERVAL)
    if rman.inhibit():
      continue

    let missingBlobIds = rman.getMissingBlobs()
    if missingBlobIds.len == 0:
      continue

    # TODO This logic can be removed if the database schema is extended
    # to store non-canonical heads on top of the canonical head!
    # If that is done, the database no longer contains extra blocks
    # that have not yet been assigned a `BlockRef`
    var blobIds: seq[BlobIdentifier]
    if rman.blobLoader == nil:
      blobIds = missingBlobIds
    else:
      var
        blockRoots: seq[Eth2Digest]
        curRoot: Eth2Digest
      for blobId in missingBlobIds:
        if blobId.block_root != curRoot:
          curRoot = blobId.block_root
          blockRoots.add curRoot
        let blob_sidecar = rman.blobLoader(blobId).valueOr:
          blobIds.add blobId
          if blockRoots.len > 0 and blockRoots[^1] == curRoot:
            # A blob is missing, remove from list of fully available blocks
            discard blockRoots.pop()
          continue
        debug "Loaded orphaned blob from storage", blobId
        rman.blobQuarantine[].put(blob_sidecar)
      var verifiers = newSeqOfCap[
        Future[Result[void, VerifierError]]
        .Raising([CancelledError])](blockRoots.len)
      for blockRoot in blockRoots:
        let blck = rman.quarantine[].popBlobless(blockRoot).valueOr:
          continue
        verifiers.add rman.blockVerifier(blck, maybeFinalized = false)
      try:
        await allFutures(verifiers)
      except CancelledError as exc:
        var futs = newSeqOfCap[Future[void].Raising([])](verifiers.len)
        for verifier in verifiers:
          futs.add verifier.cancelAndWait()
        await noCancel allFutures(futs)
        raise exc

    if blobIds.len > 0:
      debug "Requesting detected missing blobs", blobs = shortLog(blobIds)
      let start = SyncMoment.now(0)
      var workers:
        array[PARALLEL_REQUESTS, Future[void].Raising([CancelledError])]
      for i in 0 ..< PARALLEL_REQUESTS:
        workers[i] = rman.fetchBlobsFromNetwork(blobIds)

      await allFutures(workers)
      let finish = SyncMoment.now(uint64(len(blobIds)))

      debug "Request manager blob tick",
        blobs_count = len(blobIds),
        sync_speed = speed(start, finish)

proc getMissingDataColumns(rman: RequestManager): seq[DataColumnIdentifier] =
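  ## Scan the quarantine for blocks that are still waiting on data columns and
  ## return the column identifiers that should be fetched, skipping blocks from
  ## the current slot that are still within the gossip wait window.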
  let
    wallTime = rman.getBeaconTime()
    wallSlot = wallTime.slotOrZero()
    delay = wallTime - wallSlot.start_beacon_time()

  const waitDur = TimeDiff(nanoseconds: DATA_COLUMN_GOSSIP_WAIT_TIME_NS)

  var
    fetches: seq[DataColumnIdentifier]
    ready: seq[Eth2Digest]
  for columnless in rman.quarantine[].peekColumnless():
    withBlck(columnless):
      when consensusFork >= ConsensusFork.Deneb:
        # give data columns a chance to arrive over gossip
        if forkyBlck.message.slot == wallSlot and delay < waitDur:
          debug "Not handling missing data columns early in slot"
          continue

        if not rman.dataColumnQuarantine[].hasDataColumns(forkyBlck):
          let missing = rman.dataColumnQuarantine[].dataColumnFetchRecord(forkyBlck)
          if len(missing.indices) == 0:
            warn "quarantine is missing data columns, but missing indices are empty",
              blk = columnless.root,
              commitments = len(forkyBlck.message.body.blob_kzg_commitments)
          for idx in missing.indices:
            let id = DataColumnIdentifier(block_root: columnless.root, index: idx)
            if id notin fetches:
              fetches.add(id)
        else:
          # this is a programming error and it should not occur
          warn "missing data column handler found columnless block with all data columns",
            blk = columnless.root,
            commitments = len(forkyBlck.message.body.blob_kzg_commitments)
          ready.add(columnless.root)

  for root in ready:
    let columnless = rman.quarantine[].popColumnless(root).valueOr:
      continue
    discard rman.blockVerifier(columnless, false)
  fetches

proc requestManagerDataColumnLoop(
    rman: RequestManager) {.async: (raises: [CancelledError]).} =
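  ## Poll for blocks that are missing data columns, try to resolve the columns
  ## from local storage via `dataColumnLoader` when available, and request the
  ## rest from the network in parallel batches.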
  while true:
    await sleepAsync(POLL_INTERVAL)
    if rman.inhibit():
      continue

    let missingColumnIds = rman.getMissingDataColumns()
    if missingColumnIds.len == 0:
      continue

    var columnIds: seq[DataColumnIdentifier]
    if rman.dataColumnLoader == nil:
      columnIds = missingColumnIds
    else:
      var
        blockRoots: seq[Eth2Digest]
        curRoot: Eth2Digest
      for columnId in missingColumnIds:
        if columnId.block_root != curRoot:
          curRoot = columnId.block_root
          blockRoots.add curRoot
        let data_column_sidecar = rman.dataColumnLoader(columnId).valueOr:
          columnIds.add columnId
          if blockRoots.len > 0 and blockRoots[^1] == curRoot:
            # A data column is missing, remove from list of fully available blocks
            discard blockRoots.pop()
          continue
        debug "Loaded orphaned data column from storage", columnId
        rman.dataColumnQuarantine[].put(data_column_sidecar)
      var verifiers = newSeqOfCap[
        Future[Result[void, VerifierError]]
        .Raising([CancelledError])](blockRoots.len)
      for blockRoot in blockRoots:
        let blck = rman.quarantine[].popColumnless(blockRoot).valueOr:
          continue
        verifiers.add rman.blockVerifier(blck, maybeFinalized = false)
      try:
        await allFutures(verifiers)
      except CancelledError as exc:
        var futs = newSeqOfCap[Future[void].Raising([])](verifiers.len)
        for verifier in verifiers:
          futs.add verifier.cancelAndWait()
        await noCancel allFutures(futs)
        raise exc

    if columnIds.len > 0:
      debug "Requesting detected missing data columns", columns = shortLog(columnIds)
      let start = SyncMoment.now(0)
      var workers:
        array[PARALLEL_REQUESTS, Future[void].Raising([CancelledError])]
      for i in 0 ..< PARALLEL_REQUESTS:
        workers[i] = rman.fetchDataColumnsFromNetwork(columnIds)

      await allFutures(workers)
      let finish = SyncMoment.now(uint64(len(columnIds)))

      debug "Request manager data column tick",
        data_columns_count = len(columnIds),
        sync_speed = speed(start, finish)

proc start*(rman: var RequestManager) =
  ## Start Request Manager's loops.
  rman.blockLoopFuture = rman.requestManagerBlockLoop()
  rman.dataColumnLoopFuture = rman.requestManagerDataColumnLoop()
  # rman.blobLoopFuture = rman.requestManagerBlobLoop()

proc stop*(rman: RequestManager) =
  ## Stop Request Manager's loops.
  if not(isNil(rman.blockLoopFuture)):
    rman.blockLoopFuture.cancelSoon()
  # if not(isNil(rman.blobLoopFuture)):
  #   rman.blobLoopFuture.cancelSoon()
  if not(isNil(rman.dataColumnLoopFuture)):
    rman.dataColumnLoopFuture.cancelSoon()