limit by-root requests to non-finalized blocks (#3293)

* limit by-root requests to non-finalized blocks

Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable as
the chain grows.

We can distinguish between two cases where by-root access is useful:

* non-finalized blocks - this is where the beacon chain generally
operates, validating incoming data for relevance to future fork
choice decisions - bounded by the length of the non-finalized period
* finalized blocks - historical access in the REST API etc - effectively
unbounded

In this PR, we limit the by-root block index to the first use case:
finalized chain data can be addressed more efficiently by slot number.
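
The resulting split can be sketched roughly as follows - a simplified
model rather than the actual nimbus-eth2 types: the real code keys a
`HashSet[KeyedBlockRef]` instead of a `Table`, and `Opt`/`ok`/`err` come
from `stew/results` (here the standalone `results` package is assumed):

```nim
import std/tables
import results # nim-results; nimbus pulls this in via stew/results

type
  Eth2Digest = array[32, byte] # toy stand-in for the real digest type
  Slot = uint64
  BlockRef = ref object
    root: Eth2Digest
    slot: Slot

  ChainDag = object
    tailSlot: Slot
    forkBlocks: Table[Eth2Digest, BlockRef]
      ## root -> block, non-finalized blocks only
    finalizedBlocks: seq[BlockRef]
      ## canonical finalized chain, indexed by slot offset from the tail -
      ## empty (skipped) slots hold nil

proc getBlockRef(dag: ChainDag, root: Eth2Digest): Opt[BlockRef] =
  ## By-root lookup covers only the non-finalized period
  if root in dag.forkBlocks:
    ok(dag.forkBlocks[root])
  else:
    err()

proc finalizedBlockAtSlot(dag: ChainDag, slot: Slot): Opt[BlockRef] =
  ## Finalized history is addressed by slot number, offset from the tail
  if slot < dag.tailSlot or
      slot - dag.tailSlot >= dag.finalizedBlocks.len.uint64:
    err()
  elif dag.finalizedBlocks[int(slot - dag.tailSlot)] == nil:
    err()
  else:
    ok(dag.finalizedBlocks[int(slot - dag.tailSlot)])
```

Root-keyed lookups thus stay bounded by the length of the non-finalized
period, while finalized history needs only the slot-indexed sequence.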

Future work includes:

* limiting the `BlockRef` horizon in general - each instance is 40
bytes plus overhead, which adds up - this needs further refactoring to
deal with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing without bound (albeit slowly)

Anyway, this PR easily shaves ~128 MB of memory usage at the time of
writing.

* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead (see the sketch
after this list).
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index now carries only
non-finalized blocks - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* add a missing check ensuring that a `BlockRef` block can reliably be
fetched with `getForkedBlock`
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
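
The first two items can be illustrated roughly as follows - hypothetical
helper names (`serveBlocksByRoot`, `sendBlock`), reusing the toy
`ChainDag`/`getBlockRef` from the sketch above rather than the real
sync-protocol code: since the by-root index now covers only
non-finalized blocks, resolving requested roots through it naturally
stops serving finalized history, and `valueOr` replaces the old `nil`
checks:

```nim
proc sendBlock(blck: BlockRef) =
  # Placeholder for encoding and sending the response over libp2p
  echo "serving block at slot ", blck.slot

proc serveBlocksByRoot(dag: ChainDag, roots: openArray[Eth2Digest]) =
  for root in roots:
    # Finalized roots are no longer in the by-root index, so they are
    # simply skipped - per spec, such blocks are fetched by range instead
    let blck = dag.getBlockRef(root).valueOr:
      continue
    sendBlock(blck)
```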

* fix dag.blocks ref
Jacek Sieka 2022-01-21 12:33:16 +01:00 committed by GitHub
parent 1a37cae329
commit 61342c2449
22 changed files with 552 additions and 548 deletions


@ -92,7 +92,7 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
```diff
+ Adding the same block twice returns a Duplicate error [Preset: mainnet] OK
+ Simple block add&get [Preset: mainnet] OK
+ getRef returns nil for missing blocks OK
+ getBlockRef returns none for missing blocks OK
+ loading tail block works [Preset: mainnet] OK
+ updateHead updates head and headState [Preset: mainnet] OK
+ updateStateData sanity [Preset: mainnet] OK


@ -137,9 +137,10 @@ proc init*(T: type AttestationPool, dag: ChainDAGRef,
epochRef = dag.getEpochRef(blckRef, blckRef.slot.epoch, false).expect(
"Getting an EpochRef should always work for non-finalized blocks")
withBlck(dag.get(blckRef).data):
withBlck(dag.getForkedBlock(blckRef)):
forkChoice.process_block(
dag, epochRef, blckRef, blck.message, blckRef.slot.start_beacon_time)
dag, epochRef, blckRef, blck.message,
blckRef.slot.start_beacon_time)
doAssert status.isOk(), "Error in preloading the fork choice: " & $status.error
@ -704,17 +705,17 @@ proc getAggregatedAttestation*(pool: var AttestationPool,
res
proc selectHead*(pool: var AttestationPool, wallTime: BeaconTime): BlockRef =
proc selectHead*(pool: var AttestationPool, wallTime: BeaconTime): Opt[BlockRef] =
## Trigger fork choice and returns the new head block.
## Can return `nil`
let newHead = pool.forkChoice.get_head(pool.dag, wallTime)
if newHead.isErr:
error "Couldn't select head", err = newHead.error
nil
err()
else:
let ret = pool.dag.getRef(newHead.get())
if ret.isNil:
let ret = pool.dag.getBlockRef(newHead.get())
if ret.isErr():
# This should normally not happen, but if the chain dag and fork choice
# get out of sync, we'll need to try to download the selected head - in
# the meantime, return nil to indicate that no new head was chosen


@ -8,17 +8,12 @@
{.push raises: [Defect].}
import
std/tables,
chronicles,
stew/[assign2, results],
eth/keys,
../spec/[
eth2_merkleization, forks, helpers, signatures, signatures_batch,
state_transition],
../spec/datatypes/[phase0, altair],
"."/[blockchain_dag]
../spec/[forks, signatures, signatures_batch, state_transition],
"."/[block_dag, blockchain_dag]
export results, signatures_batch
export results, signatures_batch, block_dag, blockchain_dag
# Clearance
# ---------------------------------------------
@ -50,7 +45,7 @@ proc addResolvedHeadBlock(
link(parent, blockRef)
dag.blocks.incl(KeyedBlockRef.init(blockRef))
dag.forkBlocks.incl(KeyedBlockRef.init(blockRef))
# Resolved blocks should be stored in database
dag.putBlock(trustedBlock)
@ -160,59 +155,52 @@ proc addHeadBlock*(
template blck(): untyped = signedBlock.message # shortcuts without copy
template blockRoot(): untyped = signedBlock.root
if blockRoot in dag:
debug "Block already exists"
# We should not call the block added callback for blocks that already
# existed in the pool, as that may confuse consumers such as the fork
# choice. While the validation result won't be accessed, it's IGNORE,
# according to the spec.
return err(BlockError.Duplicate)
# If the block we get is older than what we finalized already, we drop it.
# One way this can happen is that we start request a block and finalization
# happens in the meantime - the block we requested will then be stale
# by the time it gets here.
if blck.slot <= dag.finalizedHead.slot:
debug "Old block, dropping",
finalizedHead = shortLog(dag.finalizedHead),
tail = shortLog(dag.tail)
let previous = dag.getBlockIdAtSlot(blck.slot)
if previous.isProposed() and blockRoot == previous.bid.root:
# We should not call the block added callback for blocks that already
# existed in the pool, as that may confuse consumers such as the fork
# choice.
debug "Duplicate block"
return err(BlockError.Duplicate)
# Doesn't correspond to any specific validation condition, and still won't
# be used, but certainly would be IGNORE.
return err(BlockError.UnviableFork)
let parent = dag.getRef(blck.parent_root)
if parent == nil:
debug "Block parent unknown"
return err(BlockError.MissingParent)
if parent.slot >= signedBlock.message.slot:
# A block whose parent is newer than the block itself is clearly invalid -
# discard it immediately
debug "Block with invalid parent, dropping",
parentBlock = shortLog(parent)
return err(BlockError.Invalid)
if (parent.slot < dag.finalizedHead.slot) or
(parent.slot == dag.finalizedHead.slot and
parent != dag.finalizedHead.blck):
# We finalized a block that's newer than the parent of this block - this
# block, although recent, is thus building on a history we're no longer
# interested in pursuing. This can happen if a client produces a block
# while syncing - ie it's own head block will be old, but it'll create
# a block according to the wall clock, in its own little world - this is
# correct - from their point of view, the head block they have is the
# latest thing that happened on the chain and they're performing their
# duty correctly.
# Block is older than finalized, but different from the block in our
# canonical history: it must be from an unviable branch
debug "Block from unviable fork",
finalizedHead = shortLog(dag.finalizedHead),
tail = shortLog(dag.tail)
return err(BlockError.UnviableFork)
# Check non-finalized blocks as well
if dag.containsForkBlock(blockRoot):
return err(BlockError.Duplicate)
let parent = dag.getBlockRef(blck.parent_root).valueOr:
# There are two cases where the parent won't be found: we don't have it or
# it has been finalized already, and as a result the branch the new block
# is on is no longer a viable fork candidate - we can't tell which is which
# at this stage, but we can check if we've seen the parent block previously
# and thus prevent requests for it to be downloaded again.
if dag.db.containsBlock(blck.parent_root):
debug "Block unviable due to pre-finalized-checkpoint parent"
return err(BlockError.UnviableFork)
debug "Block parent unknown or finalized already"
return err(BlockError.MissingParent)
if parent.slot >= signedBlock.message.slot:
# A block whose parent is newer than the block itself is clearly invalid -
# discard it immediately
debug "Block with invalid parent",
parentBlock = shortLog(parent)
return err(BlockError.Invalid)
# The block is resolved, now it's time to validate it to ensure that the
# blocks we add to the database are clean for the given state
let startTick = Moment.now()
@ -244,13 +232,14 @@ proc addHeadBlock*(
if (let e = sigs.collectSignatureSets(
signedBlock, dag.db.immutableValidators,
dag.clearanceState.data, cache); e.isErr()):
# A PublicKey or Signature isn't on the BLS12-381 curve
info "Unable to load signature sets",
err = e.error()
# A PublicKey or Signature isn't on the BLS12-381 curve
return err(BlockError.Invalid)
if not verifier.batchVerify(sigs):
info "Block signature verification failed"
info "Block signature verification failed",
signature = shortLog(signedBlock.signature)
return err(BlockError.Invalid)
let sigVerifyTick = Moment.now()
@ -288,21 +277,47 @@ proc addBackfillBlock*(
template blck(): untyped = signedBlock.message # shortcuts without copy
template blockRoot(): untyped = signedBlock.root
if dag.backfill.slot <= signedBlock.message.slot or
signedBlock.message.slot <= dag.genesis.slot:
if blockRoot in dag:
debug "Block already exists"
let startTick = Moment.now()
if blck.slot >= dag.backfill.slot:
let previous = dag.getBlockIdAtSlot(blck.slot)
if previous.isProposed() and blockRoot == previous.bid.root:
# We should not call the block added callback for blocks that already
# existed in the pool, as that may confuse consumers such as the fork
# choice. While the validation result won't be accessed, it's IGNORE,
# according to the spec.
return err(BlockError.Duplicate)
# The block is newer than our backfill position but not in the dag - either
# it sits somewhere between backfill and tail or it comes from an unviable
# fork. We don't have an in-memory way of checking the former condition so
# we return UnviableFork for that condition as well, even though `Duplicate`
# would be more correct
debug "Block unviable or duplicate"
# Block is older than finalized, but different from the block in our
# canonical history: it must be from an unviable branch
debug "Block from unviable fork",
finalizedHead = shortLog(dag.finalizedHead),
backfill = shortLog(dag.backfill)
return err(BlockError.UnviableFork)
if dag.backfill.parent_root != signedBlock.root:
if blck.slot == dag.genesis.slot and
dag.backfill.parent_root == dag.genesis.root:
if blockRoot != dag.genesis.root:
# We've matched the backfill blocks all the way back to genesis via the
# `parent_root` chain and ended up at a different genesis - one way this
# can happen is when an invalid `--network` parameter is given during
# startup (though in theory, we check that - maybe the database was
# swapped or something?).
fatal "Checkpoint given during initial startup inconsistent with genesis - wrong network used when starting the node?"
quit 1
dag.backfillBlocks[blck.slot.int] = blockRoot
dag.backfill = blck.toBeaconBlockSummary()
notice "Received matching genesis block during backfill, backfill complete"
# Backfill done - dag.backfill.slot now points to genesis block just like
# it would if we loaded a fully backfilled database - returning duplicate
# here is appropriate, though one could also call it ... ok?
return err(BlockError.Duplicate)
if dag.backfill.parent_root != blockRoot:
debug "Block does not match expected backfill root"
return err(BlockError.MissingParent) # MissingChild really, but ..
@ -325,18 +340,23 @@ proc addBackfillBlock*(
signedBlock.root,
proposerKey.get(),
signedBlock.signature):
info "Block signature verification failed"
info "Block signature verification failed",
signature = shortLog(signedBlock.signature)
return err(BlockError.Invalid)
let sigVerifyTick = Moment.now
dag.putBlock(signedBlock.asTrusted())
dag.backfill = blck.toBeaconBlockSummary()
# Invariants maintained on startup
doAssert dag.backfillBlocks.lenu64 == dag.tail.slot.uint64
doAssert dag.backfillBlocks.lenu64 > blck.slot.uint64
dag.backfillBlocks[blck.slot.int] = signedBlock.root
dag.backfillBlocks[blck.slot.int] = blockRoot
dag.backfill = blck.toBeaconBlockSummary()
debug "Block backfilled"
let putBlockTick = Moment.now
debug "Block backfilled",
sigVerifyDur = sigVerifyTick - startTick,
putBlockDur = putBlockTick - sigVerifyTick
ok()


@ -28,20 +28,20 @@ export
type
BlockError* {.pure.} = enum
Invalid ##\
Invalid
## Block is broken / doesn't apply cleanly - whoever sent it is fishy (or
## we're buggy)
MissingParent ##\
MissingParent
## We don't know the parent of this block so we can't tell if it's valid
## or not - it'll go into the quarantine and be reexamined when the parent
## appears or be discarded if finality obsoletes it
UnviableFork ##\
UnviableFork
## Block is from a different history / fork than the one we're interested
## in (based on our finalized checkpoint)
Duplicate ##\
Duplicate
## We've seen this block already, can't add again
OnBlockCallback* =
@ -83,7 +83,7 @@ type
# -----------------------------------
# ColdDB - Canonical chain
db*: BeaconChainDB ##\
db*: BeaconChainDB
## ColdDB - Stores the canonical chain
validatorMonitor*: ref ValidatorMonitor
@ -91,58 +91,59 @@ type
# -----------------------------------
# ChainDAGRef - DAG of candidate chains
blocks*: HashSet[KeyedBlockRef] ##\
## Directed acyclic graph of blocks pointing back to a finalized block on the chain we're
## interested in - we call that block the tail
forkBlocks*: HashSet[KeyedBlockRef]
## root -> BlockRef mapping of blocks still relevant to fork choice, ie
## those that have not yet been finalized - covers the slots
## `finalizedHead.slot..head.slot` (inclusive)
finalizedBlocks*: seq[BlockRef] ##\
## Slot -> BlockRef mapping for the canonical chain - use getBlockBySlot
## to access, generally - coverst the slots
## `tail.slot..finalizedHead.slot` (including the finalized head slot) -
## indices are thus offset by tail.slot
finalizedBlocks*: seq[BlockRef]
## Slot -> BlockRef mapping for the canonical chain - use getBlockAtSlot
## to access, generally - covers the slots
## `tail.slot..finalizedHead.slot` (including the finalized head block) -
## indices are thus offset by tail.slot
backfillBlocks*: seq[Eth2Digest] ##\
## Slot -> Eth2Digest, tail.slot entries
backfillBlocks*: seq[Eth2Digest]
## Slot -> Eth2Digest, covers genesis.slot..tail.slot - 1 (inclusive)
genesis*: BlockRef ##\
genesis*: BlockRef
## The genesis block of the network
tail*: BlockRef ##\
tail*: BlockRef
## The earliest finalized block for which we have a corresponding state -
## when making a replay of chain history, this is as far back as we can
## go - the tail block is unique in that its parent is set to `nil`, even
## in the case where a later genesis block exists.
## in the case where an earlier genesis block exists.
backfill*: BeaconBlockSummary ##\
## The backfill points to the oldest block that we have, in the database -
## when backfilling, we'll be fetching its parent first
backfill*: BeaconBlockSummary
## The backfill points to the oldest block that we have in the database -
## when backfilling, the first block to download is the parent of this block
heads*: seq[BlockRef] ##\
heads*: seq[BlockRef]
## Candidate heads of candidate chains
finalizedHead*: BlockSlot ##\
finalizedHead*: BlockSlot
## The latest block that was finalized according to the block in head
## Ancestors of this block are guaranteed to have 1 child only.
# -----------------------------------
# Pruning metadata
lastPrunePoint*: BlockSlot ##\
lastPrunePoint*: BlockSlot
## The last prune point
## We can prune up to finalizedHead
# -----------------------------------
# Rewinder - Mutable state processing
headState*: StateData ##\
headState*: StateData
## State given by the head block - must only be updated in `updateHead` -
## always matches dag.head
epochRefState*: StateData ##\
epochRefState*: StateData
## State used to produce epochRef instances - must only be used in
## `getEpochRef`
clearanceState*: StateData ##\
clearanceState*: StateData
## Cached state used during block clearance - must only be used in
## clearance module
@ -150,7 +151,7 @@ type
cfg*: RuntimeConfig
epochRefs*: array[32, EpochRef] ##\
epochRefs*: array[32, EpochRef]
## Cached information about a particular epoch ending with the given
## block - we limit the number of held EpochRefs to put a cap on
## memory usage
@ -170,7 +171,7 @@ type
onFinHappened*: OnFinalizedCallback
## On finalization callback
headSyncCommittees*: SyncCommitteeCache ##\
headSyncCommittees*: SyncCommitteeCache
## A cache of the sync committees, as they appear in the head state -
## using the head state is slightly wrong - if a reorg deeper than
## EPOCHS_PER_SYNC_COMMITTEE_PERIOD is happening, some valid sync
@ -202,16 +203,10 @@ type
# balances, as used in fork choice
effective_balances_bytes*: seq[byte]
BlockData* = object
## Body and graph in one
data*: ForkedTrustedSignedBeaconBlock # We trust all blocks we have a ref for
refs*: BlockRef
StateData* = object
data*: ForkedHashedBeaconState
blck*: BlockRef ##\
blck*: BlockRef
## The block associated with the state found in data
OnPhase0BlockAdded* = proc(


@ -178,10 +178,23 @@ func effective_balances*(epochRef: EpochRef): seq[Gwei] =
except CatchableError as exc:
raiseAssert exc.msg
func getBlockBySlot*(dag: ChainDAGRef, slot: Slot): BlockSlot =
func getBlockRef*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockRef] =
## Retrieve a resolved block reference, if available - this function does
## not return historical finalized blocks, see `getBlockAtSlot` for a function
## that covers the entire known history
let key = KeyedBlockRef.asLookupKey(root)
# HashSet lacks the api to do check-and-get in one lookup - `[]` will return
# the copy of the instance in the set which has more fields than `root` set!
if key in dag.forkBlocks:
try: ok(dag.forkBlocks[key].blockRef())
except KeyError: raiseAssert "contains"
else:
err()
func getBlockAtSlot*(dag: ChainDAGRef, slot: Slot): BlockSlot =
## Retrieve the canonical block at the given slot, or the last block that
## comes before - similar to atSlot, but without the linear scan - see
## getBlockSlotIdBySlot for a version that covers backfill blocks as well
## getBlockIdAtSlot for a version that covers backfill blocks as well
## May return an empty BlockSlot (where blck is nil!)
if slot == dag.genesis.slot:
@ -213,14 +226,14 @@ func getBlockBySlot*(dag: ChainDAGRef, slot: Slot): BlockSlot =
BlockSlot() # nil blck!
func getBlockSlotIdBySlot*(dag: ChainDAGRef, slot: Slot): BlockSlotId =
func getBlockIdAtSlot*(dag: ChainDAGRef, slot: Slot): BlockSlotId =
## Retrieve the canonical block at the given slot, or the last block that
## comes before - similar to atSlot, but without the linear scan
if slot == dag.genesis.slot:
return dag.genesis.bid.atSlot(slot)
if slot >= dag.tail.slot:
return dag.getBlockBySlot(slot).toBlockSlotId()
return dag.getBlockAtSlot(slot).toBlockSlotId()
var pos = slot.int
while pos >= dag.backfill.slot.int:
@ -230,6 +243,13 @@ func getBlockSlotIdBySlot*(dag: ChainDAGRef, slot: Slot): BlockSlotId =
BlockSlotId() # not backfilled yet, and not genesis
func getBlockId*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockId] =
let blck = ? dag.getBlockRef(root)
ok(blck.bid)
func isCanonical*(dag: ChainDAGRef, bid: BlockId): bool =
dag.getBlockIdAtSlot(bid.slot).bid == bid
func epochAncestor*(blck: BlockRef, epoch: Epoch): EpochKey =
## The state transition works by storing information from blocks in a
## "working" area until the epoch transition, then batching work collected
@ -287,8 +307,9 @@ func loadStateCache(
if epoch > 0:
load(epoch - 1)
func contains*(dag: ChainDAGRef, root: Eth2Digest): bool =
KeyedBlockRef.asLookupKey(root) in dag.blocks
func containsForkBlock*(dag: ChainDAGRef, root: Eth2Digest): bool =
## Checks for blocks at the finalized checkpoint or newer
KeyedBlockRef.asLookupKey(root) in dag.forkBlocks
proc containsBlock(
cfg: RuntimeConfig, db: BeaconChainDB, slot: Slot, root: Eth2Digest): bool =
@ -355,6 +376,31 @@ proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest):
else:
err()
proc getForkedBlock*(
dag: ChainDAGRef, root: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] =
dag.db.getForkedBlock(root)
proc getForkedBlock*(
dag: ChainDAGRef, id: BlockId): Opt[ForkedTrustedSignedBeaconBlock] =
case dag.cfg.blockForkAtEpoch(id.slot.epoch)
of BeaconBlockFork.Phase0:
let data = dag.db.getPhase0Block(id.root)
if data.isOk():
return ok ForkedTrustedSignedBeaconBlock.init(data.get)
of BeaconBlockFork.Altair:
let data = dag.db.getAltairBlock(id.root)
if data.isOk():
return ok ForkedTrustedSignedBeaconBlock.init(data.get)
of BeaconBlockFork.Bellatrix:
let data = dag.db.getMergeBlock(id.root)
if data.isOk():
return ok ForkedTrustedSignedBeaconBlock.init(data.get)
proc getForkedBlock*(
dag: ChainDAGRef, blck: BlockRef): ForkedTrustedSignedBeaconBlock =
dag.getForkedBlock(blck.bid).expect(
"BlockRef block should always load, database corrupt?")
proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
validatorMonitor: ref ValidatorMonitor, updateFlags: UpdateFlags,
onBlockCb: OnBlockCallback = nil, onHeadCb: OnHeadCallback = nil,
@ -387,14 +433,8 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
withBlck(genesisBlock): BlockRef.init(genesisBlockRoot, blck.message)
var
blocks: HashSet[KeyedBlockRef]
headRef: BlockRef
blocks.incl(KeyedBlockRef.init(tailRef))
if genesisRef != tailRef:
blocks.incl(KeyedBlockRef.init(genesisRef))
var
backfillBlocks = newSeq[Eth2Digest](tailRef.slot.int)
curRef: BlockRef
@ -430,13 +470,11 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
link(newRef, curRef)
curRef = curRef.parent
blocks.incl(KeyedBlockRef.init(curRef))
trace "Populating block dag", key = curRef.root, val = curRef
if curRef != tailRef:
fatal "Head block does not lead to tail - database corrupt?",
genesisRef, tailRef, headRef, curRef, tailRoot, headRoot,
blocks = blocks.len()
genesisRef, tailRef, headRef, curRef, tailRoot, headRoot
quit 1
@ -457,8 +495,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
if tmpState.blck == nil:
fatal "No state found in head history, database corrupt?",
genesisRef, tailRef, headRef, tailRoot, headRoot,
blocks = blocks.len()
genesisRef, tailRef, headRef, tailRoot, headRoot
# TODO Potentially we could recover from here instead of crashing - what
# would be a good recovery model?
quit 1
@ -468,7 +505,6 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
if tmpState.data.phase0Data.data.fork != genesisFork(cfg):
error "State from database does not match network, check --network parameter",
genesisRef, tailRef, headRef, tailRoot, headRoot,
blocks = blocks.len(),
stateFork = tmpState.data.phase0Data.data.fork,
configFork = genesisFork(cfg)
quit 1
@ -476,7 +512,6 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
if tmpState.data.altairData.data.fork != altairFork(cfg):
error "State from database does not match network, check --network parameter",
genesisRef, tailRef, headRef, tailRoot, headRoot,
blocks = blocks.len(),
stateFork = tmpState.data.altairData.data.fork,
configFork = altairFork(cfg)
quit 1
@ -484,7 +519,6 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
if tmpState.data.mergeData.data.fork != mergeFork(cfg):
error "State from database does not match network, check --network parameter",
genesisRef, tailRef, headRef, tailRoot, headRoot,
blocks = blocks.len(),
stateFork = tmpState.data.mergeData.data.fork,
configFork = mergeFork(cfg)
quit 1
@ -492,7 +526,6 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
let dag = ChainDAGRef(
db: db,
validatorMonitor: validatorMonitor,
blocks: blocks,
backfillBlocks: backfillBlocks,
genesis: genesisRef,
tail: tailRef,
@ -546,9 +579,16 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
getStateField(dag.headState.data, finalized_checkpoint)
finalizedSlot = max(finalized_checkpoint.epoch.start_slot(), tailRef.slot)
dag.finalizedHead = headRef.atSlot(finalizedSlot)
block: # Set up finalizedHead -> head
var tmp = dag.head
while tmp.slot > finalizedSlot:
dag.forkBlocks.incl(KeyedBlockRef.init(tmp))
tmp = tmp.parent
block:
dag.forkBlocks.incl(KeyedBlockRef.init(tmp))
dag.finalizedHead = tmp.atSlot(finalizedSlot)
block: # Set up tail -> finalizedHead
dag.finalizedBlocks.setLen((dag.finalizedHead.slot - dag.tail.slot).int + 1)
var tmp = dag.finalizedHead.blck
@ -574,7 +614,8 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
head = shortLog(dag.head),
finalizedHead = shortLog(dag.finalizedHead),
tail = shortLog(dag.tail),
totalBlocks = dag.blocks.len(),
finalizedBlocks = dag.finalizedBlocks.len(),
forkBlocks = dag.forkBlocks.len(),
backfill = (dag.backfill.slot, shortLog(dag.backfill.parent_root))
dag
@ -725,17 +766,6 @@ proc putState(dag: ChainDAGRef, state: StateData) =
debug "Stored state", putStateDur = Moment.now() - startTick
func getRef*(dag: ChainDAGRef, root: Eth2Digest): BlockRef =
## Retrieve a resolved block reference, if available
let key = KeyedBlockRef.asLookupKey(root)
# HashSet lacks the api to do check-and-get in one lookup - `[]` will return
# the copy of the instance in the set which has more fields than `root` set!
if key in dag.blocks:
try: dag.blocks[key].blockRef()
except KeyError: raiseAssert "contains"
else:
nil
proc getBlockRange*(
dag: ChainDAGRef, startSlot: Slot, skipStep: uint64,
output: var openArray[BlockId]): Natural =
@ -781,54 +811,20 @@ proc getBlockRange*(
# Process all blocks that follow the start block (may be zero blocks)
while curSlot > startSlot:
let bs = dag.getBlockSlotIdBySlot(curSlot)
let bs = dag.getBlockIdAtSlot(curSlot)
if bs.isProposed():
o -= 1
output[o] = bs.bid
curSlot -= skipStep
# Handle start slot separately (to avoid underflow when computing curSlot)
let bs = dag.getBlockSlotIdBySlot(startSlot)
let bs = dag.getBlockIdAtSlot(startSlot)
if bs.isProposed():
o -= 1
output[o] = bs.bid
o # Return the index of the first non-nil item in the output
proc getForkedBlock*(dag: ChainDAGRef, id: BlockId): Opt[ForkedTrustedSignedBeaconBlock] =
case dag.cfg.blockForkAtEpoch(id.slot.epoch)
of BeaconBlockFork.Phase0:
let data = dag.db.getPhase0Block(id.root)
if data.isOk():
return ok ForkedTrustedSignedBeaconBlock.init(data.get)
of BeaconBlockFork.Altair:
let data = dag.db.getAltairBlock(id.root)
if data.isOk():
return ok ForkedTrustedSignedBeaconBlock.init(data.get)
of BeaconBlockFork.Bellatrix:
let data = dag.db.getMergeBlock(id.root)
if data.isOk():
return ok ForkedTrustedSignedBeaconBlock.init(data.get)
proc getForkedBlock*(dag: ChainDAGRef, blck: BlockRef): ForkedTrustedSignedBeaconBlock =
dag.getForkedBlock(blck.bid).expect(
"BlockRef block should always load, database corrupt?")
proc get*(dag: ChainDAGRef, blck: BlockRef): BlockData =
## Retrieve the associated block body of a block reference
doAssert (not blck.isNil), "Trying to get nil BlockRef"
BlockData(data: dag.getForkedBlock(blck), refs: blck)
proc get*(dag: ChainDAGRef, root: Eth2Digest): Option[BlockData] =
## Retrieve a resolved block reference and its associated body, if available
let refs = dag.getRef(root)
if not refs.isNil:
some(dag.get(refs))
else:
none(BlockData)
proc advanceSlots(
dag: ChainDAGRef, state: var StateData, slot: Slot, save: bool,
cache: var StateCache, info: var ForkedEpochInfo) =
@ -1134,7 +1130,7 @@ proc pruneBlocksDAG(dag: ChainDAGRef) =
dag.delState(cur) # TODO: should we move that disk I/O to `onSlotEnd`
if cur.isProposed():
dag.blocks.excl(KeyedBlockRef.init(cur.blck))
dag.forkBlocks.excl(KeyedBlockRef.init(cur.blck))
dag.db.delBlock(cur.blck.root)
if cur.blck.parent.isNil:
@ -1433,6 +1429,11 @@ proc updateHead*(
var tmp = finalizedHead.blck
while not isNil(tmp) and tmp.slot >= dag.finalizedHead.slot:
dag.finalizedBlocks[(tmp.slot - dag.tail.slot).int] = tmp
if tmp != finalizedHead.blck:
# The newly finalized block itself should remain in here so that fork
# choice still can find it via root
dag.forkBlocks.excl(KeyedBlockRef.init(tmp))
tmp = tmp.parent
dag.finalizedHead = finalizedHead


@ -111,8 +111,7 @@ proc on_tick(self: var Checkpoints, dag: ChainDAGRef, time: BeaconTime):
if newEpoch and
self.best_justified.epoch > self.justified.checkpoint.epoch:
let blck = dag.getRef(self.best_justified.root)
if blck.isNil:
let blck = dag.getBlockRef(self.best_justified.root).valueOr:
return err ForkChoiceError(
kind: fcJustifiedNodeUnknown,
blockRoot: self.best_justified.root)
@ -233,12 +232,10 @@ func should_update_justified_checkpoint(
let
justified_slot = self.justified.checkpoint.epoch.start_slot()
new_justified_checkpoint = epochRef.current_justified_checkpoint
justified_blck = dag.getRef(new_justified_checkpoint.root)
if justified_blck.isNil:
return err ForkChoiceError(
kind: fcJustifiedNodeUnknown,
blockRoot: new_justified_checkpoint.root)
justified_blck = dag.getBlockRef(new_justified_checkpoint.root).valueOr:
return err ForkChoiceError(
kind: fcJustifiedNodeUnknown,
blockRoot: new_justified_checkpoint.root)
let justified_ancestor = justified_blck.atSlot(justified_slot)
@ -294,8 +291,11 @@ proc process_state(self: var Checkpoints,
self.justified.checkpoint.root != epochRef.current_justified_checkpoint.root:
if (state_justified_epoch > self.justified.checkpoint.epoch) or
(dag.getRef(self.justified.checkpoint.root).atEpochStart(self.finalized.epoch).blck.root !=
self.finalized.root):
((? dag.getBlockRef(self.justified.checkpoint.root).orErr(
ForkChoiceError(
kind: fcJustifiedNodeUnknown,
blockRoot: self.justified.checkpoint.root))).atEpochStart(
self.finalized.epoch).blck.root != self.finalized.root):
let
justifiedBlck = blck.atEpochStart(state_justified_epoch)
@ -337,9 +337,9 @@ proc process_block*(self: var ForkChoice,
let committees_per_slot = get_committee_count_per_slot(epochRef)
for attestation in blck.body.attestations:
let targetBlck = dag.getRef(attestation.data.target.root)
if targetBlck.isNil:
let targetBlck = dag.getBlockRef(attestation.data.target.root).valueOr:
continue
let committee_index = block:
let v = CommitteeIndex.init(attestation.data.index, committees_per_slot)
if v.isErr():


@ -79,8 +79,8 @@ proc updateHead*(self: var ConsensusManager, wallSlot: Slot) =
## `pruneFinalized` must be called for pruning.
# Grab the new head according to our latest attestation data
let newHead = self.attestationPool[].selectHead(wallSlot.start_beacon_time)
if newHead.isNil():
let newHead = self.attestationPool[].selectHead(
wallSlot.start_beacon_time).valueOr:
warn "Head selection failed, using previous head",
head = shortLog(self.dag.head), wallSlot
return


@ -253,27 +253,17 @@ proc checkForPotentialDoppelganger(
# Only check for attestations after node launch. There might be one slot of
# overlap in quick intra-slot restarts so trade off a few true negatives in
# the service of avoiding more likely false positives.
if not self.doppelgangerDetectionEnabled:
return
if attestation.data.slot <= self.doppelgangerDetection.nodeLaunchSlot + 1:
return
if attestation.data.slot.epoch <
self.doppelgangerDetection.broadcastStartEpoch:
let tgtBlck = self.dag.getRef(attestation.data.target.root)
doAssert not tgtBlck.isNil # because attestation is valid above
# We expect the EpochRef not to have been pruned between validating the
# attestation and checking for doppelgangers - this may need to be revisited
# when online pruning of states is implemented
let epochRef = self.dag.getEpochRef(
tgtBlck, attestation.data.target.epoch, true).expect(
"Target block EpochRef must be valid")
for validatorIndex in attesterIndices:
let validatorPubkey = epochRef.validatorKey(validatorIndex).get().toPubKey()
if self.doppelgangerDetectionEnabled and
self.validatorPool[].getValidator(validatorPubkey) !=
default(AttachedValidator):
let validatorPubkey = self.dag.validatorKey(validatorIndex).get().toPubKey()
if not isNil(self.validatorPool[].getValidator(validatorPubkey)):
warn "We believe you are currently running another instance of the same validator. We've disconnected you from the network as this presents a significant slashing risk. Possible next steps are (a) making sure you've disconnected your validator from your old machine before restarting the client; and (b) running the client again with the gossip-slashing-protection option disabled, only if you are absolutely sure this is the only instance of your validator running, and reporting the issue at https://github.com/status-im/nimbus-eth2/issues.",
validatorIndex,
validatorPubkey,


@ -109,8 +109,7 @@ func check_beacon_and_target_block(
# The target block is returned.
# We rely on the chain DAG to have been validated, so check for the existence
# of the block in the pool.
let blck = pool.dag.getRef(data.beacon_block_root)
if blck.isNil:
let blck = pool.dag.getBlockRef(data.beacon_block_root).valueOr:
pool.quarantine[].addMissing(data.beacon_block_root)
return errIgnore("Attestation block unknown")
@ -182,19 +181,21 @@ template checkedReject(error: ValidationError): untyped =
template validateBeaconBlockBellatrix(
signed_beacon_block: phase0.SignedBeaconBlock |
altair.SignedBeaconBlock): untyped =
altair.SignedBeaconBlock,
parent: BlockRef): untyped =
discard
# https://github.com/ethereum/consensus-specs/blob/v1.1.7/specs/merge/p2p-interface.md#beacon_block
template validateBeaconBlockBellatrix(
signed_beacon_block: bellatrix.SignedBeaconBlock): untyped =
signed_beacon_block: bellatrix.SignedBeaconBlock,
parent: BlockRef): untyped =
# If the execution is enabled for the block -- i.e.
# is_execution_enabled(state, block.body) then validate the following:
let executionEnabled =
if signed_beacon_block.message.body.execution_payload !=
default(ExecutionPayload):
true
elif dag.getEpochRef(parent_ref, parent_ref.slot.epoch, true).expect(
elif dag.getEpochRef(parent, parent.slot.epoch, true).expect(
"parent EpochRef doesn't fail").merge_transition_complete:
# Should usually be inexpensive, but could require cache refilling - the
# parent block can be no older than the latest finalized block
@ -204,8 +205,8 @@ template validateBeaconBlockBellatrix(
# mostly relevant around merge transition epochs. It's possible that
# the previous block is phase 0 or Altair, if this is the transition
# block itself.
let blockData = dag.get(parent_ref)
case blockData.data.kind:
let blockData = dag.getForkedBlock(parent)
case blockData.kind:
of BeaconBlockFork.Phase0:
false
of BeaconBlockFork.Altair:
@ -215,7 +216,7 @@ template validateBeaconBlockBellatrix(
# shows how this gets folded into the state each block; checking this
# is equivalent, without ever requiring state replay or any similarly
# expensive computation.
blockData.data.mergeData.message.body.execution_payload !=
blockData.mergeData.message.body.execution_payload !=
default(ExecutionPayload)
if executionEnabled:
@ -280,7 +281,7 @@ proc validateBeaconBlock*(
# TODO might check unresolved/orphaned blocks too, and this might not see all
# blocks at a given slot (though, in theory, those get checked elsewhere), or
# adding metrics that count how often these conditions occur.
if signed_beacon_block.root in dag:
if dag.containsForkBlock(signed_beacon_block.root):
# The gossip algorithm itself already does one round of hashing to find
# already-seen data, but it is fairly aggressive about forgetting about
# what it has seen already
@ -288,14 +289,14 @@ proc validateBeaconBlock*(
return errIgnore("BeaconBlock: already seen")
let
slotBlock = getBlockBySlot(dag, signed_beacon_block.message.slot)
slotBlock = getBlockAtSlot(dag, signed_beacon_block.message.slot)
if slotBlock.isProposed() and
slotBlock.blck.slot == signed_beacon_block.message.slot:
let blck = dag.get(slotBlock.blck).data
if getForkedBlockField(blck, proposer_index) ==
let data = dag.getForkedBlock(slotBlock.blck)
if getForkedBlockField(data, proposer_index) ==
signed_beacon_block.message.proposer_index and
blck.signature.toRaw() != signed_beacon_block.signature.toRaw():
data.signature.toRaw() != signed_beacon_block.signature.toRaw():
return errIgnore("BeaconBlock: already proposed in the same slot")
# [IGNORE] The block's parent (defined by block.parent_root) has been seen
@ -304,12 +305,12 @@ proc validateBeaconBlock*(
#
# And implicitly:
# [REJECT] The block's parent (defined by block.parent_root) passes validation.
let parent_ref = dag.getRef(signed_beacon_block.message.parent_root)
if parent_ref.isNil:
let parent = dag.getBlockRef(signed_beacon_block.message.parent_root).valueOr:
# When the parent is missing, we can't validate the block - we'll queue it
# in the quarantine for later processing
if not quarantine[].add(dag, ForkedSignedBeaconBlock.init(signed_beacon_block)):
debug "Block quarantine full"
return errIgnore("BeaconBlock: Parent not found")
# [REJECT] The current finalized_checkpoint is an ancestor of block -- i.e.
@ -319,7 +320,7 @@ proc validateBeaconBlock*(
let
finalized_checkpoint = getStateField(
dag.headState.data, finalized_checkpoint)
ancestor = get_ancestor(parent_ref, finalized_checkpoint.epoch.start_slot)
ancestor = get_ancestor(parent, finalized_checkpoint.epoch.start_slot)
if ancestor.isNil:
# This shouldn't happen: we should always be able to trace the parent back
@ -336,7 +337,7 @@ proc validateBeaconBlock*(
# processing while proposers for the block's branch are calculated -- in such
# a case do not REJECT, instead IGNORE this message.
let
proposer = getProposer(dag, parent_ref, signed_beacon_block.message.slot)
proposer = getProposer(dag, parent, signed_beacon_block.message.slot)
if proposer.isNone:
warn "cannot compute proposer for message"
@ -354,9 +355,9 @@ proc validateBeaconBlock*(
signed_beacon_block.root,
dag.validatorKey(proposer.get()).get(),
signed_beacon_block.signature):
return errReject("Invalid proposer signature")
return errReject("BeaconBlock: Invalid proposer signature")
validateBeaconBlockBellatrix(signed_beacon_block)
validateBeaconBlockBellatrix(signed_beacon_block, parent)
ok()


@ -115,43 +115,38 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
# https://ethereum.github.io/beacon-APIs/#/Beacon/getStateRoot
router.api(MethodGet, "/eth/v1/beacon/states/{state_id}/root") do (
state_id: StateIdent) -> RestApiResponse:
let bslot =
block:
if state_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$state_id.error())
let sid = state_id.get()
let bres = node.getBlockSlot(sid)
if bres.isErr():
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$bres.error())
bres.get()
let
sid = state_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$error)
bslot = node.getBlockSlot(sid).valueOr:
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$error)
node.withStateForBlockSlot(bslot):
return RestApiResponse.jsonResponse((root: stateRoot))
return RestApiResponse.jsonError(Http404, StateNotFoundError)
# https://ethereum.github.io/beacon-APIs/#/Beacon/getStateFork
router.api(MethodGet, "/eth/v1/beacon/states/{state_id}/fork") do (
state_id: StateIdent) -> RestApiResponse:
let bslot =
block:
if state_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$state_id.error())
let sid = state_id.get()
let bres = node.getBlockSlot(sid)
if bres.isErr():
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$bres.error())
bres.get()
let
sid = state_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$error)
bslot = node.getBlockSlot(sid).valueOr:
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$error)
node.withStateForBlockSlot(bslot):
return RestApiResponse.jsonResponse(
(
@ -169,21 +164,18 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet,
"/eth/v1/beacon/states/{state_id}/finality_checkpoints") do (
state_id: StateIdent) -> RestApiResponse:
let bslot =
block:
if state_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$state_id.error())
let sid = state_id.get()
let bres = node.getBlockSlot(sid)
if bres.isErr():
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$bres.error())
bres.get()
let
sid = state_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$error)
bslot = node.getBlockSlot(sid).valueOr:
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$error)
node.withStateForBlockSlot(bslot):
return RestApiResponse.jsonResponse(
(
@ -200,21 +192,17 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet, "/eth/v1/beacon/states/{state_id}/validators") do (
state_id: StateIdent, id: seq[ValidatorIdent],
status: seq[ValidatorFilter]) -> RestApiResponse:
let bslot =
block:
if state_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$state_id.error())
let sid = state_id.get()
let bres = node.getBlockSlot(sid)
if bres.isErr():
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$bres.error())
bres.get()
let
sid = state_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$error)
bslot = node.getBlockSlot(sid).valueOr:
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$error)
let validatorIds =
block:
if id.isErr():
@ -328,24 +316,21 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet,
"/eth/v1/beacon/states/{state_id}/validators/{validator_id}") do (
state_id: StateIdent, validator_id: ValidatorIdent) -> RestApiResponse:
let bslot =
block:
if state_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$state_id.error())
let sid = state_id.get()
let bres = node.getBlockSlot(sid)
if bres.isErr():
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$bres.error())
bres.get()
if validator_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidValidatorIdValueError,
$validator_id.error())
let
sid = state_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$error)
vid = validator_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidValidatorIdValueError,
$error)
bslot = node.getBlockSlot(sid).valueOr:
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$error)
node.withStateForBlockSlot(bslot):
let
current_epoch = getStateField(stateData.data, slot).epoch()
@ -396,21 +381,18 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet,
"/eth/v1/beacon/states/{state_id}/validator_balances") do (
state_id: StateIdent, id: seq[ValidatorIdent]) -> RestApiResponse:
let bslot =
block:
if state_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$state_id.error())
let sid = state_id.get()
let bres = node.getBlockSlot(sid)
if bres.isErr():
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$bres.error())
bres.get()
let
sid = state_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$error)
bslot = node.getBlockSlot(sid).valueOr:
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$error)
let validatorIds =
block:
if id.isErr():
@ -489,21 +471,18 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
"/eth/v1/beacon/states/{state_id}/committees") do (
state_id: StateIdent, epoch: Option[Epoch], index: Option[CommitteeIndex],
slot: Option[Slot]) -> RestApiResponse:
let bslot =
block:
if state_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$state_id.error())
let sid = state_id.get()
let bres = node.getBlockSlot(sid)
if bres.isErr():
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$bres.error())
bres.get()
let
sid = state_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$error)
bslot = node.getBlockSlot(sid).valueOr:
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$error)
let vepoch =
if epoch.isSome():
let repoch = epoch.get()
@ -605,21 +584,17 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet,
"/eth/v1/beacon/states/{state_id}/sync_committees") do (
state_id: StateIdent, epoch: Option[Epoch]) -> RestApiResponse:
let bslot =
block:
if state_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$state_id.error())
let sid = state_id.get()
let bres = node.getBlockSlot(sid)
if bres.isErr():
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$bres.error())
bres.get()
let
sid = state_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
$error)
bslot = node.getBlockSlot(sid).valueOr:
if sid.kind == StateQueryKind.Root:
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
# in current version of database.
return RestApiResponse.jsonError(Http500, NoImplementationError)
return RestApiResponse.jsonError(Http404, StateNotFoundError,
$error)
let qepoch =
if epoch.isSome():
@ -715,15 +690,15 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
$res.error())
res.get()
let bdata = node.dag.get(blck)
let bdata = node.dag.getForkedBlock(blck)
return
withBlck(bdata.data):
withBlck(bdata):
RestApiResponse.jsonResponse(
[
(
root: blck.root,
canonical: bdata.refs.isAncestorOf(node.dag.head),
canonical: node.dag.isCanonical(
BlockId(root: blck.root, slot: blck.message.slot)),
header: (
message: (
slot: blck.message.slot,
@ -741,22 +716,21 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockHeader
router.api(MethodGet, "/eth/v1/beacon/headers/{block_id}") do (
block_id: BlockIdent) -> RestApiResponse:
let bdata =
block:
if block_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$block_id.error())
let res = node.getBlockDataFromBlockIdent(block_id.get())
if res.isErr():
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
res.get()
let
bid = block_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$error)
bdata = node.getForkedBlock(bid).valueOr:
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
return
withBlck(bdata.data):
withBlck(bdata):
RestApiResponse.jsonResponse(
(
root: blck.root,
canonical: bdata.refs.isAncestorOf(node.dag.head),
canonical: node.dag.isCanonical(
BlockId(root: blck.root, slot: blck.message.slot)),
header: (
message: (
slot: blck.message.slot,
@ -822,15 +796,14 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlock
router.api(MethodGet, "/eth/v1/beacon/blocks/{block_id}") do (
block_id: BlockIdent) -> RestApiResponse:
let bdata =
block:
if block_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$block_id.error())
let res = node.getBlockDataFromBlockIdent(block_id.get())
if res.isErr():
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
res.get()
let
bid = block_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$error)
bdata = node.getForkedBlock(bid).valueOr:
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
let contentType =
block:
let res = preferredContentType("application/octet-stream",
@ -839,13 +812,13 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http406, ContentNotAcceptableError)
res.get()
return
case bdata.data.kind
case bdata.kind
of BeaconBlockFork.Phase0:
case contentType
of "application/octet-stream":
RestApiResponse.sszResponse(bdata.data.phase0Data)
RestApiResponse.sszResponse(bdata.phase0Data)
of "application/json":
RestApiResponse.jsonResponse(bdata.data.phase0Data)
RestApiResponse.jsonResponse(bdata.phase0Data)
else:
RestApiResponse.jsonError(Http500, InvalidAcceptError)
of BeaconBlockFork.Altair, BeaconBlockFork.Bellatrix:
@ -854,15 +827,13 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockV2
router.api(MethodGet, "/eth/v2/beacon/blocks/{block_id}") do (
block_id: BlockIdent) -> RestApiResponse:
let bdata =
block:
if block_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$block_id.error())
let res = node.getBlockDataFromBlockIdent(block_id.get())
if res.isErr():
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
res.get()
let
bid = block_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$error)
bdata = node.getForkedBlock(bid).valueOr:
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
let contentType =
block:
let res = preferredContentType("application/octet-stream",
@ -873,47 +844,40 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
return
case contentType
of "application/octet-stream":
case bdata.data.kind
of BeaconBlockFork.Phase0:
RestApiResponse.sszResponse(bdata.data.phase0Data)
of BeaconBlockFork.Altair:
RestApiResponse.sszResponse(bdata.data.altairData)
of BeaconBlockFork.Bellatrix:
RestApiResponse.sszResponse(bdata.data.mergeData)
withBlck(bdata):
RestApiResponse.sszResponse(blck)
of "application/json":
RestApiResponse.jsonResponsePlain(bdata.data.asSigned())
RestApiResponse.jsonResponsePlain(bdata.asSigned())
else:
RestApiResponse.jsonError(Http500, InvalidAcceptError)
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockRoot
router.api(MethodGet, "/eth/v1/beacon/blocks/{block_id}/root") do (
block_id: BlockIdent) -> RestApiResponse:
let blck =
block:
if block_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$block_id.error())
let res = node.getBlockRef(block_id.get())
if res.isErr():
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
res.get()
let
bid = block_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$error)
blck = node.getBlockId(bid).valueOr:
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
return RestApiResponse.jsonResponse((root: blck.root))
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockAttestations
router.api(MethodGet,
"/eth/v1/beacon/blocks/{block_id}/attestations") do (
block_id: BlockIdent) -> RestApiResponse:
let bdata =
block:
if block_id.isErr():
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$block_id.error())
let res = node.getBlockDataFromBlockIdent(block_id.get())
if res.isErr():
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
res.get()
let
bid = block_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$error)
bdata = node.getForkedBlock(bid).valueOr:
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
return
withBlck(bdata.data):
withBlck(bdata):
RestApiResponse.jsonResponse(blck.message.body.attestations.asSeq())
# https://ethereum.github.io/beacon-APIs/#/Beacon/getPoolAttestations


@ -48,7 +48,7 @@ func getCurrentSlot*(node: BeaconNode, slot: Slot):
func getCurrentBlock*(node: BeaconNode, slot: Slot):
Result[BlockRef, cstring] =
let bs = node.dag.getBlockBySlot(? node.getCurrentSlot(slot))
let bs = node.dag.getBlockAtSlot(? node.getCurrentSlot(slot))
if bs.isProposed():
ok(bs.blck)
else:
@ -72,7 +72,7 @@ proc getBlockSlot*(node: BeaconNode,
stateIdent: StateIdent): Result[BlockSlot, cstring] =
case stateIdent.kind
of StateQueryKind.Slot:
let bs = node.dag.getBlockBySlot(? node.getCurrentSlot(stateIdent.slot))
let bs = node.dag.getBlockAtSlot(? node.getCurrentSlot(stateIdent.slot))
if not isNil(bs.blck):
ok(bs)
else:
@ -95,29 +95,44 @@ proc getBlockSlot*(node: BeaconNode,
ok(node.dag.head.atEpochStart(getStateField(
node.dag.headState.data, current_justified_checkpoint).epoch))
proc getBlockRef*(node: BeaconNode,
id: BlockIdent): Result[BlockRef, cstring] =
proc getBlockId*(node: BeaconNode, id: BlockIdent): Result[BlockId, cstring] =
case id.kind
of BlockQueryKind.Named:
case id.value
of BlockIdentType.Head:
ok(node.dag.head)
ok(node.dag.head.bid)
of BlockIdentType.Genesis:
ok(node.dag.genesis)
ok(node.dag.genesis.bid)
of BlockIdentType.Finalized:
ok(node.dag.finalizedHead.blck)
ok(node.dag.finalizedHead.blck.bid)
of BlockQueryKind.Root:
let res = node.dag.getRef(id.root)
if isNil(res):
err("Block not found")
else:
ok(res)
node.dag.getBlockId(id.root).orErr(cstring("Block not found"))
of BlockQueryKind.Slot:
node.getCurrentBlock(id.slot)
let bsid = node.dag.getBlockIdAtSlot(id.slot)
if bsid.isProposed():
ok bsid.bid
else:
err("Block not found")
proc getBlockDataFromBlockIdent*(node: BeaconNode,
id: BlockIdent): Result[BlockData, cstring] =
ok(node.dag.get(? node.getBlockRef(id)))
proc getForkedBlock*(node: BeaconNode, id: BlockIdent):
Result[ForkedTrustedSignedBeaconBlock, cstring] =
case id.kind
of BlockQueryKind.Named:
case id.value
of BlockIdentType.Head:
ok(node.dag.getForkedBlock(node.dag.head))
of BlockIdentType.Genesis:
ok(node.dag.getForkedBlock(node.dag.genesis))
of BlockIdentType.Finalized:
ok(node.dag.getForkedBlock(node.dag.finalizedHead.blck))
of BlockQueryKind.Root:
node.dag.getForkedBlock(id.root).orErr(cstring("Block not found"))
of BlockQueryKind.Slot:
let bsid = node.dag.getBlockIdAtSlot(id.slot)
if bsid.isProposed():
node.dag.getForkedBlock(bsid.bid).orErr(cstring("Block not found"))
else:
err("Block not found")
proc disallowInterruptionsAux(body: NimNode) =
for n in body:


@ -264,7 +264,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
# in order to compute the sync committee for the epoch. See the following
# discussion for more details:
# https://github.com/status-im/nimbus-eth2/pull/3133#pullrequestreview-817184693
let bs = node.dag.getBlockBySlot(earliestSlotInQSyncPeriod)
let bs = node.dag.getBlockAtSlot(earliestSlotInQSyncPeriod)
if bs.blck.isNil:
return RestApiResponse.jsonError(Http404, StateNotFoundError)

View File

@ -154,27 +154,26 @@ proc getStatus(validator: Validator,
else:
err("Invalid validator status")
proc getBlockDataFromBlockId(node: BeaconNode, blockId: string): BlockData {.
proc getForkedBlockFromBlockId(
node: BeaconNode, blockId: string): ForkedTrustedSignedBeaconBlock {.
raises: [Defect, CatchableError].} =
result = case blockId:
case blockId:
of "head":
node.dag.get(node.dag.head)
node.dag.getForkedBlock(node.dag.head)
of "genesis":
node.dag.get(node.dag.genesis)
node.dag.getForkedBlock(node.dag.genesis)
of "finalized":
node.dag.get(node.dag.finalizedHead.blck)
node.dag.getForkedBlock(node.dag.finalizedHead.blck)
else:
if blockId.startsWith("0x"):
let blckRoot = parseRoot(blockId)
let blockData = node.dag.get(blckRoot)
if blockData.isNone:
let
blckRoot = parseRoot(blockId)
node.dag.getForkedBlock(blckRoot).valueOr:
raise newException(CatchableError, "Block not found")
blockData.get()
else:
let blockSlot = node.getBlockSlotFromString(blockId)
if blockSlot.blck.isNil:
let bid = node.getBlockIdFromString(blockId)
node.dag.getForkedBlock(bid).valueOr:
raise newException(CatchableError, "Block not found")
node.dag.get(blockSlot.blck)
proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
raises: [Defect, CatchableError].} =
@ -384,12 +383,13 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
rpcServer.rpc("get_v1_beacon_headers_blockId") do (
blockId: string) ->
tuple[canonical: bool, header: SignedBeaconBlockHeader]:
let bd = node.getBlockDataFromBlockId(blockId)
return withBlck(bd.data):
let bd = node.getForkedBlockFromBlockId(blockId)
return withBlck(bd):
static: doAssert blck.signature is TrustedSig and
sizeof(ValidatorSig) == sizeof(blck.signature)
(
canonical: bd.refs.isAncestorOf(node.dag.head),
canonical: node.dag.isCanonical(
BlockId(root: blck.root, slot: blck.message.slot)),
header: SignedBeaconBlockHeader(
message: BeaconBlockHeader(
slot: blck.message.slot,
@ -417,7 +417,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
rpcServer.rpc("get_v1_beacon_blocks_blockId") do (
blockId: string) -> phase0.TrustedSignedBeaconBlock:
let blck = node.getBlockDataFromBlockId(blockId).data
let blck = node.getForkedBlockFromBlockId(blockId)
if blck.kind == BeaconBlockFork.Phase0:
return blck.phase0Data
else:
@ -425,12 +425,12 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
rpcServer.rpc("get_v1_beacon_blocks_blockId_root") do (
blockId: string) -> Eth2Digest:
return withBlck(node.getBlockDataFromBlockId(blockId).data):
blck.message.state_root
return withBlck(node.getForkedBlockFromBlockId(blockId)):
blck.root
rpcServer.rpc("get_v1_beacon_blocks_blockId_attestations") do (
blockId: string) -> seq[TrustedAttestation]:
return withBlck(node.getBlockDataFromBlockId(blockId).data):
return withBlck(node.getForkedBlockFromBlockId(blockId)):
blck.message.body.attestations.asSeq
rpcServer.rpc("get_v1_beacon_pool_attestations") do (

View File

@ -61,14 +61,27 @@ proc doChecksAndGetCurrentHead*(node: BeaconNode, epoch: Epoch): BlockRef {.rais
checkEpochToSlotOverflow(epoch)
node.doChecksAndGetCurrentHead(epoch.start_slot())
proc getBlockSlotFromString*(node: BeaconNode, slot: string): BlockSlot {.raises: [Defect, CatchableError].} =
proc parseSlot(slot: string): Slot {.raises: [Defect, CatchableError].} =
if slot.len == 0:
raise newException(ValueError, "Empty slot number not allowed")
var parsed: BiggestUInt
if parseBiggestUInt(slot, parsed) != slot.len:
raise newException(ValueError, "Not a valid slot number")
let head = node.doChecksAndGetCurrentHead(parsed.Slot)
head.atSlot(parsed.Slot)
Slot parsed
proc getBlockSlotFromString*(node: BeaconNode, slot: string): BlockSlot {.raises: [Defect, CatchableError].} =
let parsed = parseSlot(slot)
discard node.doChecksAndGetCurrentHead(parsed)
node.dag.getBlockAtSlot(parsed)
proc getBlockIdFromString*(node: BeaconNode, slot: string): BlockId {.raises: [Defect, CatchableError].} =
let parsed = parseSlot(slot)
discard node.doChecksAndGetCurrentHead(parsed)
let bsid = node.dag.getBlockIdAtSlot(parsed)
if bsid.isProposed():
bsid.bid
else:
raise (ref ValueError)(msg: "Block not found")
proc stateIdToBlockSlot*(node: BeaconNode, stateId: string): BlockSlot {.raises: [Defect, CatchableError].} =
case stateId:

View File

@ -266,9 +266,9 @@ p2pProtocol BeaconSync(version = 1,
var found = 0
for i in 0..<count:
let blockRef = dag.getRef(blockRoots[i])
if not isNil(blockRef):
let blk = dag.get(blockRef).data
let blockRef = dag.getBlockRef(blockRoots[i])
if blockRef.isSome():
let blk = dag.getForkedBlock(blockRef[])
case blk.kind
of BeaconBlockFork.Phase0:
await response.write(blk.phase0Data.asSigned)
@ -315,11 +315,10 @@ p2pProtocol BeaconSync(version = 1,
for i in startIndex..endIndex:
let
blk = dag.getForkedBlock(blocks[i])
blck = dag.getForkedBlock(blocks[i]).valueOr:
continue
if blk.isSome():
let blck = blk.get()
await response.write(blck.asSigned)
await response.write(blck.asSigned)
debug "Block range request done",
peer, startSlot, count, reqStep, found = count - startIndex
@ -346,9 +345,9 @@ p2pProtocol BeaconSync(version = 1,
var found = 0
for i in 0..<count:
let blockRef = dag.getRef(blockRoots[i])
if not isNil(blockRef):
let blk = dag.getForkedBlock(blockRef)
let blockRef = dag.getBlockRef(blockRoots[i])
if blockRef.isSome():
let blk = dag.getForkedBlock(blockRef[])
await response.write(blk.asSigned)
inc found
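As described in the PR, `BeaconBlocksByRoot` no longer serves finalized history: roots absent from the non-finalized index come back as `none` and are simply skipped, and peers are expected to fetch older blocks via `BeaconBlocksByRange`. Stripped of the per-fork dispatch and bookkeeping, the handler pattern above reduces to roughly this (illustrative sketch only):

```nim
# Reduced sketch of the by-root handler above (illustrative only).
for root in blockRoots:
  let blockRef = dag.getBlockRef(root)  # Opt[BlockRef]: none for unknown *and* finalized roots
  if blockRef.isSome():
    let blk = dag.getForkedBlock(blockRef[])
    await response.write(blk.asSigned)
```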

View File

@ -7,7 +7,6 @@
{.push raises: [Defect].}
import
std/[os],
stew/[assign2, base10],
chronicles, chronos,
./sync/sync_manager,
@ -404,6 +403,8 @@ proc doTrustedNodeSync*(
checkpointRoot = checkpointBlock.root
when isMainModule:
import std/[os]
let backfill = os.paramCount() > 3 and os.paramStr(4) == "true"
waitFor doTrustedNodeSync(

View File

@ -1151,32 +1151,23 @@ proc sendAttestation*(node: BeaconNode,
attestation: Attestation): Future[SendResult] {.async.} =
# REST/JSON-RPC API helper procedure.
let
target =
block:
let res = node.dag.getRef(attestation.data.target.root)
if isNil(res):
notice "Attempt to send attestation for unknown target",
attestation = shortLog(attestation)
return SendResult.err(
"Attempt to send attestation for unknown block")
res
epochRef = block:
let tmp = node.dag.getEpochRef(
target, attestation.data.target.epoch, false)
if tmp.isErr(): # Shouldn't happen
warn "Cannot construct EpochRef for attestation, skipping send - report bug",
target = shortLog(target),
attestation = shortLog(attestation)
return
tmp.get()
committee_index = block:
let v = epochRef.get_committee_index(attestation.data.index)
if v.isErr():
target = node.dag.getBlockRef(attestation.data.target.root).valueOr:
notice "Attempt to send attestation for unknown target",
attestation = shortLog(attestation)
return SendResult.err(
"Attempt to send attestation for unknown block")
epochRef = node.dag.getEpochRef(
target, attestation.data.target.epoch, false).valueOr:
warn "Cannot construct EpochRef for attestation, skipping send - report bug",
target = shortLog(target),
attestation = shortLog(attestation)
return
committee_index =
epochRef.get_committee_index(attestation.data.index).valueOr:
notice "Invalid committee index in attestation",
attestation = shortLog(attestation)
return SendResult.err("Invalid committee index in attestation")
v.get()
subnet_id = compute_subnet_for_attestation(
get_committee_count_per_slot(epochRef), attestation.data.slot,
committee_index)
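The repeated `valueOr` blocks come from the results library: they bind the success value, and otherwise evaluate the attached block, which must either produce a fallback value or leave the scope (return, raise, continue). A generic illustration, assuming a proc that returns `SendResult` (not code from this PR):

```nim
# Generic valueOr illustration (assumed context: a proc returning SendResult).
let target = node.dag.getBlockRef(root).valueOr:
  notice "Unknown target block", root
  return SendResult.err("Attempt to send attestation for unknown block")
```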

View File

@ -211,7 +211,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
seq[altair.TrustedSignedBeaconBlock],
seq[bellatrix.TrustedSignedBeaconBlock])
echo &"Loaded {dag.blocks.len} blocks, head slot {dag.head.slot}, selected {blockRefs.len} blocks"
echo &"Loaded head slot {dag.head.slot}, selected {blockRefs.len} blocks"
doAssert blockRefs.len() > 0, "Must select at least one block"
for b in 0 ..< blockRefs.len:
@ -485,8 +485,7 @@ proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
validatorMonitor = newClone(ValidatorMonitor.init())
dag = init(ChainDAGRef, cfg, db, validatorMonitor, {})
let blckRef = dag.getRef(fromHex(Eth2Digest, conf.blockRoot))
if blckRef == nil:
let blckRef = dag.getBlockRef(fromHex(Eth2Digest, conf.blockRoot)).valueOr:
echo "Block not found in database"
return

View File

@ -228,7 +228,7 @@ proc stepChecks(
doAssert fkChoice.checkpoints.time.slotOrZero == time.slotOrZero
elif check == "head":
let headRoot = fkChoice[].get_head(dag, time).get()
let headRef = dag.getRef(headRoot)
let headRef = dag.getBlockRef(headRoot).get()
doAssert headRef.slot == Slot(val["slot"].getInt())
doAssert headRef.root == Eth2Digest.fromHex(val["root"].getStr())
elif check == "justified_checkpoint":

View File

@ -403,8 +403,7 @@ suite "Attestation pool processing" & preset():
pool[].addForkChoice(
epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)
let head = pool[].selectHead(b1Add[].slot.start_beacon_time)
let head = pool[].selectHead(b1Add[].slot.start_beacon_time).get()
check:
head == b1Add[]
@ -417,7 +416,7 @@ suite "Attestation pool processing" & preset():
pool[].addForkChoice(
epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)
let head2 = pool[].selectHead(b2Add[].slot.start_beacon_time)
let head2 = pool[].selectHead(b2Add[].slot.start_beacon_time).get()
check:
head2 == b2Add[]
@ -433,7 +432,7 @@ suite "Attestation pool processing" & preset():
pool[].addForkChoice(
epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)
let head = pool[].selectHead(b10Add[].slot.start_beacon_time)
let head = pool[].selectHead(b10Add[].slot.start_beacon_time).get()
check:
head == b10Add[]
@ -458,7 +457,7 @@ suite "Attestation pool processing" & preset():
attestation0, @[bc1[0]], attestation0.loadSig,
attestation0.data.slot.start_beacon_time)
let head2 = pool[].selectHead(b10Add[].slot.start_beacon_time)
let head2 = pool[].selectHead(b10Add[].slot.start_beacon_time).get()
check:
# Single vote for b10 and no votes for b11
@ -471,7 +470,7 @@ suite "Attestation pool processing" & preset():
attestation1, @[bc1[1]], attestation1.loadSig,
attestation1.data.slot.start_beacon_time)
let head3 = pool[].selectHead(b10Add[].slot.start_beacon_time)
let head3 = pool[].selectHead(b10Add[].slot.start_beacon_time).get()
let bigger = if b11.root.data < b10.root.data: b10Add else: b11Add
check:
@ -482,7 +481,7 @@ suite "Attestation pool processing" & preset():
attestation2, @[bc1[2]], attestation2.loadSig,
attestation2.data.slot.start_beacon_time)
let head4 = pool[].selectHead(b11Add[].slot.start_beacon_time)
let head4 = pool[].selectHead(b11Add[].slot.start_beacon_time).get()
check:
# Two votes for b11
@ -499,7 +498,7 @@ suite "Attestation pool processing" & preset():
pool[].addForkChoice(epochRef, blckRef, signedBlock.message,
blckRef.slot.start_beacon_time)
let head = pool[].selectHead(b10Add[].slot.start_beacon_time)
let head = pool[].selectHead(b10Add[].slot.start_beacon_time).get()
check:
head == b10Add[]
@ -530,7 +529,7 @@ suite "Attestation pool processing" & preset():
pool[].addForkChoice(
epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)
let head = pool[].selectHead(b10Add[].slot.start_beacon_time)
let head = pool[].selectHead(b10Add[].slot.start_beacon_time).get()
doAssert: head == b10Add[]
@ -556,7 +555,7 @@ suite "Attestation pool processing" & preset():
pool[].addForkChoice(
epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)
let head = pool[].selectHead(blockRef[].slot.start_beacon_time)
let head = pool[].selectHead(blockRef[].slot.start_beacon_time).get()
doAssert: head == blockRef[]
dag.updateHead(head, quarantine[])
pruneAtFinalization(dag, pool[])
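These tests unwrap the new `Opt[BlockRef]` return of `selectHead` with `.get()`; a production caller would presumably handle the empty case instead of asserting. A hypothetical fallback, not code from this PR:

```nim
# Hypothetical caller-side handling of Opt[BlockRef] (not from the PR).
let newHead = pool[].selectHead(wallTime).valueOr:
  warn "Fork choice returned no head, keeping current head"
  dag.head
dag.updateHead(newHead, quarantine[])
```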

View File

@ -49,39 +49,40 @@ suite "Block processor" & preset():
check: missing.error == BlockError.MissingParent
check:
dag.get(b2.root).isNone() # Unresolved, shouldn't show up
not dag.containsForkBlock(b2.root) # Unresolved, shouldn't show up
FetchRecord(root: b1.root) in quarantine[].checkMissing()
let
status = processor[].storeBlock(
MsgSource.gossip, b2.message.slot.start_beacon_time(), b1)
b1Get = dag.get(b1.root)
b1Get = dag.getBlockRef(b1.root)
check:
status.isOk
b1Get.isSome()
dag.get(b2.root).isNone() # Async pipeline must still run
dag.containsForkBlock(b1.root)
not dag.containsForkBlock(b2.root) # Async pipeline must still run
discard processor.runQueueProcessingLoop()
while processor[].hasBlocks():
poll()
let
b2Get = dag.get(b2.root)
b2Get = dag.getBlockRef(b2.root)
check:
b2Get.isSome()
b2Get.get().refs.parent == b1Get.get().refs
b2Get.get().parent == b1Get.get()
dag.updateHead(b2Get.get().refs, quarantine[])
dag.updateHead(b2Get.get(), quarantine[])
dag.pruneAtFinalization()
# The heads structure should have been updated to contain only the new
# b2 head
check:
dag.heads.mapIt(it) == @[b2Get.get().refs]
dag.heads.mapIt(it) == @[b2Get.get()]
# check that init also reloads block graph
var
@ -92,7 +93,7 @@ suite "Block processor" & preset():
# ensure we loaded the correct head state
dag2.head.root == b2.root
getStateRoot(dag2.headState.data) == b2.message.state_root
dag2.get(b1.root).isSome()
dag2.get(b2.root).isSome()
dag2.getBlockRef(b1.root).isSome()
dag2.getBlockRef(b2.root).isSome()
dag2.heads.len == 1
dag2.heads[0].root == b2.root

View File

@ -69,13 +69,14 @@ suite "Block pool processing" & preset():
att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
b1 = addTestBlock(state[], cache, attestations = att0).phase0Data
b2 = addTestBlock(state[], cache).phase0Data
test "getRef returns nil for missing blocks":
test "getBlockRef returns none for missing blocks":
check:
dag.getRef(default Eth2Digest) == nil
dag.getBlockRef(default Eth2Digest).isNone()
test "loading tail block works" & preset():
let
b0 = dag.get(dag.tail.root)
b0 = dag.getForkedBlock(dag.tail.root)
check:
b0.isSome()
@ -83,27 +84,29 @@ suite "Block pool processing" & preset():
test "Simple block add&get" & preset():
let
b1Add = dag.addHeadBlock(verifier, b1, nilPhase0Callback)
b1Get = dag.get(b1.root)
b1Get = dag.getForkedBlock(b1.root)
check:
b1Get.isSome()
b1Get.get().refs.root == b1.root
b1Add[].root == b1Get.get().refs.root
b1Get.get().root == b1.root
b1Add[].root == b1Get.get().root
dag.heads.len == 1
dag.heads[0] == b1Add[]
let
b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback)
b2Get = dag.get(b2.root)
b2Get = dag.getForkedBlock(b2.root)
er = dag.findEpochRef(b1Add[], b1Add[].slot.epoch)
validators = getStateField(dag.headState.data, validators).lenu64()
check:
b2Get.isSome()
b2Get.get().refs.root == b2.root
b2Add[].root == b2Get.get().refs.root
b2Get.get().root == b2.root
b2Add[].root == b2Get.get().root
dag.heads.len == 1
dag.heads[0] == b2Add[]
dag.containsForkBlock(b2.root)
not er.isErr()
# Same epoch - same epochRef
er[] == dag.findEpochRef(b2Add[], b2Add[].slot.epoch)[]
@ -388,12 +391,15 @@ suite "chain DAG finalization tests" & preset():
check:
dag.heads.len() == 1
dag.getBlockBySlot(0.Slot) == BlockSlot(blck: dag.genesis, slot: 0.Slot)
dag.getBlockBySlot(dag.head.slot) == BlockSlot(
dag.getBlockAtSlot(0.Slot) == BlockSlot(blck: dag.genesis, slot: 0.Slot)
dag.getBlockAtSlot(dag.head.slot) == BlockSlot(
blck: dag.head, slot: dag.head.slot.Slot)
dag.getBlockBySlot(dag.head.slot + 1) == BlockSlot(
dag.getBlockAtSlot(dag.head.slot + 1) == BlockSlot(
blck: dag.head, slot: dag.head.slot.Slot + 1)
not dag.containsForkBlock(dag.getBlockAtSlot(1.Slot).blck.root)
dag.containsForkBlock(dag.finalizedHead.blck.root)
check:
dag.db.immutableValidators.len() == getStateField(dag.headState.data, validators).len()
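The expectations above capture the split this PR introduces: slot-based lookups (`getBlockAtSlot`) still traverse finalized history, while the by-root fork index (`containsForkBlock`, `getBlockRef`) covers only the finalized head and the non-finalized blocks above it. Restated as standalone assertions — same facts as the checks above, not new test code:

```nim
# Restating the checks above as standalone assertions (illustrative).
doAssert dag.getBlockAtSlot(0.Slot).blck == dag.genesis                   # slot index reaches finalized history
doAssert not dag.containsForkBlock(dag.getBlockAtSlot(1.Slot).blck.root)  # ...the by-root fork index does not
doAssert dag.containsForkBlock(dag.finalizedHead.blck.root)               # except for the finalized head itself
```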
@ -430,6 +436,8 @@ suite "chain DAG finalization tests" & preset():
# The late block is a block whose parent was finalized long ago and thus
# is no longer a viable head candidate
let status = dag.addHeadBlock(verifier, lateBlock, nilPhase0Callback)
# This _should_ be Unviable, but we can't tell from the data that we have,
# so MissingParent is the least wrong thing to reply
check: status.error == BlockError.UnviableFork
block:
@ -524,7 +532,7 @@ suite "chain DAG finalization tests" & preset():
assign(tmpStateData[], dag.headState)
check:
dag.updateStateData(tmpStateData[], cur.atSlot(cur.slot), false, cache)
dag.get(cur).data.phase0Data.message.state_root ==
dag.getForkedBlock(cur).phase0Data.message.state_root ==
getStateRoot(tmpStateData[].data)
getStateRoot(tmpStateData[].data) == hash_tree_root(
tmpStateData[].data.phase0Data.data)
@ -667,7 +675,6 @@ suite "Backfill":
0,
makeInitialDeposits(SLOTS_PER_EPOCH.uint64, flags = {skipBlsValidation}),
{skipBlsValidation}))
genBlock = get_initial_beacon_block(genState[])
tailState = assignClone(genState[])
blocks = block:
@ -683,6 +690,7 @@ suite "Backfill":
test "backfill to genesis":
let
tailBlock = blocks[^1]
genBlock = get_initial_beacon_block(genState[])
ChainDAGRef.preInit(
db, genState[], tailState[], tailBlock.asTrusted())
@ -692,15 +700,15 @@ suite "Backfill":
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
check:
dag.getRef(tailBlock.root) == dag.tail
dag.getRef(blocks[^2].root) == nil
dag.getBlockRef(tailBlock.root).get() == dag.tail
dag.getBlockRef(blocks[^2].root).isNone()
dag.getBlockBySlot(dag.tail.slot).blck == dag.tail
dag.getBlockBySlot(dag.tail.slot - 1).blck == nil
dag.getBlockAtSlot(dag.tail.slot).blck == dag.tail
dag.getBlockAtSlot(dag.tail.slot - 1).blck == nil
dag.getBlockBySlot(Slot(0)).blck == dag.genesis
dag.getBlockSlotIdBySlot(Slot(0)) == dag.genesis.bid.atSlot(Slot(0))
dag.getBlockSlotIdBySlot(Slot(1)) == BlockSlotId()
dag.getBlockAtSlot(Slot(0)).blck == dag.genesis
dag.getBlockIdAtSlot(Slot(0)) == dag.genesis.bid.atSlot(Slot(0))
dag.getBlockIdAtSlot(Slot(1)) == BlockSlotId()
# No epochref for pre-tail epochs
dag.getEpochRef(dag.tail, dag.tail.slot.epoch - 1, true).isErr()
@ -715,31 +723,36 @@ suite "Backfill":
check:
dag.addBackfillBlock(blocks[^3].phase0Data).error == BlockError.MissingParent
dag.addBackfillBlock(tailBlock.phase0Data).error == BlockError.Duplicate
dag.addBackfillBlock(genBlock.phase0Data.asSigned()).error == BlockError.Duplicate
dag.addBackfillBlock(genBlock.phase0Data.asSigned()).error == BlockError.MissingParent
check:
dag.addBackfillBlock(blocks[^2].phase0Data).isOk()
dag.getRef(tailBlock.root) == dag.tail
dag.getRef(blocks[^2].root) == nil
dag.getBlockRef(tailBlock.root).get() == dag.tail
dag.getBlockRef(blocks[^2].root).isNone()
dag.getBlockBySlot(dag.tail.slot).blck == dag.tail
dag.getBlockBySlot(dag.tail.slot - 1).blck == nil
dag.getBlockAtSlot(dag.tail.slot).blck == dag.tail
dag.getBlockAtSlot(dag.tail.slot - 1).blck == nil
dag.getBlockSlotIdBySlot(dag.tail.slot - 1) ==
dag.getBlockIdAtSlot(dag.tail.slot - 1) ==
blocks[^2].toBlockId().atSlot()
dag.getBlockSlotIdBySlot(dag.tail.slot - 2) == BlockSlotId()
dag.getBlockIdAtSlot(dag.tail.slot - 2) == BlockSlotId()
check:
dag.addBackfillBlock(blocks[^3].phase0Data).isOk()
dag.getBlockSlotIdBySlot(dag.tail.slot - 2) ==
dag.getBlockIdAtSlot(dag.tail.slot - 2) ==
blocks[^3].toBlockId().atSlot()
dag.getBlockSlotIdBySlot(dag.tail.slot - 3) == BlockSlotId()
dag.getBlockIdAtSlot(dag.tail.slot - 3) == BlockSlotId()
for i in 3..<blocks.len:
check: dag.addBackfillBlock(blocks[blocks.len - i - 1].phase0Data).isOk()
check:
dag.addBackfillBlock(genBlock.phase0Data.asSigned).error == BlockError.Duplicate
dag.backfill.slot == GENESIS_SLOT
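These checks pin the backfill boundary: `getBlockIdAtSlot` only answers for slots that have already been backfilled, and once the chain is filled back to genesis, the genesis block itself reports `Duplicate` (earlier in the process it reports `MissingParent`, per the check above). Summarised below, restating the checks above; each assertion holds at the point in the test noted by the comment:

```nim
# Backfill boundary behaviour, restated from the checks above (illustrative).
# After backfilling only blocks[^2]:
doAssert dag.getBlockIdAtSlot(dag.tail.slot - 1) == blocks[^2].toBlockId().atSlot()
doAssert dag.getBlockIdAtSlot(dag.tail.slot - 2) == BlockSlotId()  # not yet backfilled
# After backfilling all the way to genesis:
doAssert dag.addBackfillBlock(genBlock.phase0Data.asSigned).error == BlockError.Duplicate
doAssert dag.backfill.slot == GENESIS_SLOT
```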
test "reload backfill position":
let
tailBlock = blocks[^1]
@ -760,12 +773,13 @@ suite "Backfill":
dag2 = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor2, {})
check:
dag.getRef(tailBlock.root) == dag.tail
dag.getRef(blocks[^2].root) == nil
dag2.getBlockRef(tailBlock.root).get().root == dag.tail.root
dag2.getBlockRef(blocks[^2].root).isNone()
dag.getBlockBySlot(dag.tail.slot).blck == dag.tail
dag.getBlockBySlot(dag.tail.slot - 1).blck == nil
dag2.getBlockAtSlot(dag.tail.slot).blck.root == dag.tail.root
dag2.getBlockAtSlot(dag.tail.slot - 1).blck == nil
dag.getBlockSlotIdBySlot(dag.tail.slot - 1) ==
dag2.getBlockIdAtSlot(dag.tail.slot - 1) ==
blocks[^2].toBlockId().atSlot()
dag.getBlockSlotIdBySlot(dag.tail.slot - 2) == BlockSlotId()
dag2.getBlockIdAtSlot(dag.tail.slot - 2) == BlockSlotId()
dag2.backfill.slot == blocks[^2].toBlockId().slot