Trusted blocks (#1227)

* cleanups

* fix ncli state root check flag
* add block dump to ncli_db
* limit ncli_db benchmark length
* tone down finalization logs

* introduce trusted blocks

Only blocks whose signatures have already been verified are stored in the
database - as such, there's no need to check them again and, most importantly,
no need to deserialize the signature when loading blocks from the database
(a sketch of the pattern follows this list).

50x startup time improvement, 200x block load time improvement.

* fix rewinding when deposits have invalid signature
* speed up ancestor iteration by avoiding copy
* avoid deserializing signatures for trusted data
* load blocks lazily when rewinding (less memory used)

* chronicles workarounds

* document TrustedBeaconBlock
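
The pattern at the heart of this change is a TrustedSig type that carries only
the raw signature bytes, plus a withTrust template that makes the verification
body disappear at compile time for trusted types. The following is a minimal,
self-contained sketch of that idea, not the actual definitions: the names
mirror the spec/crypto.nim and spec/signatures.nim hunks below, but the
RawSigSize value and the verify proc are illustrative stand-ins for the real
BLS plumbing.

    const RawSigSize = 96 # compressed BLS12-381 signature size (assumed here)

    type
      ValidatorSig = object
        ## stand-in for the untrusted signature wrapper that must be deserialized
        blob: array[RawSigSize, byte]
      TrustedSig = object
        ## raw bytes only - never deserialized, never re-verified
        data: array[RawSigSize, byte]
      SomeSig = TrustedSig | ValidatorSig

    template withTrust(sig: SomeSig, body: untyped): bool =
      # for TrustedSig the expensive verification body is dropped at compile time
      when sig is TrustedSig:
        true
      else:
        body

    proc verify(sig: SomeSig): bool =
      withTrust(sig):
        # placeholder for the real blsVerify call; only instantiated for ValidatorSig
        sig.blob[0] == 0

    when isMainModule:
      echo verify(TrustedSig())   # true - verification skipped entirely
      echo verify(ValidatorSig()) # runs the untrusted branch

Because the dispatch happens in a when branch, the untrusted path keeps its
full verification cost while the trusted path reduces to returning true.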
Jacek Sieka 2020-06-25 12:23:10 +02:00 committed by GitHub
parent b67a506b3f
commit 1301600341
20 changed files with 297 additions and 165 deletions


@@ -163,7 +163,7 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
     # TODO: stateCache usage
     var stateCache = get_empty_per_epoch_cache()
-    if not isValidAttestationTargetEpoch(state, attestation):
+    if not isValidAttestationTargetEpoch(state, attestation.data):
       notice "Invalid attestation",
         attestation = shortLog(attestation),
         current_epoch = get_current_epoch(state),


@@ -94,24 +94,31 @@ proc get(db: BeaconChainDB, key: openArray[byte], T: type Eth2Digest): Opt[T] =
   res

-proc get(db: BeaconChainDB, key: openArray[byte], T: typedesc): Opt[T] =
-  var res: Opt[T]
+proc get(db: BeaconChainDB, key: openArray[byte], res: var auto): bool =
+  var found = false
+
+  # TODO address is needed because there's no way to express lifetimes in nim
+  # we'll use unsafeAddr to find the code later
+  var resPtr = unsafeAddr res # callback is local, ptr wont escape
   proc decode(data: openArray[byte]) =
     try:
-      res.ok SSZ.decode(snappy.decode(data), T)
+      resPtr[] = SSZ.decode(snappy.decode(data), type res)
+      found = true
     except SerializationError as e:
       # If the data can't be deserialized, it could be because it's from a
       # version of the software that uses a different SSZ encoding
       warn "Unable to deserialize data, old database?",
-        err = e.msg, typ = name(T), dataLen = data.len
+        err = e.msg, typ = name(type res), dataLen = data.len
       discard

   discard db.backend.get(key, decode).expect("working database")

-  res
+  found

 proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: SignedBeaconBlock) =
   db.put(subkey(type value, key), value)

+proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: TrustedSignedBeaconBlock) =
+  db.put(subkey(SignedBeaconBlock, key), value)
+
 proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
   # TODO prune old states - this is less easy than it seems as we never know

@@ -126,7 +133,9 @@ proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
                    value: Eth2Digest) =
   db.put(subkey(root, slot), value)

-proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) =
+proc putBlock*(db: BeaconChainDB, value: SomeSignedBeaconBlock) =
+  # TODO this should perhaps be a TrustedSignedBeaconBlock, but there's no
+  # trivial way to coerce one type into the other, as it stands..
   db.putBlock(hash_tree_root(value.message), value)

 proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =

@@ -145,8 +154,11 @@ proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
 proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
   db.put(subkey(kTailBlock), key)

-proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[SignedBeaconBlock] =
-  db.get(subkey(SignedBeaconBlock, key), SignedBeaconBlock)
+proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
+  # We only store blocks that we trust in the database
+  result.ok(TrustedSignedBeaconBlock())
+  if not db.get(subkey(SignedBeaconBlock, key), result.get):
+    result.err()

 proc getState*(
     db: BeaconChainDB, key: Eth2Digest, output: var BeaconState,

@@ -159,20 +171,11 @@ proc getState*(
   # https://github.com/nim-lang/Nim/issues/14126
   # TODO RVO is inefficient for large objects:
   # https://github.com/nim-lang/Nim/issues/13879
-  # TODO address is needed because there's no way to express lifetimes in nim
-  # we'll use unsafeAddr to find the code later
-  let outputAddr = unsafeAddr output # callback is local
-  proc decode(data: openArray[byte]) =
-    try:
-      # TODO can't write to output directly..
-      assign(outputAddr[], SSZ.decode(snappy.decode(data), BeaconState))
-    except SerializationError as e:
-      # If the data can't be deserialized, it could be because it's from a
-      # version of the software that uses a different SSZ encoding
-      warn "Unable to deserialize data, old database?", err = e.msg
-      rollback(outputAddr[])
-  db.backend.get(subkey(BeaconState, key), decode).expect("working database")
+  if not db.get(subkey(BeaconState, key), output):
+    rollback(output)
+    false
+  else:
+    true

 proc getStateRoot*(db: BeaconChainDB,
                    root: Eth2Digest,

@@ -192,14 +195,14 @@ proc containsState*(db: BeaconChainDB, key: Eth2Digest): bool =
   db.backend.contains(subkey(BeaconState, key)).expect("working database")

 iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
-    tuple[root: Eth2Digest, blck: SignedBeaconBlock] =
+    tuple[root: Eth2Digest, blck: TrustedSignedBeaconBlock] =
   ## Load a chain of ancestors for blck - returns a list of blocks with the
   ## oldest block last (blck will be at result[0]).
   ##
   ## The search will go on until the ancestor cannot be found.
-  var root = root
-  while (let blck = db.getBlock(root); blck.isOk()):
-    yield (root, blck.get())
+  var res: tuple[root: Eth2Digest, blck: TrustedSignedBeaconBlock]
+  res.root = root
+  while db.get(subkey(SignedBeaconBlock, res.root), res.blck):
+    yield res

-    root = blck.get().message.parent_root
+    res.root = res.blck.message.parent_root
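
As a usage sketch (not part of the commit), block reads now come back as
trusted types; here db stands for an already-open BeaconChainDB, as in the
test file at the end of this diff, and root is a placeholder block root:

    let blck = db.getBlock(root)   # Opt[TrustedSignedBeaconBlock]
    if blck.isSome:
      # the signature is a TrustedSig: raw bytes, no BLS deserialization happened
      echo shortLog(blck.get().message)

    for ancestor in db.getAncestors(root):
      # follows parent_root links, newest first, until a parent is missing
      echo shortLog(ancestor.root), " slot ", shortLog(ancestor.blck.message.slot)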


@@ -35,7 +35,7 @@ type
 # Quarantine dispatch
 # --------------------------------------------

-func checkMissing*(pool: var BlockPool): seq[FetchRecord] {.noInit.} =
+func checkMissing*(pool: var BlockPool): seq[FetchRecord] =
   checkMissing(pool.quarantine)

 # CandidateChains


@@ -143,7 +143,7 @@ type
   BlockData* = object
     ## Body and graph in one

-    data*: SignedBeaconBlock
+    data*: TrustedSignedBeaconBlock # We trust all blocks we have a ref for
     refs*: BlockRef

   StateData* = object


@@ -20,7 +20,8 @@ declareCounter beacon_state_data_cache_misses, "dag.cachedStates misses"
 logScope: topics = "hotdb"

-proc putBlock*(dag: var CandidateChains, blockRoot: Eth2Digest, signedBlock: SignedBeaconBlock) {.inline.} =
+proc putBlock*(
+    dag: var CandidateChains, blockRoot: Eth2Digest, signedBlock: SignedBeaconBlock) =
   dag.db.putBlock(blockRoot, signedBlock)

 proc updateStateData*(

@@ -185,7 +186,7 @@ func init(T: type BlockRef, root: Eth2Digest, slot: Slot): BlockRef =
     slot: slot
   )

-func init*(T: type BlockRef, root: Eth2Digest, blck: BeaconBlock): BlockRef =
+func init*(T: type BlockRef, root: Eth2Digest, blck: SomeBeaconBlock): BlockRef =
   BlockRef.init(root, blck.slot)

 proc init*(T: type CandidateChains, db: BeaconChainDB,

@@ -508,10 +509,10 @@ proc skipAndUpdateState(
   ok

 proc rewindState(dag: CandidateChains, state: var StateData, bs: BlockSlot):
-    seq[BlockData] =
+    seq[BlockRef] =
   logScope: pcs = "replay_state"

-  var ancestors = @[dag.get(bs.blck)]
+  var ancestors = @[bs.blck]
   # Common case: the last block applied is the parent of the block to apply:
   if not bs.blck.parent.isNil and state.blck.root == bs.blck.parent.root and
       state.data.data.slot < bs.blck.slot:

@@ -538,7 +539,7 @@ proc rewindState(dag: CandidateChains, state: var StateData, bs: BlockSlot):
       break # Bug probably!

     if parBs.blck != curBs.blck:
-      ancestors.add(dag.get(parBs.blck))
+      ancestors.add(parBs.blck)

     # TODO investigate replacing with getStateCached, by refactoring whole
     # function. Empirically, this becomes pretty rare once good caches are

@@ -547,12 +548,12 @@ proc rewindState(dag: CandidateChains, state: var StateData, bs: BlockSlot):
     if idx >= 0:
       assign(state.data, dag.cachedStates[idx].state[])
       let ancestor = ancestors.pop()
-      state.blck = ancestor.refs
+      state.blck = ancestor

       beacon_state_data_cache_hits.inc()
       trace "Replaying state transitions via in-memory cache",
         stateSlot = shortLog(state.data.data.slot),
-        ancestorStateRoot = shortLog(ancestor.data.message.state_root),
+        ancestorStateRoot = shortLog(state.data.root),
         ancestorStateSlot = shortLog(state.data.data.slot),
         slot = shortLog(bs.slot),
         blockRoot = shortLog(bs.blck.root),

@@ -584,7 +585,7 @@ proc rewindState(dag: CandidateChains, state: var StateData, bs: BlockSlot):
   let
     ancestor = ancestors.pop()
     root = stateRoot.get()
-    found = dag.getState(dag.db, root, ancestor.refs, state)
+    found = dag.getState(dag.db, root, ancestor, state)

   if not found:
     # TODO this should only happen if the database is corrupt - we walked the

@@ -600,7 +601,6 @@ proc rewindState(dag: CandidateChains, state: var StateData, bs: BlockSlot):
   trace "Replaying state transitions",
     stateSlot = shortLog(state.data.data.slot),
-    ancestorStateRoot = shortLog(ancestor.data.message.state_root),
     ancestorStateSlot = shortLog(state.data.data.slot),
     slot = shortLog(bs.slot),
     blockRoot = shortLog(bs.blck.root),

@@ -689,10 +689,7 @@ proc updateStateData*(dag: CandidateChains, state: var StateData, bs: BlockSlot)
     # no state root calculation will take place here, because we can load
     # the final state root from the block itself.
     let ok =
-      dag.skipAndUpdateState(
-        state, ancestors[i],
-        {skipBlsValidation, skipStateRootValidation},
-        false)
+      dag.skipAndUpdateState(state, dag.get(ancestors[i]), {}, false)
     doAssert ok, "Blocks in database should never fail to apply.."

 # We save states here - blocks were guaranteed to have passed through the save


@@ -480,7 +480,7 @@ proc init*[MsgType](T: type SingleChunkResponse[MsgType],
                     peer: Peer, conn: Connection, noSnappy: bool): T =
   T(UntypedResponse(peer: peer, stream: conn, noSnappy: noSnappy))

-template write*[M](r: MultipleChunksResponse[M], val: M): untyped =
+template write*[M](r: MultipleChunksResponse[M], val: auto): untyped =
   sendResponseChunkObj(UntypedResponse(r), val)

 template send*[M](r: SingleChunkResponse[M], val: auto): untyped =


@@ -394,7 +394,7 @@ proc process_registry_updates*(state: var BeaconState,
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
 func is_valid_indexed_attestation*(
-    state: BeaconState, indexed_attestation: IndexedAttestation,
+    state: BeaconState, indexed_attestation: SomeIndexedAttestation,
     flags: UpdateFlags): bool =
   # Check if ``indexed_attestation`` is not empty, has sorted and unique
   # indices and has a valid aggregate signature.

@@ -474,6 +474,22 @@ func get_indexed_attestation*(state: BeaconState, attestation: Attestation,
     signature: attestation.signature
   )

+func get_indexed_attestation*(state: BeaconState, attestation: TrustedAttestation,
+    stateCache: var StateCache): TrustedIndexedAttestation =
+  # Return the indexed attestation corresponding to ``attestation``.
+  let
+    attesting_indices =
+      get_attesting_indices(
+        state, attestation.data, attestation.aggregation_bits, stateCache)
+
+  TrustedIndexedAttestation(
+    attesting_indices:
+      List[uint64, MAX_VALIDATORS_PER_COMMITTEE].init(
+        sorted(mapIt(attesting_indices.toSeq, it.uint64), system.cmp)),
+    data: attestation.data,
+    signature: attestation.signature
+  )
+
 # Attestation validation
 # ------------------------------------------------------------------------------------------
 # https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#attestations

@@ -504,7 +520,7 @@ proc isValidAttestationSlot*(attestationSlot, stateSlot: Slot): bool =
 # TODO remove/merge with p2p-interface validation
 proc isValidAttestationTargetEpoch*(
-    state: BeaconState, attestation: Attestation): bool =
+    state: BeaconState, data: AttestationData): bool =
   # TODO what constitutes a valid attestation when it's about to be added to
   # the pool? we're interested in attestations that will become viable
   # for inclusion in blocks in the future and on any fork, so we need to

@@ -517,7 +533,6 @@ proc isValidAttestationTargetEpoch*(
   # include an attestation in a block even if the corresponding validator
   # was slashed in the same epoch - there's no penalty for doing this and
   # the vote counting logic will take care of any ill effects (TODO verify)
-  let data = attestation.data
   # TODO re-enable check
   #if not (data.crosslink.shard < SHARD_COUNT):
   #  notice "Attestation shard too high",

@@ -547,7 +562,7 @@ proc isValidAttestationTargetEpoch*(
 # https://github.com/ethereum/eth2.0-specs/blob/v0.11.2/specs/phase0/beacon-chain.md#attestations
 proc check_attestation*(
-    state: BeaconState, attestation: Attestation, flags: UpdateFlags,
+    state: BeaconState, attestation: SomeAttestation, flags: UpdateFlags,
     stateCache: var StateCache): bool =
   ## Check that an attestation follows the rules of being included in the state
   ## at the current slot. When acting as a proposer, the same rules need to

@@ -572,7 +587,7 @@ proc check_attestation*(
       committee_count = get_committee_count_at_slot(state, data.slot))
     return

-  if not isValidAttestationTargetEpoch(state, attestation):
+  if not isValidAttestationTargetEpoch(state, data):
     # Logging in isValidAttestationTargetEpoch
     return

@@ -610,7 +625,7 @@ proc check_attestation*(
   true

 proc process_attestation*(
-    state: var BeaconState, attestation: Attestation, flags: UpdateFlags,
+    state: var BeaconState, attestation: SomeAttestation, flags: UpdateFlags,
     stateCache: var StateCache): bool {.nbench.}=
   # In the spec, attestation validation is mixed with state mutation, so here
   # we've split it into two functions so that the validation logic can be


@@ -71,6 +71,11 @@ type
   RandomSourceDepleted* = object of CatchableError

+  TrustedSig* = object
+    data*: array[RawSigSize, byte]
+
+  SomeSig* = TrustedSig | ValidatorSig
+
 func `==`*(a, b: BlsValue): bool =
   if a.kind != b.kind: return false
   if a.kind == Real:

@@ -218,6 +223,9 @@ func toRaw*(x: BlsValue): auto =
   else:
     x.blob

+func toRaw*(x: TrustedSig): auto =
+  x.data
+
 func toHex*(x: BlsCurveType): string =
   toHex(toRaw(x))

@@ -325,6 +333,9 @@ func shortLog*(x: ValidatorPrivKey): string =
   ## Logging for raw unwrapped BLS types
   x.toRaw()[0..3].toHex()

+func shortLog*(x: TrustedSig): string =
+  x.data[0..3].toHex()
+
 # Initialization
 # ----------------------------------------------------------------------


@@ -158,6 +158,12 @@ type
     data*: AttestationData
     signature*: ValidatorSig

+  TrustedIndexedAttestation* = object
+    # TODO ValidatorIndex, but that doesn't serialize properly
+    attesting_indices*: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
+    data*: AttestationData
+    signature*: TrustedSig
+
   CommitteeValidatorsBits* = BitList[MAX_VALIDATORS_PER_COMMITTEE]

   # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#attestation

@@ -166,6 +172,11 @@ type
     data*: AttestationData
     signature*: ValidatorSig

+  TrustedAttestation* = object
+    aggregation_bits*: CommitteeValidatorsBits
+    data*: AttestationData
+    signature*: TrustedSig
+
   Version* = distinct array[4, byte]
   ForkDigest* = distinct array[4, byte]

@@ -212,6 +223,8 @@ type
     pubkey*: ValidatorPubKey
     withdrawal_credentials*: Eth2Digest
     amount*: Gwei
+    # Cannot use TrustedSig here as invalid signatures are possible and determine
+    # if the deposit should be added or not during processing
     signature*: ValidatorSig # Signing over DepositMessage

   # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#voluntaryexit

@@ -240,6 +253,29 @@ type
     body*: BeaconBlockBody

+  TrustedBeaconBlock* = object
+    ## When we receive blocks from outside sources, they are untrusted and go
+    ## through several layers of validation. Blocks that have gone through
+    ## validations can be trusted to be well-formed, with a correct signature,
+    ## having a parent and applying cleanly to the state that their parent
+    ## left them with.
+    ##
+    ## When loading such blocks from the database, to rewind states for example,
+    ## it is expensive to redo the validations (in particular, the signature
+    ## checks), thus `TrustedBlock` uses a `TrustedSig` type to mark that these
+    ## checks can be skipped.
+    ##
+    ## TODO this could probably be solved with some type trickery, but there
+    ## too many bugs in nim around generics handling, and we've used up
+    ## the trickery budget in the serialization library already. Until
+    ## then, the type must be manually kept compatible with its untrusted
+    ## cousin.
+    slot*: Slot
+    proposer_index*: uint64
+    parent_root*: Eth2Digest ##\
+    state_root*: Eth2Digest ##\
+    body*: TrustedBeaconBlockBody
+
   # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconblockheader
   BeaconBlockHeader* = object
     slot*: Slot

@@ -261,8 +297,26 @@ type
     deposits*: List[Deposit, MAX_DEPOSITS]
     voluntary_exits*: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]

+  TrustedBeaconBlockBody* = object
+    randao_reveal*: TrustedSig
+    eth1_data*: Eth1Data
+    graffiti*: Eth2Digest # TODO make that raw bytes
+
+    # Operations
+    proposer_slashings*: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
+    attester_slashings*: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
+    attestations*: List[TrustedAttestation, MAX_ATTESTATIONS]
+    deposits*: List[Deposit, MAX_DEPOSITS]
+    voluntary_exits*: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
+
+  SomeSignedBeaconBlock* = SignedBeaconBlock | TrustedSignedBeaconBlock
+  SomeBeaconBlock* = BeaconBlock | TrustedBeaconBlock
+  SomeBeaconBlockBody* = BeaconBlockBody | TrustedBeaconBlockBody
+  SomeAttestation* = Attestation | TrustedAttestation
+  SomeIndexedAttestation* = IndexedAttestation | TrustedIndexedAttestation
+
   # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate
-  BeaconStateObj* = object
+  BeaconState* = object
     # Versioning
     genesis_time*: uint64
     genesis_validators_root*: Eth2Digest

@@ -314,9 +368,8 @@ type
     current_justified_checkpoint*: Checkpoint
     finalized_checkpoint*: Checkpoint

-  BeaconState* = BeaconStateObj
-  BeaconStateRef* = ref BeaconStateObj not nil
-  NilableBeaconStateRef* = ref BeaconStateObj
+  BeaconStateRef* = ref BeaconState not nil
+  NilableBeaconStateRef* = ref BeaconState

   # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#validator
   Validator* = object

@@ -381,6 +434,10 @@ type
     message*: BeaconBlock
     signature*: ValidatorSig

+  TrustedSignedBeaconBlock* = object
+    message*: TrustedBeaconBlock
+    signature*: TrustedSig
+
   # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#signedbeaconblockheader
   SignedBeaconBlockHeader* = object
     message*: BeaconBlockHeader

@@ -599,7 +656,7 @@ func shortLog*(s: Slot): uint64 =
 func shortLog*(e: Epoch): uint64 =
   e - GENESIS_EPOCH

-func shortLog*(v: BeaconBlock): auto =
+func shortLog*(v: SomeBeaconBlock): auto =
   (
     slot: shortLog(v.slot),
     proposer_index: v.proposer_index,

@@ -612,7 +669,7 @@ func shortLog*(v: BeaconBlock): auto =
     voluntary_exits_len: v.body.voluntary_exits.len(),
   )

-func shortLog*(v: SignedBeaconBlock): auto =
+func shortLog*(v: SomeSignedBeaconBlock): auto =
   (
     blck: shortLog(v.message),
     signature: shortLog(v.signature)

@@ -637,7 +694,7 @@ func shortLog*(v: AttestationData): auto =
     target_root: shortLog(v.target.root)
   )

-func shortLog*(v: Attestation): auto =
+func shortLog*(v: SomeAttestation): auto =
   (
     aggregation_bits: v.aggregation_bits,
     data: shortLog(v.data),


@@ -10,6 +10,12 @@
 import
   ./crypto, ./digest, ./datatypes, ./helpers, ../ssz/merkleization

+template withTrust(sig: SomeSig, body: untyped): bool =
+  when sig is TrustedSig:
+    true
+  else:
+    body
+
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#aggregation-selection
 func get_slot_signature*(
     fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,

@@ -34,7 +40,8 @@ func get_epoch_signature*(
 func verify_epoch_signature*(
     fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch,
-    pubkey: ValidatorPubKey, signature: ValidatorSig): bool =
+    pubkey: ValidatorPubKey, signature: SomeSig): bool =
+  withTrust(signature):
     let
       domain = get_domain(fork, DOMAIN_RANDAO, epoch, genesis_validators_root)
       signing_root = compute_signing_root(epoch, domain)

@@ -55,8 +62,10 @@ func get_block_signature*(
 func verify_block_signature*(
     fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
-    blck: Eth2Digest | BeaconBlock | BeaconBlockHeader, pubkey: ValidatorPubKey,
-    signature: ValidatorSig): bool =
+    blck: Eth2Digest | SomeBeaconBlock | BeaconBlockHeader,
+    pubkey: ValidatorPubKey,
+    signature: SomeSig): bool =
+  withTrust(signature):
     let
       epoch = compute_epoch_at_slot(slot)
       domain = get_domain(

@@ -94,7 +103,8 @@ func verify_attestation_signature*(
     fork: Fork, genesis_validators_root: Eth2Digest,
     attestation_data: AttestationData,
     pubkeys: openArray[ValidatorPubKey],
-    signature: ValidatorSig): bool =
+    signature: SomeSig): bool =
+  withTrust(signature):
     let
       epoch = attestation_data.target.epoch
       domain = get_domain(

@@ -128,7 +138,8 @@ func verify_deposit_signature*(deposit: DepositData): bool =
 func verify_voluntary_exit_signature*(
     fork: Fork, genesis_validators_root: Eth2Digest,
     voluntary_exit: VoluntaryExit,
-    pubkey: ValidatorPubKey, signature: ValidatorSig): bool =
+    pubkey: ValidatorPubKey, signature: SomeSig): bool =
+  withTrust(signature):
     let
       domain = get_domain(
         fork, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch, genesis_validators_root)


@@ -64,7 +64,7 @@ func get_epoch_validator_count(state: BeaconState): int64 {.nbench.} =
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
 proc verify_block_signature*(
-    state: BeaconState, signed_block: SignedBeaconBlock): bool {.nbench.} =
+    state: BeaconState, signed_block: SomeSignedBeaconBlock): bool {.nbench.} =
   let
     proposer_index = signed_block.message.proposer_index
   if proposer_index >= state.validators.len.uint64:

@@ -88,11 +88,15 @@ proc verifyStateRoot(state: BeaconState, blck: BeaconBlock): bool =
   let state_root = hash_tree_root(state)
   if state_root != blck.state_root:
     notice "Block: root verification failed",
-      block_state_root = blck.state_root, state_root
+      block_state_root = shortLog(blck.state_root), state_root = shortLog(state_root)
     false
   else:
     true

+proc verifyStateRoot(state: BeaconState, blck: TrustedBeaconBlock): bool =
+  # This is inlined in state_transition(...) in spec.
+  true
+
 type
   RollbackProc* = proc(v: var BeaconState) {.gcsafe, raises: [Defect].}

@@ -170,7 +174,7 @@ proc noRollback*(state: var HashedBeaconState) =
   trace "Skipping rollback of broken state"

 proc state_transition*(
-    state: var HashedBeaconState, signedBlock: SignedBeaconBlock,
+    state: var HashedBeaconState, signedBlock: SomeSignedBeaconBlock,
     # TODO this is ... okay, but not perfect; align with EpochRef
     stateCache: var StateCache,
     flags: UpdateFlags, rollback: RollbackHashedProc): bool {.nbench.} =

@@ -225,7 +229,7 @@ proc state_transition*(
     trace "in state_transition: processing block, signature passed",
       signature = signedBlock.signature,
       blockRoot = hash_tree_root(signedBlock.message)
-    if processBlock(state.data, signedBlock.message, flags, stateCache):
+    if process_block(state.data, signedBlock.message, flags, stateCache):
       if skipStateRootValidation in flags or verifyStateRoot(state.data, signedBlock.message):
         # State root is what it should be - we're done!

@@ -245,7 +249,7 @@ proc state_transition*(
   false

 proc state_transition*(
-    state: var HashedBeaconState, signedBlock: SignedBeaconBlock,
+    state: var HashedBeaconState, signedBlock: SomeSignedBeaconBlock,
     flags: UpdateFlags, rollback: RollbackHashedProc): bool {.nbench.} =
   # TODO consider moving this to testutils or similar, since non-testing
   # and fuzzing code should always be coming from blockpool which should


@@ -44,12 +44,13 @@ declareGauge beacon_processed_deposits_total, "Number of total deposits included
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#block-header
 proc process_block_header*(
-    state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
-    stateCache: var StateCache): bool {.nbench.}=
+    state: var BeaconState, blck: SomeBeaconBlock, flags: UpdateFlags,
+    stateCache: var StateCache): bool {.nbench.} =
+  logScope:
+    blck = shortLog(blck)
+
   # Verify that the slots match
   if not (blck.slot == state.slot):
     notice "Block header: slot mismatch",
-      block_slot = shortLog(blck.slot),
       state_slot = shortLog(state.slot)
     return false

@@ -66,7 +67,6 @@ proc process_block_header*(
   if not (blck.proposer_index.ValidatorIndex == proposer_index.get):
     notice "Block header: proposer index incorrect",
-      block_proposer_index = blck.proposer_index.ValidatorIndex,
       proposer_index = proposer_index.get
     return false

@@ -74,7 +74,6 @@ proc process_block_header*(
   if not (blck.parent_root == hash_tree_root(state.latest_block_header)):
     notice "Block header: previous block root mismatch",
       latest_block_header = state.latest_block_header,
-      blck = shortLog(blck),
       latest_block_header_root = shortLog(hash_tree_root(state.latest_block_header))
     return false

@@ -101,8 +100,8 @@ proc `xor`[T: array](a, b: T): T =
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#randao
 proc process_randao(
-    state: var BeaconState, body: BeaconBlockBody, flags: UpdateFlags,
-    stateCache: var StateCache): bool {.nbench.}=
+    state: var BeaconState, body: SomeBeaconBlockBody, flags: UpdateFlags,
+    stateCache: var StateCache): bool {.nbench.} =
   let
     proposer_index = get_beacon_proposer_index(state, stateCache)

@@ -137,7 +136,7 @@ proc process_randao(
   true

 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#eth1-data
-func process_eth1_data(state: var BeaconState, body: BeaconBlockBody) {.nbench.}=
+func process_eth1_data(state: var BeaconState, body: SomeBeaconBlockBody) {.nbench.}=
   state.eth1_data_votes.add body.eth1_data
   if state.eth1_data_votes.asSeq.count(body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD.int:

@@ -320,7 +319,7 @@ proc process_voluntary_exit*(
   true

 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#operations
-proc process_operations(state: var BeaconState, body: BeaconBlockBody,
+proc process_operations(state: var BeaconState, body: SomeBeaconBlockBody,
     flags: UpdateFlags, stateCache: var StateCache): bool {.nbench.} =
   # Verify that outstanding deposits are processed up to the maximum number of
   # deposits

@@ -356,7 +355,7 @@ proc process_operations(state: var BeaconState, body: BeaconBlockBody,
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#block-processing
 proc process_block*(
-    state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
+    state: var BeaconState, blck: SomeBeaconBlock, flags: UpdateFlags,
     stateCache: var StateCache): bool {.nbench.}=
   ## When there's a new block, we need to verify that the block is sane and
   ## update the state accordingly

@@ -382,8 +381,8 @@ proc process_block*(
     notice "Block header not valid", slot = shortLog(state.slot)
     return false

-  if not processRandao(state, blck.body, flags, stateCache):
-    debug "[Block processing] Randao failure", slot = shortLog(state.slot)
+  if not process_randao(state, blck.body, flags, stateCache):
+    debug "Randao failure", slot = shortLog(state.slot)
     return false

   process_eth1_data(state, blck.body)


@@ -173,7 +173,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
       root: get_block_root(state, previous_epoch))
     state.justification_bits.setBit 1

-    info "Justified with previous epoch",
+    debug "Justified with previous epoch",
       current_epoch = current_epoch,
       checkpoint = shortLog(state.current_justified_checkpoint),
       cat = "justification"

@@ -187,7 +187,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
       root: get_block_root(state, current_epoch))
     state.justification_bits.setBit 0

-    info "Justified with current epoch",
+    debug "Justified with current epoch",
       current_epoch = current_epoch,
       checkpoint = shortLog(state.current_justified_checkpoint),
       cat = "justification"

@@ -201,7 +201,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
       old_previous_justified_checkpoint.epoch + 3 == current_epoch:
     state.finalized_checkpoint = old_previous_justified_checkpoint

-    info "Finalized with rule 234",
+    debug "Finalized with rule 234",
       current_epoch = current_epoch,
       checkpoint = shortLog(state.finalized_checkpoint),
       cat = "finalization"

@@ -212,7 +212,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
       old_previous_justified_checkpoint.epoch + 2 == current_epoch:
     state.finalized_checkpoint = old_previous_justified_checkpoint

-    info "Finalized with rule 23",
+    debug "Finalized with rule 23",
       current_epoch = current_epoch,
       checkpoint = shortLog(state.finalized_checkpoint),
       cat = "finalization"

@@ -223,7 +223,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
       old_current_justified_checkpoint.epoch + 2 == current_epoch:
     state.finalized_checkpoint = old_current_justified_checkpoint

-    info "Finalized with rule 123",
+    debug "Finalized with rule 123",
       current_epoch = current_epoch,
       checkpoint = shortLog(state.finalized_checkpoint),
       cat = "finalization"

@@ -234,7 +234,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
       old_current_justified_checkpoint.epoch + 1 == current_epoch:
     state.finalized_checkpoint = old_current_justified_checkpoint

-    info "Finalized with rule 12",
+    debug "Finalized with rule 12",
       current_epoch = current_epoch,
       checkpoint = shortLog(state.finalized_checkpoint),
       cat = "finalization"


@@ -22,7 +22,11 @@ proc dump*(dir: string, v: SignedBeaconBlock, root: Eth2Digest) =
   logErrors:
     SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(root)}.ssz", v)

-proc dump*(dir: string, v: SignedBeaconBlock, blck: BlockRef) =
+proc dump*(dir: string, v: TrustedSignedBeaconBlock, root: Eth2Digest) =
+  logErrors:
+    SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(root)}.ssz", v)
+
+proc dump*(dir: string, v: SomeSignedBeaconBlock, blck: BlockRef) =
   dump(dir, v, blck.root)

 proc dump*(dir: string, v: HashedBeaconState, blck: BlockRef) =


@@ -128,7 +128,8 @@ p2pProtocol BeaconSync(version = 1,
       {.libp2pProtocol("metadata", 1).} =
     return peer.network.metadata

-  proc beaconBlocksByRange(peer: Peer,
+  proc beaconBlocksByRange(
+      peer: Peer,
       startSlot: Slot,
       count: uint64,
       step: uint64,

@@ -156,7 +157,8 @@ p2pProtocol BeaconSync(version = 1,
     debug "Block range request done",
       peer, startSlot, count, step, found = count - startIndex

-  proc beaconBlocksByRoot(peer: Peer,
+  proc beaconBlocksByRoot(
+      peer: Peer,
       blockRoots: BlockRootsList,
       response: MultipleChunksResponse[SignedBeaconBlock])
       {.async, libp2pProtocol("beacon_blocks_by_root", 1).} =


@@ -9,7 +9,7 @@ import
   # Standard library
   os, tables,
   # Status libraries
-  confutils/defs, serialization,
+  confutils/defs, serialization, chronicles,
   # Beacon-chain
   ../beacon_chain/spec/[
     datatypes, crypto, helpers, beaconstate, validator,


@@ -2,8 +2,8 @@
   confutils, stats, chronicles, strformat, tables,
   stew/byteutils,
   ../beacon_chain/[beacon_chain_db, block_pool, extras],
-  ../beacon_chain/spec/[crypto, datatypes, digest, helpers, state_transition],
-  ../beacon_chain/sszdump,
+  ../beacon_chain/spec/[crypto, datatypes, digest, helpers, state_transition, validator],
+  ../beacon_chain/sszdump, ../beacon_chain/ssz/merkleization,
   ../research/simutils,
   eth/db/[kvstore, kvstore_sqlite3]

@@ -18,6 +18,7 @@ type
   DbCmd* = enum
     bench
     dumpState
+    dumpBlock
     rewindState

   DbConf = object

@@ -32,15 +33,20 @@ type
       .}: DbCmd

     of bench:
-      validate* {.
-        defaultValue: true
-        desc: "Enable BLS validation" }: bool
+      slots* {.
+        defaultValue: 50000
+        desc: "Number of slots to run benchmark for".}: uint64

     of dumpState:
       stateRoot* {.
        argument
        desc: "State roots to save".}: seq[string]

+    of dumpBlock:
+      blockRootx* {.
+        argument
+        desc: "Block roots to save".}: seq[string]
+
     of rewindState:
       blockRoot* {.
        argument

@@ -70,7 +76,7 @@ proc cmdBench(conf: DbConf) =
   var
     blockRefs: seq[BlockRef]
-    blocks: seq[SignedBeaconBlock]
+    blocks: seq[TrustedSignedBeaconBlock]
     cur = pool.head.blck

   while cur != nil:

@@ -78,6 +84,9 @@ proc cmdBench(conf: DbConf) =
     cur = cur.parent

   for b in 1..<blockRefs.len: # Skip genesis block
+    if blockRefs[blockRefs.len - b - 1].slot > conf.slots:
+      break
+
     withTimer(timers[tLoadBlock]):
       blocks.add db.getBlock(blockRefs[blockRefs.len - b - 1].root).get()

@@ -88,15 +97,17 @@ proc cmdBench(conf: DbConf) =
   withTimer(timers[tLoadState]):
     discard db.getState(state[].root, state[].data, noRollback)

-  let flags = if conf.validate: {} else: {skipBlsValidation}
   for b in blocks:
     let
       isEpoch = state[].data.slot.compute_epoch_at_slot !=
        b.message.slot.compute_epoch_at_slot
     withTimer(timers[if isEpoch: tApplyEpochBlock else: tApplyBlock]):
-      discard state_transition(state[], b, flags, noRollback)
+      if not state_transition(state[], b, {}, noRollback):
+        dump("./", b, hash_tree_root(b.message))
+        echo "State transition failed (!)"
+        quit 1

-  printTimers(conf.validate, timers)
+  printTimers(false, timers)

 proc cmdDumpState(conf: DbConf) =
   let

@@ -114,6 +125,21 @@ proc cmdDumpState(conf: DbConf) =
     except CatchableError as e:
       echo "Couldn't load ", stateRoot, ": ", e.msg

+proc cmdDumpBlock(conf: DbConf) =
+  let
+    db = BeaconChainDB.init(
+      kvStore SqStoreRef.init(conf.databaseDir.string, "nbc").tryGet())
+
+  for blockRoot in conf.blockRootx:
+    try:
+      let root = Eth2Digest(data: hexToByteArray[32](blockRoot))
+      if (let blck = db.getBlock(root); blck.isSome):
+        dump("./", blck.get(), root)
+      else:
+        echo "Couldn't load ", root
+    except CatchableError as e:
+      echo "Couldn't load ", blockRoot, ": ", e.msg
+
 proc cmdRewindState(conf: DbConf) =
   echo "Opening database..."
   let

@@ -145,5 +171,7 @@ when isMainModule:
     cmdBench(conf)
   of dumpState:
     cmdDumpState(conf)
+  of dumpBlock:
+    cmdDumpBlock(conf)
   of rewindState:
     cmdRewindState(conf)


@@ -4,13 +4,13 @@ import
   ../beacon_chain/extras,
   ../beacon_chain/ssz/[merkleization, ssz_serialization]

-cli do(pre: string, blck: string, post: string, verifyStateRoot = false):
+cli do(pre: string, blck: string, post: string, verifyStateRoot = true):
   let
     stateY = (ref HashedBeaconState)(
       data: SSZ.loadFile(pre, BeaconState),
     )
     blckX = SSZ.loadFile(blck, SignedBeaconBlock)
-    flags = if verifyStateRoot: {skipStateRootValidation} else: {}
+    flags = if not verifyStateRoot: {skipStateRootValidation} else: {}

   stateY.root = hash_tree_root(stateY.data)


@@ -2,7 +2,7 @@
 # https://github.com/nim-lang/Nim/issues/11225

 import
-  stew/ptrops, stew/ranges/ptr_arith,
+  stew/ptrops, stew/ranges/ptr_arith, chronicles,
   ../beacon_chain/extras,
   ../beacon_chain/spec/[crypto, datatypes, digest, validator, beaconstate,
                         state_transition_block, state_transition],

@@ -33,8 +33,8 @@ type
   FuzzCrashError = object of CatchableError

 # TODO: change ptr uint to ptr csize_t when available in newer Nim version.
-proc copyState(state: BeaconState, output: ptr byte,
-    output_size: ptr uint): bool {.raises: [FuzzCrashError, Defect].} =
+proc copyState(state: BeaconState, xoutput: ptr byte,
+    xoutput_size: ptr uint): bool {.raises: [FuzzCrashError, Defect].} =
   var resultState =
     try:
       SSZ.encode(state)

@@ -42,18 +42,18 @@ proc copyState(state: BeaconState, output: ptr byte,
       # Shouldn't occur as the writer isn't a file
       raise newException(FuzzCrashError, "Unexpected failure to serialize.", e)

-  if unlikely(resultState.len.uint > output_size[]):
+  if unlikely(resultState.len.uint > xoutput_size[]):
     let msg = (
-      "Not enough output buffer provided to nimbus harness. Provided: " &
-      $(output_size[]) &
+      "Not enough xoutput buffer provided to nimbus harness. Provided: " &
+      $(xoutput_size[]) &
       "Required: " &
       $resultState.len.uint
     )
     raise newException(FuzzCrashError, msg)
-  output_size[] = resultState.len.uint
-  # TODO: improvement might be to write directly to buffer with OutputStream
+  xoutput_size[] = resultState.len.uint
+  # TODO: improvement might be to write directly to buffer with xoutputStream
   # and SszWriter (but then need to ensure length doesn't overflow)
-  copyMem(output, unsafeAddr resultState[0], output_size[])
+  copyMem(xoutput, unsafeAddr resultState[0], xoutput_size[])
   result = true

 template decodeAndProcess(typ, process: untyped): bool =

@@ -90,22 +90,22 @@ template decodeAndProcess(typ, process: untyped): bool =
     raise newException(FuzzCrashError, "Unexpected Exception in state transition", e)

   if processOk:
-    copyState(data.state, output, output_size)
+    copyState(data.state, xoutput, xoutput_size)
   else:
     false

-proc nfuzz_attestation(input: openArray[byte], output: ptr byte,
-    output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
+proc nfuzz_attestation(input: openArray[byte], xoutput: ptr byte,
+    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(AttestationInput):
     process_attestation(data.state, data.attestation, flags, cache)

-proc nfuzz_attester_slashing(input: openArray[byte], output: ptr byte,
-    output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
+proc nfuzz_attester_slashing(input: openArray[byte], xoutput: ptr byte,
+    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(AttesterSlashingInput):
     process_attester_slashing(data.state, data.attesterSlashing, flags, cache)

-proc nfuzz_block(input: openArray[byte], output: ptr byte,
-    output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
+proc nfuzz_block(input: openArray[byte], xoutput: ptr byte,
+    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   # There's not a perfect approach here, but it's not worth switching the rest
   # and requiring HashedBeaconState (yet). So to keep consistent, puts wrapper
   # only in one function.

@@ -120,35 +120,35 @@ proc nfuzz_block(input: openArray[byte], output: ptr byte,
   decodeAndProcess(BlockInput):
     state_transition(data, data.beaconBlock, flags, noRollback)

-proc nfuzz_block_header(input: openArray[byte], output: ptr byte,
-    output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
+proc nfuzz_block_header(input: openArray[byte], xoutput: ptr byte,
+    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(BlockHeaderInput):
     process_block_header(data.state, data.beaconBlock.message, flags, cache)

-proc nfuzz_deposit(input: openArray[byte], output: ptr byte,
-    output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
+proc nfuzz_deposit(input: openArray[byte], xoutput: ptr byte,
+    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(DepositInput):
     process_deposit(data.state, data.deposit, flags)

-proc nfuzz_proposer_slashing(input: openArray[byte], output: ptr byte,
-    output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
+proc nfuzz_proposer_slashing(input: openArray[byte], xoutput: ptr byte,
+    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(ProposerSlashingInput):
     process_proposer_slashing(data.state, data.proposerSlashing, flags, cache)

-proc nfuzz_voluntary_exit(input: openArray[byte], output: ptr byte,
-    output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
+proc nfuzz_voluntary_exit(input: openArray[byte], xoutput: ptr byte,
+    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(VoluntaryExitInput):
     process_voluntary_exit(data.state, data.exit, flags)

 # Note: Could also accept raw input pointer and access list_size + seed here.
-# However, list_size needs to be known also outside this proc to allocate output.
+# However, list_size needs to be known also outside this proc to allocate xoutput.
 # TODO: rework to copy immediatly in an uint8 openArray, considering we have to
 # go over the list anyhow?
-proc nfuzz_shuffle(input_seed: ptr byte, output: var openArray[uint64]): bool
+proc nfuzz_shuffle(input_seed: ptr byte, xoutput: var openArray[uint64]): bool
     {.exportc, raises: [Defect].} =
   var seed: Eth2Digest

   # Should be OK as max 2 bytes are passed by the framework.
-  let list_size = output.len.uint64
+  let list_size = xoutput.len.uint64

   copyMem(addr(seed.data), input_seed, sizeof(seed.data))

@@ -162,8 +162,8 @@ proc nfuzz_shuffle(input_seed: ptr byte, output: var openArray[uint64]): bool
   for i in 0..<list_size:
     # ValidatorIndex is currently wrongly uint32 so we copy this 1 by 1,
-    # assumes passed output is zeroed.
-    copyMem(offset(addr output, i.int), shuffled_seq[i.int].unsafeAddr,
+    # assumes passed xoutput is zeroed.
+    copyMem(offset(addr xoutput, i.int), shuffled_seq[i.int].unsafeAddr,
       sizeof(ValidatorIndex))

   result = true


@@ -42,7 +42,7 @@ suiteReport "Beacon chain DB" & preset():
       db = init(BeaconChainDB, kvStore MemStoreRef.init())

     let
-      signedBlock = SignedBeaconBlock()
+      signedBlock = TrustedSignedBeaconBlock()
       root = hash_tree_root(signedBlock.message)

     db.putBlock(signedBlock)

@@ -74,13 +74,14 @@ suiteReport "Beacon chain DB" & preset():
       db = init(BeaconChainDB, kvStore MemStoreRef.init())

     let
-      a0 = SignedBeaconBlock(message: BeaconBlock(slot: GENESIS_SLOT + 0))
+      a0 = TrustedSignedBeaconBlock(message:
+        TrustedBeaconBlock(slot: GENESIS_SLOT + 0))
       a0r = hash_tree_root(a0.message)
-      a1 = SignedBeaconBlock(message:
-        BeaconBlock(slot: GENESIS_SLOT + 1, parent_root: a0r))
+      a1 = TrustedSignedBeaconBlock(message:
+        TrustedBeaconBlock(slot: GENESIS_SLOT + 1, parent_root: a0r))
       a1r = hash_tree_root(a1.message)
-      a2 = SignedBeaconBlock(message:
-        BeaconBlock(slot: GENESIS_SLOT + 2, parent_root: a1r))
+      a2 = TrustedSignedBeaconBlock(message:
+        TrustedBeaconBlock(slot: GENESIS_SLOT + 2, parent_root: a1r))
       a2r = hash_tree_root(a2.message)

     doAssert toSeq(db.getAncestors(a0r)) == []