Merge branch 'devel'

commit 3cecb68f84
@@ -16,7 +16,7 @@ Nimbus beacon chain is a research implementation of the beacon chain component o
 ## Related

 * [status-im/nimbus](https://github.com/status-im/nimbus/): Nimbus for Ethereum 1
-* [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md): Serenity specification that this project implements
+* [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md): Serenity specification that this project implements

 You can check where the beacon chain fits in the Ethereum ecosystem our Two-Point-Oh series: https://our.status.im/tag/two-point-oh/

@@ -62,7 +62,7 @@ Nim is not an external dependency, Nimbus will build its own local copy.
 On common Linux distributions the dependencies can be installed with:
 ```sh
 # Debian and Ubuntu
-sudo apt-get install build-essentials golang-go librocksdb-dev libpcre3-dev
+sudo apt-get install build-essential git golang-go librocksdb-dev libpcre3-dev

 # Fedora
 dnf install @development-tools go rocksdb-devel pcre
@@ -290,7 +290,7 @@ cd status
 git clone https://github.com/facebook/rocksdb.git
 cd rocksdb
 make shared_lib
-sudo make install
+sudo make install-shared
 cd ..

 # Raspberry pi doesn't include /usr/local/lib in library search path
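Not part of this commit: the Raspberry Pi comment above refers to the runtime linker search path, and a minimal sketch of one common fix on a glibc-based system (paths and commands assumed, not taken from this repository) is:

```sh
# Hedged sketch: make libraries installed into /usr/local/lib visible to the dynamic linker.
echo "/usr/local/lib" | sudo tee /etc/ld.so.conf.d/usr-local.conf
sudo ldconfig

# Alternative for the current shell session only:
export LD_LIBRARY_PATH=/usr/local/lib:${LD_LIBRARY_PATH}
```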
@@ -54,12 +54,13 @@ task test, "Run all tests":
   buildBinary "test_fixture_ssz_generic_types", "tests/official/", "-r -d:release -d:chronicles_log_level=DEBUG"

   # Consensus object SSZ tests
-  buildBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-r -d:release -d:chronicles_log_level=DEBUG -d:const_preset=minimal"
+  buildBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-r -d:release -d:chronicles_log_level=TRACE -d:const_preset=minimal"
   buildBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-r -d:release -d:chronicles_log_level=DEBUG -d:const_preset=mainnet"

   buildBinary "all_fixtures_require_ssz", "tests/official/", "-r -d:release -d:chronicles_log_level=DEBUG -d:const_preset=minimal"
-  buildBinary "all_fixtures_require_ssz", "tests/official/", "-r -d:release -d:chronicles_log_level=DEBUG -d:const_preset=mainnet"
+  buildBinary "all_fixtures_require_ssz", "tests/official/", "-r -d:release -d:chronicles_log_level=TRACE -d:const_preset=mainnet"

   # State sim; getting into 4th epoch useful to trigger consensus checks
-  buildBinary "state_sim", "research/", "-r -d:release", "--validators=128 --slots=40"
+  buildBinary "state_sim", "research/", "-r -d:release", "--validators=1024 --slots=32"
+  buildBinary "state_sim", "research/", "-r -d:release -d:const_preset=mainnet", "--validators=1024 --slots=128"

@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2019 Status Research & Development GmbH
 # Licensed and distributed under either of
 #   * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
 #   * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
@@ -12,7 +12,7 @@
 # The other part is arguably part of attestation pool -- the validation's
 # something that should be happing on receipt, not aggregation per se. In
 # that part, check that messages conform -- so, check for each type
-# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/networking/p2p-interface.md#topics-and-messages
+# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/networking/p2p-interface.md#topics-and-messages
 # specifies. So by the time this calls attestation pool, all validation's
 # already done.
 #
@@ -30,17 +30,17 @@ import
 # https://github.com/status-im/nim-beacon-chain/issues/122#issuecomment-562479965

 const
-  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/networking/p2p-interface.md#configuration
+  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/networking/p2p-interface.md#configuration
   ATTESTATION_PROPAGATION_SLOT_RANGE = 32

-# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#aggregation-selection
+# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/validator/0_beacon-chain-validator.md#aggregation-selection
 func get_slot_signature(state: BeaconState, slot: Slot, privkey: ValidatorPrivKey):
     ValidatorSig =
   let domain =
     get_domain(state, DOMAIN_BEACON_ATTESTER, compute_epoch_at_slot(slot))
   bls_sign(privkey, hash_tree_root(slot).data, domain)

-# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#aggregation-selection
+# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/validator/0_beacon-chain-validator.md#aggregation-selection
 func is_aggregator(state: BeaconState, slot: Slot, index: uint64,
     slot_signature: ValidatorSig): bool =
   # TODO index is a CommitteeIndex, aka uint64
@@ -65,17 +65,17 @@ proc aggregate_attestations*(
   doAssert slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= state.slot
   doAssert state.slot >= slot

-  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#aggregation-selection
+  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/validator/0_beacon-chain-validator.md#aggregation-selection
   if not is_aggregator(state, slot, index, slot_signature):
     return none(AggregateAndProof)

   let attestation_data =
     makeAttestationData(state, slot, index, get_block_root_at_slot(state, slot))

-  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#construct-aggregate
+  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/validator/0_beacon-chain-validator.md#construct-aggregate
   for attestation in getAttestationsForBlock(pool, state, slot):
     if attestation.data == attestation_data:
-      # https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#aggregateandproof
+      # https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/validator/0_beacon-chain-validator.md#aggregateandproof
       return some(AggregateAndProof(
         aggregator_index: index,
         aggregate: attestation,
@@ -1,9 +1,8 @@
 import
   deques, sequtils, tables,
-  chronicles, stew/bitseqs, json_serialization/std/sets,
+  chronicles, stew/[bitseqs, byteutils], json_serialization/std/sets,
   ./spec/[beaconstate, datatypes, crypto, digest, helpers, validator],
-  ./extras, ./ssz, ./block_pool,
-  beacon_node_types
+  ./extras, ./ssz, ./block_pool, ./beacon_node_types

 logScope: topics = "attpool"

@@ -146,19 +145,33 @@ func get_attesting_indices_seq(state: BeaconState,
   toSeq(items(get_attesting_indices(
     state, attestation_data, bits, cache)))

-proc add*(pool: var AttestationPool,
-          state: BeaconState,
-          blck: BlockRef,
-          attestation: Attestation) =
-  # TODO there are constraints on the state and block being passed in here
-  # but what these are is unclear.. needs analyzing from a high-level
-  # perspective / spec intent
-  # TODO should update the state correctly in here instead of forcing the caller
-  # to do it...
-  logScope: pcs = "atp_add_attestation"
+func addUnresolved(pool: var AttestationPool, attestation: Attestation) =
+  pool.unresolved[attestation.data.beacon_block_root] =
+    UnresolvedAttestation(
+      attestation: attestation,
+    )

+proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attestation) =
   doAssert blck.root == attestation.data.beacon_block_root

+  # TODO Which state should we use to validate the attestation? It seems
+  # reasonable to involve the head being voted for as well as the intended
+  # slot of the attestation - double-check this with spec
+
+  # A basic check is that the attestation is at least as new as the block being
+  # voted for..
+  if blck.slot > attestation.data.slot:
+    notice "Invalid attestation (too new!)",
+      attestationData = shortLog(attestation.data),
+      blockSlot = shortLog(blck.slot)
+    return
+
+  updateStateData(
+    pool.blockPool, pool.blockPool.tmpState,
+    BlockSlot(blck: blck, slot: attestation.data.slot))
+
+  template state(): BeaconState = pool.blockPool.tmpState.data.data
+
   if not validate(state, attestation):
     notice "Invalid attestation",
       attestationData = shortLog(attestation.data),
@@ -245,11 +258,16 @@ proc add*(pool: var AttestationPool,
     validations = 1,
     cat = "filtering"

-func addUnresolved*(pool: var AttestationPool, attestation: Attestation) =
-  pool.unresolved[attestation.data.beacon_block_root] =
-    UnresolvedAttestation(
-      attestation: attestation,
-    )
+proc add*(pool: var AttestationPool, attestation: Attestation) =
+  logScope: pcs = "atp_add_attestation"
+
+  let blck = pool.blockPool.getOrResolve(attestation.data.beacon_block_root)
+
+  if blck.isNil:
+    pool.addUnresolved(attestation)
+    return
+
+  pool.addResolved(blck, attestation)

 proc getAttestationsForBlock*(
     pool: AttestationPool, state: BeaconState,
|
||||||
if result.len >= MAX_ATTESTATIONS:
|
if result.len >= MAX_ATTESTATIONS:
|
||||||
return
|
return
|
||||||
|
|
||||||
proc resolve*(pool: var AttestationPool, cache: var StateData) =
|
proc resolve*(pool: var AttestationPool) =
|
||||||
|
logScope: pcs = "atp_resolve"
|
||||||
|
|
||||||
var
|
var
|
||||||
done: seq[Eth2Digest]
|
done: seq[Eth2Digest]
|
||||||
resolved: seq[tuple[blck: BlockRef, attestation: Attestation]]
|
resolved: seq[tuple[blck: BlockRef, attestation: Attestation]]
|
||||||
|
@@ -351,11 +371,71 @@ proc resolve*(pool: var AttestationPool, cache: var StateData) =
     pool.unresolved.del(k)

   for a in resolved:
-    pool.blockPool.updateStateData(
-      cache, BlockSlot(blck: a.blck, slot: a.blck.slot))
-
-    pool.add(cache.data.data, a.blck, a.attestation)
+    pool.addResolved(a.blck, a.attestation)

 func latestAttestation*(
     pool: AttestationPool, pubKey: ValidatorPubKey): BlockRef =
   pool.latestAttestations.getOrDefault(pubKey)
+
+# https://github.com/ethereum/eth2.0-specs/blob/v0.8.4/specs/core/0_fork-choice.md
+# The structure of this code differs from the spec since we use a different
+# strategy for storing states and justification points - it should nonetheless
+# be close in terms of functionality.
+func lmdGhost*(
+    pool: AttestationPool, start_state: BeaconState,
+    start_block: BlockRef): BlockRef =
+  # TODO: a Fenwick Tree datastructure to keep track of cumulated votes
+  # in O(log N) complexity
+  # https://en.wikipedia.org/wiki/Fenwick_tree
+  # Nim implementation for cumulative frequencies at
+  # https://github.com/numforge/laser/blob/990e59fffe50779cdef33aa0b8f22da19e1eb328/benchmarks/random_sampling/fenwicktree.nim

+  let
+    active_validator_indices =
+      get_active_validator_indices(
+        start_state, compute_epoch_at_slot(start_state.slot))
+
+  var latest_messages: seq[tuple[validator: ValidatorIndex, blck: BlockRef]]
+  for i in active_validator_indices:
+    let pubKey = start_state.validators[i].pubkey
+    if (let vote = pool.latestAttestation(pubKey); not vote.isNil):
+      latest_messages.add((i, vote))
+
+  template get_latest_attesting_balance(blck: BlockRef): uint64 =
+    var res: uint64
+    for validator_index, target in latest_messages.items():
+      if get_ancestor(target, blck.slot) == blck:
+        res += start_state.validators[validator_index].effective_balance
+    res
+
+  var head = start_block
+  while true:
+    if head.children.len() == 0:
+      return head
+
+    if head.children.len() == 1:
+      head = head.children[0]
+    else:
+      var
+        winner = head.children[0]
+        winCount = get_latest_attesting_balance(winner)
+
+      for i in 1..<head.children.len:
+        let
+          candidate = head.children[i]
+          candCount = get_latest_attesting_balance(candidate)
+
+        if (candCount > winCount) or
+            ((candCount == winCount and candidate.root.data < winner.root.data)):
+          winner = candidate
+          winCount = candCount
+      head = winner
+
+proc selectHead*(pool: AttestationPool): BlockRef =
+  let
+    justifiedHead = pool.blockPool.latestJustifiedBlock()
+
+  let newHead =
+    lmdGhost(pool, pool.blockPool.justifiedState.data.data, justifiedHead.blck)
+
+  newHead
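Not part of this commit: the Fenwick-tree TODO in the hunk above refers to keeping cumulative vote balances queryable in O(log N). A minimal sketch of that data structure, with all names hypothetical, could look like this in Nim:

```nim
# Hypothetical sketch only - not code from this repository.
# A Fenwick (binary indexed) tree: point updates and prefix sums in O(log N).
type FenwickTree = object
  sums: seq[uint64]

func init(T: type FenwickTree, n: int): FenwickTree =
  FenwickTree(sums: newSeq[uint64](n + 1))

func update(t: var FenwickTree, pos: int, delta: uint64) =
  ## Add `delta` to the element at 0-based index `pos`.
  var i = pos + 1
  while i < t.sums.len:
    t.sums[i] += delta
    i += i and (-i)

func prefixSum(t: FenwickTree, pos: int): uint64 =
  ## Sum of the elements 0 .. pos.
  var i = pos + 1
  while i > 0:
    result += t.sums[i]
    i -= i and (-i)
```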
@@ -10,7 +10,7 @@ import

   # Local modules
   spec/[datatypes, digest, crypto, beaconstate, helpers, validator, network],
-  conf, time, state_transition, fork_choice, beacon_chain_db,
+  conf, time, state_transition, beacon_chain_db,
   validator_pool, extras, attestation_pool, block_pool, eth2_network,
   beacon_node_types, mainchain_monitor, version, ssz, ssz/dynamic_navigator,
   sync_protocol, request_manager, validator_keygen, interop, statusbar
@@ -47,7 +47,6 @@ type
     forkVersion: array[4, byte]
     networkIdentity: Eth2NodeIdentity
     requestManager: RequestManager
-    isBootstrapNode: bool
     bootstrapNodes: seq[BootstrapAddr]
     db: BeaconChainDB
     config: BeaconNodeConf
@@ -57,17 +56,6 @@ type
     mainchainMonitor: MainchainMonitor
     beaconClock: BeaconClock

-    stateCache: StateData ##\
-    ## State cache object that's used as a scratch pad
-    ## TODO this is pretty dangerous - for example if someone sets it
-    ## to a particular state then does `await`, it might change - prone to
-    ## async races
-
-    justifiedStateCache: StateData ##\
-    ## A second state cache that's used during head selection, to avoid
-    ## state replaying.
-    # TODO Something smarter, so we don't need to keep two full copies, wasteful
-
 proc onBeaconBlock*(node: BeaconNode, blck: SignedBeaconBlock) {.gcsafe.}
 proc updateHead(node: BeaconNode): BlockRef

@@ -146,12 +134,10 @@ proc commitGenesisState(node: BeaconNode, tailState: BeaconState) =
     quit 1

 proc addBootstrapNode(node: BeaconNode, bootstrapNode: BootstrapAddr) =
-  if bootstrapNode.isSameNode(node.networkIdentity):
-    node.isBootstrapNode = true
-  else:
+  if not bootstrapNode.isSameNode(node.networkIdentity):
     node.bootstrapNodes.add bootstrapNode

-proc useBootstrapFile(node: BeaconNode, bootstrapFile: string) =
+proc loadBootstrapFile(node: BeaconNode, bootstrapFile: string) =
   for ln in lines(bootstrapFile):
     node.addBootstrapNode BootstrapAddr.initAddress(string ln)

@@ -167,11 +153,11 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async

   let bootstrapFile = string conf.bootstrapNodesFile
   if bootstrapFile.len > 0:
-    result.useBootstrapFile(bootstrapFile)
+    result.loadBootstrapFile(bootstrapFile)

   let siteLocalBootstrapFile = conf.dataDir / "bootstrap_nodes.txt"
   if fileExists(siteLocalBootstrapFile):
-    result.useBootstrapFile(siteLocalBootstrapFile)
+    result.loadBootstrapFile(siteLocalBootstrapFile)

   result.attachedValidators = ValidatorPool.init

@@ -238,12 +224,9 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async

     onBeaconBlock(result, signedBlock))

-  result.stateCache = result.blockPool.loadTailState()
-  result.justifiedStateCache = result.stateCache
-
   let addressFile = string(conf.dataDir) / "beacon_node.address"
   result.network.saveConnectionAddressFile(addressFile)
-  result.beaconClock = BeaconClock.init(result.stateCache.data.data)
+  result.beaconClock = BeaconClock.init(result.blockPool.headState.data.data)

   when useInsecureFeatures:
     if conf.metricsServer:
@@ -251,22 +234,6 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
       info "Starting metrics HTTP server", address = metricsAddress, port = conf.metricsServerPort
       metrics.startHttpServer(metricsAddress, Port(conf.metricsServerPort))

-template withState(
-    pool: BlockPool, cache: var StateData, blockSlot: BlockSlot, body: untyped): untyped =
-  ## Helper template that updates state to a particular BlockSlot - usage of
-  ## cache is unsafe outside of block.
-  ## TODO async transformations will lead to a race where cache gets updated
-  ## while waiting for future to complete - catch this here somehow?
-
-  updateStateData(pool, cache, blockSlot)
-
-  template hashedState(): HashedBeaconState {.inject, used.} = cache.data
-  template state(): BeaconState {.inject, used.} = cache.data.data
-  template blck(): BlockRef {.inject, used.} = cache.blck
-  template root(): Eth2Digest {.inject, used.} = cache.data.root
-
-  body
-
 proc connectToNetwork(node: BeaconNode) {.async.} =
   if node.bootstrapNodes.len > 0:
     info "Connecting to bootstrap nodes", bootstrapNodes = node.bootstrapNodes
@@ -335,21 +302,15 @@ proc isSynced(node: BeaconNode, head: BlockRef): bool =
   true

 proc updateHead(node: BeaconNode): BlockRef =
-  # Use head state for attestation resolution below
-
   # Check pending attestations - maybe we found some blocks for them
-  node.attestationPool.resolve(node.stateCache)
-
-  # TODO move all of this logic to BlockPool
-
-  let
-    justifiedHead = node.blockPool.latestJustifiedBlock()
-
-  let newHead = node.blockPool.withState(
-      node.justifiedStateCache, justifiedHead):
-    lmdGhost(node.attestationPool, state, justifiedHead.blck)
+  node.attestationPool.resolve()

-  node.blockPool.updateHead(node.stateCache, newHead)
+  # Grab the new head according to our latest attestation data
+  let newHead = node.attestationPool.selectHead()
+
+  # Store the new head in the block pool - this may cause epochs to be
+  # justified and finalized
+  node.blockPool.updateHead(newHead)

   beacon_head_root.set newHead.root.toGaugeValue

   newHead
@@ -366,7 +327,7 @@ proc sendAttestation(node: BeaconNode,
     validatorSignature = await validator.signAttestation(attestationData, fork)

   var aggregationBits = CommitteeValidatorsBits.init(committeeLen)
-  aggregationBits.raiseBit indexInCommittee
+  aggregationBits.setBit indexInCommittee

   var attestation = Attestation(
     data: attestationData,
@@ -397,40 +358,20 @@ proc proposeBlock(node: BeaconNode,
     slot: Slot): Future[BlockRef] {.async.} =
   logScope: pcs = "block_proposal"

-  if head.slot > slot:
-    notice "Skipping proposal, we've already selected a newer head",
+  if head.slot >= slot:
+    # We should normally not have a head newer than the slot we're proposing for
+    # but this can happen if block proposal is delayed
+    warn "Skipping proposal, have newer head already",
       headSlot = shortLog(head.slot),
       headBlockRoot = shortLog(head.root),
       slot = shortLog(slot),
       cat = "fastforward"
     return head

-  if head.slot == 0 and slot == 0:
-    # TODO there's been a startup assertion, which sometimes (but not always
-    # evidently) crashes exactly one node on simulation startup, the one the
-    # beacon chain proposer index points to first for slot 0. it tries using
-    # slot 0 as required, notices head block's slot is also 0 (which, that's
-    # how it's created; it's never less), and promptly fails, with assertion
-    # occuring downstream via async code. This is most easily reproduced via
-    # make clean_eth2_network_simulation_files && make eth2_network_simulation
-    return head
-
-  if head.slot == slot:
-    # Weird, we should never see as head the same slot as we're proposing a
-    # block for - did someone else steal our slot? why didn't we discard it?
-    warn "Found head at same slot as we're supposed to propose for!",
-      headSlot = shortLog(head.slot),
-      headBlockRoot = shortLog(head.root),
-      cat = "consensus_conflict"
-    # TODO investigate how and when this happens.. maybe it shouldn't be an
-    # assert?
-    doAssert false, "head slot matches proposal slot (!)"
-    # return
-
   # Advance state to the slot immediately preceding the one we're creating a
   # block for - potentially we will be processing empty slots along the way.
   let (nroot, nblck) = node.blockPool.withState(
-      node.stateCache, BlockSlot(blck: head, slot: slot - 1)):
+      node.blockPool.tmpState, head.atSlot(slot)):
     let (eth1data, deposits) =
       if node.mainchainMonitor.isNil:
         (get_eth1data_stub(
@@ -476,7 +417,7 @@ proc proposeBlock(node: BeaconNode,

     (blockRoot, newBlock)

-  let newBlockRef = node.blockPool.add(node.stateCache, nroot, nblck)
+  let newBlockRef = node.blockPool.add(nroot, nblck)
   if newBlockRef == nil:
     warn "Unable to add proposed block to block pool",
       newBlock = shortLog(newBlock.message),
@@ -516,8 +457,6 @@ proc onAttestation(node: BeaconNode, attestation: Attestation) =
     signature = shortLog(attestation.signature),
     cat = "consensus" # Tag "consensus|attestation"?

-  if (let attestedBlock = node.blockPool.getOrResolve(
-      attestation.data.beacon_block_root); attestedBlock != nil):
   let
     wallSlot = node.beaconClock.now().toSlot()
     head = node.blockPool.head
@@ -530,22 +469,13 @@ proc onAttestation(node: BeaconNode, attestation: Attestation) =
       cat = "clock_drift" # Tag "attestation|clock_drift"?
     return

-  # TODO seems reasonable to use the latest head state here.. needs thinking
-  # though - maybe we should use the state from the block pointed to by
-  # the attestation for some of the check? Consider interop with block
-  # production!
   if attestation.data.slot > head.blck.slot and
       (attestation.data.slot - head.blck.slot) > maxEmptySlotCount:
     warn "Ignoring attestation, head block too old (out of sync?)",
       attestationSlot = attestation.data.slot, headSlot = head.blck.slot
-  else:
-    let
-      bs = BlockSlot(blck: head.blck, slot: wallSlot.slot)
-
-    node.blockPool.withState(node.stateCache, bs):
-      node.attestationPool.add(state, attestedBlock, attestation)
-  else:
-    node.attestationPool.addUnresolved(attestation)
+    return
+
+  node.attestationPool.add(attestation)

 proc onBeaconBlock(node: BeaconNode, blck: SignedBeaconBlock) =
   # We received a block but don't know much about it yet - in particular, we
@@ -559,7 +489,7 @@ proc onBeaconBlock(node: BeaconNode, blck: SignedBeaconBlock) =

   beacon_blocks_received.inc()

-  if node.blockPool.add(node.stateCache, blockRoot, blck).isNil:
+  if node.blockPool.add(blockRoot, blck).isNil:
     return

   # The block we received contains attestations, and we might not yet know about
@@ -618,7 +548,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
   # epoch since it doesn't change, but that has to be weighed against
   # the complexity of handling forks correctly - instead, we use an adapted
   # version here that calculates the committee for a single slot only
-  node.blockPool.withState(node.stateCache, attestationHead):
+  node.blockPool.withState(node.blockPool.tmpState, attestationHead):
     var cache = get_empty_per_epoch_cache()
     let committees_per_slot = get_committee_count_at_slot(state, slot)

@@ -641,12 +571,11 @@ proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
   ## Perform the proposal for the given slot, iff we have a validator attached
   ## that is supposed to do so, given the shuffling in head

-  # TODO here we advanced the state to the new slot, but later we'll be
+  # TODO here we advance the state to the new slot, but later we'll be
   # proposing for it - basically, we're selecting proposer based on an
-  # empty slot.. wait for the committee selection to settle, then
-  # revisit this - we should be able to advance behind
+  # empty slot
   var cache = get_empty_per_epoch_cache()
-  node.blockPool.withState(node.stateCache, BlockSlot(blck: head, slot: slot)):
+  node.blockPool.withState(node.blockPool.tmpState, head.atSlot(slot)):
     let proposerIdx = get_beacon_proposer_index(state, cache)
     if proposerIdx.isNone:
       notice "Missing proposer index",
@@ -687,14 +616,26 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
     beaconTime = node.beaconClock.now()
     wallSlot = beaconTime.toSlot()

-  debug "Slot start",
+  info "Slot start",
     lastSlot = shortLog(lastSlot),
     scheduledSlot = shortLog(scheduledSlot),
     beaconTime = shortLog(beaconTime),
     peers = node.network.peersCount,
+    headSlot = shortLog(node.blockPool.head.blck.slot),
+    headEpoch = shortLog(node.blockPool.head.blck.slot.compute_epoch_at_slot()),
+    headRoot = shortLog(node.blockPool.head.blck.root),
+    finalizedSlot = shortLog(node.blockPool.finalizedHead.blck.slot),
+    finalizedRoot = shortLog(node.blockPool.finalizedHead.blck.root),
+    finalizedSlot = shortLog(node.blockPool.finalizedHead.blck.slot.compute_epoch_at_slot()),
     cat = "scheduling"

   if not wallSlot.afterGenesis or (wallSlot.slot < lastSlot):
+    let
+      slot =
+        if wallSlot.afterGenesis: wallSlot.slot
+        else: GENESIS_SLOT
+      nextSlot = slot + 1 # At least GENESIS_SLOT + 1!
+
     # This can happen if the system clock changes time for example, and it's
     # pretty bad
     # TODO shut down? time either was or is bad, and PoS relies on accuracy..
@@ -702,15 +643,9 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
       beaconTime = shortLog(beaconTime),
       lastSlot = shortLog(lastSlot),
       scheduledSlot = shortLog(scheduledSlot),
+      nextSlot = shortLog(nextSlot),
       cat = "clock_drift" # tag "scheduling|clock_drift"?

-    let
-      slot = Slot(
-        if wallSlot.afterGenesis:
-          max(1'u64, wallSlot.slot.uint64)
-        else: GENESIS_SLOT.uint64 + 1)
-      nextSlot = slot + 1
-
     addTimer(saturate(node.beaconClock.fromNow(nextSlot))) do (p: pointer):
       asyncCheck node.onSlotStart(slot, nextSlot)

@@ -728,9 +663,10 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
     # TODO how long should the period be? Using an epoch because that's roughly
     # how long attestations remain interesting
     # TODO should we shut down instead? clearly we're unable to keep up
-    warn "Unable to keep up, skipping ahead without doing work",
+    warn "Unable to keep up, skipping ahead",
       lastSlot = shortLog(lastSlot),
       slot = shortLog(slot),
+      nextSlot = shortLog(nextSlot),
       scheduledSlot = shortLog(scheduledSlot),
       cat = "overload"

@@ -812,7 +748,7 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
   # with any clock discrepancies once only, at the start of slot timer
   # processing..

-  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#attesting
+  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/validator/0_beacon-chain-validator.md#attesting
   # A validator should create and broadcast the attestation to the
   # associated attestation subnet one-third of the way through the slot
   # during which the validator is assigned―that is, SECONDS_PER_SLOT / 3
@@ -842,6 +778,17 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
   let
     nextSlotStart = saturate(node.beaconClock.fromNow(nextSlot))

+  info "Slot end",
+    slot = shortLog(slot),
+    nextSlot = shortLog(nextSlot),
+    headSlot = shortLog(node.blockPool.head.blck.slot),
+    headEpoch = shortLog(node.blockPool.head.blck.slot.compute_epoch_at_slot()),
+    headRoot = shortLog(node.blockPool.head.blck.root),
+    finalizedSlot = shortLog(node.blockPool.finalizedHead.blck.slot),
+    finalizedEpoch = shortLog(node.blockPool.finalizedHead.blck.slot.compute_epoch_at_slot()),
+    finalizedRoot = shortLog(node.blockPool.finalizedHead.blck.root),
+    cat = "scheduling"
+
   addTimer(nextSlotStart) do (p: pointer):
     asyncCheck node.onSlotStart(slot, nextSlot)

@@ -884,19 +831,20 @@ proc run*(node: BeaconNode) =
     node.onAttestation(attestation)

   let
-    t = node.beaconClock.now()
-    startSlot = if t > BeaconTime(0): t.toSlot.slot + 1
-                else: GENESIS_SLOT + 1
-    fromNow = saturate(node.beaconClock.fromNow(startSlot))
+    t = node.beaconClock.now().toSlot()
+    curSlot = if t.afterGenesis: t.slot
+              else: GENESIS_SLOT
+    nextSlot = curSlot + 1 # No earlier than GENESIS_SLOT + 1
+    fromNow = saturate(node.beaconClock.fromNow(nextSlot))

   info "Scheduling first slot action",
     beaconTime = shortLog(node.beaconClock.now()),
-    nextSlot = shortLog(startSlot),
+    nextSlot = shortLog(nextSlot),
     fromNow = shortLog(fromNow),
     cat = "scheduling"

   addTimer(fromNow) do (p: pointer):
-    asyncCheck node.onSlotStart(startSlot - 1, startSlot)
+    asyncCheck node.onSlotStart(curSlot, nextSlot)

   let second = Moment.now() + chronos.seconds(1)
   addTimer(second) do (p: pointer):
@@ -941,7 +889,7 @@ proc start(node: BeaconNode) =
   let
     bs = BlockSlot(blck: head.blck, slot: head.blck.slot)

-  node.blockPool.withState(node.stateCache, bs):
+  node.blockPool.withState(node.blockPool.tmpState, bs):
     node.addLocalValidators(state)

   node.run()
@@ -1034,9 +982,9 @@ when hasPrompt:
       of "attached_validators_balance":
         var balance = uint64(0)
         # TODO slow linear scan!
-        for idx, b in node.stateCache.data.data.balances:
+        for idx, b in node.blockPool.headState.data.data.balances:
           if node.getAttachedValidator(
-              node.stateCache.data.data, ValidatorIndex(idx)) != nil:
+              node.blockPool.headState.data.data, ValidatorIndex(idx)) != nil:
             balance += b
         formatGwei(balance)

@@ -138,6 +138,11 @@ type

     inAdd*: bool

+    headState*: StateData ## State given by the head block
+    justifiedState*: StateData ## Latest justified state, as seen from the head
+
+    tmpState*: StateData ## Scratchpad - may be any state
+
   MissingBlock* = object
     slots*: uint64 # number of slots that are suspected missing
     tries*: int
@@ -8,7 +8,34 @@ declareCounter beacon_reorgs_total, "Total occurrences of reorganizations of the

 logScope: topics = "blkpool"

+proc updateStateData*(
+  pool: BlockPool, state: var StateData, bs: BlockSlot) {.gcsafe.}
+proc add*(
+    pool: var BlockPool, blockRoot: Eth2Digest,
+    signedBlock: SignedBeaconBlock): BlockRef {.gcsafe.}
+
+template withState*(
+    pool: BlockPool, cache: var StateData, blockSlot: BlockSlot, body: untyped): untyped =
+  ## Helper template that updates state to a particular BlockSlot - usage of
+  ## cache is unsafe outside of block.
+  ## TODO async transformations will lead to a race where cache gets updated
+  ## while waiting for future to complete - catch this here somehow?
+
+  updateStateData(pool, cache, blockSlot)
+
+  template hashedState(): HashedBeaconState {.inject, used.} = cache.data
+  template state(): BeaconState {.inject, used.} = cache.data.data
+  template blck(): BlockRef {.inject, used.} = cache.blck
+  template root(): Eth2Digest {.inject, used.} = cache.data.root
+
+  body
+
 func parent*(bs: BlockSlot): BlockSlot =
+  ## Return a blockslot representing the previous slot, using the parent block
+  ## if the current slot had a block
+  if bs.slot == Slot(0):
+    BlockSlot(blck: nil, slot: Slot(0))
+  else:
     BlockSlot(
       blck: if bs.slot > bs.blck.slot: bs.blck else: bs.blck.parent,
       slot: bs.slot - 1
@@ -40,6 +67,60 @@ func isAncestorOf*(a, b: BlockRef): bool =
     doAssert b.slot > b.parent.slot
     b = b.parent

+func getAncestorAt*(blck: BlockRef, slot: Slot): BlockRef =
+  ## Return the most recent block as of the time at `slot` that not more recent
+  ## than `blck` itself
+  var blck = blck
+
+  var depth = 0
+  const maxDepth = (100'i64 * 365 * 24 * 60 * 60 div SECONDS_PER_SLOT.int)
+
+  while true:
+    if blck.slot <= slot:
+      return blck
+
+    if blck.parent.isNil:
+      return nil
+
+    doAssert depth < maxDepth
+    depth += 1
+
+    blck = blck.parent
+
+func get_ancestor*(blck: BlockRef, slot: Slot): BlockRef =
+  ## https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_fork-choice.md#get_ancestor
+  ## Return ancestor at slot, or nil if queried block is older
+  var blck = blck
+
+  var depth = 0
+  const maxDepth = (100'i64 * 365 * 24 * 60 * 60 div SECONDS_PER_SLOT.int)
+
+  while true:
+    if blck.slot == slot:
+      return blck
+
+    if blck.slot < slot:
+      return nil
+
+    if blck.parent.isNil:
+      return nil
+
+    doAssert depth < maxDepth
+    depth += 1
+
+    blck = blck.parent
+
+func atSlot*(blck: BlockRef, slot: Slot): BlockSlot =
+  ## Return a BlockSlot at a given slot, with the block set to the closest block
+  ## available. If slot comes from before the block, a suitable block ancestor
+  ## will be used, else blck is returned as if all slots after it were empty.
+  ## This helper is useful when imagining what the chain looked like at a
+  ## particular moment in time, or when imagining what it will look like in the
+  ## near future if nothing happens (such as when looking ahead for the next
+  ## block proposal)
+  BlockSlot(blck: blck.getAncestorAt(slot), slot: slot)
+
 func init*(T: type BlockRef, root: Eth2Digest, slot: Slot): BlockRef =
   BlockRef(
     root: root,
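Not part of this commit: a hypothetical usage sketch contrasting the two lookups introduced above - `atSlot` falls back to the closest ancestor when the requested slot is empty, while `get_ancestor` only returns a block sitting exactly at that slot, or nil. The names `head` and `wallSlot` are placeholders for values a caller would already have.

```nim
# Hypothetical sketch only - not code from this repository.
proc exampleLookups(head: BlockRef, wallSlot: Slot) =
  let viewed = head.atSlot(wallSlot)        # chain as it looked (or would look) at wallSlot
  doAssert viewed.slot == wallSlot          # slot is always the requested one
  let exact = get_ancestor(head, wallSlot)  # nil unless head's chain has a block exactly at wallSlot
  discard exact
```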
@@ -114,6 +195,10 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
     let slot = db.getBlock(b.root).get().message.slot
     blocksBySlot.mgetOrPut(slot, @[]).add(b)

+  # TODO can't do straight init because in mainnet config, there are too
+  # many live beaconstates on the stack...
+  var tmpState = new Option[BeaconState]
+
   let
     # The head state is necessary to find out what we considered to be the
     # finalized epoch last time we saved something.
@@ -127,14 +212,17 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
     # be the latest justified state or newer, meaning it's enough for
     # establishing what we consider to be the finalized head. This logic
     # will need revisiting however
-    headState = db.getState(headStateRoot).get()
+  tmpState[] = db.getState(headStateRoot)
+  let
     finalizedHead =
       headRef.findAncestorBySlot(
-        headState.finalized_checkpoint.epoch.compute_start_slot_at_epoch())
+        tmpState[].get().finalized_checkpoint.epoch.compute_start_slot_at_epoch())
     justifiedSlot =
-      headState.current_justified_checkpoint.epoch.compute_start_slot_at_epoch()
+      tmpState[].get().current_justified_checkpoint.epoch.compute_start_slot_at_epoch()
     justifiedHead = headRef.findAncestorBySlot(justifiedSlot)
     head = Head(blck: headRef, justified: justifiedHead)
+    justifiedBlock = db.getBlock(justifiedHead.blck.root).get()
+    justifiedStateRoot = justifiedBlock.message.state_root

   doAssert justifiedHead.slot >= finalizedHead.slot,
     "justified head comes before finalized head - database corrupt?"
@@ -143,7 +231,7 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
     head = head.blck, finalizedHead, tail = tailRef,
     totalBlocks = blocks.len, totalKnownSlots = blocksBySlot.len

-  BlockPool(
+  let res = BlockPool(
     pending: initTable[Eth2Digest, SignedBeaconBlock](),
     missing: initTable[Eth2Digest, MissingBlock](),
     blocks: blocks,
@@ -152,9 +240,22 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
     head: head,
     finalizedHead: finalizedHead,
     db: db,
-    heads: @[head]
+    heads: @[head],
   )

+  res.headState = StateData(
+    data: HashedBeaconState(data: tmpState[].get(), root: headStateRoot),
+    blck: headRef)
+  res.tmpState = res.headState
+
+  tmpState[] = db.getState(justifiedStateRoot)
+  res.justifiedState = StateData(
+    data: HashedBeaconState(data: tmpState[].get(), root: justifiedStateRoot),
+    blck: justifiedHead.blck)
+
+  res
+
 proc addSlotMapping(pool: BlockPool, br: BlockRef) =
   proc addIfMissing(s: var seq[BlockRef], v: BlockRef) =
     if v notin s:
@@ -171,13 +272,6 @@ proc delSlotMapping(pool: BlockPool, br: BlockRef) =
   else:
     pool.blocksBySlot[br.slot] = blks

-proc updateStateData*(
-  pool: BlockPool, state: var StateData, bs: BlockSlot) {.gcsafe.}
-
-proc add*(
-    pool: var BlockPool, state: var StateData, blockRoot: Eth2Digest,
-    signedBlock: SignedBeaconBlock): BlockRef {.gcsafe.}
-
 proc addResolvedBlock(
     pool: var BlockPool, state: var StateData, blockRoot: Eth2Digest,
     signedBlock: SignedBeaconBlock, parent: BlockRef): BlockRef =
@@ -245,14 +339,14 @@ proc addResolvedBlock(
     while keepGoing:
       let retries = pool.pending
       for k, v in retries:
-        discard pool.add(state, k, v)
+        discard pool.add(k, v)
       # Keep going for as long as the pending pool is shrinking
       # TODO inefficient! so what?
       keepGoing = pool.pending.len < retries.len
   blockRef

 proc add*(
-    pool: var BlockPool, state: var StateData, blockRoot: Eth2Digest,
+    pool: var BlockPool, blockRoot: Eth2Digest,
     signedBlock: SignedBeaconBlock): BlockRef {.gcsafe.} =
   ## return the block, if resolved...
   ## the state parameter may be updated to include the given block, if
@@ -290,6 +384,16 @@ proc add*(
   let parent = pool.blocks.getOrDefault(blck.parent_root)

   if parent != nil:
+    if parent.slot >= blck.slot:
+      # TODO Malicious block? inform peer pool?
+      notice "Invalid block slot",
+        blck = shortLog(blck),
+        blockRoot = shortLog(blockRoot),
+        parentRoot = shortLog(parent.root),
+        parentSlot = shortLog(parent.slot)
+
+      return
+
     # The block might have been in either of these - we don't want any more
     # work done on its behalf
     pool.pending.del(blockRoot)
@@ -299,9 +403,9 @@ proc add*(

   # TODO if the block is from the future, we should not be resolving it (yet),
   # but maybe we should use it as a hint that our clock is wrong?
-  updateStateData(pool, state, BlockSlot(blck: parent, slot: blck.slot - 1))
+  updateStateData(pool, pool.tmpState, BlockSlot(blck: parent, slot: blck.slot - 1))

-  if not state_transition(state.data, blck, {}):
+  if not state_transition(pool.tmpState.data, blck, {}):
     # TODO find a better way to log all this block data
     notice "Invalid block",
       blck = shortLog(blck),
@@ -310,7 +414,9 @@ proc add*(

     return

-  return pool.addResolvedBlock(state, blockRoot, signedBlock, parent)
+  # Careful, pool.tmpState is now partially inconsistent and will be updated
+  # inside addResolvedBlock
+  return pool.addResolvedBlock(pool.tmpState, blockRoot, signedBlock, parent)

   # TODO already checked hash though? main reason to keep this is because
   # the pending pool calls this function back later in a loop, so as long
@@ -505,7 +611,6 @@ proc maybePutState(pool: BlockPool, state: HashedBeaconState, blck: BlockRef) =
   # TODO this is out of sync with epoch def now, I think -- (slot + 1) mod foo.
   logScope: pcs = "save_state_at_epoch_start"

-
   if state.data.slot mod SLOTS_PER_EPOCH == 0:
     if not pool.db.containsState(state.root):
       info "Storing state",
@ -531,7 +636,6 @@ proc rewindState(pool: BlockPool, state: var StateData, bs: BlockSlot):
|
||||||
# chain of ancestors of the new block. We will do this by loading each
|
# chain of ancestors of the new block. We will do this by loading each
|
||||||
# successive parent block and checking if we can find the corresponding state
|
# successive parent block and checking if we can find the corresponding state
|
||||||
# in the database.
|
# in the database.
|
||||||
|
|
||||||
var
|
var
|
||||||
stateRoot = pool.db.getStateRoot(bs.blck.root, bs.slot)
|
stateRoot = pool.db.getStateRoot(bs.blck.root, bs.slot)
|
||||||
curBs = bs
|
curBs = bs
|
||||||
|
@ -561,7 +665,7 @@ proc rewindState(pool: BlockPool, state: var StateData, bs: BlockSlot):
|
||||||
doAssert false, "Oh noes, we passed big bang!"
|
doAssert false, "Oh noes, we passed big bang!"
|
||||||
|
|
||||||
let
|
let
|
||||||
ancestor = ancestors[^1]
|
ancestor = ancestors.pop()
|
||||||
ancestorState = pool.db.getState(stateRoot.get())
|
ancestorState = pool.db.getState(stateRoot.get())
|
||||||
|
|
||||||
if ancestorState.isNone():
|
if ancestorState.isNone():
|
||||||
|
@ -576,7 +680,7 @@ proc rewindState(pool: BlockPool, state: var StateData, bs: BlockSlot):
|
||||||
|
|
||||||
trace "Replaying state transitions",
|
trace "Replaying state transitions",
|
||||||
stateSlot = shortLog(state.data.data.slot),
|
stateSlot = shortLog(state.data.data.slot),
|
||||||
ancestorStateRoot = shortLog(ancestor.data.state_root),
|
ancestorStateRoot = shortLog(ancestor.data.message.state_root),
|
||||||
ancestorStateSlot = shortLog(ancestorState.get().slot),
|
ancestorStateSlot = shortLog(ancestorState.get().slot),
|
||||||
slot = shortLog(bs.slot),
|
slot = shortLog(bs.slot),
|
||||||
blockRoot = shortLog(bs.blck.root),
|
blockRoot = shortLog(bs.blck.root),
|
||||||
|
@ -616,7 +720,7 @@ proc updateStateData*(pool: BlockPool, state: var StateData, bs: BlockSlot) =
|
||||||
# Time to replay all the blocks between then and now. We skip one because
|
# Time to replay all the blocks between then and now. We skip one because
|
||||||
# it's the one that we found the state with, and it has already been
|
# it's the one that we found the state with, and it has already been
|
||||||
# applied
|
# applied
|
||||||
for i in countdown(ancestors.len - 2, 0):
|
for i in countdown(ancestors.len - 1, 0):
|
||||||
let ok =
|
let ok =
|
||||||
skipAndUpdateState(state.data, ancestors[i].data.message, {skipValidation}) do(
|
skipAndUpdateState(state.data, ancestors[i].data.message, {skipValidation}) do(
|
||||||
state: HashedBeaconState):
|
state: HashedBeaconState):
|
||||||
|
@ -677,65 +781,71 @@ proc setTailBlock(pool: BlockPool, newTail: BlockRef) =
|
||||||
slot = newTail.slot,
|
slot = newTail.slot,
|
||||||
root = shortLog(newTail.root)
|
root = shortLog(newTail.root)
|
||||||
|
|
||||||
proc updateHead*(pool: BlockPool, state: var StateData, blck: BlockRef) =
|
proc updateHead*(pool: BlockPool, newHead: BlockRef) =
|
||||||
## Update what we consider to be the current head, as given by the fork
|
## Update what we consider to be the current head, as given by the fork
|
||||||
## choice.
|
## choice.
|
||||||
## The choice of head affects the choice of finalization point - the order
|
## The choice of head affects the choice of finalization point - the order
|
||||||
## of operations naturally becomes important here - after updating the head,
|
## of operations naturally becomes important here - after updating the head,
|
||||||
## blocks that were once considered potential candidates for a tree will
|
## blocks that were once considered potential candidates for a tree will
|
||||||
## now fall from grace, or no longer be considered resolved.
|
## now fall from grace, or no longer be considered resolved.
|
||||||
doAssert blck.parent != nil or blck.slot == 0
|
doAssert newHead.parent != nil or newHead.slot == 0
|
||||||
logScope: pcs = "fork_choice"
|
logScope: pcs = "fork_choice"
|
||||||
|
|
||||||
if pool.head.blck == blck:
|
if pool.head.blck == newHead:
|
||||||
info "No head block update",
|
info "No head block update",
|
||||||
headBlockRoot = shortLog(blck.root),
|
headBlockRoot = shortLog(newHead.root),
|
||||||
headBlockSlot = shortLog(blck.slot),
|
headBlockSlot = shortLog(newHead.slot),
|
||||||
cat = "fork_choice"
|
cat = "fork_choice"
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
let
|
let
|
||||||
lastHead = pool.head
|
lastHead = pool.head
|
||||||
pool.db.putHeadBlock(blck.root)
|
pool.db.putHeadBlock(newHead.root)
|
||||||
|
|
||||||
# Start off by making sure we have the right state
|
# Start off by making sure we have the right state
|
||||||
updateStateData(pool, state, BlockSlot(blck: blck, slot: blck.slot))
|
updateStateData(
|
||||||
let justifiedSlot = state.data.data
|
pool, pool.headState, BlockSlot(blck: newHead, slot: newHead.slot))
|
||||||
|
|
||||||
|
let
|
||||||
|
justifiedSlot = pool.headState.data.data
|
||||||
.current_justified_checkpoint
|
.current_justified_checkpoint
|
||||||
.epoch
|
.epoch
|
||||||
.compute_start_slot_at_epoch()
|
.compute_start_slot_at_epoch()
|
||||||
pool.head = Head(blck: blck, justified: blck.findAncestorBySlot(justifiedSlot))
|
justifiedBS = newHead.findAncestorBySlot(justifiedSlot)
|
||||||
|
|
||||||
if lastHead.blck != blck.parent:
|
pool.head = Head(blck: newHead, justified: justifiedBS)
|
||||||
|
updateStateData(pool, pool.justifiedState, justifiedBS)
|
||||||
|
|
||||||
|
# TODO isAncestorOf may be expensive - too expensive?
|
||||||
|
if not lastHead.blck.isAncestorOf(newHead):
|
||||||
info "Updated head block (new parent)",
|
info "Updated head block (new parent)",
|
||||||
lastHeadRoot = shortLog(lastHead.blck.root),
|
lastHeadRoot = shortLog(lastHead.blck.root),
|
||||||
parentRoot = shortLog(blck.parent.root),
|
parentRoot = shortLog(newHead.parent.root),
|
||||||
stateRoot = shortLog(state.data.root),
|
stateRoot = shortLog(pool.headState.data.root),
|
||||||
headBlockRoot = shortLog(state.blck.root),
|
headBlockRoot = shortLog(pool.headState.blck.root),
|
||||||
stateSlot = shortLog(state.data.data.slot),
|
stateSlot = shortLog(pool.headState.data.data.slot),
|
||||||
justifiedEpoch = shortLog(state.data.data.current_justified_checkpoint.epoch),
|
justifiedEpoch = shortLog(pool.headState.data.data.current_justified_checkpoint.epoch),
|
||||||
finalizedEpoch = shortLog(state.data.data.finalized_checkpoint.epoch),
|
finalizedEpoch = shortLog(pool.headState.data.data.finalized_checkpoint.epoch),
|
||||||
cat = "fork_choice"
|
cat = "fork_choice"
|
||||||
|
|
||||||
# A reasonable criterion for "reorganizations of the chain"
|
# A reasonable criterion for "reorganizations of the chain"
|
||||||
# TODO if multiple heads have gotten skipped, could fire at
|
|
||||||
# spurious times - for example when multiple blocks have been added between
|
|
||||||
# head updates
|
|
||||||
beacon_reorgs_total.inc()
|
beacon_reorgs_total.inc()
|
||||||
else:
|
else:
|
||||||
info "Updated head block",
|
info "Updated head block",
|
||||||
stateRoot = shortLog(state.data.root),
|
stateRoot = shortLog(pool.headState.data.root),
|
||||||
headBlockRoot = shortLog(state.blck.root),
|
headBlockRoot = shortLog(pool.headState.blck.root),
|
||||||
stateSlot = shortLog(state.data.data.slot),
|
stateSlot = shortLog(pool.headState.data.data.slot),
|
||||||
justifiedEpoch = shortLog(state.data.data.current_justified_checkpoint.epoch),
|
justifiedEpoch = shortLog(pool.headState.data.data.current_justified_checkpoint.epoch),
|
||||||
finalizedEpoch = shortLog(state.data.data.finalized_checkpoint.epoch),
|
finalizedEpoch = shortLog(pool.headState.data.data.finalized_checkpoint.epoch),
|
||||||
cat = "fork_choice"
|
cat = "fork_choice"
|
||||||
|
|
||||||
let
|
let
|
||||||
finalizedEpochStartSlot = state.data.data.finalized_checkpoint.epoch.compute_start_slot_at_epoch()
|
finalizedEpochStartSlot =
|
||||||
|
pool.headState.data.data.finalized_checkpoint.epoch.
|
||||||
|
compute_start_slot_at_epoch()
|
||||||
# TODO there might not be a block at the epoch boundary - what then?
|
# TODO there might not be a block at the epoch boundary - what then?
|
||||||
finalizedHead = blck.findAncestorBySlot(finalizedEpochStartSlot)
|
finalizedHead = newHead.findAncestorBySlot(finalizedEpochStartSlot)
|
||||||
|
|
||||||
doAssert (not finalizedHead.blck.isNil),
|
doAssert (not finalizedHead.blck.isNil),
|
||||||
"Block graph should always lead to a finalized block"
|
"Block graph should always lead to a finalized block"
|
||||||
|
@ -744,8 +854,8 @@ proc updateHead*(pool: BlockPool, state: var StateData, blck: BlockRef) =
|
||||||
info "Finalized block",
|
info "Finalized block",
|
||||||
finalizedBlockRoot = shortLog(finalizedHead.blck.root),
|
finalizedBlockRoot = shortLog(finalizedHead.blck.root),
|
||||||
finalizedBlockSlot = shortLog(finalizedHead.slot),
|
finalizedBlockSlot = shortLog(finalizedHead.slot),
|
||||||
headBlockRoot = shortLog(blck.root),
|
headBlockRoot = shortLog(newHead.root),
|
||||||
headBlockSlot = shortLog(blck.slot),
|
headBlockSlot = shortLog(newHead.slot),
|
||||||
cat = "fork_choice"
|
cat = "fork_choice"
|
||||||
|
|
||||||
pool.finalizedHead = finalizedHead
|
pool.finalizedHead = finalizedHead
|
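The reorg check earlier in this proc relies on walking the parent chain via `isAncestorOf`. A hedged, self-contained sketch of such an ancestor walk (toy `Node` type; the real implementation lives on `BlockRef`):

```nim
type Node = ref object
  parent: Node

func isAncestorOf(a, b: Node): bool =
  # Follow b's parent links; a is an ancestor if we meet it on the way up.
  var cur = b
  while cur != nil:
    if cur == a:
      return true
    cur = cur.parent
  false

let root = Node()
let mid = Node(parent: root)
let tip = Node(parent: mid)
echo root.isAncestorOf(tip)  # true - a plain head advance
echo tip.isAncestorOf(root)  # false - counted as a reorg above
```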
||||||
|
|
|
@ -1,70 +0,0 @@
|
||||||
import
|
|
||||||
deques, options, sequtils, tables,
|
|
||||||
./spec/[datatypes, crypto, helpers],
|
|
||||||
./attestation_pool, ./beacon_node_types, ./ssz
|
|
||||||
|
|
||||||
func get_ancestor(blck: BlockRef, slot: Slot): BlockRef =
|
|
||||||
var blck = blck
|
|
||||||
|
|
||||||
var depth = 0
|
|
||||||
const maxDepth = (100'i64 * 365 * 24 * 60 * 60 div SECONDS_PER_SLOT.int)
|
|
||||||
|
|
||||||
while true:
|
|
||||||
if blck.slot == slot:
|
|
||||||
return blck
|
|
||||||
|
|
||||||
if blck.slot < slot:
|
|
||||||
return nil
|
|
||||||
|
|
||||||
if blck.parent == nil:
|
|
||||||
return nil
|
|
||||||
|
|
||||||
doAssert depth < maxDepth
|
|
||||||
depth += 1
|
|
||||||
|
|
||||||
blck = blck.parent
|
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.8.4/specs/core/0_fork-choice.md
|
|
||||||
# The structure of this code differs from the spec since we use a different
|
|
||||||
# strategy for storing states and justification points - it should nonetheless
|
|
||||||
# be close in terms of functionality.
|
|
||||||
func lmdGhost*(
|
|
||||||
pool: AttestationPool, start_state: BeaconState,
|
|
||||||
start_block: BlockRef): BlockRef =
|
|
||||||
# TODO: a Fenwick Tree datastructure to keep track of cumulated votes
|
|
||||||
# in O(log N) complexity
|
|
||||||
# https://en.wikipedia.org/wiki/Fenwick_tree
|
|
||||||
# Nim implementation for cumulative frequencies at
|
|
||||||
# https://github.com/numforge/laser/blob/990e59fffe50779cdef33aa0b8f22da19e1eb328/benchmarks/random_sampling/fenwicktree.nim
|
|
||||||
|
|
||||||
let
|
|
||||||
active_validator_indices =
|
|
||||||
get_active_validator_indices(
|
|
||||||
start_state, compute_epoch_at_slot(start_state.slot))
|
|
||||||
|
|
||||||
var latest_messages: seq[tuple[validator: ValidatorIndex, blck: BlockRef]]
|
|
||||||
for i in active_validator_indices:
|
|
||||||
let pubKey = start_state.validators[i].pubkey
|
|
||||||
if (let vote = pool.latestAttestation(pubKey); not vote.isNil):
|
|
||||||
latest_messages.add((i, vote))
|
|
||||||
|
|
||||||
template get_latest_attesting_balance(blck: BlockRef): uint64 =
|
|
||||||
var res: uint64
|
|
||||||
for validator_index, target in latest_messages.items():
|
|
||||||
if get_ancestor(target, blck.slot) == blck:
|
|
||||||
res += start_state.validators[validator_index].effective_balance
|
|
||||||
res
|
|
||||||
|
|
||||||
var head = start_block
|
|
||||||
while true:
|
|
||||||
if head.children.len() == 0:
|
|
||||||
return head
|
|
||||||
|
|
||||||
head = head.children[0]
|
|
||||||
var
|
|
||||||
headCount = get_latest_attesting_balance(head)
|
|
||||||
|
|
||||||
for i in 1..<head.children.len:
|
|
||||||
if (let hc = get_latest_attesting_balance(head.children[i]); hc > headCount):
|
|
||||||
head = head.children[i]
|
|
||||||
headCount = hc
|
|
|
@ -185,11 +185,11 @@ proc run(conf: InspectorConf) {.async.} =
|
||||||
if conf.decode:
|
if conf.decode:
|
||||||
try:
|
try:
|
||||||
if ticket.topic.startsWith(topicBeaconBlocks):
|
if ticket.topic.startsWith(topicBeaconBlocks):
|
||||||
info "BeaconBlock", msg = SSZ.decode(message.data, BeaconBlock)
|
info "SignedBeaconBlock", msg = SSZ.decode(message.data, SignedBeaconBlock)
|
||||||
elif ticket.topic.startsWith(topicAttestations):
|
elif ticket.topic.startsWith(topicAttestations):
|
||||||
info "Attestation", msg = SSZ.decode(message.data, Attestation)
|
info "Attestation", msg = SSZ.decode(message.data, Attestation)
|
||||||
elif ticket.topic.startsWith(topicVoluntaryExits):
|
elif ticket.topic.startsWith(topicVoluntaryExits):
|
||||||
info "VoluntaryExit", msg = SSZ.decode(message.data, VoluntaryExit)
|
info "SignedVoluntaryExit", msg = SSZ.decode(message.data, SignedVoluntaryExit)
|
||||||
elif ticket.topic.startsWith(topicProposerSlashings):
|
elif ticket.topic.startsWith(topicProposerSlashings):
|
||||||
info "ProposerSlashing", msg = SSZ.decode(message.data, ProposerSlashing)
|
info "ProposerSlashing", msg = SSZ.decode(message.data, ProposerSlashing)
|
||||||
elif ticket.topic.startsWith(topicAttesterSlashings):
|
elif ticket.topic.startsWith(topicAttesterSlashings):
|
||||||
|
|
|
@ -39,8 +39,8 @@ const eth1BlockHash* = block:
|
||||||
for v in x.data.mitems: v = 0x42
|
for v in x.data.mitems: v = 0x42
|
||||||
x
|
x
|
||||||
|
|
||||||
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_deposit-contract.md#withdrawal-credentials
|
||||||
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
|
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_deposit-contract.md#withdrawal-credentials
|
|
||||||
var bytes = eth2hash(k.getBytes())
|
var bytes = eth2hash(k.getBytes())
|
||||||
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8
|
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8
|
||||||
bytes
|
bytes
|
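The credential layout above is a hash of the pubkey with the first byte overwritten by the withdrawal prefix. A toy sketch of that layout (xor stands in for eth2hash/SHA-256; prefix value of 0 assumed):

```nim
const BLS_WITHDRAWAL_PREFIX = 0'u8

func toyDigest(data: openArray[byte]): array[32, byte] =
  # Stand-in for eth2hash, only so the sketch runs without a crypto dependency.
  for i, b in data:
    result[i mod 32] = result[i mod 32] xor b

let pubkeyBytes = newSeq[byte](48)       # hypothetical 48-byte BLS pubkey
var credentials = toyDigest(pubkeyBytes)
credentials[0] = BLS_WITHDRAWAL_PREFIX   # first byte tags the withdrawal scheme
echo credentials[0]                      # 0
```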
||||||
|
@ -59,6 +59,7 @@ func makeDeposit*(
|
||||||
if skipValidation notin flags:
|
if skipValidation notin flags:
|
||||||
ret.data.signature =
|
ret.data.signature =
|
||||||
bls_sign(
|
bls_sign(
|
||||||
privkey, hash_tree_root(ret.data).data, compute_domain(DOMAIN_DEPOSIT))
|
privkey, hash_tree_root(ret.getDepositMessage).data,
|
||||||
|
compute_domain(DOMAIN_DEPOSIT))
|
||||||
|
|
||||||
ret
|
ret
|
||||||
|
|
|
@ -9,10 +9,11 @@ import
|
||||||
tables, algorithm, math, sequtils, options,
|
tables, algorithm, math, sequtils, options,
|
||||||
json_serialization/std/sets, chronicles, stew/bitseqs,
|
json_serialization/std/sets, chronicles, stew/bitseqs,
|
||||||
../extras, ../ssz,
|
../extras, ../ssz,
|
||||||
./crypto, ./datatypes, ./digest, ./helpers, ./validator
|
./crypto, ./datatypes, ./digest, ./helpers, ./validator,
|
||||||
|
../../nbench/bench_lab
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#is_valid_merkle_branch
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_valid_merkle_branch
|
||||||
func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool =
|
func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool {.nbench.}=
|
||||||
## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and
|
## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and
|
||||||
## ``branch``.
|
## ``branch``.
|
||||||
var
|
var
|
||||||
|
@ -29,13 +30,13 @@ func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], de
|
||||||
value = eth2hash(buf)
|
value = eth2hash(buf)
|
||||||
value == root
|
value == root
|
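The verification loop (only its tail shows in this hunk) climbs the tree one level at a time, hashing with the sibling chosen by the corresponding bit of ``index``. A hedged standalone sketch of that walk, with a toy hash in place of SHA-256:

```nim
func toyHash(a, b: array[32, byte]): array[32, byte] =
  # Stand-in for eth2hash over the concatenation, to keep the example dependency-free.
  for i in 0 ..< 32:
    result[i] = a[i] xor b[i] xor byte(i)

func verifyBranch(leaf: array[32, byte], branch: openArray[array[32, byte]],
                  depth, index: uint64, root: array[32, byte]): bool =
  var value = leaf
  for i in 0 ..< depth.int:
    if ((index shr i) and 1) == 1:
      value = toyHash(branch[i], value)  # we are the right child at this level
    else:
      value = toyHash(value, branch[i])  # we are the left child at this level
  value == root

var leaf, sibling: array[32, byte]
sibling[0] = 1
let root = toyHash(leaf, sibling)
echo verifyBranch(leaf, [sibling], 1, 0, root)  # true
```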
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#increase_balance
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#increase_balance
|
||||||
func increase_balance*(
|
func increase_balance*(
|
||||||
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
|
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
|
||||||
# Increase the validator balance at index ``index`` by ``delta``.
|
# Increase the validator balance at index ``index`` by ``delta``.
|
||||||
state.balances[index] += delta
|
state.balances[index] += delta
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#decrease_balance
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#decrease_balance
|
||||||
func decrease_balance*(
|
func decrease_balance*(
|
||||||
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
|
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
|
||||||
## Decrease the validator balance at index ``index`` by ``delta``, with
|
## Decrease the validator balance at index ``index`` by ``delta``, with
|
||||||
|
@ -48,13 +49,13 @@ func decrease_balance*(
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.8.4/specs/core/0_beacon-chain.md#deposits
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.8.4/specs/core/0_beacon-chain.md#deposits
|
||||||
func process_deposit*(
|
func process_deposit*(
|
||||||
state: var BeaconState, deposit: Deposit, flags: UpdateFlags = {}): bool =
|
state: var BeaconState, deposit: Deposit, flags: UpdateFlags = {}): bool {.nbench.}=
|
||||||
# Process an Eth1 deposit, registering a validator or increasing its balance.
|
# Process an Eth1 deposit, registering a validator or increasing its balance.
|
||||||
|
|
||||||
# Verify the Merkle branch
|
# Verify the Merkle branch
|
||||||
# TODO enable this check, but don't use doAssert
|
# TODO enable this check, but don't use doAssert
|
||||||
if not is_valid_merkle_branch(
|
if not is_valid_merkle_branch(
|
||||||
hash_tree_root(deposit.data),
|
hash_tree_root(deposit.getDepositMessage),
|
||||||
deposit.proof,
|
deposit.proof,
|
||||||
DEPOSIT_CONTRACT_TREE_DEPTH,
|
DEPOSIT_CONTRACT_TREE_DEPTH,
|
||||||
state.eth1_deposit_index,
|
state.eth1_deposit_index,
|
||||||
|
@ -80,8 +81,8 @@ func process_deposit*(
|
||||||
if index == -1:
|
if index == -1:
|
||||||
# Verify the deposit signature (proof of possession)
|
# Verify the deposit signature (proof of possession)
|
||||||
if skipValidation notin flags and not bls_verify(
|
if skipValidation notin flags and not bls_verify(
|
||||||
pubkey, hash_tree_root(deposit.data).data, deposit.data.signature,
|
pubkey, hash_tree_root(deposit.getDepositMessage).data,
|
||||||
compute_domain(DOMAIN_DEPOSIT)):
|
deposit.data.signature, compute_domain(DOMAIN_DEPOSIT)):
|
||||||
return false
|
return false
|
||||||
|
|
||||||
# Add validator and balance entries
|
# Add validator and balance entries
|
||||||
|
@ -102,13 +103,13 @@ func process_deposit*(
|
||||||
|
|
||||||
true
|
true
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#compute_activation_exit_epoch
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#compute_activation_exit_epoch
|
||||||
func compute_activation_exit_epoch(epoch: Epoch): Epoch =
|
func compute_activation_exit_epoch(epoch: Epoch): Epoch =
|
||||||
## Return the epoch during which validator activations and exits initiated in
|
## Return the epoch during which validator activations and exits initiated in
|
||||||
## ``epoch`` take effect.
|
## ``epoch`` take effect.
|
||||||
epoch + 1 + MAX_SEED_LOOKAHEAD
|
epoch + 1 + MAX_SEED_LOOKAHEAD
|
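A worked example of the lookahead rule above, assuming the spec value MAX_SEED_LOOKAHEAD = 4:

```nim
const MAX_SEED_LOOKAHEAD = 4'u64

func computeActivationExitEpoch(epoch: uint64): uint64 =
  epoch + 1 + MAX_SEED_LOOKAHEAD

echo computeActivationExitEpoch(10)  # 15: a change initiated in epoch 10 takes effect in epoch 15
```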
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_validator_churn_limit
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_validator_churn_limit
|
||||||
func get_validator_churn_limit(state: BeaconState): uint64 =
|
func get_validator_churn_limit(state: BeaconState): uint64 =
|
||||||
# Return the validator churn limit for the current epoch.
|
# Return the validator churn limit for the current epoch.
|
||||||
let active_validator_indices =
|
let active_validator_indices =
|
||||||
|
@ -116,7 +117,7 @@ func get_validator_churn_limit(state: BeaconState): uint64 =
|
||||||
max(MIN_PER_EPOCH_CHURN_LIMIT,
|
max(MIN_PER_EPOCH_CHURN_LIMIT,
|
||||||
len(active_validator_indices) div CHURN_LIMIT_QUOTIENT).uint64
|
len(active_validator_indices) div CHURN_LIMIT_QUOTIENT).uint64
|
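The churn-limit arithmetic in isolation, assuming the spec constants MIN_PER_EPOCH_CHURN_LIMIT = 4 and CHURN_LIMIT_QUOTIENT = 65536:

```nim
const
  MIN_PER_EPOCH_CHURN_LIMIT = 4'u64
  CHURN_LIMIT_QUOTIENT = 65536'u64

func churnLimit(activeValidators: uint64): uint64 =
  max(MIN_PER_EPOCH_CHURN_LIMIT, activeValidators div CHURN_LIMIT_QUOTIENT)

echo churnLimit(16_384)   # 4: small validator sets fall back to the minimum
echo churnLimit(400_000)  # 6
```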
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#initiate_validator_exit
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#initiate_validator_exit
|
||||||
func initiate_validator_exit*(state: var BeaconState,
|
func initiate_validator_exit*(state: var BeaconState,
|
||||||
index: ValidatorIndex) =
|
index: ValidatorIndex) =
|
||||||
# Initiate the exit of the validator with index ``index``.
|
# Initiate the exit of the validator with index ``index``.
|
||||||
|
@ -189,12 +190,12 @@ proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex,
|
||||||
increase_balance(
|
increase_balance(
|
||||||
state, whistleblower_index, whistleblowing_reward - proposer_reward)
|
state, whistleblower_index, whistleblowing_reward - proposer_reward)
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#genesis
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#genesis
|
||||||
func initialize_beacon_state_from_eth1*(
|
func initialize_beacon_state_from_eth1*(
|
||||||
eth1_block_hash: Eth2Digest,
|
eth1_block_hash: Eth2Digest,
|
||||||
eth1_timestamp: uint64,
|
eth1_timestamp: uint64,
|
||||||
deposits: openArray[Deposit],
|
deposits: openArray[Deposit],
|
||||||
flags: UpdateFlags = {}): BeaconState =
|
flags: UpdateFlags = {}): BeaconState {.nbench.}=
|
||||||
## Get the genesis ``BeaconState``.
|
## Get the genesis ``BeaconState``.
|
||||||
##
|
##
|
||||||
## Before the beacon chain starts, validators will register in the Eth1 chain
|
## Before the beacon chain starts, validators will register in the Eth1 chain
|
||||||
|
@ -274,7 +275,7 @@ func get_initial_beacon_block*(state: BeaconState): SignedBeaconBlock =
|
||||||
# parent_root, randao_reveal, eth1_data, signature, and body automatically
|
# parent_root, randao_reveal, eth1_data, signature, and body automatically
|
||||||
# initialized to default values.
|
# initialized to default values.
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_block_root_at_slot
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_block_root_at_slot
|
||||||
func get_block_root_at_slot*(state: BeaconState,
|
func get_block_root_at_slot*(state: BeaconState,
|
||||||
slot: Slot): Eth2Digest =
|
slot: Slot): Eth2Digest =
|
||||||
# Return the block root at a recent ``slot``.
|
# Return the block root at a recent ``slot``.
|
||||||
|
@ -283,12 +284,12 @@ func get_block_root_at_slot*(state: BeaconState,
|
||||||
doAssert slot < state.slot
|
doAssert slot < state.slot
|
||||||
state.block_roots[slot mod SLOTS_PER_HISTORICAL_ROOT]
|
state.block_roots[slot mod SLOTS_PER_HISTORICAL_ROOT]
|
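The lookup above is a plain ring-buffer index; assuming the mainnet value SLOTS_PER_HISTORICAL_ROOT = 8192:

```nim
const SLOTS_PER_HISTORICAL_ROOT = 8192'u64

let slot = 10_000'u64
echo slot mod SLOTS_PER_HISTORICAL_ROOT  # 1808: where that slot's root sits in block_roots
```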
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_block_root
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_block_root
|
||||||
func get_block_root*(state: BeaconState, epoch: Epoch): Eth2Digest =
|
func get_block_root*(state: BeaconState, epoch: Epoch): Eth2Digest =
|
||||||
# Return the block root at the start of a recent ``epoch``.
|
# Return the block root at the start of a recent ``epoch``.
|
||||||
get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch))
|
get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch))
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_total_balance
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_total_balance
|
||||||
func get_total_balance*(state: BeaconState, validators: auto): Gwei =
|
func get_total_balance*(state: BeaconState, validators: auto): Gwei =
|
||||||
## Return the combined effective balance of the ``indices``. (1 Gwei minimum
|
## Return the combined effective balance of the ``indices``. (1 Gwei minimum
|
||||||
## to avoid divisions by zero.)
|
## to avoid divisions by zero.)
|
||||||
|
@ -298,13 +299,13 @@ func get_total_balance*(state: BeaconState, validators: auto): Gwei =
|
||||||
|
|
||||||
# XXX: Move to state_transition_epoch.nim?
|
# XXX: Move to state_transition_epoch.nim?
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#is_eligible_for_activation_queue
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_eligible_for_activation_queue
|
||||||
func is_eligible_for_activation_queue(validator: Validator): bool =
|
func is_eligible_for_activation_queue(validator: Validator): bool =
|
||||||
# Check if ``validator`` is eligible to be placed into the activation queue.
|
# Check if ``validator`` is eligible to be placed into the activation queue.
|
||||||
validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
|
validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
|
||||||
validator.effective_balance == MAX_EFFECTIVE_BALANCE
|
validator.effective_balance == MAX_EFFECTIVE_BALANCE
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#is_eligible_for_activation
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_eligible_for_activation
|
||||||
func is_eligible_for_activation(state: BeaconState, validator: Validator):
|
func is_eligible_for_activation(state: BeaconState, validator: Validator):
|
||||||
bool =
|
bool =
|
||||||
# Check if ``validator`` is eligible for activation.
|
# Check if ``validator`` is eligible for activation.
|
||||||
|
@ -314,8 +315,8 @@ func is_eligible_for_activation(state: BeaconState, validator: Validator):
|
||||||
# Has not yet been activated
|
# Has not yet been activated
|
||||||
validator.activation_epoch == FAR_FUTURE_EPOCH
|
validator.activation_epoch == FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#registry-updates
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#registry-updates
|
||||||
proc process_registry_updates*(state: var BeaconState) =
|
proc process_registry_updates*(state: var BeaconState) {.nbench.}=
|
||||||
## Process activation eligibility and ejections
|
## Process activation eligibility and ejections
|
||||||
## Try to avoid caching here, since this could easily become undefined
|
## Try to avoid caching here, since this could easily become undefined
|
||||||
|
|
||||||
|
@ -366,7 +367,7 @@ proc process_registry_updates*(state: var BeaconState) =
|
||||||
validator.activation_epoch =
|
validator.activation_epoch =
|
||||||
compute_activation_exit_epoch(get_current_epoch(state))
|
compute_activation_exit_epoch(get_current_epoch(state))
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#is_valid_indexed_attestation
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_valid_indexed_attestation
|
||||||
proc is_valid_indexed_attestation*(
|
proc is_valid_indexed_attestation*(
|
||||||
state: BeaconState, indexed_attestation: IndexedAttestation): bool =
|
state: BeaconState, indexed_attestation: IndexedAttestation): bool =
|
||||||
## Check if ``indexed_attestation`` has valid indices and signature.
|
## Check if ``indexed_attestation`` has valid indices and signature.
|
||||||
|
@ -399,7 +400,7 @@ proc is_valid_indexed_attestation*(
|
||||||
|
|
||||||
true
|
true
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_attesting_indices
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_attesting_indices
|
||||||
func get_attesting_indices*(state: BeaconState,
|
func get_attesting_indices*(state: BeaconState,
|
||||||
data: AttestationData,
|
data: AttestationData,
|
||||||
bits: CommitteeValidatorsBits,
|
bits: CommitteeValidatorsBits,
|
||||||
|
@ -412,7 +413,7 @@ func get_attesting_indices*(state: BeaconState,
|
||||||
if bits[i]:
|
if bits[i]:
|
||||||
result.incl index
|
result.incl index
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_indexed_attestation
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_indexed_attestation
|
||||||
func get_indexed_attestation(state: BeaconState, attestation: Attestation,
|
func get_indexed_attestation(state: BeaconState, attestation: Attestation,
|
||||||
stateCache: var StateCache): IndexedAttestation =
|
stateCache: var StateCache): IndexedAttestation =
|
||||||
# Return the indexed attestation corresponding to ``attestation``.
|
# Return the indexed attestation corresponding to ``attestation``.
|
||||||
|
@ -500,7 +501,7 @@ proc check_attestation*(
|
||||||
|
|
||||||
proc process_attestation*(
|
proc process_attestation*(
|
||||||
state: var BeaconState, attestation: Attestation, flags: UpdateFlags,
|
state: var BeaconState, attestation: Attestation, flags: UpdateFlags,
|
||||||
stateCache: var StateCache): bool =
|
stateCache: var StateCache): bool {.nbench.}=
|
||||||
# In the spec, attestation validation is mixed with state mutation, so here
|
# In the spec, attestation validation is mixed with state mutation, so here
|
||||||
# we've split it into two functions so that the validation logic can be
|
# we've split it into two functions so that the validation logic can be
|
||||||
# reused when looking for suitable blocks to include in attestations.
|
# reused when looking for suitable blocks to include in attestations.
|
||||||
|
|
|
@ -141,15 +141,15 @@ func combine*[T](x: var BlsValue[T], other: BlsValue[T]) =
|
||||||
doAssert x.kind == Real and other.kind == Real
|
doAssert x.kind == Real and other.kind == Real
|
||||||
x.blsValue.combine(other.blsValue)
|
x.blsValue.combine(other.blsValue)
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/bls_signature.md#bls_aggregate_pubkeys
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/bls_signature.md#bls_aggregate_pubkeys
|
||||||
func bls_aggregate_pubkeys*(keys: openArray[ValidatorPubKey]): ValidatorPubKey =
|
func bls_aggregate_pubkeys*(keys: openArray[ValidatorPubKey]): ValidatorPubKey =
|
||||||
keys.combine()
|
keys.combine()
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/bls_signature.md#bls_aggregate_signatures
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/bls_signature.md#bls_aggregate_signatures
|
||||||
func bls_aggregate_signatures*(keys: openArray[ValidatorSig]): ValidatorSig =
|
func bls_aggregate_signatures*(keys: openArray[ValidatorSig]): ValidatorSig =
|
||||||
keys.combine()
|
keys.combine()
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/bls_signature.md#bls_verify
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/bls_signature.md#bls_verify
|
||||||
func bls_verify*(
|
func bls_verify*(
|
||||||
pubkey: ValidatorPubKey, msg: openArray[byte], sig: ValidatorSig,
|
pubkey: ValidatorPubKey, msg: openArray[byte], sig: ValidatorSig,
|
||||||
domain: Domain): bool =
|
domain: Domain): bool =
|
||||||
|
|
|
@ -52,13 +52,13 @@ else:
|
||||||
{.fatal: "Preset \"" & const_preset ".nim\" is not supported.".}
|
{.fatal: "Preset \"" & const_preset ".nim\" is not supported.".}
|
||||||
|
|
||||||
const
|
const
|
||||||
SPEC_VERSION* = "0.9.3" ## \
|
SPEC_VERSION* = "0.9.4" ## \
|
||||||
## Spec version we're aiming to be compatible with, right now
|
## Spec version we're aiming to be compatible with, right now
|
||||||
## TODO: improve this scheme once we can negotiate versions in protocol
|
## TODO: improve this scheme once we can negotiate versions in protocol
|
||||||
|
|
||||||
# Initial values
|
# Initial values
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#initial-values
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#initial-values
|
||||||
GENESIS_EPOCH* = (GENESIS_SLOT.uint64 div SLOTS_PER_EPOCH).Epoch ##\
|
GENESIS_EPOCH* = (GENESIS_SLOT.uint64 div SLOTS_PER_EPOCH).Epoch ##\
|
||||||
## compute_epoch_at_slot(GENESIS_SLOT)
|
## compute_epoch_at_slot(GENESIS_SLOT)
|
||||||
|
|
||||||
|
@ -80,7 +80,7 @@ type
|
||||||
|
|
||||||
# Domains
|
# Domains
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#domain-types
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#domain-types
|
||||||
DomainType* {.pure.} = enum
|
DomainType* {.pure.} = enum
|
||||||
DOMAIN_BEACON_PROPOSER = 0
|
DOMAIN_BEACON_PROPOSER = 0
|
||||||
DOMAIN_BEACON_ATTESTER = 1
|
DOMAIN_BEACON_ATTESTER = 1
|
||||||
|
@ -88,10 +88,10 @@ type
|
||||||
DOMAIN_DEPOSIT = 3
|
DOMAIN_DEPOSIT = 3
|
||||||
DOMAIN_VOLUNTARY_EXIT = 4
|
DOMAIN_VOLUNTARY_EXIT = 4
|
||||||
# Phase 1 - Custody game
|
# Phase 1 - Custody game
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/1_custody-game.md#signature-domain-types
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/1_custody-game.md#signature-domain-types
|
||||||
DOMAIN_CUSTODY_BIT_CHALLENGE = 6
|
DOMAIN_CUSTODY_BIT_CHALLENGE = 6
|
||||||
# Phase 1 - Sharding
|
# Phase 1 - Sharding
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/1_shard-data-chains.md#signature-domain-types
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/1_shard-data-chains.md#signature-domain-types
|
||||||
DOMAIN_SHARD_PROPOSER = 128
|
DOMAIN_SHARD_PROPOSER = 128
|
||||||
DOMAIN_SHARD_ATTESTER = 129
|
DOMAIN_SHARD_ATTESTER = 129
|
||||||
|
|
||||||
|
@ -107,18 +107,18 @@ type
|
||||||
|
|
||||||
BitList*[maxLen: static int] = distinct BitSeq
|
BitList*[maxLen: static int] = distinct BitSeq
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#proposerslashing
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#proposerslashing
|
||||||
ProposerSlashing* = object
|
ProposerSlashing* = object
|
||||||
proposer_index*: uint64
|
proposer_index*: uint64
|
||||||
signed_header_1*: SignedBeaconBlockHeader
|
signed_header_1*: SignedBeaconBlockHeader
|
||||||
signed_header_2*: SignedBeaconBlockHeader
|
signed_header_2*: SignedBeaconBlockHeader
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#attesterslashing
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#attesterslashing
|
||||||
AttesterSlashing* = object
|
AttesterSlashing* = object
|
||||||
attestation_1*: IndexedAttestation
|
attestation_1*: IndexedAttestation
|
||||||
attestation_2*: IndexedAttestation
|
attestation_2*: IndexedAttestation
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#indexedattestation
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#indexedattestation
|
||||||
IndexedAttestation* = object
|
IndexedAttestation* = object
|
||||||
# TODO ValidatorIndex, but that doesn't serialize properly
|
# TODO ValidatorIndex, but that doesn't serialize properly
|
||||||
attesting_indices*: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
|
attesting_indices*: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
|
||||||
|
@ -127,18 +127,18 @@ type
|
||||||
|
|
||||||
CommitteeValidatorsBits* = BitList[MAX_VALIDATORS_PER_COMMITTEE]
|
CommitteeValidatorsBits* = BitList[MAX_VALIDATORS_PER_COMMITTEE]
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#attestation
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#attestation
|
||||||
Attestation* = object
|
Attestation* = object
|
||||||
aggregation_bits*: CommitteeValidatorsBits
|
aggregation_bits*: CommitteeValidatorsBits
|
||||||
data*: AttestationData
|
data*: AttestationData
|
||||||
signature*: ValidatorSig
|
signature*: ValidatorSig
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#checkpoint
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#checkpoint
|
||||||
Checkpoint* = object
|
Checkpoint* = object
|
||||||
epoch*: Epoch
|
epoch*: Epoch
|
||||||
root*: Eth2Digest
|
root*: Eth2Digest
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#AttestationData
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#AttestationData
|
||||||
AttestationData* = object
|
AttestationData* = object
|
||||||
slot*: Slot
|
slot*: Slot
|
||||||
index*: uint64
|
index*: uint64
|
||||||
|
@ -150,34 +150,34 @@ type
|
||||||
source*: Checkpoint
|
source*: Checkpoint
|
||||||
target*: Checkpoint
|
target*: Checkpoint
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#deposit
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#deposit
|
||||||
Deposit* = object
|
Deposit* = object
|
||||||
proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH + 1, Eth2Digest] ##\
|
proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH + 1, Eth2Digest] ##\
|
||||||
## Merkle path to deposit data list root
|
## Merkle path to deposit data list root
|
||||||
|
|
||||||
data*: DepositData
|
data*: DepositData
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#depositdata
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#depositdata
|
||||||
DepositMessage* = object
|
DepositMessage* = object
|
||||||
pubkey*: ValidatorPubKey
|
pubkey*: ValidatorPubKey
|
||||||
withdrawal_credentials*: Eth2Digest
|
withdrawal_credentials*: Eth2Digest
|
||||||
amount*: Gwei
|
amount*: Gwei
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#depositdata
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#depositdata
|
||||||
DepositData* = object
|
DepositData* = object
|
||||||
pubkey*: ValidatorPubKey
|
pubkey*: ValidatorPubKey
|
||||||
withdrawal_credentials*: Eth2Digest
|
withdrawal_credentials*: Eth2Digest
|
||||||
amount*: uint64
|
amount*: uint64
|
||||||
signature*: ValidatorSig # signing over DepositMessage
|
signature*: ValidatorSig # signing over DepositMessage
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#voluntaryexit
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#voluntaryexit
|
||||||
VoluntaryExit* = object
|
VoluntaryExit* = object
|
||||||
epoch*: Epoch ##\
|
epoch*: Epoch ##\
|
||||||
## Earliest epoch when voluntary exit can be processed
|
## Earliest epoch when voluntary exit can be processed
|
||||||
|
|
||||||
validator_index*: uint64
|
validator_index*: uint64
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beaconblock
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#beaconblock
|
||||||
BeaconBlock* = object
|
BeaconBlock* = object
|
||||||
## For each slot, a proposer is chosen from the validator pool to propose
|
## For each slot, a proposer is chosen from the validator pool to propose
|
||||||
## a new block. Once the block has been proposed, it is transmitted to
|
## a new block. Once the block has been proposed, it is transmitted to
|
||||||
|
@ -195,14 +195,14 @@ type
|
||||||
|
|
||||||
body*: BeaconBlockBody
|
body*: BeaconBlockBody
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beaconblockheader
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#beaconblockheader
|
||||||
BeaconBlockHeader* = object
|
BeaconBlockHeader* = object
|
||||||
slot*: Slot
|
slot*: Slot
|
||||||
parent_root*: Eth2Digest
|
parent_root*: Eth2Digest
|
||||||
state_root*: Eth2Digest
|
state_root*: Eth2Digest
|
||||||
body_root*: Eth2Digest
|
body_root*: Eth2Digest
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beaconblockbody
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#beaconblockbody
|
||||||
BeaconBlockBody* = object
|
BeaconBlockBody* = object
|
||||||
randao_reveal*: ValidatorSig
|
randao_reveal*: ValidatorSig
|
||||||
eth1_data*: Eth1Data
|
eth1_data*: Eth1Data
|
||||||
|
@ -215,7 +215,7 @@ type
|
||||||
deposits*: List[Deposit, MAX_DEPOSITS]
|
deposits*: List[Deposit, MAX_DEPOSITS]
|
||||||
voluntary_exits*: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
|
voluntary_exits*: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beaconstate
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#beaconstate
|
||||||
BeaconState* = object
|
BeaconState* = object
|
||||||
# Versioning
|
# Versioning
|
||||||
genesis_time*: uint64
|
genesis_time*: uint64
|
||||||
|
@ -242,7 +242,7 @@ type
|
||||||
validators*: seq[Validator]
|
validators*: seq[Validator]
|
||||||
balances*: seq[uint64]
|
balances*: seq[uint64]
|
||||||
|
|
||||||
# Shuffling
|
# Randomness
|
||||||
randao_mixes*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]
|
randao_mixes*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]
|
||||||
|
|
||||||
# Slashings
|
# Slashings
|
||||||
|
@ -267,7 +267,7 @@ type
|
||||||
current_justified_checkpoint*: Checkpoint
|
current_justified_checkpoint*: Checkpoint
|
||||||
finalized_checkpoint*: Checkpoint
|
finalized_checkpoint*: Checkpoint
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#validator
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#validator
|
||||||
Validator* = object
|
Validator* = object
|
||||||
pubkey*: ValidatorPubKey
|
pubkey*: ValidatorPubKey
|
||||||
|
|
||||||
|
@ -277,8 +277,7 @@ type
|
||||||
effective_balance*: uint64 ##\
|
effective_balance*: uint64 ##\
|
||||||
## Balance at stake
|
## Balance at stake
|
||||||
|
|
||||||
slashed*: bool ##\
|
slashed*: bool
|
||||||
## Was the validator slashed
|
|
||||||
|
|
||||||
# Status epochs
|
# Status epochs
|
||||||
activation_eligibility_epoch*: Epoch ##\
|
activation_eligibility_epoch*: Epoch ##\
|
||||||
|
@ -290,7 +289,7 @@ type
|
||||||
withdrawable_epoch*: Epoch ##\
|
withdrawable_epoch*: Epoch ##\
|
||||||
## When validator can withdraw or transfer funds
|
## When validator can withdraw or transfer funds
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#pendingattestation
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#pendingattestation
|
||||||
PendingAttestation* = object
|
PendingAttestation* = object
|
||||||
aggregation_bits*: CommitteeValidatorsBits
|
aggregation_bits*: CommitteeValidatorsBits
|
||||||
data*: AttestationData
|
data*: AttestationData
|
||||||
|
@ -300,12 +299,12 @@ type
|
||||||
|
|
||||||
proposer_index*: uint64
|
proposer_index*: uint64
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#historicalbatch
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#historicalbatch
|
||||||
HistoricalBatch* = object
|
HistoricalBatch* = object
|
||||||
block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
|
block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
|
||||||
state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
|
state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#fork
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#fork
|
||||||
Fork* = object
|
Fork* = object
|
||||||
# TODO: Spec introduced an alias for Version = array[4, byte]
|
# TODO: Spec introduced an alias for Version = array[4, byte]
|
||||||
# and a default parameter to compute_domain
|
# and a default parameter to compute_domain
|
||||||
|
@ -315,28 +314,28 @@ type
|
||||||
epoch*: Epoch ##\
|
epoch*: Epoch ##\
|
||||||
## Epoch of latest fork
|
## Epoch of latest fork
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#eth1data
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#eth1data
|
||||||
Eth1Data* = object
|
Eth1Data* = object
|
||||||
deposit_root*: Eth2Digest
|
deposit_root*: Eth2Digest
|
||||||
deposit_count*: uint64
|
deposit_count*: uint64
|
||||||
block_hash*: Eth2Digest
|
block_hash*: Eth2Digest
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#signedvoluntaryexit
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#signedvoluntaryexit
|
||||||
SignedVoluntaryExit* = object
|
SignedVoluntaryExit* = object
|
||||||
message*: VoluntaryExit
|
message*: VoluntaryExit
|
||||||
signature*: ValidatorSig
|
signature*: ValidatorSig
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#signedbeaconblock
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#signedbeaconblock
|
||||||
SignedBeaconBlock* = object
|
SignedBeaconBlock* = object
|
||||||
message*: BeaconBlock
|
message*: BeaconBlock
|
||||||
signature*: ValidatorSig
|
signature*: ValidatorSig
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#signedvoluntaryexit
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#signedvoluntaryexit
|
||||||
SignedBeaconBlockHeader* = object
|
SignedBeaconBlockHeader* = object
|
||||||
message*: BeaconBlockHeader
|
message*: BeaconBlockHeader
|
||||||
signature*: ValidatorSig
|
signature*: ValidatorSig
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#aggregateandproof
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/validator/0_beacon-chain-validator.md#aggregateandproof
|
||||||
AggregateAndProof* = object
|
AggregateAndProof* = object
|
||||||
aggregator_index*: uint64
|
aggregator_index*: uint64
|
||||||
aggregate*: Attestation
|
aggregate*: Attestation
|
||||||
|
@ -429,6 +428,14 @@ macro fieldMaxLen*(x: typed): untyped =
|
||||||
func shortValidatorKey*(state: BeaconState, validatorIdx: int): string =
|
func shortValidatorKey*(state: BeaconState, validatorIdx: int): string =
|
||||||
($state.validators[validatorIdx].pubkey)[0..7]
|
($state.validators[validatorIdx].pubkey)[0..7]
|
||||||
|
|
||||||
|
func getDepositMessage*(depositData: DepositData): DepositMessage =
|
||||||
|
result.pubkey = depositData.pubkey
|
||||||
|
result.amount = depositData.amount
|
||||||
|
result.withdrawal_credentials = depositData.withdrawal_credentials
|
||||||
|
|
||||||
|
func getDepositMessage*(deposit: Deposit): DepositMessage =
|
||||||
|
deposit.data.getDepositMessage
|
||||||
|
|
||||||
template ethTimeUnit(typ: type) {.dirty.} =
|
template ethTimeUnit(typ: type) {.dirty.} =
|
||||||
proc `+`*(x: typ, y: uint64): typ {.borrow.}
|
proc `+`*(x: typ, y: uint64): typ {.borrow.}
|
||||||
proc `-`*(x: typ, y: uint64): typ {.borrow.}
|
proc `-`*(x: typ, y: uint64): typ {.borrow.}
|
||||||
|
@ -537,8 +544,8 @@ template bytes*(x: BitList): auto = bytes(BitSeq(x))
|
||||||
template `[]`*(x: BitList, idx: auto): auto = BitSeq(x)[idx]
|
template `[]`*(x: BitList, idx: auto): auto = BitSeq(x)[idx]
|
||||||
template `[]=`*(x: var BitList, idx: auto, val: bool) = BitSeq(x)[idx] = val
|
template `[]=`*(x: var BitList, idx: auto, val: bool) = BitSeq(x)[idx] = val
|
||||||
template `==`*(a, b: BitList): bool = BitSeq(a) == BitSeq(b)
|
template `==`*(a, b: BitList): bool = BitSeq(a) == BitSeq(b)
|
||||||
template raiseBit*(x: var BitList, idx: int) = raiseBit(BitSeq(x), idx)
|
template setBit*(x: var BitList, idx: int) = setBit(BitSeq(x), idx)
|
||||||
template lowerBit*(x: var BitList, idx: int) = lowerBit(BitSeq(x), idx)
|
template clearBit*(x: var BitList, idx: int) = clearBit(BitSeq(x), idx)
|
||||||
template overlaps*(a, b: BitList): bool = overlaps(BitSeq(a), BitSeq(b))
|
template overlaps*(a, b: BitList): bool = overlaps(BitSeq(a), BitSeq(b))
|
||||||
template combine*(a: var BitList, b: BitList) = combine(BitSeq(a), BitSeq(b))
|
template combine*(a: var BitList, b: BitList) = combine(BitSeq(a), BitSeq(b))
|
||||||
template isSubsetOf*(a, b: BitList): bool = isSubsetOf(BitSeq(a), BitSeq(b))
|
template isSubsetOf*(a, b: BitList): bool = isSubsetOf(BitSeq(a), BitSeq(b))
|
||||||
|
|
|
@ -7,7 +7,7 @@
|
||||||
|
|
||||||
# Serenity hash function / digest
|
# Serenity hash function / digest
|
||||||
#
|
#
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#hash
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#hash
|
||||||
#
|
#
|
||||||
# In Phase 0 the beacon chain is deployed with SHA256 (SHA2-256).
|
# In Phase 0 the beacon chain is deployed with SHA256 (SHA2-256).
|
||||||
# Note that it is different from Keccak256 (often mistakenly called SHA3-256)
|
# Note that it is different from Keccak256 (often mistakenly called SHA3-256)
|
||||||
|
|
|
@ -15,7 +15,7 @@ import
|
||||||
# Internal
|
# Internal
|
||||||
./datatypes, ./digest
|
./datatypes, ./digest
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#integer_squareroot
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#integer_squareroot
|
||||||
func integer_squareroot*(n: SomeInteger): SomeInteger =
|
func integer_squareroot*(n: SomeInteger): SomeInteger =
|
||||||
# Return the largest integer ``x`` such that ``x**2 <= n``.
|
# Return the largest integer ``x`` such that ``x**2 <= n``.
|
||||||
doAssert n >= 0'u64
|
doAssert n >= 0'u64
|
||||||
|
@ -28,25 +28,25 @@ func integer_squareroot*(n: SomeInteger): SomeInteger =
|
||||||
y = (x + n div x) div 2
|
y = (x + n div x) div 2
|
||||||
x
|
x
|
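The Newton iteration shown above, as a standalone function with a quick check:

```nim
func integerSquareroot(n: uint64): uint64 =
  # Largest x such that x * x <= n, via the same iteration as the spec function.
  var
    x = n
    y = (x + 1) div 2
  while y < x:
    x = y
    y = (x + n div x) div 2
  x

echo integerSquareroot(26)         # 5
echo integerSquareroot(1_000_000)  # 1000
```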
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#compute_epoch_at_slot
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#compute_epoch_at_slot
|
||||||
func compute_epoch_at_slot*(slot: Slot|uint64): Epoch =
|
func compute_epoch_at_slot*(slot: Slot|uint64): Epoch =
|
||||||
# Return the epoch number of the given ``slot``.
|
# Return the epoch number at ``slot``.
|
||||||
(slot div SLOTS_PER_EPOCH).Epoch
|
(slot div SLOTS_PER_EPOCH).Epoch
|
||||||
|
|
||||||
template epoch*(slot: Slot): Epoch =
|
template epoch*(slot: Slot): Epoch =
|
||||||
compute_epoch_at_slot(slot)
|
compute_epoch_at_slot(slot)
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#compute_start_slot_at_epoch
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#compute_start_slot_at_epoch
|
||||||
func compute_start_slot_at_epoch*(epoch: Epoch): Slot =
|
func compute_start_slot_at_epoch*(epoch: Epoch): Slot =
|
||||||
# Return the start slot of ``epoch``.
|
# Return the start slot of ``epoch``.
|
||||||
(epoch * SLOTS_PER_EPOCH).Slot
|
(epoch * SLOTS_PER_EPOCH).Slot
|
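Round-tripping between slots and epochs with the minimal preset (SLOTS_PER_EPOCH = 8 assumed):

```nim
const SLOTS_PER_EPOCH = 8'u64

let slot = 21'u64
let epoch = slot div SLOTS_PER_EPOCH  # compute_epoch_at_slot
echo epoch                            # 2
echo epoch * SLOTS_PER_EPOCH          # 16: compute_start_slot_at_epoch(2)
```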
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#is_active_validator
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_active_validator
|
||||||
func is_active_validator*(validator: Validator, epoch: Epoch): bool =
|
func is_active_validator*(validator: Validator, epoch: Epoch): bool =
|
||||||
## Check if ``validator`` is active
|
## Check if ``validator`` is active
|
||||||
validator.activation_epoch <= epoch and epoch < validator.exit_epoch
|
validator.activation_epoch <= epoch and epoch < validator.exit_epoch
|
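The activity window above is half-open: [activation_epoch, exit_epoch). A tiny sketch with a hypothetical MiniValidator type:

```nim
type MiniValidator = object
  activationEpoch, exitEpoch: uint64

func isActive(v: MiniValidator, epoch: uint64): bool =
  v.activationEpoch <= epoch and epoch < v.exitEpoch

let v = MiniValidator(activationEpoch: 5, exitEpoch: 20)
echo v.isActive(4)   # false: not yet activated
echo v.isActive(5)   # true
echo v.isActive(20)  # false: the exit epoch itself is no longer active
```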
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_active_validator_indices
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_active_validator_indices
|
||||||
func get_active_validator_indices*(state: BeaconState, epoch: Epoch):
|
func get_active_validator_indices*(state: BeaconState, epoch: Epoch):
|
||||||
seq[ValidatorIndex] =
|
seq[ValidatorIndex] =
|
||||||
# Return the sequence of active validator indices at ``epoch``.
|
# Return the sequence of active validator indices at ``epoch``.
|
||||||
|
@ -54,7 +54,7 @@ func get_active_validator_indices*(state: BeaconState, epoch: Epoch):
|
||||||
if is_active_validator(val, epoch):
|
if is_active_validator(val, epoch):
|
||||||
result.add idx.ValidatorIndex
|
result.add idx.ValidatorIndex
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_committee_count_at_slot
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_committee_count_at_slot
|
||||||
func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 =
|
func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 =
|
||||||
# Return the number of committees at ``slot``.
|
# Return the number of committees at ``slot``.
|
||||||
let epoch = compute_epoch_at_slot(slot)
|
let epoch = compute_epoch_at_slot(slot)
|
||||||
|
@ -67,13 +67,13 @@ func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 =
|
||||||
# Otherwise, get_beacon_committee(...) cannot access some committees.
|
# Otherwise, get_beacon_committee(...) cannot access some committees.
|
||||||
doAssert (SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT).uint64 >= result
|
doAssert (SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT).uint64 >= result
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_current_epoch
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_current_epoch
|
||||||
func get_current_epoch*(state: BeaconState): Epoch =
|
func get_current_epoch*(state: BeaconState): Epoch =
|
||||||
# Return the current epoch.
|
# Return the current epoch.
|
||||||
doAssert state.slot >= GENESIS_SLOT, $state.slot
|
doAssert state.slot >= GENESIS_SLOT, $state.slot
|
||||||
compute_epoch_at_slot(state.slot)
|
compute_epoch_at_slot(state.slot)
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_randao_mix
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_randao_mix
|
||||||
func get_randao_mix*(state: BeaconState,
|
func get_randao_mix*(state: BeaconState,
|
||||||
epoch: Epoch): Eth2Digest =
|
epoch: Epoch): Eth2Digest =
|
||||||
## Returns the randao mix at a recent ``epoch``.
|
## Returns the randao mix at a recent ``epoch``.
|
||||||
|
@ -114,15 +114,15 @@ func int_to_bytes4*(x: uint64): array[4, byte] =
|
||||||
result[2] = ((x shr 16) and 0xff).byte
|
result[2] = ((x shr 16) and 0xff).byte
|
||||||
result[3] = ((x shr 24) and 0xff).byte
|
result[3] = ((x shr 24) and 0xff).byte
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#compute_domain
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#compute_domain
|
||||||
func compute_domain*(
|
func compute_domain*(
|
||||||
domain_type: DomainType,
|
domain_type: DomainType,
|
||||||
fork_version: array[4, byte] = [0'u8, 0, 0, 0]): Domain =
|
fork_version: array[4, byte] = [0'u8, 0, 0, 0]): Domain =
|
||||||
|
# Return the domain for the ``domain_type`` and ``fork_version``.
|
||||||
result[0..3] = int_to_bytes4(domain_type.uint64)
|
result[0..3] = int_to_bytes4(domain_type.uint64)
|
||||||
result[4..7] = fork_version
|
result[4..7] = fork_version
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_domain
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_domain
|
||||||
|
|
||||||
func get_domain*(
|
func get_domain*(
|
||||||
fork: Fork, domain_type: DomainType, epoch: Epoch): Domain =
|
fork: Fork, domain_type: DomainType, epoch: Epoch): Domain =
|
||||||
## Return the signature domain (fork version concatenated with domain type)
|
## Return the signature domain (fork version concatenated with domain type)
|
||||||
|
@ -144,9 +144,9 @@ func get_domain*(
|
||||||
func get_domain*(state: BeaconState, domain_type: DomainType): Domain =
|
func get_domain*(state: BeaconState, domain_type: DomainType): Domain =
|
||||||
get_domain(state, domain_type, get_current_epoch(state))
|
get_domain(state, domain_type, get_current_epoch(state))
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_seed
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_seed
|
||||||
func get_seed*(state: BeaconState, epoch: Epoch, domain_type: DomainType): Eth2Digest =
|
func get_seed*(state: BeaconState, epoch: Epoch, domain_type: DomainType): Eth2Digest =
|
||||||
# Generate a seed for the given ``epoch``.
|
# Return the seed at ``epoch``.
|
||||||
|
|
||||||
var seed_input : array[4+8+32, byte]
|
var seed_input : array[4+8+32, byte]
|
||||||
|
|
||||||
|
@ -157,6 +157,6 @@ func get_seed*(state: BeaconState, epoch: Epoch, domain_type: DomainType): Eth2D
|
||||||
seed_input[0..3] = int_to_bytes4(domain_type.uint64)
|
seed_input[0..3] = int_to_bytes4(domain_type.uint64)
|
||||||
seed_input[4..11] = int_to_bytes8(epoch.uint64)
|
seed_input[4..11] = int_to_bytes8(epoch.uint64)
|
||||||
seed_input[12..43] =
|
seed_input[12..43] =
|
||||||
get_randao_mix(state,
|
get_randao_mix(state, # Avoid underflow
|
||||||
epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1).data
|
epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1).data
|
||||||
eth2hash(seed_input)
|
eth2hash(seed_input)
|
||||||
|
|
|
@ -45,7 +45,7 @@ const
|
||||||
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT* {.intdefine.} = 16384
|
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT* {.intdefine.} = 16384
|
||||||
|
|
||||||
# Constants (TODO: not actually configurable)
|
# Constants (TODO: not actually configurable)
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#constants
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/configs/mainnet.yaml#L110
|
||||||
BASE_REWARDS_PER_EPOCH* = 4
|
BASE_REWARDS_PER_EPOCH* = 4
|
||||||
|
|
||||||
DEPOSIT_CONTRACT_TREE_DEPTH* = 32
|
DEPOSIT_CONTRACT_TREE_DEPTH* = 32
|
||||||
|
@ -153,12 +153,12 @@ const
|
||||||
|
|
||||||
# Fork choice
|
# Fork choice
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_fork-choice.md#configuration
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/configs/mainnet.yaml#L26
|
||||||
SAFE_SLOTS_TO_UPDATE_JUSTIFIED* = 8 # 96 seconds
|
SAFE_SLOTS_TO_UPDATE_JUSTIFIED* = 8 # 96 seconds
|
||||||
|
|
||||||
# Validators
|
# Validators
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#misc
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/configs/mainnet.yaml#L32
|
||||||
ETH1_FOLLOW_DISTANCE* = 1024 # blocks ~ 4 hours
|
ETH1_FOLLOW_DISTANCE* = 1024 # blocks ~ 4 hours
|
||||||
TARGET_AGGREGATORS_PER_COMMITTEE* = 16 # validators
|
TARGET_AGGREGATORS_PER_COMMITTEE* = 16 # validators
|
||||||
RANDOM_SUBNETS_PER_VALIDATOR* = 1 # subnet
|
RANDOM_SUBNETS_PER_VALIDATOR* = 1 # subnet
|
||||||
|
@ -166,7 +166,7 @@ const
|
||||||
|
|
||||||
# Phase 1 - Sharding
|
# Phase 1 - Sharding
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/1_shard-data-chains.md#time-parameters
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/1_shard-data-chains.md#time-parameters
|
||||||
# TODO those are included in minimal.yaml but not mainnet.yaml
|
# TODO those are included in minimal.yaml but not mainnet.yaml
|
||||||
# Why?
|
# Why?
|
||||||
# SHARD_SLOTS_PER_BEACON_SLOT* = 2 # spec: SHARD_SLOTS_PER_EPOCH
|
# SHARD_SLOTS_PER_BEACON_SLOT* = 2 # spec: SHARD_SLOTS_PER_EPOCH
|
||||||
|
@ -176,7 +176,7 @@ const
|
||||||
|
|
||||||
# Phase 1 - Custody game
|
# Phase 1 - Custody game
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/1_custody-game.md#constants
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/1_custody-game.md#constants
|
||||||
# TODO those are included in minimal.yaml but not mainnet.yaml
|
# TODO those are included in minimal.yaml but not mainnet.yaml
|
||||||
# Why?
|
# Why?
|
||||||
# EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS* = 4096 # epochs
|
# EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS* = 4096 # epochs
|
||||||
|
|
|
@ -38,7 +38,7 @@ const
|
||||||
|
|
||||||
# Constants
|
# Constants
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#constants
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#constants
|
||||||
# TODO "The following values are (non-configurable) constants" ...
|
# TODO "The following values are (non-configurable) constants" ...
|
||||||
# Unchanged
|
# Unchanged
|
||||||
BASE_REWARDS_PER_EPOCH* = 4
|
BASE_REWARDS_PER_EPOCH* = 4
|
||||||
|
@ -91,6 +91,10 @@ const
|
||||||
# Changed
|
# Changed
|
||||||
MIN_EPOCHS_TO_INACTIVITY_PENALTY* = 2'u64^2
|
MIN_EPOCHS_TO_INACTIVITY_PENALTY* = 2'u64^2
|
||||||
|
|
||||||
|
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS* = 4096 # epochs
|
||||||
|
EPOCHS_PER_CUSTODY_PERIOD* = 4
|
||||||
|
CUSTODY_PERIOD_TO_RANDAO_PADDING* = 4
|
||||||
|
|
||||||
# State vector lengths
|
# State vector lengths
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/configs/minimal.yaml#L101
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/configs/minimal.yaml#L101
|
||||||
|
@ -123,7 +127,7 @@ const
|
||||||
|
|
||||||
# Fork choice
|
# Fork choice
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_fork-choice.md#configuration
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/configs/minimal.yaml#L26
|
||||||
|
|
||||||
# Changed
|
# Changed
|
||||||
SAFE_SLOTS_TO_UPDATE_JUSTIFIED* = 2
|
SAFE_SLOTS_TO_UPDATE_JUSTIFIED* = 2
|
||||||
|
@ -142,19 +146,10 @@ const
|
||||||
|
|
||||||
# Phase 1 - Sharding
|
# Phase 1 - Sharding
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/1_shard-data-chains.md#time-parameters
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/configs/minimal.yaml#L153
|
||||||
# TODO those are included in minimal.yaml but not mainnet.yaml
|
# TODO those are included in minimal.yaml but not mainnet.yaml
|
||||||
# Why?
|
# Why?
|
||||||
SHARD_SLOTS_PER_BEACON_SLOT* = 2 # spec: SHARD_SLOTS_PER_EPOCH
|
SHARD_SLOTS_PER_BEACON_SLOT* = 2 # spec: SHARD_SLOTS_PER_EPOCH
|
||||||
EPOCHS_PER_SHARD_PERIOD* = 4
|
EPOCHS_PER_SHARD_PERIOD* = 4
|
||||||
PHASE_1_FORK_EPOCH* = 8
|
PHASE_1_FORK_EPOCH* = 8
|
||||||
PHASE_1_FORK_SLOT* = 64
|
PHASE_1_FORK_SLOT* = 64
|
||||||
|
|
||||||
# Phase 1 - Custody game
|
|
||||||
# ---------------------------------------------------------------
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/1_custody-game.md#constants
|
|
||||||
# TODO those are included in minimal.yaml but not mainnet.yaml
|
|
||||||
# Why?
|
|
||||||
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS* = 4096 # epochs
|
|
||||||
EPOCHS_PER_CUSTODY_PERIOD* = 4
|
|
||||||
CUSTODY_PERIOD_TO_RANDAO_PADDING* = 4
|
|
||||||
|
|
|
@ -35,7 +35,8 @@
|
||||||
import
|
import
|
||||||
algorithm, collections/sets, chronicles, options, sequtils, sets, tables,
|
algorithm, collections/sets, chronicles, options, sequtils, sets, tables,
|
||||||
../extras, ../ssz, metrics,
|
../extras, ../ssz, metrics,
|
||||||
beaconstate, crypto, datatypes, digest, helpers, validator
|
beaconstate, crypto, datatypes, digest, helpers, validator,
|
||||||
|
../../nbench/bench_lab
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
|
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
|
||||||
declareGauge beacon_current_live_validators, "Number of active validators that successfully included attestation on chain for current epoch" # On block
|
declareGauge beacon_current_live_validators, "Number of active validators that successfully included attestation on chain for current epoch" # On block
|
||||||
|
@ -43,10 +44,10 @@ declareGauge beacon_previous_live_validators, "Number of active validators that
|
||||||
declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block
|
declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block
|
||||||
declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block
|
declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#block-header
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#block-header
|
||||||
proc process_block_header*(
|
proc process_block_header*(
|
||||||
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
|
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
|
||||||
stateCache: var StateCache): bool =
|
stateCache: var StateCache): bool {.nbench.}=
|
||||||
# Verify that the slots match
|
# Verify that the slots match
|
||||||
if not (blck.slot == state.slot):
|
if not (blck.slot == state.slot):
|
||||||
notice "Block header: slot mismatch",
|
notice "Block header: slot mismatch",
|
||||||
|
@ -89,7 +90,7 @@ proc process_block_header*(
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#randao
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#randao
|
||||||
proc process_randao(
|
proc process_randao(
|
||||||
state: var BeaconState, body: BeaconBlockBody, flags: UpdateFlags,
|
state: var BeaconState, body: BeaconBlockBody, flags: UpdateFlags,
|
||||||
stateCache: var StateCache): bool =
|
stateCache: var StateCache): bool {.nbench.}=
|
||||||
let
|
let
|
||||||
epoch = state.get_current_epoch()
|
epoch = state.get_current_epoch()
|
||||||
proposer_index = get_beacon_proposer_index(state, stateCache)
|
proposer_index = get_beacon_proposer_index(state, stateCache)
|
||||||
|
@ -124,24 +125,24 @@ proc process_randao(
|
||||||
|
|
||||||
true
|
true
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#eth1-data
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#eth1-data
|
||||||
func process_eth1_data(state: var BeaconState, body: BeaconBlockBody) =
|
func process_eth1_data(state: var BeaconState, body: BeaconBlockBody) {.nbench.}=
|
||||||
state.eth1_data_votes.add body.eth1_data
|
state.eth1_data_votes.add body.eth1_data
|
||||||
if state.eth1_data_votes.count(body.eth1_data) * 2 >
|
if state.eth1_data_votes.count(body.eth1_data) * 2 >
|
||||||
SLOTS_PER_ETH1_VOTING_PERIOD:
|
SLOTS_PER_ETH1_VOTING_PERIOD:
|
||||||
state.eth1_data = body.eth1_data
|
state.eth1_data = body.eth1_data
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#is_slashable_validator
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_slashable_validator
|
||||||
func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
|
func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
|
||||||
# Check if ``validator`` is slashable.
|
# Check if ``validator`` is slashable.
|
||||||
(not validator.slashed) and
|
(not validator.slashed) and
|
||||||
(validator.activation_epoch <= epoch) and
|
(validator.activation_epoch <= epoch) and
|
||||||
(epoch < validator.withdrawable_epoch)
|
(epoch < validator.withdrawable_epoch)
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#proposer-slashings
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#proposer-slashings
|
||||||
proc process_proposer_slashing*(
|
proc process_proposer_slashing*(
|
||||||
state: var BeaconState, proposer_slashing: ProposerSlashing,
|
state: var BeaconState, proposer_slashing: ProposerSlashing,
|
||||||
flags: UpdateFlags, stateCache: var StateCache): bool =
|
flags: UpdateFlags, stateCache: var StateCache): bool {.nbench.}=
|
||||||
if proposer_slashing.proposer_index.int >= state.validators.len:
|
if proposer_slashing.proposer_index.int >= state.validators.len:
|
||||||
notice "Proposer slashing: invalid proposer index"
|
notice "Proposer slashing: invalid proposer index"
|
||||||
return false
|
return false
|
||||||
|
@ -187,7 +188,7 @@ proc process_proposer_slashing*(
|
||||||
|
|
||||||
proc processProposerSlashings(
|
proc processProposerSlashings(
|
||||||
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
|
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
|
||||||
stateCache: var StateCache): bool =
|
stateCache: var StateCache): bool {.nbench.}=
|
||||||
if len(blck.body.proposer_slashings) > MAX_PROPOSER_SLASHINGS:
|
if len(blck.body.proposer_slashings) > MAX_PROPOSER_SLASHINGS:
|
||||||
notice "PropSlash: too many!",
|
notice "PropSlash: too many!",
|
||||||
proposer_slashings = len(blck.body.proposer_slashings)
|
proposer_slashings = len(blck.body.proposer_slashings)
|
||||||
|
@ -200,7 +201,7 @@ proc processProposerSlashings(
|
||||||
|
|
||||||
true
|
true
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#is_slashable_attestation_data
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_slashable_attestation_data
|
||||||
func is_slashable_attestation_data(
|
func is_slashable_attestation_data(
|
||||||
data_1: AttestationData, data_2: AttestationData): bool =
|
data_1: AttestationData, data_2: AttestationData): bool =
|
||||||
## Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG
|
## Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG
|
||||||
|
@ -212,12 +213,12 @@ func is_slashable_attestation_data(
|
||||||
(data_1.source.epoch < data_2.source.epoch and
|
(data_1.source.epoch < data_2.source.epoch and
|
||||||
data_2.target.epoch < data_1.target.epoch)
|
data_2.target.epoch < data_1.target.epoch)
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#attester-slashings
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#attester-slashings
|
||||||
proc process_attester_slashing*(
|
proc process_attester_slashing*(
|
||||||
state: var BeaconState,
|
state: var BeaconState,
|
||||||
attester_slashing: AttesterSlashing,
|
attester_slashing: AttesterSlashing,
|
||||||
stateCache: var StateCache
|
stateCache: var StateCache
|
||||||
): bool =
|
): bool {.nbench.}=
|
||||||
let
|
let
|
||||||
attestation_1 = attester_slashing.attestation_1
|
attestation_1 = attester_slashing.attestation_1
|
||||||
attestation_2 = attester_slashing.attestation_2
|
attestation_2 = attester_slashing.attestation_2
|
||||||
|
@ -235,11 +236,8 @@ proc process_attester_slashing*(
|
||||||
notice "Attester slashing: invalid attestation 2"
|
notice "Attester slashing: invalid attestation 2"
|
||||||
return false
|
return false
|
||||||
|
|
||||||
var slashed_any = false # Detect if trying to slash twice
|
var slashed_any = false
|
||||||
|
|
||||||
## TODO there's a lot of sorting/set construction here and
|
|
||||||
## verify_indexed_attestation, but go by spec unless there
|
|
||||||
## is compelling perf evidence otherwise.
|
|
||||||
for index in sorted(toSeq(intersection(
|
for index in sorted(toSeq(intersection(
|
||||||
toHashSet(attestation_1.attesting_indices),
|
toHashSet(attestation_1.attesting_indices),
|
||||||
toHashSet(attestation_2.attesting_indices)).items), system.cmp):
|
toHashSet(attestation_2.attesting_indices)).items), system.cmp):
|
||||||
|
@ -252,9 +250,9 @@ proc process_attester_slashing*(
|
||||||
return false
|
return false
|
||||||
return true
|
return true
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#attester-slashings
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#attester-slashings
|
||||||
proc processAttesterSlashings(state: var BeaconState, blck: BeaconBlock,
|
proc processAttesterSlashings(state: var BeaconState, blck: BeaconBlock,
|
||||||
stateCache: var StateCache): bool =
|
stateCache: var StateCache): bool {.nbench.}=
|
||||||
# Process ``AttesterSlashing`` operation.
|
# Process ``AttesterSlashing`` operation.
|
||||||
if len(blck.body.attester_slashings) > MAX_ATTESTER_SLASHINGS:
|
if len(blck.body.attester_slashings) > MAX_ATTESTER_SLASHINGS:
|
||||||
notice "Attester slashing: too many!"
|
notice "Attester slashing: too many!"
|
||||||
|
@ -268,7 +266,7 @@ proc processAttesterSlashings(state: var BeaconState, blck: BeaconBlock,
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.8.4/specs/core/0_beacon-chain.md#attestations
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.8.4/specs/core/0_beacon-chain.md#attestations
|
||||||
proc processAttestations(
|
proc processAttestations(
|
||||||
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
|
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
|
||||||
stateCache: var StateCache): bool =
|
stateCache: var StateCache): bool {.nbench.}=
|
||||||
## Each block includes a number of attestations that the proposer chose. Each
|
## Each block includes a number of attestations that the proposer chose. Each
|
||||||
## attestation represents an update to a specific shard and is signed by a
|
## attestation represents an update to a specific shard and is signed by a
|
||||||
## committee of validators.
|
## committee of validators.
|
||||||
|
@ -288,7 +286,7 @@ proc processAttestations(
|
||||||
true
|
true
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.8.4/specs/core/0_beacon-chain.md#deposits
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.8.4/specs/core/0_beacon-chain.md#deposits
|
||||||
proc processDeposits(state: var BeaconState, blck: BeaconBlock): bool =
|
proc processDeposits(state: var BeaconState, blck: BeaconBlock): bool {.nbench.}=
|
||||||
if not (len(blck.body.deposits) <= MAX_DEPOSITS):
|
if not (len(blck.body.deposits) <= MAX_DEPOSITS):
|
||||||
notice "processDeposits: too many deposits"
|
notice "processDeposits: too many deposits"
|
||||||
return false
|
return false
|
||||||
|
@ -300,11 +298,11 @@ proc processDeposits(state: var BeaconState, blck: BeaconBlock): bool =
|
||||||
|
|
||||||
true
|
true
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#voluntary-exits
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#voluntary-exits
|
||||||
proc process_voluntary_exit*(
|
proc process_voluntary_exit*(
|
||||||
state: var BeaconState,
|
state: var BeaconState,
|
||||||
signed_voluntary_exit: SignedVoluntaryExit,
|
signed_voluntary_exit: SignedVoluntaryExit,
|
||||||
flags: UpdateFlags): bool =
|
flags: UpdateFlags): bool {.nbench.}=
|
||||||
|
|
||||||
let voluntary_exit = signed_voluntary_exit.message
|
let voluntary_exit = signed_voluntary_exit.message
|
||||||
|
|
||||||
|
@ -364,7 +362,7 @@ proc process_voluntary_exit*(
|
||||||
|
|
||||||
true
|
true
|
||||||
|
|
||||||
proc processVoluntaryExits(state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags): bool =
|
proc processVoluntaryExits(state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags): bool {.nbench.}=
|
||||||
if len(blck.body.voluntary_exits) > MAX_VOLUNTARY_EXITS:
|
if len(blck.body.voluntary_exits) > MAX_VOLUNTARY_EXITS:
|
||||||
notice "[Block processing - Voluntary Exit]: too many exits!"
|
notice "[Block processing - Voluntary Exit]: too many exits!"
|
||||||
return false
|
return false
|
||||||
|
@ -375,7 +373,7 @@ proc processVoluntaryExits(state: var BeaconState, blck: BeaconBlock, flags: Upd
|
||||||
|
|
||||||
proc processBlock*(
|
proc processBlock*(
|
||||||
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
|
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
|
||||||
stateCache: var StateCache): bool =
|
stateCache: var StateCache): bool {.nbench.}=
|
||||||
## When there's a new block, we need to verify that the block is sane and
|
## When there's a new block, we need to verify that the block is sane and
|
||||||
## update the state accordingly
|
## update the state accordingly
|
||||||
|
|
||||||
|
|
|
@ -37,7 +37,8 @@ import
|
||||||
stew/[bitseqs, bitops2], chronicles, json_serialization/std/sets,
|
stew/[bitseqs, bitops2], chronicles, json_serialization/std/sets,
|
||||||
metrics, ../ssz,
|
metrics, ../ssz,
|
||||||
beaconstate, crypto, datatypes, digest, helpers, validator,
|
beaconstate, crypto, datatypes, digest, helpers, validator,
|
||||||
state_transition_helpers
|
state_transition_helpers,
|
||||||
|
../../nbench/bench_lab
|
||||||
|
|
||||||
# Logging utilities
|
# Logging utilities
|
||||||
# --------------------------------------------------------
|
# --------------------------------------------------------
|
||||||
|
@ -62,10 +63,11 @@ declareGauge epoch_transition_final_updates, "Epoch transition final updates tim
|
||||||
# Spec
|
# Spec
|
||||||
# --------------------------------------------------------
|
# --------------------------------------------------------
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_total_active_balance
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_total_active_balance
|
||||||
func get_total_active_balance*(state: BeaconState): Gwei =
|
func get_total_active_balance*(state: BeaconState): Gwei =
|
||||||
# Return the combined effective balance of the active validators.
|
# Return the combined effective balance of the active validators.
|
||||||
return get_total_balance(
|
# TODO it calls get_total_balance with set(g_a_v_i(...))
|
||||||
|
get_total_balance(
|
||||||
state,
|
state,
|
||||||
get_active_validator_indices(state, get_current_epoch(state)))
|
get_active_validator_indices(state, get_current_epoch(state)))
|
||||||
|
|
||||||
|
@ -99,9 +101,9 @@ func get_attesting_balance(
|
||||||
get_total_balance(state, get_unslashed_attesting_indices(
|
get_total_balance(state, get_unslashed_attesting_indices(
|
||||||
state, attestations, stateCache))
|
state, attestations, stateCache))
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#justification-and-finalization
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#justification-and-finalization
|
||||||
proc process_justification_and_finalization*(
|
proc process_justification_and_finalization*(
|
||||||
state: var BeaconState, stateCache: var StateCache) =
|
state: var BeaconState, stateCache: var StateCache) {.nbench.}=
|
||||||
|
|
||||||
logScope: pcs = "process_justification_and_finalization"
|
logScope: pcs = "process_justification_and_finalization"
|
||||||
|
|
||||||
|
@ -138,11 +140,11 @@ proc process_justification_and_finalization*(
|
||||||
## matter -- in the next epoch, they'll be 2 epochs old, when BeaconState
|
## matter -- in the next epoch, they'll be 2 epochs old, when BeaconState
|
||||||
## tracks current_epoch_attestations and previous_epoch_attestations only
|
## tracks current_epoch_attestations and previous_epoch_attestations only
|
||||||
## per
|
## per
|
||||||
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#attestations
|
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#attestations
|
||||||
## and `get_matching_source_attestations(...)` via
|
## and `get_matching_source_attestations(...)` via
|
||||||
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#helper-functions-1
|
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#helper-functions-1
|
||||||
## and
|
## and
|
||||||
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#final-updates
|
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#final-updates
|
||||||
## after which the state.previous_epoch_attestations is replaced.
|
## after which the state.previous_epoch_attestations is replaced.
|
||||||
trace "Non-attesting indices in previous epoch",
|
trace "Non-attesting indices in previous epoch",
|
||||||
missing_all_validators=
|
missing_all_validators=
|
||||||
|
@ -163,7 +165,7 @@ proc process_justification_and_finalization*(
|
||||||
state.current_justified_checkpoint =
|
state.current_justified_checkpoint =
|
||||||
Checkpoint(epoch: previous_epoch,
|
Checkpoint(epoch: previous_epoch,
|
||||||
root: get_block_root(state, previous_epoch))
|
root: get_block_root(state, previous_epoch))
|
||||||
state.justification_bits.raiseBit 1
|
state.justification_bits.setBit 1
|
||||||
|
|
||||||
debug "Justified with previous epoch",
|
debug "Justified with previous epoch",
|
||||||
current_epoch = current_epoch,
|
current_epoch = current_epoch,
|
||||||
|
@ -177,7 +179,7 @@ proc process_justification_and_finalization*(
|
||||||
state.current_justified_checkpoint =
|
state.current_justified_checkpoint =
|
||||||
Checkpoint(epoch: current_epoch,
|
Checkpoint(epoch: current_epoch,
|
||||||
root: get_block_root(state, current_epoch))
|
root: get_block_root(state, current_epoch))
|
||||||
state.justification_bits.raiseBit 0
|
state.justification_bits.setBit 0
|
||||||
|
|
||||||
debug "Justified with current epoch",
|
debug "Justified with current epoch",
|
||||||
current_epoch = current_epoch,
|
current_epoch = current_epoch,
|
||||||
|
@ -242,7 +244,7 @@ func get_base_reward(state: BeaconState, index: ValidatorIndex,
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#rewards-and-penalties-1
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#rewards-and-penalties-1
|
||||||
func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
|
func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
|
||||||
tuple[a: seq[Gwei], b: seq[Gwei]] =
|
tuple[a: seq[Gwei], b: seq[Gwei]] {.nbench.}=
|
||||||
let
|
let
|
||||||
previous_epoch = get_previous_epoch(state)
|
previous_epoch = get_previous_epoch(state)
|
||||||
total_balance = get_total_active_balance(state)
|
total_balance = get_total_active_balance(state)
|
||||||
|
@ -336,9 +338,9 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
|
||||||
|
|
||||||
(rewards, penalties)
|
(rewards, penalties)
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#rewards-and-penalties-1
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#rewards-and-penalties-1
|
||||||
func process_rewards_and_penalties(
|
func process_rewards_and_penalties(
|
||||||
state: var BeaconState, cache: var StateCache) =
|
state: var BeaconState, cache: var StateCache) {.nbench.}=
|
||||||
if get_current_epoch(state) == GENESIS_EPOCH:
|
if get_current_epoch(state) == GENESIS_EPOCH:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -348,7 +350,7 @@ func process_rewards_and_penalties(
|
||||||
increase_balance(state, i.ValidatorIndex, rewards[i])
|
increase_balance(state, i.ValidatorIndex, rewards[i])
|
||||||
decrease_balance(state, i.ValidatorIndex, penalties[i])
|
decrease_balance(state, i.ValidatorIndex, penalties[i])
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#slashings
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#slashings
|
||||||
func process_slashings*(state: var BeaconState) =
|
func process_slashings*(state: var BeaconState) =
|
||||||
let
|
let
|
||||||
epoch = get_current_epoch(state)
|
epoch = get_current_epoch(state)
|
||||||
|
@ -365,8 +367,8 @@ func process_slashings*(state: var BeaconState) =
|
||||||
let penalty = penalty_numerator div total_balance * increment
|
let penalty = penalty_numerator div total_balance * increment
|
||||||
decrease_balance(state, index.ValidatorIndex, penalty)
|
decrease_balance(state, index.ValidatorIndex, penalty)
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#final-updates
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#final-updates
|
||||||
func process_final_updates*(state: var BeaconState) =
|
func process_final_updates*(state: var BeaconState) {.nbench.}=
|
||||||
let
|
let
|
||||||
current_epoch = get_current_epoch(state)
|
current_epoch = get_current_epoch(state)
|
||||||
next_epoch = current_epoch + 1
|
next_epoch = current_epoch + 1
|
||||||
|
@ -405,8 +407,8 @@ func process_final_updates*(state: var BeaconState) =
|
||||||
state.previous_epoch_attestations = state.current_epoch_attestations
|
state.previous_epoch_attestations = state.current_epoch_attestations
|
||||||
state.current_epoch_attestations = @[]
|
state.current_epoch_attestations = @[]
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#epoch-processing
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#epoch-processing
|
||||||
proc process_epoch*(state: var BeaconState) =
|
proc process_epoch*(state: var BeaconState) {.nbench.}=
|
||||||
# @proc are placeholders
|
# @proc are placeholders
|
||||||
|
|
||||||
trace "process_epoch",
|
trace "process_epoch",
|
||||||
|
@ -414,7 +416,7 @@ proc process_epoch*(state: var BeaconState) =
|
||||||
|
|
||||||
var per_epoch_cache = get_empty_per_epoch_cache()
|
var per_epoch_cache = get_empty_per_epoch_cache()
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#justification-and-finalization
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#justification-and-finalization
|
||||||
process_justification_and_finalization(state, per_epoch_cache)
|
process_justification_and_finalization(state, per_epoch_cache)
|
||||||
|
|
||||||
trace "ran process_justification_and_finalization",
|
trace "ran process_justification_and_finalization",
|
||||||
|
@ -423,7 +425,7 @@ proc process_epoch*(state: var BeaconState) =
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#rewards-and-penalties-1
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#rewards-and-penalties-1
|
||||||
process_rewards_and_penalties(state, per_epoch_cache)
|
process_rewards_and_penalties(state, per_epoch_cache)
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#registry-updates
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#registry-updates
|
||||||
# Don't rely on caching here.
|
# Don't rely on caching here.
|
||||||
process_registry_updates(state)
|
process_registry_updates(state)
|
||||||
|
|
||||||
|
@ -434,12 +436,12 @@ proc process_epoch*(state: var BeaconState) =
|
||||||
# @process_reveal_deadlines
|
# @process_reveal_deadlines
|
||||||
# @process_challenge_deadlines
|
# @process_challenge_deadlines
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#slashings
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#slashings
|
||||||
process_slashings(state)
|
process_slashings(state)
|
||||||
|
|
||||||
# @update_period_committee
|
# @update_period_committee
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#final-updates
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#final-updates
|
||||||
process_final_updates(state)
|
process_final_updates(state)
|
||||||
|
|
||||||
# @after_process_final_updates
|
# @after_process_final_updates
|
||||||
|
|
|
@ -32,7 +32,7 @@ func get_attesting_indices*(
|
||||||
result = result.union(get_attesting_indices(
|
result = result.union(get_attesting_indices(
|
||||||
state, a.data, a.aggregation_bits, stateCache))
|
state, a.data, a.aggregation_bits, stateCache))
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#helper-functions-1
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#helper-functions-1
|
||||||
func get_unslashed_attesting_indices*(
|
func get_unslashed_attesting_indices*(
|
||||||
state: BeaconState, attestations: openarray[PendingAttestation],
|
state: BeaconState, attestations: openarray[PendingAttestation],
|
||||||
stateCache: var StateCache): HashSet[ValidatorIndex] =
|
stateCache: var StateCache): HashSet[ValidatorIndex] =
|
||||||
|
|
|
@ -11,8 +11,8 @@ import
|
||||||
./datatypes, ./digest, ./helpers
|
./datatypes, ./digest, ./helpers
|
||||||
|
|
||||||
# TODO: Proceed to renaming and signature changes
|
# TODO: Proceed to renaming and signature changes
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#compute_shuffled_index
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#compute_shuffled_index
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#compute_committee
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#compute_committee
|
||||||
func get_shuffled_seq*(seed: Eth2Digest,
|
func get_shuffled_seq*(seed: Eth2Digest,
|
||||||
list_size: uint64,
|
list_size: uint64,
|
||||||
): seq[ValidatorIndex] =
|
): seq[ValidatorIndex] =
|
||||||
|
@ -78,7 +78,7 @@ func get_shuffled_seq*(seed: Eth2Digest,
|
||||||
|
|
||||||
result = shuffled_active_validator_indices
|
result = shuffled_active_validator_indices
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_previous_epoch
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_previous_epoch
|
||||||
func get_previous_epoch*(state: BeaconState): Epoch =
|
func get_previous_epoch*(state: BeaconState): Epoch =
|
||||||
# Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
|
# Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
|
||||||
let current_epoch = get_current_epoch(state)
|
let current_epoch = get_current_epoch(state)
|
||||||
|
@ -87,7 +87,7 @@ func get_previous_epoch*(state: BeaconState): Epoch =
|
||||||
else:
|
else:
|
||||||
current_epoch - 1
|
current_epoch - 1
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#compute_committee
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#compute_committee
|
||||||
func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
|
func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
|
||||||
index: uint64, count: uint64, stateCache: var StateCache): seq[ValidatorIndex] =
|
index: uint64, count: uint64, stateCache: var StateCache): seq[ValidatorIndex] =
|
||||||
## Return the committee corresponding to ``indices``, ``seed``, ``index``,
|
## Return the committee corresponding to ``indices``, ``seed``, ``index``,
|
||||||
|
|
|
@ -474,7 +474,7 @@ func bitlistHashTreeRoot(merkelizer: SszChunksMerkelizer, x: BitSeq): Eth2Digest
|
||||||
lastCorrectedByte = Bytes(x)[^2]
|
lastCorrectedByte = Bytes(x)[^2]
|
||||||
else:
|
else:
|
||||||
let markerPos = log2trunc(lastCorrectedByte)
|
let markerPos = log2trunc(lastCorrectedByte)
|
||||||
lastCorrectedByte.lowerBit(markerPos)
|
lastCorrectedByte.clearBit(markerPos)
|
||||||
|
|
||||||
var
|
var
|
||||||
bytesInLastChunk = totalBytes mod bytesPerChunk
|
bytesInLastChunk = totalBytes mod bytesPerChunk
|
||||||
|
|
|
@ -34,7 +34,8 @@ import
|
||||||
collections/sets, chronicles, sets,
|
collections/sets, chronicles, sets,
|
||||||
./extras, ./ssz, metrics,
|
./extras, ./ssz, metrics,
|
||||||
./spec/[datatypes, digest, helpers, validator],
|
./spec/[datatypes, digest, helpers, validator],
|
||||||
./spec/[state_transition_block, state_transition_epoch]
|
./spec/[state_transition_block, state_transition_epoch],
|
||||||
|
../nbench/bench_lab
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
|
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
|
||||||
declareGauge beacon_current_validators, """Number of status="pending|active|exited|withdrawable" validators in current epoch""" # On epoch transition
|
declareGauge beacon_current_validators, """Number of status="pending|active|exited|withdrawable" validators in current epoch""" # On epoch transition
|
||||||
|
@ -44,7 +45,7 @@ declareGauge beacon_previous_validators, """Number of status="pending|active|exi
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function
|
||||||
func process_slot*(state: var BeaconState) =
|
func process_slot*(state: var BeaconState) {.nbench.}=
|
||||||
# Cache state root
|
# Cache state root
|
||||||
let previous_state_root = hash_tree_root(state)
|
let previous_state_root = hash_tree_root(state)
|
||||||
state.state_roots[state.slot mod SLOTS_PER_HISTORICAL_ROOT] =
|
state.state_roots[state.slot mod SLOTS_PER_HISTORICAL_ROOT] =
|
||||||
|
@ -81,7 +82,7 @@ func get_epoch_validator_count(state: BeaconState): int64 =
|
||||||
result += 1
|
result += 1
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function
|
||||||
proc process_slots*(state: var BeaconState, slot: Slot) =
|
proc process_slots*(state: var BeaconState, slot: Slot) {.nbench.}=
|
||||||
doAssert state.slot <= slot
|
doAssert state.slot <= slot
|
||||||
|
|
||||||
# Catch up to the target slot
|
# Catch up to the target slot
|
||||||
|
@ -96,7 +97,7 @@ proc process_slots*(state: var BeaconState, slot: Slot) =
|
||||||
if is_epoch_transition:
|
if is_epoch_transition:
|
||||||
beacon_current_validators.set(get_epoch_validator_count(state))
|
beacon_current_validators.set(get_epoch_validator_count(state))
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function
|
||||||
proc verifyStateRoot(state: BeaconState, blck: BeaconBlock): bool =
|
proc verifyStateRoot(state: BeaconState, blck: BeaconBlock): bool =
|
||||||
# This is inlined in state_transition(...) in spec.
|
# This is inlined in state_transition(...) in spec.
|
||||||
let state_root = hash_tree_root(state)
|
let state_root = hash_tree_root(state)
|
||||||
|
@ -108,7 +109,7 @@ proc verifyStateRoot(state: BeaconState, blck: BeaconBlock): bool =
|
||||||
true
|
true
|
||||||
|
|
||||||
proc state_transition*(
|
proc state_transition*(
|
||||||
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags): bool =
|
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags): bool {.nbench.}=
|
||||||
## Time in the beacon chain moves by slots. Every time (haha.) that happens,
|
## Time in the beacon chain moves by slots. Every time (haha.) that happens,
|
||||||
## we will update the beacon state. Normally, the state updates will be driven
|
## we will update the beacon state. Normally, the state updates will be driven
|
||||||
## by the contents of a new block, but it may happen that the block goes
|
## by the contents of a new block, but it may happen that the block goes
|
||||||
|
|
|
@ -14,7 +14,7 @@ type
|
||||||
## which blocks are valid - in particular, blocks are not valid if they
|
## which blocks are valid - in particular, blocks are not valid if they
|
||||||
## come from the future as seen from the local clock.
|
## come from the future as seen from the local clock.
|
||||||
##
|
##
|
||||||
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_fork-choice.md#fork-choice
|
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_fork-choice.md#fork-choice
|
||||||
##
|
##
|
||||||
# TODO replace time in chronos with a proper unit type, then this code can
|
# TODO replace time in chronos with a proper unit type, then this code can
|
||||||
# follow:
|
# follow:
|
||||||
|
|
|
@ -0,0 +1,70 @@
# Nimbus-bench

Nbench is a profiler dedicated to the Nimbus Beacon Chain.

It is built as a domain-specific profiler that aims to be as unintrusive as
possible while providing reports complementary to dedicated tools like
``perf``, ``Apple Instruments`` or ``Intel Vtune``, which let you drill down
to a specific line or assembly instruction.

In particular, those tools cannot tell you that your cryptographic subsystem,
your parsing routines or your random number generation should be revisited;
they may sample at too high a resolution (milliseconds) instead of providing
per-function statistics, and they are much less useful without debugging
symbols, which require a lot of space. In other words, ``perf`` and other
generic profilers give you a laser-focused picture, while nbench strives to
give you the big picture.

Features
- by default nbench collects the number of calls and the time spent in
  each function (see the sketch below).
- like ncli or nfuzz, you can provide nbench with isolated scenarios in SSZ
  format to analyze Nimbus behaviour.
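For illustration, the sketch below is not part of the repository; it assumes
`nbench/bench_lab.nim` is importable from the project root and that the build
uses `-d:nbench`. It shows how a procedure is annotated so that nbench can
count its calls and time them, mirroring the sanity check at the bottom of
`bench_lab.nim`:

```nim
import nbench/bench_lab   # assumed import path; provides `nbench`, `BenchMetrics`, `ctBenchMetrics`

# Hypothetical toy workload, for illustration only.
proc sumSquares(n: int): int {.nbench.} =
  ## The `nbench` pragma wraps the body with call counting and timing
  ## when compiled with -d:nbench; otherwise it is a no-op.
  for i in 0 ..< n:
    result += i * i

when isMainModule:
  BenchMetrics = static(ctBenchMetrics)  # publish compile-time metadata at runtime
  discard sumSquares(1_000_000)
  echo BenchMetrics                      # one Metadata entry per annotated procedure
```

Each annotated proc registers one `Metadata` entry at compile time; the
`BenchMetrics = static(ctBenchMetrics)` assignment makes those entries
available at runtime before the first report is printed.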
## Usage

```sh
nim c -d:const_preset=mainnet -d:nbench -d:release -o:build/nbench nbench/nbench.nim
export SCENARIOS=tests/official/fixtures/tests-v0.9.3/mainnet/phase0

# Full state transition
build/nbench cmdFullStateTransition -d="${SCENARIOS}"/sanity/blocks/pyspec_tests/voluntary_exit/ -q=2

# Slot processing
build/nbench cmdSlotProcessing -d="${SCENARIOS}"/sanity/slots/pyspec_tests/slots_1

# Block header processing
build/nbench cmdBlockProcessing --blockProcessingCat=catBlockHeader -d="${SCENARIOS}"/operations/block_header/pyspec_tests/proposer_slashed/

# Proposer slashing
build/nbench cmdBlockProcessing --blockProcessingCat=catProposerSlashings -d="${SCENARIOS}"/operations/proposer_slashing/pyspec_tests/invalid_proposer_index/

# Attester slashing
build/nbench cmdBlockProcessing --blockProcessingCat=catAttesterSlashings -d="${SCENARIOS}"/operations/attester_slashing/pyspec_tests/success_surround/

# Attestation processing
build/nbench cmdBlockProcessing --blockProcessingCat=catAttestations -d="${SCENARIOS}"/operations/attestation/pyspec_tests/success_multi_proposer_index_iterations/

# Deposit processing
build/nbench cmdBlockProcessing --blockProcessingCat=catDeposits -d="${SCENARIOS}"/operations/deposit/pyspec_tests/new_deposit_max/

# Voluntary exit
build/nbench cmdBlockProcessing --blockProcessingCat=catVoluntaryExits -d="${SCENARIOS}"/operations/voluntary_exit/pyspec_tests/validator_exit_in_future/
```
## Running the whole test suite

Warning: this is a proof-of-concept; there is a slight degree of interleaving in the output.
Furthermore, benchmarks are run in parallel and might interfere with each other.
```sh
nim c -d:const_preset=mainnet -d:nbench -d:release -o:build/nbench nbench/nbench.nim
nim c -o:build/nbench_tests nbench/nbench_official_fixtures.nim
nbench_tests --nbench=build/nbench --tests=tests/official/fixtures/tests-v0.9.4/mainnet/
```
## TODO Reporting

- Dump results as CSV files for archival, a perf regression suite and/or data mining.
- Piggyback on eth-metrics to report over Prometheus or StatsD.
- Augment reports via label pragmas that can be applied file-wide to tag
  "cryptography", "block_transition" or "database" and get a global view of
  the system (a rough sketch follows).
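A rough sketch of what such tagging could look like, assuming the `ntag`
pragma declared in `bench_lab.nim` were exported and consumed by the reporting
code; this is hypothetical usage, not something implemented yet:

```nim
import nbench/bench_lab   # assumed import path; `ntag` would need to be exported first

# Hypothetical: tag an annotated proc so reports can group it under "crypto".
proc verifyAggregate(sigs: seq[byte]): bool {.nbench, ntag: "crypto".} =
  ## Dummy body for illustration only.
  sigs.len > 0

# Or, applied file-wide via a push pragma at the top of a module:
# {.push ntag: "block_transition".}
```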
|
|
@ -0,0 +1,135 @@
|
||||||
|
# beacon_chain
|
||||||
|
# Copyright (c) 2018 Status Research & Development GmbH
|
||||||
|
# Licensed and distributed under either of
|
||||||
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
||||||
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
||||||
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||||
|
|
||||||
|
import
|
||||||
|
# Standard lib
|
||||||
|
macros, std/[monotimes, times],
|
||||||
|
# Internal
|
||||||
|
platforms/x86
|
||||||
|
|
||||||
|
# Bench laboratory
|
||||||
|
# --------------------------------------------------
|
||||||
|
#
|
||||||
|
# This file defines support data structures to enable profiling.
|
||||||
|
|
||||||
|
# Utils
|
||||||
|
# --------------------------------------------------
|
||||||
|
const someGcc = defined(gcc) or defined(llvm_gcc) or defined(clang) or defined(icc)
|
||||||
|
const hasThreadSupport = defined(threads)
|
||||||
|
|
||||||
|
proc atomicInc*(memLoc: var int64, x = 1'i64): int64 =
|
||||||
|
when someGcc and hasThreadSupport:
|
||||||
|
result = atomicAddFetch(memLoc.addr, x, ATOMIC_RELAXED)
|
||||||
|
elif defined(vcc) and hasThreadSupport:
|
||||||
|
result = addAndFetch(memLoc.addr, x)
|
||||||
|
result += x
|
||||||
|
else:
|
||||||
|
memloc += x
|
||||||
|
result = memLoc
|
||||||
|
|
||||||
|
# Types
|
||||||
|
# --------------------------------------------------
|
||||||
|
|
||||||
|
type
|
||||||
|
Metadata* = object
|
||||||
|
procName*: string
|
||||||
|
module: string
|
||||||
|
package: string
|
||||||
|
tag: string # Can be change to multi-tags later
|
||||||
|
# TODO - replace by eth-metrics once we figure out a CSV/JSON/Console backend
|
||||||
|
numCalls*: int64
|
||||||
|
cumulatedTimeNs*: int64 # in nanoseconds
|
||||||
|
cumulatedCycles*: int64
|
||||||
|
|
||||||
|
var ctBenchMetrics*{.compileTime.}: seq[Metadata]
|
||||||
|
## Metrics are collected here, this is just a temporary holder of compileTime values
|
||||||
|
## Unfortunately the "seq" is emptied when passing the compileTime/runtime boundaries
|
||||||
|
## due to Nim bugs
|
||||||
|
|
||||||
|
var BenchMetrics*: seq[Metadata]
|
||||||
|
## We can't directly use it at compileTime because it doesn't exist.
|
||||||
|
## We need `BenchMetrics = static(ctBenchMetrics)`
|
||||||
|
## To transfer the compileTime content to runtime at an opportune time.
|
||||||
|
|
||||||
|
template ntag(tagname: string){.pragma.}
|
||||||
|
## This will allow tagging proc in the future with
|
||||||
|
## "crypto", "ssz", "block_transition", "epoch_transition" ...
|
||||||
|
|
||||||
|
# Symbols
|
||||||
|
# --------------------------------------------------
|
||||||
|
|
||||||
|
template fnEntry(name: string, id: int, startTime, startCycle: untyped): untyped =
|
||||||
|
## Bench tracing to insert on function entry
|
||||||
|
{.noSideEffect.}:
|
||||||
|
discard BenchMetrics[id].numCalls.atomicInc()
|
||||||
|
let startTime = getMonoTime()
|
||||||
|
let startCycle = getTicks()
|
||||||
|
|
||||||
|
const nbench_trace {.booldefine.} = off # For manual "debug-echo"-style timing.
|
||||||
|
when nbench_trace:
|
||||||
|
# strformat doesn't work in templates.
|
||||||
|
from strutils import alignLeft, formatFloat
|
||||||
|
|
||||||
|
template fnExit(name: string, id: int, startTime, startCycle: untyped): untyped =
|
||||||
|
## Bench tracing to insert before each function exit
|
||||||
|
{.noSideEffect.}:
|
||||||
|
let stopCycle = getTicks()
|
||||||
|
let stopTime = getMonoTime()
|
||||||
|
let elapsedCycles = stopCycle - startCycle
|
||||||
|
let elapsedTime = inNanoseconds(stopTime - startTime)
|
||||||
|
|
||||||
|
discard BenchMetrics[id].cumulatedTimeNs.atomicInc(elapsedTime)
|
||||||
|
discard BenchMetrics[id].cumulatedCycles.atomicInc(elapsedCycles)
|
||||||
|
|
||||||
|
when nbench_trace:
|
||||||
|
# Advice: Use "when name == relevantProc" to isolate specific procedures.
|
||||||
|
# strformat doesn't work in templates.
|
||||||
|
echo static(alignLeft(name, 50)),
|
||||||
|
"Time (ms): ", alignLeft(formatFloat(elapsedTime.float64 * 1e-6, precision=3), 10),
|
||||||
|
"Cycles (billions): ", formatFloat(elapsedCycles.float64 * 1e-9, precision=3)
|
||||||
|
|
||||||
|
macro nbenchAnnotate(procAst: untyped): untyped =
|
||||||
|
procAst.expectKind({nnkProcDef, nnkFuncDef})
|
||||||
|
|
||||||
|
let id = ctBenchMetrics.len
|
||||||
|
let name = procAst[0]
|
||||||
|
# TODO, get the module and the package the proc is coming from
|
||||||
|
# and the tag "crypto", "ssz", "block_transition", "epoch_transition" ...
|
||||||
|
|
||||||
|
ctBenchMetrics.add Metadata(procName: $name, numCalls: 0, cumulatedTimeNs: 0, cumulatedCycles: 0)
|
||||||
|
var newBody = newStmtList()
|
||||||
|
let startTime = genSym(nskLet, "nbench_" & $name & "_startTime_")
|
||||||
|
let startCycle = genSym(nskLet, "nbench_" & $name & "_startCycles_")
|
||||||
|
newBody.add getAst(fnEntry($name, id, startTime, startCycle))
|
||||||
|
newbody.add nnkDefer.newTree(getAst(fnExit($name, id, startTime, startCycle)))
|
||||||
|
newBody.add procAst.body
|
||||||
|
|
||||||
|
procAst.body = newBody
|
||||||
|
result = procAst
|
||||||
|
|
||||||
|
template nbench*(procBody: untyped): untyped =
|
||||||
|
when defined(nbench):
|
||||||
|
nbenchAnnotate(procBody)
|
||||||
|
else:
|
||||||
|
procBody
|
||||||
|
|
||||||
|
# Sanity checks
|
||||||
|
# ---------------------------------------------------
|
||||||
|
|
||||||
|
when isMainModule:
|
||||||
|
|
||||||
|
expandMacros:
|
||||||
|
proc foo(x: int): int{.nbench.} =
|
||||||
|
echo "Hey hey hey"
|
||||||
|
result = x
|
||||||
|
|
||||||
|
BenchMetrics = static(ctBenchMetrics)
|
||||||
|
|
||||||
|
echo BenchMetrics
|
||||||
|
discard foo(10)
|
||||||
|
echo BenchMetrics
|
||||||
|
doAssert BenchMetrics[0].numCalls == 1
|
|
@ -0,0 +1,5 @@
|
||||||
|
import scenarios, confutils
|
||||||
|
|
||||||
|
let scenario = ScenarioConf.load()
|
||||||
|
|
||||||
|
echo scenario.attestation
|
|
@ -0,0 +1,111 @@
|
||||||
|
# beacon_chain
|
||||||
|
# Copyright (c) 2018 Status Research & Development GmbH
|
||||||
|
# Licensed and distributed under either of
|
||||||
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
||||||
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
||||||
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||||
|
|
||||||
|
import
|
||||||
|
# Standard library
|
||||||
|
os,
|
||||||
|
# Status libraries
|
||||||
|
confutils, serialization,
|
||||||
|
# Beacon-chain
|
||||||
|
../beacon_chain/spec/datatypes,
|
||||||
|
# Bench specific
|
||||||
|
scenarios, bench_lab, reports
|
||||||
|
|
||||||
|
# Example:
|
||||||
|
# build/nbench cmdFullStateTransition -d
|
||||||
|
|
||||||
|
# Nimbus Bench
|
||||||
|
# --------------------------------------------------
|
||||||
|
#
|
||||||
|
# Run select scenarios and get statistics on Nimbus runtime behaviour
|
||||||
|
|
||||||
|
when not defined(nbench):
|
||||||
|
{.error: "`nbench` requires `-d:nbench` flag to enable tracing on procedures.".}
|
||||||
|
|
||||||
|
proc main() =
|
||||||
|
# TODO versioning
|
||||||
|
echo "Nimbus bench, preset \"", const_preset, '\"'
|
||||||
|
|
||||||
|
BenchMetrics = static(ctBenchMetrics) # Make compile-time data available at runtime
|
||||||
|
let scenario = ScenarioConf.load()
|
||||||
|
|
||||||
|
case scenario.cmd
|
||||||
|
of cmdFullStateTransition:
|
||||||
|
runFullTransition(
|
||||||
|
scenario.scenarioDir.string,
|
||||||
|
scenario.preState,
|
||||||
|
scenario.blocksPrefix,
|
||||||
|
scenario.blocksQty,
|
||||||
|
scenario.skipBLS
|
||||||
|
)
|
||||||
|
of cmdSlotProcessing:
|
||||||
|
runProcessSlots(
|
||||||
|
scenario.scenarioDir.string,
|
||||||
|
scenario.preState,
|
||||||
|
scenario.numSlots
|
||||||
|
)
|
||||||
|
of cmdBlockProcessing:
|
||||||
|
case scenario.blockProcessingCat
|
||||||
|
of catBlockHeader:
|
||||||
|
runProcessBlockHeader(
|
||||||
|
scenario.scenarioDir.string,
|
||||||
|
scenario.preState,
|
||||||
|
"block", # Pending https://github.com/status-im/nim-confutils/issues/11
|
||||||
|
# scenario.attesterSlashing
|
||||||
|
scenario.skipBLS
|
||||||
|
)
|
||||||
|
of catProposerSlashings:
|
||||||
|
runProcessProposerSlashing(
|
||||||
|
scenario.scenarioDir.string,
|
||||||
|
scenario.preState,
|
||||||
|
"proposer_slashing", # Pending https://github.com/status-im/nim-confutils/issues/11
|
||||||
|
# scenario.attesterSlashing
|
||||||
|
scenario.skipBLS
|
||||||
|
)
|
||||||
|
of catAttesterSlashings:
|
||||||
|
runProcessAttesterSlashing(
|
||||||
|
scenario.scenarioDir.string,
|
||||||
|
scenario.preState,
|
||||||
|
"attester_slashing" # Pending https://github.com/status-im/nim-confutils/issues/11
|
||||||
|
# scenario.attesterSlashing
|
||||||
|
)
|
||||||
|
of catAttestations:
|
||||||
|
runProcessAttestation(
|
||||||
|
scenario.scenarioDir.string,
|
||||||
|
scenario.preState,
|
||||||
|
"attestation", # Pending https://github.com/status-im/nim-confutils/issues/11
|
||||||
|
# scenario.attestation,
|
||||||
|
scenario.skipBLS
|
||||||
|
)
|
||||||
|
of catDeposits:
|
||||||
|
runProcessDeposit(
|
||||||
|
scenario.scenarioDir.string,
|
||||||
|
scenario.preState,
|
||||||
|
"deposit", # Pending https://github.com/status-im/nim-confutils/issues/11
|
||||||
|
# scenario.deposit,
|
||||||
|
scenario.skipBLS
|
||||||
|
)
|
||||||
|
of catVoluntaryExits:
|
||||||
|
runProcessVoluntaryExits(
|
||||||
|
scenario.scenarioDir.string,
|
||||||
|
scenario.preState,
|
||||||
|
"voluntary_exit", # Pending https://github.com/status-im/nim-confutils/issues/11
|
||||||
|
# scenario.voluntary_exit,
|
||||||
|
scenario.skipBLS
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
quit "Unsupported"
|
||||||
|
else:
|
||||||
|
quit "Unsupported"
|
||||||
|
|
||||||
|
# TODO: Nimbus not fine-grained enough in UpdateFlags
|
||||||
|
let flags = if scenario.skipBLS: "[skipBLS, skipStateRootVerification]"
|
||||||
|
else: "[withBLS, withStateRootVerification]"
|
||||||
|
reportCli(BenchMetrics, const_preset, flags)
|
||||||
|
|
||||||
|
when isMainModule:
|
||||||
|
main()
|
|
@@ -0,0 +1 @@
-d:nbench
@@ -0,0 +1,70 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  # Standard library
  os, osproc, strformat,
  # Status libraries
  confutils

# Nimbus Bench Batch
# --------------------------------------------------
# This script calls Nimbus bench in parallel batch
# to run a series of benchmarks from the official SSZ tests

type
  CmdLists = seq[string]

proc collectTarget(cmds: var CmdLists, nbench, name, cmd, cat, path: string) =
  echo "----------------------------------------"
  echo "Collecting ", name, " transitions"
  echo "----------------------------------------"
  for folder in walkDirRec(path, yieldFilter = {pcDir}, relative = true):
    echo "Found: ", folder
    var cat = cat
    if cmd == "cmdBlockProcessing":
      cat = "--blockProcessingCat=" & cat
    cmds.add &"{nbench} {cmd} {cat} -d={path/folder}"

proc collectBenchTargets(nbench, basePath: string): CmdLists =
  block: # Full state transitions
    echo "----------------------------------------"
    echo "Collecting full state transitions"
    echo "----------------------------------------"
    let path = basePath/"phase0"/"sanity"/"blocks"/"pyspec_tests"
    for folder in walkDirRec(path, yieldFilter = {pcDir}, relative = true):
      var countBlocks = 0
      for _ in walkFiles(path/folder/"blocks_*.ssz"):
        inc countBlocks
      echo "Found: ", folder, " with ", countBlocks, " blocks"
      result.add &"{nbench} cmdFullStateTransition -d={path/folder} -q={$countBlocks}"
  block: # Slot processing
    let path = basePath/"phase0"/"sanity"/"slots"/"pyspec_tests"
    result.collectTarget(nbench, "slot", "cmdSlotProcessing", "", path)
  block: # Attestation
    let path = basePath/"phase0"/"operations"/"attestation"/"pyspec_tests"
    result.collectTarget(nbench, "attestation", "cmdBlockProcessing", "catAttestations", path)
  block: # Attester_slashing
    let path = basePath/"phase0"/"operations"/"attester_slashing"/"pyspec_tests"
    result.collectTarget(nbench, "attester_slashing", "cmdBlockProcessing", "catAttesterSlashings", path)
  block: # block_header
    let path = basePath/"phase0"/"operations"/"block_header"/"pyspec_tests"
    result.collectTarget(nbench, "block_header", "cmdBlockProcessing", "catBlockHeader", path)
  block: # deposit
    let path = basePath/"phase0"/"operations"/"deposit"/"pyspec_tests"
    result.collectTarget(nbench, "deposit", "cmdBlockProcessing", "catDeposits", path)
  block: # proposer_slashing
    let path = basePath/"phase0"/"operations"/"proposer_slashing"/"pyspec_tests"
    result.collectTarget(nbench, "proposer_slashing", "cmdBlockProcessing", "catProposerSlashings", path)
  block: # voluntary_exit
    let path = basePath/"phase0"/"operations"/"voluntary_exit"/"pyspec_tests"
    result.collectTarget(nbench, "voluntary_exit", "cmdBlockProcessing", "catVoluntaryExits", path)

cli do(nbench: string, tests: string):
  let cmdLists = collectBenchTargets(nbench, tests)
  let err = execProcesses(cmdLists)
  quit err
@@ -0,0 +1,125 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# Cpu Name
# -------------------------------------------------------

{.passC:"-std=gnu99".} # TODO may conflict with milagro "-std=c99"

proc cpuID(eaxi, ecxi: int32): tuple[eax, ebx, ecx, edx: int32] =
  when defined(vcc):
    proc cpuidVcc(cpuInfo: ptr int32; functionID: int32)
      {.importc: "__cpuidex", header: "intrin.h".}
    cpuidVcc(addr result.eax, eaxi, ecxi)
  else:
    var (eaxr, ebxr, ecxr, edxr) = (0'i32, 0'i32, 0'i32, 0'i32)
    asm """
      cpuid
      :"=a"(`eaxr`), "=b"(`ebxr`), "=c"(`ecxr`), "=d"(`edxr`)
      :"a"(`eaxi`), "c"(`ecxi`)"""
    (eaxr, ebxr, ecxr, edxr)

proc cpuName*(): string =
  var leaves {.global.} = cast[array[48, char]]([
    cpuID(eaxi = 0x80000002'i32, ecxi = 0),
    cpuID(eaxi = 0x80000003'i32, ecxi = 0),
    cpuID(eaxi = 0x80000004'i32, ecxi = 0)])
  result = $cast[cstring](addr leaves[0])

# Counting cycles
# -------------------------------------------------------

# From Linux
#
# The RDTSC instruction is not ordered relative to memory
# access. The Intel SDM and the AMD APM are both vague on this
# point, but empirically an RDTSC instruction can be
# speculatively executed before prior loads. An RDTSC
# immediately after an appropriate barrier appears to be
# ordered as a normal load, that is, it provides the same
# ordering guarantees as reading from a global memory location
# that some other imaginary CPU is updating continuously with a
# time stamp.
#
# From Intel SDM
# https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/ia-32-ia-64-benchmark-code-execution-paper.pdf

proc getTicks*(): int64 {.inline.} =
  when defined(vcc):
    proc rdtsc(): int64 {.sideeffect, importc: "__rdtsc", header: "<intrin.h>".}
    proc lfence() {.importc: "__mm_lfence", header: "<intrin.h>".}

    lfence()
    return rdtsc()

  else:
    when defined(amd64):
      var lo, hi: int64
      # TODO: Provide a compile-time flag for RDTSCP support
      #       and use it instead of lfence + RDTSC
      {.emit: """asm volatile(
        "lfence\n"
        "rdtsc\n"
        : "=a"(`lo`), "=d"(`hi`)
        :
        : "memory"
      );""".}
      return (hi shl 32) or lo
    else: # 32-bit x86
      # TODO: Provide a compile-time flag for RDTSCP support
      #       and use it instead of lfence + RDTSC
      {.emit: """asm volatile(
        "lfence\n"
        "rdtsc\n"
        : "=a"(`result`)
        :
        : "memory"
      );""".}

# Sanity check
# -------------------------------------------------------

when isMainModule:

  import std/[times, monotimes, math, volatile, os]

  block: # CpuName
    echo "Your CPU is: "
    echo "  ", cpuName()

  block: # Cycle Count
    echo "The cost of an int64 modulo operation on your platform is:"

    # Dealing with compiler optimization on microbenchmarks is hard
    {.pragma: volatile, codegenDecl: "volatile $# $#".}

    proc modNtimes(a, b: int64, N: int) {.noinline.} =
      var c{.volatile.}: int64
      for i in 0 ..< N:
        c.addr.volatileStore(a.unsafeAddr.volatileLoad() mod b.unsafeAddr.volatileLoad())

    let a {.volatile.} = 1000003'i64 # a prime number
    let b {.volatile.} = 10007'i64   # another prime number
    let N {.volatile.} = 3_000_000

    let startMono = getMonoTime()
    let startCycles = getTicks()
    modNtimes(a, b, N)
    let stopCycles = getTicks()
    let stopMono = getMonoTime()

    let elapsedMono = inNanoseconds(stopMono - startMono)
    let elapsedCycles = stopCycles - startCycles
    let timerResolutionGHz = round(elapsedCycles.float32 / elapsedMono.float32, 3)

    echo "  ", (elapsedCycles) div N, " cycles"
    echo "  ", (elapsedMono) div N, " ns/iter"
    echo "  ", timerResolutionGHz, " GHz (timer resolution)"

  block: # CPU Frequency
    discard # TODO, surprisingly this is very complex
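The TODO above suggests using RDTSCP where supported instead of pairing LFENCE with RDTSC. A hedged sketch of what that x86-64 variant could look like follows; the `getTicksRdtscp` name is an assumption for illustration and is not part of this commit.

when defined(amd64):
  proc getTicksRdtscp(): int64 {.inline.} =
    # RDTSCP waits for prior instructions to complete before sampling the TSC,
    # and it clobbers RCX with the core's TSC_AUX value.
    var lo, hi: int64
    {.emit: """asm volatile(
      "rdtscp\n"
      : "=a"(`lo`), "=d"(`hi`)
      :
      : "rcx", "memory"
    );""".}
    return (hi shl 32) or lo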
@@ -0,0 +1,50 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  # Standard library
  strformat, strutils,
  # Bench
  bench_lab

template cpuX86(body: untyped): untyped =
  when defined(i386) or defined(amd64):
    body

cpuX86:
  import platforms/x86

# Reporting benchmark result
# -------------------------------------------------------

proc reportCli*(metrics: seq[Metadata], preset, flags: string) =

  cpuX86:
    let name = cpuName()
    echo "\nCPU: ", name

  # https://blog.trailofbits.com/2019/10/03/tsc-frequency-for-all-better-profiling-and-benchmarking/
  # https://www.agner.org/optimize/blog/read.php?i=838
  echo "The CPU Cycle Count is indicative only. It cannot be used to compare across systems, works at your CPU nominal frequency and is sensitive to overclocking, throttling and frequency scaling (powersaving and Turbo Boost)."

  const lineSep = &"""|{'-'.repeat(50)}|{'-'.repeat(14)}|{'-'.repeat(15)}|{'-'.repeat(17)}|{'-'.repeat(26)}|{'-'.repeat(26)}|"""
  echo "\n"
  echo lineSep
  echo &"""|{"Procedures (" & preset & ')':^50}|{"# of Calls":^14}|{"Time (ms)":^15}|{"Avg Time (ms)":^17}|{"CPU cycles (in billions)":^26}|{"Avg cycles (in billions)":^26}|"""
  echo &"""|{flags:^50}|{' '.repeat(14)}|{' '.repeat(15)}|{' '.repeat(17)}|{"indicative only":^26}|{"indicative only":^26}|"""
  echo lineSep
  for m in metrics:
    if m.numCalls == 0:
      continue
    # TODO: running variance / standard deviation but the Welford method is quite costly.
    #       https://nim-lang.org/docs/stats.html / https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
    let cumulTimeMs = m.cumulatedTimeNs.float64 * 1e-6
    let avgTimeMs = cumulTimeMs / m.numCalls.float64
    let cumulCyclesBillions = m.cumulatedCycles.float64 * 1e-9
    let avgCyclesBillions = cumulCyclesBillions / m.numCalls.float64
    echo &"""|{m.procName:<50}|{m.numCalls:>14}|{cumulTimeMs:>15.3f}|{avgTimeMs:>17.3f}|{cumulCyclesBillions:>26.3f}|{avgCyclesBillions:>26.3f}|"""
    echo lineSep
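The TODO above references Welford's online algorithm for a running variance of per-call timings. A minimal sketch, assuming individual call durations are fed in one by one; the `RunningVariance` type and its field names are illustrative and not part of this commit.

type RunningVariance = object
  count: int
  mean, m2: float64   # m2 accumulates the sum of squared deviations from the mean

proc push(r: var RunningVariance, sample: float64) =
  # O(1) update per observed sample, numerically stable.
  inc r.count
  let delta = sample - r.mean
  r.mean += delta / r.count.float64
  r.m2 += delta * (sample - r.mean)

func variance(r: RunningVariance): float64 =
  if r.count > 1: r.m2 / float64(r.count - 1) else: 0.0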
@@ -0,0 +1,209 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  # Standard library
  os,
  # Status libraries
  confutils/defs, serialization,
  # Beacon-chain
  ../beacon_chain/spec/[datatypes, crypto, beaconstate, validator, state_transition_block],
  ../beacon_chain/[ssz, state_transition, extras]

# Nimbus Bench - Scenario configuration
# --------------------------------------------------

type
  StartupCommand* = enum
    noCommand
    cmdFullStateTransition
    cmdSlotProcessing
    cmdBlockProcessing
    cmdEpochProcessing

  BlockProcessingCat* = enum
    catBlockHeader
    catRANDAO
    catEth1Data
    catProposerSlashings
    catAttesterSlashings
    catAttestations
    catDeposits
    catVoluntaryExits

  ScenarioConf* = object
    scenarioDir* {.
      desc: "The directory of your benchmark scenario"
      name: "scenario-dir"
      abbr: "d"
      required .}: InputDir
    preState* {.
      desc: "The name of your pre-state (without .ssz)"
      name: "pre"
      abbr: "p"
      defaultValue: "pre".}: string
    blocksPrefix* {.
      desc: "The prefix of your blocks file, for exemple \"blocks_\" for blocks in the form \"blocks_XX.ssz\""
      name: "blocks-prefix"
      abbr: "b"
      defaultValue: "blocks_".}: string
    blocksQty* {.
      desc: "The number of blocks to process for this transition. Blocks should start at 0."
      name: "block-quantity"
      abbr: "q"
      defaultValue: 1.}: int
    skipBLS*{.
      desc: "Skip BLS public keys and signature verification"
      name: "skip-bls"
      defaultValue: true.}: bool
    case cmd*{.
      command
      defaultValue: noCommand }: StartupCommand
    of noCommand:
      discard
    of cmdFullStateTransition:
      discard
    of cmdSlotProcessing:
      numSlots* {.
        desc: "The number of slots the pre-state will be advanced by"
        name: "num-slots"
        abbr: "s"
        defaultValue: 1.}: uint64
    of cmdBlockProcessing:
      case blockProcessingCat* {.
        desc: "block transitions"
        # name: "process-blocks" # Pending https://github.com/status-im/nim-confutils/issues/10
        implicitlySelectable
        required .}: BlockProcessingCat
      of catBlockHeader:
        blockHeader*{.
          desc: "Block header filename (without .ssz)"
          name: "block-header"
          defaultValue: "block".}: string
      of catRANDAO:
        discard
      of catEth1Data:
        discard
      of catProposerSlashings:
        proposerSlashing*{.
          desc: "Proposer slashing filename (without .ssz)"
          name: "proposer-slashing"
          defaultValue: "proposer_slashing".}: string
      of catAttesterSlashings:
        attesterSlashing*{.
          desc: "Attester slashing filename (without .ssz)"
          name: "attester-slashing"
          defaultValue: "attester_slashing".}: string
      of catAttestations:
        attestation*{.
          desc: "Attestation filename (without .ssz)"
          name: "attestation"
          defaultValue: "attestation".}: string
      of catDeposits:
        deposit*{.
          desc: "Deposit filename (without .ssz)"
          name: "deposit"
          defaultValue: "deposit".}: string
      of catVoluntaryExits:
        voluntaryExit*{.
          desc: "Voluntary Exit filename (without .ssz)"
          name: "voluntary_exit"
          defaultValue: "voluntary_exit".}: string
    of cmdEpochProcessing:
      discard

proc parseSSZ(path: string, T: typedesc): T =
  try:
    result = SSZ.loadFile(path, T)
  except SerializationError as err:
    writeStackTrace()
    stderr.write "SSZ load issue for file \"", path, "\"\n"
    stderr.write err.formatMsg(path), "\n"
    quit 1
  except CatchableError as err:
    writeStackTrace()
    stderr.write "SSZ load issue for file \"", path, "\"\n"
    quit 1

proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, skipBLS: bool) =
  let prePath = dir / preState & ".ssz"

  var state: ref BeaconState
  new state
  echo "Running: ", prePath
  state[] = parseSSZ(prePath, BeaconState)

  for i in 0 ..< blocksQty:
    let blockPath = dir / blocksPrefix & $i & ".ssz"
    echo "Processing: ", blockPath

    let blck = parseSSZ(blockPath, SignedBeaconBlock)
    let flags = if skipBLS: {skipValidation} # TODO: this also skips state root verification
                else: {}
    let success = state_transition(state[], blck.message, flags)
    echo "State transition status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"

proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
  let prePath = dir / preState & ".ssz"

  var state: ref BeaconState
  new state
  echo "Running: ", prePath
  state[] = parseSSZ(prePath, BeaconState)

  process_slots(state[], state.slot + numSlots)

template processScenarioImpl(
          dir, preState: string, skipBLS: bool,
          transitionFn, paramName: untyped,
          ConsensusObject: typedesc,
          needFlags, needCache: static bool): untyped =
  let prePath = dir/preState & ".ssz"

  var state: ref BeaconState
  new state
  echo "Running: ", prePath
  state[] = parseSSZ(prePath, BeaconState)

  var consObj: ref `ConsensusObject`
  new consObj
  when needCache:
    var cache = get_empty_per_epoch_cache()
  when needFlags:
    let flags = if skipBLS: {skipValidation} # TODO: this also skips state root verification
                else: {}

  let consObjPath = dir/paramName & ".ssz"
  echo "Processing: ", consObjPath
  consObj[] = parseSSZ(consObjPath, ConsensusObject)

  when needFlags and needCache:
    let success = transitionFn(state[], consObj[], flags, cache)
  elif needFlags:
    let success = transitionFn(state[], consObj[], flags)
  elif needCache:
    let success = transitionFn(state[], consObj[], cache)
  else:
    let success = transitionFn(state[], consObj[])

  echo astToStr(transitionFn) & " status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"

template genProcessScenario(name, transitionFn, paramName: untyped, ConsensusObject: typedesc, needFlags, needCache: static bool): untyped =
  when needFlags:
    proc `name`*(dir, preState, `paramName`: string, skipBLS: bool) =
      processScenarioImpl(dir, preState, skipBLS, transitionFn, paramName, ConsensusObject, needFlags, needCache)
  else:
    proc `name`*(dir, preState, `paramName`: string) =
      # skipBLS is a dummy to avoid undeclared identifier
      processScenarioImpl(dir, preState, skipBLS = false, transitionFn, paramName, ConsensusObject, needFlags, needCache)

genProcessScenario(runProcessBlockHeader, process_block_header, block_header, BeaconBlock, needFlags = true, needCache = true)
genProcessScenario(runProcessProposerSlashing, process_proposer_slashing, proposer_slashing, ProposerSlashing, needFlags = true, needCache = true)
genProcessScenario(runProcessAttestation, process_attestation, attestation, Attestation, needFlags = true, needCache = true)
genProcessScenario(runProcessAttesterSlashing, process_attester_slashing, att_slash, AttesterSlashing, needFlags = false, needCache = true)
genProcessScenario(runProcessDeposit, process_deposit, deposit, Deposit, needFlags = true, needCache = false)
genProcessScenario(runProcessVoluntaryExits, process_voluntary_exit, deposit, SignedVoluntaryExit, needFlags = true, needCache = false)
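For readability, roughly what one of the `genProcessScenario` instantiations above (the deposit one, with `needFlags = true` and `needCache = false`) is expected to generate. This is an illustrative expansion under the template's own definition, not the literal compiler output.

proc runProcessDeposit*(dir, preState, deposit: string, skipBLS: bool) =
  processScenarioImpl(dir, preState, skipBLS,
    process_deposit, deposit, Deposit,
    needFlags = true, needCache = false)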
@@ -1,3 +1,10 @@
# beacon_chain
# Copyright (c) 2019 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  confutils, stats, times, std/monotimes,
  strformat,

@@ -17,7 +17,7 @@ import # Unit test
  ./test_beaconstate,
  ./test_block_pool,
  ./test_helpers,
  #./test_interop, TODO check zcli
  ./test_interop,
  ./test_ssz,
  ./test_state_transition,
  ./test_sync_protocol,
@@ -25,7 +25,7 @@ func signMockDepositData(
  # No state --> Genesis
  deposit_data.signature = bls_sign(
    key = privkey,
    msg = deposit_data.hash_tree_root().data,
    msg = deposit_data.getDepositMessage().hash_tree_root().data,
    domain = compute_domain(
      DOMAIN_DEPOSIT,
      default(array[4, byte]) # Genesis is fork_version 0

@@ -39,7 +39,7 @@ func signMockDepositData(
  ) =
  deposit_data.signature = bls_sign(
    key = privkey,
    msg = deposit_data.hash_tree_root().data,
    msg = deposit_data.getDepositMessage().hash_tree_root().data,
    domain = get_domain(
      state,
      DOMAIN_DEPOSIT

@@ -1 +1 @@
Subproject commit a26def415f2969d625e39bfc160c97497dfe37b3
Subproject commit 330d343cb5e5c68e16eb57963e46d64a424751e6
@@ -36,7 +36,7 @@ proc readValue*(r: var JsonReader, a: var seq[byte]) {.inline.} =

const
  FixturesDir* = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
  SszTestsDir* = FixturesDir/"tests-v0.9.3"
  SszTestsDir* = FixturesDir/"tests-v0.9.4"

proc parseTest*(path: string, Format: typedesc[Json or SSZ], T: typedesc): T =
  try:

@@ -20,7 +20,7 @@ const
  SpecDir = currentSourcePath.rsplit(DirSep, 1)[0] /
    ".."/".."/"beacon_chain"/"spec"
  FixturesDir = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
  Config = FixturesDir/"tests-v0.9.2"/const_preset/"config.yaml"
  Config = FixturesDir/"tests-v0.9.4"/const_preset/"config.yaml"

type
  CheckedType = SomeInteger or Slot or Epoch

@@ -119,5 +119,5 @@ proc checkConfig() =
  else:
    check: ConstsToCheck[constant] == value.getBiggestInt().uint64()

suite "Official - 0.9.2 - constants & config " & preset():
suite "Official - 0.9.4 - constants & config " & preset():
  checkConfig()
@@ -30,10 +30,7 @@ template runTest(identifier: untyped) =

  proc `testImpl _ operations_attester_slashing _ identifier`() =

    var flags: UpdateFlags
    var prefix: string
    if not existsFile(testDir/"meta.yaml"):
      flags.incl skipValidation
    if existsFile(testDir/"post.ssz"):
      prefix = "[Valid] "
    else:
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -26,7 +26,7 @@ import

const
  FixturesDir = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
  SSZDir = FixturesDir/"tests-v0.9.3"/const_preset/"phase0"/"ssz_static"
  SSZDir = FixturesDir/"tests-v0.9.4"/const_preset/"phase0"/"ssz_static"

type
  SSZHashTreeRoot = object

@@ -102,5 +102,5 @@ proc runSSZtests() =
  else:
    raise newException(ValueError, "Unsupported test: " & sszType)

suite "Official - 0.9.2 - SSZ consensus objects " & preset():
suite "Official - 0.9.4 - SSZ consensus objects " & preset():
  runSSZtests()

@@ -23,7 +23,7 @@ import

const
  FixturesDir = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
  SSZDir = FixturesDir/"tests-v0.9.2"/"general"/"phase0"/"ssz_generic"
  SSZDir = FixturesDir/"tests-v0.9.4"/"general"/"phase0"/"ssz_generic"

type
  SSZHashTreeRoot = object

@@ -278,5 +278,5 @@ proc runSSZtests() =
  # test "Testing " & name & " inputs (" & $T & ") - invalid":
  #   const path = SSZDir/name/"invalid"

suite "Official - 0.9.0 - SSZ generic types":
suite "Official - 0.9.4 - SSZ generic types":
  runSSZtests()
@@ -1,12 +1,12 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# process_attestation (beaconstate.nim)
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#attestations
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#attestations
# ---------------------------------------------------------------

{.used.}

@@ -7,7 +7,7 @@


# process_deposit (beaconstate.nim)
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#deposits
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#deposits
# ---------------------------------------------------------------

{.used.}
@@ -46,8 +46,8 @@ proc finalizeOn234(state: var BeaconState, epoch: Epoch, sufficient_support: boo
  state.justification_bits = 0'u8 # Bitvector of length 4
  # mock 3rd and 4th latest epochs as justified
  # indices are pre-shift
  state.justification_bits.raiseBit 1
  state.justification_bits.setBit 1
  state.justification_bits.raiseBit 2
  state.justification_bits.setBit 2
  # mock the 2nd latest epoch as justifiable, with 4th as the source
  addMockAttestations(
    state,

@@ -92,7 +92,7 @@ proc finalizeOn23(state: var BeaconState, epoch: Epoch, sufficient_support: bool
  state.justification_bits = 0'u8 # Bitvector of length 4
  # mock 3rd as justified
  # indices are pre-shift
  state.justification_bits.raiseBit 1
  state.justification_bits.setBit 1
  # mock the 2nd latest epoch as justifiable, with 3rd as the source
  addMockAttestations(
    state,

@@ -137,7 +137,7 @@ proc finalizeOn123(state: var BeaconState, epoch: Epoch, sufficient_support: boo
  state.justification_bits = 0'u8 # Bitvector of length 4
  # mock 3rd as justified
  # indices are pre-shift
  state.justification_bits.raiseBit 1
  state.justification_bits.setBit 1
  # mock the 2nd latest epoch as justifiable, with 5th as the source
  addMockAttestations(
    state,

@@ -190,7 +190,7 @@ proc finalizeOn12(state: var BeaconState, epoch: Epoch, sufficient_support: bool
  state.justification_bits = 0'u8 # Bitvector of length 4
  # mock 3rd as justified
  # indices are pre-shift
  state.justification_bits.raiseBit 0
  state.justification_bits.setBit 0
  # mock the 2nd latest epoch as justifiable, with 3rd as the source
  addMockAttestations(
    state,
@@ -10,37 +10,27 @@
import
  options, unittest,
  chronicles,
  stew/byteutils,
  ./testutil, ./testblockutil,
  ../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
  ../beacon_chain/[beacon_node_types, attestation_pool, block_pool, extras, state_transition, ssz]

template withPool(body: untyped) =
  mixin genState, genBlock

  var
    blockPool {.inject.} = BlockPool.init(makeTestDB(genState, genBlock))
    pool {.inject.} = AttestationPool.init(blockPool)
    state {.inject.} = loadTailState(blockPool)
  # Slot 0 is a finalized slot - won't be making attestations for it..
  process_slots(state.data, state.data.data.slot + 1)

  body

suite "Attestation pool processing" & preset():
when const_preset == "minimal": # Too much stack space used on mainnet
  suite "Attestation pool processing" & preset():
    ## For now just test that we can compile and execute block processing with
    ## mock data.

    # Genesis state that results in 2 members per committee
    let
      genState = initialize_beacon_state_from_eth1(
        Eth2Digest(), 0,
        makeInitialDeposits(SLOTS_PER_EPOCH * 2, {skipValidation}),
        {skipValidation})
      genBlock = get_initial_beacon_block(genState)
    setup:
      # Genesis state that results in 3 members per committee
      var
        blockPool = BlockPool.init(makeTestDB(SLOTS_PER_EPOCH * 3))
        pool = AttestationPool.init(blockPool)
        state = loadTailState(blockPool)
      # Slot 0 is a finalized slot - won't be making attestations for it..
      process_slots(state.data, state.data.data.slot + 1)

    timedTest "Can add and retrieve simple attestation" & preset():
      var cache = get_empty_per_epoch_cache()
      withPool:
      let
        # Create an attestation for slot 1!
        beacon_committee = get_beacon_committee(state.data.data,
@@ -48,7 +38,7 @@ suite "Attestation pool processing" & preset():
        attestation = makeAttestation(
          state.data.data, state.blck.root, beacon_committee[0], cache)

      pool.add(state.data.data, state.blck, attestation)
      pool.add(attestation)

      process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot) # minus 1?

@@ -60,7 +50,6 @@ suite "Attestation pool processing" & preset():

    timedTest "Attestations may arrive in any order" & preset():
      var cache = get_empty_per_epoch_cache()
      withPool:
      let
        # Create an attestation for slot 1!
        bc0 = get_beacon_committee(state.data.data,

@@ -77,8 +66,8 @@ suite "Attestation pool processing" & preset():
          state.data.data, state.blck.root, bc1[0], cache)

      # test reverse order
      pool.add(state.data.data, state.blck, attestation1)
      pool.add(attestation1)
      pool.add(state.data.data, state.blck, attestation0)
      pool.add(attestation0)

      process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot) # minus 1?

@@ -90,7 +79,6 @@ suite "Attestation pool processing" & preset():

    timedTest "Attestations should be combined" & preset():
      var cache = get_empty_per_epoch_cache()
      withPool:
      let
        # Create an attestation for slot 1!
        bc0 = get_beacon_committee(state.data.data,

@@ -100,8 +88,8 @@ suite "Attestation pool processing" & preset():
        attestation1 = makeAttestation(
          state.data.data, state.blck.root, bc0[1], cache)

      pool.add(state.data.data, state.blck, attestation0)
      pool.add(attestation0)
      pool.add(state.data.data, state.blck, attestation1)
      pool.add(attestation1)

      process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot) # minus 1?

@@ -113,7 +101,6 @@ suite "Attestation pool processing" & preset():

    timedTest "Attestations may overlap, bigger first" & preset():
      var cache = get_empty_per_epoch_cache()
      withPool:

      var
        # Create an attestation for slot 1!

@@ -126,8 +113,8 @@ suite "Attestation pool processing" & preset():

      attestation0.combine(attestation1, {skipValidation})

      pool.add(state.data.data, state.blck, attestation0)
      pool.add(attestation0)
      pool.add(state.data.data, state.blck, attestation1)
      pool.add(attestation1)

      process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot) # minus 1?

@@ -139,7 +126,6 @@ suite "Attestation pool processing" & preset():

    timedTest "Attestations may overlap, smaller first" & preset():
      var cache = get_empty_per_epoch_cache()
      withPool:
      var
        # Create an attestation for slot 1!
        bc0 = get_beacon_committee(state.data.data,

@@ -151,8 +137,8 @@ suite "Attestation pool processing" & preset():

      attestation0.combine(attestation1, {skipValidation})

      pool.add(state.data.data, state.blck, attestation1)
      pool.add(attestation1)
      pool.add(state.data.data, state.blck, attestation0)
      pool.add(attestation0)

      process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot) # minus 1?
@@ -161,3 +147,75 @@ suite "Attestation pool processing" & preset():

      check:
        attestations.len == 1

    timedTest "Fork choice returns latest block with no attestations":
      let
        b1 = addBlock(state.data.data, blockPool.tail.root, BeaconBlockBody())
        b1Root = hash_tree_root(b1.message)
        b1Add = blockPool.add(b1Root, b1)
        head = pool.selectHead()

      check:
        head == b1Add

      let
        b2 = addBlock(state.data.data, b1Root, BeaconBlockBody())
        b2Root = hash_tree_root(b2.message)
        b2Add = blockPool.add(b2Root, b2)
        head2 = pool.selectHead()

      check:
        head2 == b2Add

    timedTest "Fork choice returns block with attestation":
      var cache = get_empty_per_epoch_cache()
      let
        b10 = makeBlock(state.data.data, blockPool.tail.root, BeaconBlockBody())
        b10Root = hash_tree_root(b10.message)
        b10Add = blockPool.add(b10Root, b10)
        head = pool.selectHead()

      check:
        head == b10Add

      let
        b11 = makeBlock(state.data.data, blockPool.tail.root, BeaconBlockBody(
          graffiti: Eth2Digest(data: [1'u8, 0, 0, 0 ,0 ,0 ,0 ,0 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        ))
        b11Root = hash_tree_root(b11.message)
        b11Add = blockPool.add(b11Root, b11)

        bc1 = get_beacon_committee(state.data.data,
          state.data.data.slot, 1, cache)
        attestation0 = makeAttestation(
          state.data.data, b10Root, bc1[0], cache)

      pool.add(attestation0)

      let head2 = pool.selectHead()

      check:
        # Single vote for b10 and no votes for b11
        head2 == b10Add

      let
        attestation1 = makeAttestation(
          state.data.data, b11Root, bc1[1], cache)
        attestation2 = makeAttestation(
          state.data.data, b11Root, bc1[2], cache)
      pool.add(attestation1)

      let head3 = pool.selectHead()
      let smaller = if b10Root.data < b11Root.data: b10Add else: b11Add

      check:
        # Ties broken lexicographically
        head3 == smaller

      pool.add(attestation2)

      let head4 = pool.selectHead()

      check:
        # Two votes for b11
        head4 == b11Add
@@ -10,77 +10,140 @@
import
  options, sequtils, unittest, chronicles,
  ./testutil, ./testblockutil,
  ../beacon_chain/spec/[beaconstate, datatypes, digest],
  ../beacon_chain/spec/[datatypes, digest],
  ../beacon_chain/[beacon_node_types, block_pool, beacon_chain_db, extras, ssz]
  ../beacon_chain/[beacon_node_types, block_pool, beacon_chain_db, ssz]

suite "Block pool processing" & preset():
suite "BlockRef and helpers" & preset():
  timedTest "isAncestorOf sanity" & preset():
    let
      genState = initialize_beacon_state_from_eth1(
        Eth2Digest(), 0,
        makeInitialDeposits(flags = {skipValidation}), {skipValidation})
      genBlock = get_initial_beacon_block(genState)
      s0 = BlockRef(slot: Slot(0))
      s1 = BlockRef(slot: Slot(1), parent: s0)
      s2 = BlockRef(slot: Slot(2), parent: s1)

    check:
      s0.isAncestorOf(s0)
      s0.isAncestorOf(s1)
      s0.isAncestorOf(s2)
      s1.isAncestorOf(s1)
      s1.isAncestorOf(s2)

      not s2.isAncestorOf(s0)
      not s2.isAncestorOf(s1)
      not s1.isAncestorOf(s0)

  timedTest "getAncestorAt sanity" & preset():
    let
      s0 = BlockRef(slot: Slot(0))
      s1 = BlockRef(slot: Slot(1), parent: s0)
      s2 = BlockRef(slot: Slot(2), parent: s1)
      s4 = BlockRef(slot: Slot(4), parent: s2)

    check:
      s0.getAncestorAt(Slot(0)) == s0
      s0.getAncestorAt(Slot(1)) == s0

      s1.getAncestorAt(Slot(0)) == s0
      s1.getAncestorAt(Slot(1)) == s1

      s4.getAncestorAt(Slot(0)) == s0
      s4.getAncestorAt(Slot(1)) == s1
      s4.getAncestorAt(Slot(2)) == s2
      s4.getAncestorAt(Slot(3)) == s2
      s4.getAncestorAt(Slot(4)) == s4

suite "BlockSlot and helpers" & preset():
  timedTest "atSlot sanity" & preset():
    let
      s0 = BlockRef(slot: Slot(0))
      s1 = BlockRef(slot: Slot(1), parent: s0)
      s2 = BlockRef(slot: Slot(2), parent: s1)
      s4 = BlockRef(slot: Slot(4), parent: s2)

    check:
      s0.atSlot(Slot(0)).blck == s0
      s0.atSlot(Slot(0)) == s1.atSlot(Slot(0))
      s1.atSlot(Slot(1)).blck == s1

      s4.atSlot(Slot(0)).blck == s0

  timedTest "parent sanity" & preset():
    let
      s0 = BlockRef(slot: Slot(0))
      s00 = BlockSlot(blck: s0, slot: Slot(0))
      s01 = BlockSlot(blck: s0, slot: Slot(1))
      s2 = BlockRef(slot: Slot(2), parent: s0)
      s22 = BlockSlot(blck: s2, slot: Slot(2))
      s24 = BlockSlot(blck: s2, slot: Slot(4))

    check:
      s00.parent == BlockSlot(blck: nil, slot: Slot(0))
      s01.parent == s00
      s22.parent == s01
      s24.parent == BlockSlot(blck: s2, slot: Slot(3))
      s24.parent.parent == s22

when const_preset == "minimal": # Too much stack space used on mainnet
  suite "Block pool processing" & preset():
    setup:
      var
        db = makeTestDB(genState, genBlock)
        db = makeTestDB(SLOTS_PER_EPOCH)
        pool = BlockPool.init(db)
        state = pool.loadTailState()
        state = pool.loadTailState().data.data
        b1 = addBlock(state, pool.tail.root, BeaconBlockBody())
        b1Root = hash_tree_root(b1.message)
        b2 = addBlock(state, b1Root, BeaconBlockBody())
        b2Root = hash_tree_root(b2.message)

    timedTest "getRef returns nil for missing blocks":
      check:
        pool.getRef(default Eth2Digest) == nil

    timedTest "loadTailState gets genesis block on first load" & preset():
      var
      let
        b0 = pool.get(state.blck.root)
        b0 = pool.get(pool.tail.root)

      check:
        state.data.data.slot == GENESIS_SLOT
        b0.isSome()
        toSeq(pool.blockRootsForSlot(GENESIS_SLOT)) == @[state.blck.root]
        toSeq(pool.blockRootsForSlot(GENESIS_SLOT)) == @[pool.tail.root]

    timedTest "Simple block add&get" & preset():
      let
        b1 = makeBlock(state.data.data, state.blck.root, BeaconBlockBody())
        b1Add = pool.add(b1Root, b1)
        b1Root = hash_tree_root(b1.message)
        b1Get = pool.get(b1Root)

      # TODO the return value is ugly here, need to fix and test..
      discard pool.add(state, b1Root, b1)

      let b1Ref = pool.get(b1Root)

      check:
        b1Ref.isSome()
        b1Get.isSome()
        b1Ref.get().refs.root == b1Root
        b1Get.get().refs.root == b1Root
        hash_tree_root(state.data.data) == state.data.root
        b1Add.root == b1Get.get().refs.root

      let
        b2Add = pool.add(b2Root, b2)
        b2Get = pool.get(b2Root)

      check:
        b2Get.isSome()
        b2Get.get().refs.root == b2Root
        b2Add.root == b2Get.get().refs.root

    timedTest "Reverse order block add & get" & preset():
      let
      discard pool.add(b2Root, b2)
        b1 = addBlock(state.data.data, state.blck.root, BeaconBlockBody(), {})
        b1Root = hash_tree_root(b1.message)
        b2 = addBlock(state.data.data, b1Root, BeaconBlockBody(), {})
        b2Root = hash_tree_root(b2.message)

      discard pool.add(state, b2Root, b2)

      check:
        pool.get(b2Root).isNone() # Unresolved, shouldn't show up
        FetchRecord(root: b1Root, historySlots: 1) in pool.checkMissing()

      discard pool.add(state, b1Root, b1)
      discard pool.add(b1Root, b1)

      check: hash_tree_root(state.data.data) == state.data.root

      let
        b1r = pool.get(b1Root)
        b1Get = pool.get(b1Root)
        b2r = pool.get(b2Root)
        b2Get = pool.get(b2Root)

      check:
        b1r.isSome()
        b1Get.isSome()
        b2r.isSome()
        b2Get.isSome()

        b1r.get().refs.children[0] == b2r.get().refs
        b1Get.get().refs.children[0] == b2Get.get().refs
        b2r.get().refs.parent == b1r.get().refs
        b2Get.get().refs.parent == b1Get.get().refs
        toSeq(pool.blockRootsForSlot(b1.message.slot)) == @[b1Root]
        toSeq(pool.blockRootsForSlot(b2.message.slot)) == @[b2Root]
@@ -89,29 +152,31 @@ suite "Block pool processing" & preset():
      # The heads structure should have been updated to contain only the new
      # b2 head
      check:
        pool.heads.mapIt(it.blck) == @[b2r.get().refs]
        pool.heads.mapIt(it.blck) == @[b2Get.get().refs]

      # check that init also reloads block graph
      var
        pool2 = BlockPool.init(db)

      check:
        hash_tree_root(state.data.data) == state.data.root
        pool2.get(b1Root).isSome()
        pool2.get(b2Root).isSome()

    timedTest "isAncestorOf sanity" & preset():
    timedTest "Can add same block twice" & preset():
      let
        a = BlockRef(slot: Slot(1))
        b = BlockRef(slot: Slot(2), parent: a)
        c = BlockRef(slot: Slot(3), parent: b)
        b10 = pool.add(b1Root, b1)
        b11 = pool.add(b1Root, b1)

      check:
        a.isAncestorOf(a)
        b10 == b11
        a.isAncestorOf(b)
        not b10.isNil
        a.isAncestorOf(c)
        b.isAncestorOf(c)

        not c.isAncestorOf(a)
        not c.isAncestorOf(b)
        not b.isAncestorOf(a)
    timedTest "updateHead updates head and headState" & preset():
      let
        b1Add = pool.add(b1Root, b1)

      pool.updateHead(b1Add)

      check:
        pool.head.blck == b1Add
        pool.headState.data.data.slot == b1Add.slot
@@ -140,7 +140,7 @@ suite "Interop":
  timedTest "Interop genesis":
    # Check against https://github.com/protolambda/zcli:
    # zcli keys generate --to 64 | zcli genesis mock --genesis-time 1570500000 > /tmp/state.ssz
    # zcli hash-tree-root /tmp.state.ssz
    # zcli hash-tree-root state /tmp/state.ssz
    var deposits: seq[Deposit]

    for i in 0..<64:

@@ -157,11 +157,10 @@ suite "Interop":

    let expected =
      when const_preset == "minimal":
        "75016055f843b92972d647a849168e8c5f559e8d41e05f94fc3f6a9665d1cabb"
        "5a3bbcae4ab2b4eafded947689fd7bd8214a616ffffd2521befdfe2a3b2f74c0"
      elif const_preset == "mainnet":
        "27e4b5dfc67b97fd7d441c60bd5c92851fc1ceebe22435903183d915b3e4e678"
        "db0a887acd5e201ac579d6cdc0c4932f2a0adf342d84dc5cd11ce959fbce3760"
      else:
        "unimplemented"
    check:
      hash_tree_root(initialState).data.toHex() == expected
      true
@@ -64,7 +64,7 @@ func makeDeposit(i: int, flags: UpdateFlags): Deposit =

  if skipValidation notin flags:
    result.data.signature =
      bls_sign(privkey, hash_tree_root(result.data).data,
      bls_sign(privkey, hash_tree_root(result.getDepositMessage).data,
               domain)

func makeInitialDeposits*(

@@ -164,7 +164,7 @@ proc makeAttestation*(
  doAssert sac_index != -1, "find_beacon_committee should guarantee this"

  var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
  aggregation_bits.raiseBit sac_index
  aggregation_bits.setBit sac_index

  let
    msg = hash_tree_root(data)
@@ -8,8 +8,9 @@
import
  algorithm, strformat, stats, times, std/monotimes, stew/endians2,
  chronicles, eth/trie/[db],
  ../beacon_chain/[beacon_chain_db, block_pool, ssz, beacon_node_types],
  ../beacon_chain/[beacon_chain_db, block_pool, extras, ssz, beacon_node_types],
  ../beacon_chain/spec/datatypes
  ../beacon_chain/spec/[digest, beaconstate, datatypes],
  testblockutil

type
  TestDuration = tuple[duration: float, label: string]

@@ -75,4 +76,12 @@ proc makeTestDB*(tailState: BeaconState, tailBlock: SignedBeaconBlock): BeaconCh
  result = init(BeaconChainDB, newMemoryDB())
  BlockPool.preInit(result, tailState, tailBlock)

proc makeTestDB*(validators: int): BeaconChainDB =
  let
    genState = initialize_beacon_state_from_eth1(
      Eth2Digest(), 0,
      makeInitialDeposits(validators, flags = {skipValidation}), {skipValidation})
    genBlock = get_initial_beacon_block(genState)
  makeTestDB(genState, genBlock)

export inMicroseconds
@@ -1 +1 @@
Subproject commit e9d75c05f62a7a9628b28b822b5190a6682e2a7e
Subproject commit 1edeabb453724aa452a940ab03ddc82f5aeeff6e