Mirror of https://github.com/status-im/nimbus-eth2.git

Merge pull request #645 from status-im/devel
Testnet0 release 2019-12-16

Commit 9050db7b95
Makefile (2 changed lines)
@@ -16,11 +16,11 @@ BUILD_SYSTEM_DIR := vendor/nimbus-build-system
# unconditionally built by the default Make target
TOOLS := \
	beacon_node \
	inspector \
	bench_bls_sig_agggregation \
	deposit_contract \
	ncli_hash_tree_root \
	ncli_pretty \
	ncli_signing_root \
	ncli_transition \
	process_dashboard
TOOLS_DIRS := \
README.md (20 changed lines)
@@ -36,6 +36,7 @@ You can check where the beacon chain fits in the Ethereum ecosystem our Two-Poin
 - [State transition simulation](#state-transition-simulation)
 - [Local network simulation](#local-network-simulation)
 - [Visualising simulation metrics](#visualising-simulation-metrics)
+- [Network inspection](#network-inspection)
 - [For developers](#for-developers)
   - [Windows dev environment](#windows-dev-environment)
   - [Linux, MacOS](#linux-macos)
@@ -182,6 +183,25 @@ The dashboard you need to import in Grafana is "tests/simulation/beacon-chain-si

 ![monitoring dashboard](./media/monitoring.png)

+### Network inspection
+
+The [inspector tool](./beacon_chain/inspector.nim) can help monitor the libp2p network and the various channels where blocks and attestations are being transmitted, showing message and connectivity metadata. By default, it will monitor all ethereum 2 gossip traffic.
+
+```bash
+. ./env.sh
+# Build inspector for minimal config:
+./env.sh nim c -d:const_preset=minimal -o:build/inspector_minimal beacon_chain/inspector.nim
+
+# Build inspector for mainnet config:
+./env.sh nim c -d:const_preset=mainnet -o:build/inspector_mainnet beacon_chain/inspector.nim
+
+# See available options
+./env.sh build/inspector_minimal --help
+
+# Connect to a network from the eth2 testnet repo bootstrap file; --decode also attempts to decode the messages
+./env.sh build/inspector_minimal --decode -b:$(curl -s https://raw.githubusercontent.com/eth2-clients/eth2-testnets/master/nimbus/testnet0/bootstrap_nodes.txt | head -n1)
+```
+
 ## For developers

 Latest updates happen in the `devel` branch, which is merged into `master` every Tuesday before deploying new testnets
beacon_chain/attestation_aggregation.nim (new file, 86 lines)
@@ -0,0 +1,86 @@
+# beacon_chain
+# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Licensed and distributed under either of
+#   * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
+#   * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+# Have an aggregated aggregation ready for broadcast at
+# SECONDS_PER_SLOT * 2 / 3, i.e. 2/3 through relevant slot
+# intervals.
+#
+# The other part is arguably part of attestation pool -- the validation's
+# something that should be happening on receipt, not aggregation per se. In
+# that part, check that messages conform -- so, check for each type
+# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/networking/p2p-interface.md#topics-and-messages
+# specifies. So by the time this calls attestation pool, all validation's
+# already done.
+#
+# Finally, some of the filtering's libp2p stuff. Consistency checks between
+# topic/message types and GOSSIP_MAX_SIZE -- mostly doesn't belong here, so
+# while TODO, isn't TODO for this module.
+
+import
+  options,
+  ./spec/[beaconstate, datatypes, crypto, digest, helpers, validator],
+  ./attestation_pool, ./beacon_node_types, ./ssz
+
+# TODO gossipsub validation lives somewhere, maybe here
+# TODO add tests, especially for validation
+# https://github.com/status-im/nim-beacon-chain/issues/122#issuecomment-562479965
+
+const
+  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/networking/p2p-interface.md#configuration
+  ATTESTATION_PROPAGATION_SLOT_RANGE = 32
+
+# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#aggregation-selection
+func get_slot_signature(state: BeaconState, slot: Slot, privkey: ValidatorPrivKey):
+    ValidatorSig =
+  let domain =
+    get_domain(state, DOMAIN_BEACON_ATTESTER, compute_epoch_at_slot(slot))
+  bls_sign(privkey, hash_tree_root(slot).data, domain)
+
+# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#aggregation-selection
+func is_aggregator(state: BeaconState, slot: Slot, index: uint64,
+    slot_signature: ValidatorSig): bool =
+  # TODO index is a CommitteeIndex, aka uint64
+  var cache = get_empty_per_epoch_cache()
+
+  let
+    committee = get_beacon_committee(state, slot, index, cache)
+    modulo = max(1, len(committee) div TARGET_AGGREGATORS_PER_COMMITTEE).uint64
+  bytes_to_int(eth2hash(slot_signature.getBytes).data[0..7]) mod modulo == 0
+
+proc aggregate_attestations*(
+    pool: AttestationPool, state: BeaconState, index: uint64,
+    privkey: ValidatorPrivKey): Option[AggregateAndProof] =
+  # TODO alias CommitteeIndex to actual type then convert various uint64's here
+
+  let
+    slot = state.slot - 2
+    slot_signature = get_slot_signature(state, slot, privkey)
+
+  if slot < 0:
+    return none(AggregateAndProof)
+  doAssert slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= state.slot
+  doAssert state.slot >= slot
+
+  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#aggregation-selection
+  if not is_aggregator(state, slot, index, slot_signature):
+    return none(AggregateAndProof)
+
+  let attestation_data =
+    makeAttestationData(state, slot, index, get_block_root_at_slot(state, slot))
+
+  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#construct-aggregate
+  for attestation in getAttestationsForBlock(pool, state, slot):
+    if attestation.data == attestation_data:
+      # https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#aggregateandproof
+      return some(AggregateAndProof(
+        aggregator_index: index,
+        aggregate: attestation,
+        selection_proof: slot_signature))
+
+  # TODO in catch-up mode, we could get here, so probably shouldn't assert
+  doAssert false
+  none(AggregateAndProof)
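Note: `is_aggregator` above implements the spec's aggregation-selection rule: hash the slot signature and test the first eight bytes, read as a little-endian integer, against `max(1, len(committee) div TARGET_AGGREGATORS_PER_COMMITTEE)`. A self-contained sketch, assuming nimcrypto's `sha256` stands in for `eth2hash` and the spec value `TARGET_AGGREGATORS_PER_COMMITTEE = 16`:

```nim
import nimcrypto/[hash, sha2]

func bytesToUint64LE(b: openArray[byte]): uint64 =
  ## Little-endian decode of the first 8 bytes, as the spec's bytes_to_int does.
  for i in 0 ..< 8:
    result = result or (uint64(b[i]) shl (8 * i))

proc isAggregatorSketch(slotSignature: openArray[byte], committeeLen: int): bool =
  const TARGET_AGGREGATORS_PER_COMMITTEE = 16  # spec v0.9.2 value (assumption)
  let modulo = max(1, committeeLen div TARGET_AGGREGATORS_PER_COMMITTEE).uint64
  let digest = sha256.digest(slotSignature)    # stand-in for eth2hash
  bytesToUint64LE(digest.data) mod modulo == 0

# With a 128-strong committee, roughly 1 in 8 signatures selects an aggregator.
echo isAggregatorSketch(@[byte 1, 2, 3], committeeLen = 128)
```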
beacon_chain/beacon_chain_db.nim

@@ -37,7 +37,7 @@ func subkey(kind: DbKeyKind, key: uint64): array[sizeof(key) + 1, byte] =
 func subkey(kind: type BeaconState, key: Eth2Digest): auto =
   subkey(kHashToState, key.data)

-func subkey(kind: type BeaconBlock, key: Eth2Digest): auto =
+func subkey(kind: type SignedBeaconBlock, key: Eth2Digest): auto =
   subkey(kHashToBlock, key.data)

 func subkey(root: Eth2Digest, slot: Slot): auto =
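Note: the `subkey` overloads touched here all share one on-disk layout: a one-byte table discriminator followed by the 32-byte root. A minimal sketch, with a hypothetical `DbKeyKind` mirroring the `kHashTo*` names above:

```nim
type DbKeyKind = enum
  kHashToState, kHashToBlock  # one-byte table discriminator (names per the diff)

func subkeySketch(kind: DbKeyKind, key: array[32, byte]): array[33, byte] =
  result[0] = byte(ord(kind))  # table prefix selects state vs block namespace
  for i in 0 ..< 32:
    result[i + 1] = key[i]     # followed by the 32-byte root
```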
@@ -64,7 +64,7 @@ func subkey(root: Eth2Digest, slot: Slot): auto =
 proc init*(T: type BeaconChainDB, backend: TrieDatabaseRef): BeaconChainDB =
   T(backend: backend)

-proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: BeaconBlock) =
+proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: SignedBeaconBlock) =
   db.backend.put(subkey(type value, key), SSZ.encode(value))

 proc putHead*(db: BeaconChainDB, key: Eth2Digest) =
@@ -83,11 +83,11 @@ proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
     value: Eth2Digest) =
   db.backend.put(subkey(root, slot), value.data)

-proc putBlock*(db: BeaconChainDB, value: BeaconBlock) =
-  db.putBlock(signing_root(value), value)
+proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) =
+  db.putBlock(hash_tree_root(value.message), value)

 proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
-  db.backend.del(subkey(BeaconBlock, key))
+  db.backend.del(subkey(SignedBeaconBlock, key))

 proc delState*(db: BeaconChainDB, key: Eth2Digest) =
   db.backend.del(subkey(BeaconState, key))
@@ -108,10 +108,10 @@ proc get(db: BeaconChainDB, key: auto, T: typedesc): Option[T] =
   else:
     none(T)

-proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Option[BeaconBlock] =
-  db.get(subkey(BeaconBlock, key), BeaconBlock)
+proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Option[SignedBeaconBlock] =
+  db.get(subkey(SignedBeaconBlock, key), SignedBeaconBlock)

-proc getBlock*(db: BeaconChainDB, slot: Slot): Option[BeaconBlock] =
+proc getBlock*(db: BeaconChainDB, slot: Slot): Option[SignedBeaconBlock] =
   # TODO implement this
   discard

@@ -130,14 +130,14 @@ proc getTailBlock*(db: BeaconChainDB): Option[Eth2Digest] =

 proc containsBlock*(
     db: BeaconChainDB, key: Eth2Digest): bool =
-  db.backend.contains(subkey(BeaconBlock, key))
+  db.backend.contains(subkey(SignedBeaconBlock, key))

 proc containsState*(
     db: BeaconChainDB, key: Eth2Digest): bool =
   db.backend.contains(subkey(BeaconState, key))

 iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
-    tuple[root: Eth2Digest, blck: BeaconBlock] =
+    tuple[root: Eth2Digest, blck: SignedBeaconBlock] =
   ## Load a chain of ancestors for blck - returns a list of blocks with the
   ## oldest block last (blck will be at result[0]).
   ##
@@ -147,4 +147,4 @@ iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
   while (let blck = db.getBlock(root); blck.isSome()):
     yield (root, blck.get())

-    root = blck.get().parent_root
+    root = blck.get().message.parent_root
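Note: every change in this file follows one spec v0.9.2 refactor: the BLS signature moved out of the block into a wrapper, and a block is now identified by `hash_tree_root` of its `message` rather than by `signing_root`. An abridged sketch of the shape (hypothetical mini-types, not the full spec definitions):

```nim
# Abridged sketch of the v0.9.2 envelope driving the BeaconBlock ->
# SignedBeaconBlock migration; the real types carry many more fields.
type
  MiniBeaconBlock = object
    slot: uint64
    parent_root: array[32, byte]
    state_root: array[32, byte]

  MiniSignedBeaconBlock = object
    message: MiniBeaconBlock    # block identity: hash_tree_root(message)
    signature: array[96, byte]  # BLS signature now lives outside the block
```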
beacon_chain/beacon_node.nim

@@ -1,9 +1,9 @@
 import
   # Standard library
-  os, net, tables, random, strutils, times, memfiles,
+  os, net, tables, random, strutils, times,

   # Nimble packages
-  stew/[objects, bitseqs], stew/ranges/ptr_arith,
+  stew/[objects, bitseqs, byteutils], stew/ranges/ptr_arith,
   chronos, chronicles, confutils, metrics,
   json_serialization/std/[options, sets], serialization/errors,
   eth/trie/db, eth/trie/backends/rocksdb_backend, eth/async_utils,
@@ -16,7 +16,6 @@ import
   sync_protocol, request_manager, validator_keygen, interop, statusbar

 const
-  dataDirValidators = "validators"
   genesisFile = "genesis.ssz"
   hasPrompt = not defined(withoutPrompt)
   maxEmptySlotCount = uint64(24*60*60) div SECONDS_PER_SLOT
@@ -69,16 +68,11 @@ type
     ## state replaying.
     # TODO Something smarter, so we don't need to keep two full copies, wasteful

-proc onBeaconBlock*(node: BeaconNode, blck: BeaconBlock) {.gcsafe.}
-
-func localValidatorsDir(conf: BeaconNodeConf): string =
-  conf.dataDir / "validators"
-
-func databaseDir(conf: BeaconNodeConf): string =
-  conf.dataDir / "db"
+proc onBeaconBlock*(node: BeaconNode, blck: SignedBeaconBlock) {.gcsafe.}
+proc updateHead(node: BeaconNode): BlockRef

 proc saveValidatorKey(keyName, key: string, conf: BeaconNodeConf) =
-  let validatorsDir = conf.dataDir / dataDirValidators
+  let validatorsDir = conf.localValidatorsDir
   let outputFile = validatorsDir / keyName
   createDir validatorsDir
   writeFile(outputFile, key)
@@ -221,9 +215,28 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
   # TODO sync is called when a remote peer is connected - is that the right
   # time to do so?
   let sync = result.network.protocolState(BeaconSync)
+  let node = result
   sync.init(
     result.blockPool, result.forkVersion,
-    proc(blck: BeaconBlock) = onBeaconBlock(result, blck))
+    proc(signedBlock: SignedBeaconBlock) =
+      if signedBlock.message.slot mod SLOTS_PER_EPOCH == 0:
+        # TODO this is a hack to make sure that lmd ghost is run regularly
+        #      while syncing blocks - it's poor form to keep it here though -
+        #      the logic should be moved elsewhere
+        # TODO why only when syncing? well, because the way the code is written
+        #      we require a connection to a boot node to start, and that boot
+        #      node will start syncing as part of connection setup - it looks
+        #      like it needs to finish syncing before the slot timer starts
+        #      ticking which is a problem: all the synced blocks will be added
+        #      to the block pool without any periodic head updates while this
+        #      process is ongoing (during a blank start for example), which
+        #      leads to an unhealthy buildup of blocks in the non-finalized part
+        #      of the block pool
+        # TODO is it a problem that someone sending us a block can force
+        #      a potentially expensive head resolution?
+        discard node.updateHead()
+
+      onBeaconBlock(result, signedBlock))

   result.stateCache = result.blockPool.loadTailState()
   result.justifiedStateCache = result.stateCache
@@ -283,17 +296,8 @@ proc addLocalValidator(
   node.attachedValidators.addLocalValidator(pubKey, privKey)

 proc addLocalValidators(node: BeaconNode, state: BeaconState) =
-  for validatorKeyFile in node.config.validators:
-    node.addLocalValidator state, validatorKeyFile.load
-
-  for kind, file in walkDir(node.config.localValidatorsDir):
-    if kind in {pcFile, pcLinkToFile}:
-      if cmpIgnoreCase(".privkey", splitFile(file).ext) == 0:
-        try:
-          let keyText = ValidatorPrivKey.init(readFile(file).string)
-          node.addLocalValidator state, keyText
-        except CatchableError:
-          warn "Failed to load a validator private key", file
+  for validatorKey in node.config.validatorKeys:
+    node.addLocalValidator state, validatorKey

   info "Local validators attached ", count = node.attachedValidators.count

@@ -330,7 +334,7 @@ proc isSynced(node: BeaconNode, head: BlockRef): bool =
   else:
     true

-proc updateHead(node: BeaconNode, slot: Slot): BlockRef =
+proc updateHead(node: BeaconNode): BlockRef =
   # Use head state for attestation resolution below

   # Check pending attestations - maybe we found some blocks for them
@@ -346,7 +350,6 @@ proc updateHead(node: BeaconNode, slot: Slot): BlockRef =
       lmdGhost(node.attestationPool, state, justifiedHead.blck)

   node.blockPool.updateHead(node.stateCache, newHead)
-  beacon_head_slot.set slot.int64
   beacon_head_root.set newHead.root.toGaugeValue

   newHead
@@ -449,21 +452,20 @@ proc proposeBlock(node: BeaconNode,
       deposits: deposits)

   var
-    newBlock = BeaconBlock(
-      slot: slot,
-      parent_root: head.root,
-      body: blockBody,
-      # TODO: This shouldn't be necessary if OpaqueBlob is the default
-      signature: ValidatorSig(kind: OpaqueBlob))
+    newBlock = SignedBeaconBlock(
+      message: BeaconBlock(
+        slot: slot,
+        parent_root: head.root,
+        body: blockBody))
     tmpState = hashedState
-  discard state_transition(tmpState, newBlock, {skipValidation})
+  discard state_transition(tmpState, newBlock.message, {skipValidation})
   # TODO only enable in fast-fail debugging situations
   # otherwise, bad attestations can bring down network
   # doAssert ok # TODO: err, could this fail somehow?

-  newBlock.state_root = tmpState.root
+  newBlock.message.state_root = tmpState.root

-  let blockRoot = signing_root(newBlock)
+  let blockRoot = hash_tree_root(newBlock.message)

   # Careful, state no longer valid after here..
   # We use the fork from the pre-newBlock state which should be fine because
@@ -477,20 +479,20 @@ proc proposeBlock(node: BeaconNode,
   let newBlockRef = node.blockPool.add(node.stateCache, nroot, nblck)
   if newBlockRef == nil:
     warn "Unable to add proposed block to block pool",
-      newBlock = shortLog(newBlock),
+      newBlock = shortLog(newBlock.message),
       blockRoot = shortLog(blockRoot),
       cat = "bug"
     return head

   info "Block proposed",
-    blck = shortLog(newBlock),
+    blck = shortLog(newBlock.message),
     blockRoot = shortLog(newBlockRef.root),
     validator = shortLog(validator),
     cat = "consensus"

   if node.config.dump:
     SSZ.saveFile(
-      node.config.dumpDir / "block-" & $newBlock.slot & "-" &
+      node.config.dumpDir / "block-" & $newBlock.message.slot & "-" &
       shortLog(newBlockRef.root) & ".ssz", newBlock)
     SSZ.saveFile(
       node.config.dumpDir / "state-" & $tmpState.data.slot & "-" &
@@ -545,12 +547,12 @@ proc onAttestation(node: BeaconNode, attestation: Attestation) =
   else:
     node.attestationPool.addUnresolved(attestation)

-proc onBeaconBlock(node: BeaconNode, blck: BeaconBlock) =
+proc onBeaconBlock(node: BeaconNode, blck: SignedBeaconBlock) =
   # We received a block but don't know much about it yet - in particular, we
   # don't know if it's part of the chain we're currently building.
-  let blockRoot = signing_root(blck)
+  let blockRoot = hash_tree_root(blck.message)
   debug "Block received",
-    blck = shortLog(blck),
+    blck = shortLog(blck.message),
     blockRoot = shortLog(blockRoot),
     cat = "block_listener",
     pcs = "receive_block"
@@ -567,8 +569,8 @@ proc onBeaconBlock(node: BeaconNode, blck: BeaconBlock) =
   # TODO shouldn't add attestations if the block turns out to be invalid..
   let currentSlot = node.beaconClock.now.toSlot
   if currentSlot.afterGenesis and
-      blck.slot.epoch + 1 >= currentSlot.slot.epoch:
-    for attestation in blck.body.attestations:
+      blck.message.slot.epoch + 1 >= currentSlot.slot.epoch:
+    for attestation in blck.message.body.attestations:
       node.onAttestation(attestation)

 proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
@@ -743,7 +745,11 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
   #      updates and is stable across some epoch transitions as well - see how
   #      we can avoid recalculating everything here

-  var head = node.updateHead(slot)
+  var head = node.updateHead()
+
+  # TODO is the slot of the clock or the head block more interesting? provide
+  #      rationale in comment
+  beacon_head_slot.set slot.int64

   # TODO if the head is very old, that is indicative of something being very
   #      wrong - us being out of sync or disconnected from the network - need
@@ -805,14 +811,20 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
   #      the work for the whole slot using a monotonic clock instead, then deal
   #      with any clock discrepancies once only, at the start of slot timer
   #      processing..
+
+  # https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#attesting
+  # A validator should create and broadcast the attestation to the
+  # associated attestation subnet one-third of the way through the slot
+  # during which the validator is assigned―that is, SECONDS_PER_SLOT / 3
+  # seconds after the start of slot.
   let
     attestationStart = node.beaconClock.fromNow(slot)
-    halfSlot = seconds(int64(SECONDS_PER_SLOT div 2))
+    thirdSlot = seconds(int64(SECONDS_PER_SLOT)) div 3

-  if attestationStart.inFuture or attestationStart.offset <= halfSlot:
+  if attestationStart.inFuture or attestationStart.offset <= thirdSlot:
     let fromNow =
-      if attestationStart.inFuture: attestationStart.offset + halfSlot
-      else: halfSlot - attestationStart.offset
+      if attestationStart.inFuture: attestationStart.offset + thirdSlot
+      else: thirdSlot - attestationStart.offset

     trace "Waiting to send attestations",
       slot = shortLog(slot),
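Note: a quick arithmetic check of the halfSlot to thirdSlot change, assuming the mainnet preset SECONDS_PER_SLOT = 12 of this era:

```nim
# Attestation send offset moves from 1/2 to 1/3 of a slot.
const SECONDS_PER_SLOT = 12'i64        # mainnet preset value (assumption)
doAssert SECONDS_PER_SLOT div 2 == 6   # old halfSlot offset, in seconds
doAssert SECONDS_PER_SLOT div 3 == 4   # new thirdSlot offset, per spec v0.9.2
```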
@@ -822,7 +834,7 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
     await sleepAsync(fromNow)

     # Time passed - we might need to select a new head in that case
-    head = node.updateHead(slot)
+    head = node.updateHead()

   handleAttestations(node, head, slot)

@@ -839,7 +851,7 @@ proc handleMissingBlocks(node: BeaconNode) =
     var left = missingBlocks.len

     info "Requesting detected missing blocks", missingBlocks
-    node.requestManager.fetchAncestorBlocks(missingBlocks) do (b: BeaconBlock):
+    node.requestManager.fetchAncestorBlocks(missingBlocks) do (b: SignedBeaconBlock):
       onBeaconBlock(node, b)

       # TODO instead of waiting for a full second to try the next missing block
@@ -861,8 +873,8 @@ proc onSecond(node: BeaconNode, moment: Moment) {.async.} =
   asyncCheck node.onSecond(nextSecond)

 proc run*(node: BeaconNode) =
-  waitFor node.network.subscribe(topicBeaconBlocks) do (blck: BeaconBlock):
-    onBeaconBlock(node, blck)
+  waitFor node.network.subscribe(topicBeaconBlocks) do (signedBlock: SignedBeaconBlock):
+    onBeaconBlock(node, signedBlock)

   waitFor node.network.subscribe(topicAttestations) do (attestation: Attestation):
     # Avoid double-counting attestation-topic attestations on shared codepath
@@ -1058,10 +1070,6 @@ when hasPrompt:
   #   var t: Thread[ptr Prompt]
   #   createThread(t, processPromptCommands, addr p)

-template bytes(memFile: MemFile): untyped =
-  let f = memFile
-  makeOpenArray(f.mem, byte, f.size)
-
 when isMainModule:
   randomize()

@@ -1191,14 +1199,14 @@ when isMainModule:

   of QueryCmd.get:
     let pathFragments = config.getQueryPath.split('/', maxsplit = 1)
-    var navigator: DynamicSszNavigator
-
-    case pathFragments[0]
-    of "genesis_state":
-      var genesisMapFile = memfiles.open(config.dataDir/genesisFile)
-      navigator = DynamicSszNavigator.init(genesisMapFile.bytes, BeaconState)
-    else:
-      stderr.write config.getQueryPath & " is not a valid path"
-      quit 1
+    let bytes =
+      case pathFragments[0]
+      of "genesis_state":
+        readFile(config.dataDir/genesisFile).string.toBytes()
+      else:
+        stderr.write config.getQueryPath & " is not a valid path"
+        quit 1
+
+    let navigator = DynamicSszNavigator.init(bytes, BeaconState)

     echo navigator.navigatePath(pathFragments[1 .. ^1]).toJson
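Note: the `maxsplit = 1` in the query handler above keeps the remainder of the path intact for the SSZ navigator; only the first `/` splits:

```nim
import std/strutils

doAssert "genesis_state/validators/0/pubkey".split('/', maxsplit = 1) ==
  @["genesis_state", "validators/0/pubkey"]
```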
beacon_chain/beacon_node_types.nim

@@ -106,7 +106,7 @@ type
     ## TODO evaluate the split of responsibilities between the two
     ## TODO prune the graph as tail moves

-    pending*: Table[Eth2Digest, BeaconBlock] ##\
+    pending*: Table[Eth2Digest, SignedBeaconBlock] ##\
     ## Blocks that have passed validation but that we lack a link back to tail
     ## for - when we receive a "missing link", we can use this data to build
     ## an entire branch
@@ -161,7 +161,7 @@ type
   BlockData* = object
     ## Body and graph in one

-    data*: BeaconBlock
+    data*: SignedBeaconBlock
     refs*: BlockRef

   StateData* = object
beacon_chain/block_pool.nim

@@ -22,6 +22,24 @@ func link(parent, child: BlockRef) =
   child.parent = parent
   parent.children.add(child)

+func isAncestorOf*(a, b: BlockRef): bool =
+  var b = b
+  var depth = 0
+  const maxDepth = (100'i64 * 365 * 24 * 60 * 60 div SECONDS_PER_SLOT.int)
+  while true:
+    if a == b: return true
+
+    # for now, use an assert for block chain length since a chain this long
+    # indicates a circular reference here..
+    doAssert depth < maxDepth
+    depth += 1
+
+    if a.slot >= b.slot or b.parent.isNil:
+      return false
+
+    doAssert b.slot > b.parent.slot
+    b = b.parent
+
 func init*(T: type BlockRef, root: Eth2Digest, slot: Slot): BlockRef =
   BlockRef(
     root: root,
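Note: the `maxDepth` bound in `isAncestorOf` above caps chain walks at roughly a century of slots; assuming the minimal-preset SECONDS_PER_SLOT = 6 of this era:

```nim
# Back-of-the-envelope check on the maxDepth bound used by isAncestorOf.
const SECONDS_PER_SLOT = 6'i64  # minimal-preset value (assumption)
const maxDepth = 100'i64 * 365 * 24 * 60 * 60 div SECONDS_PER_SLOT
doAssert maxDepth == 525_600_000  # ~100 years of slots
```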
@@ -55,7 +73,7 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
   let
     tailRoot = tailBlockRoot.get()
     tailBlock = db.getBlock(tailRoot).get()
-    tailRef = BlockRef.init(tailRoot, tailBlock)
+    tailRef = BlockRef.init(tailRoot, tailBlock.message)
     headRoot = headBlockRoot.get()

   var
@@ -73,7 +91,7 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
         curRef = curRef.parent
       break

-    let newRef = BlockRef.init(root, blck)
+    let newRef = BlockRef.init(root, blck.message)
     if curRef == nil:
       curRef = newRef
       headRef = newRef
@@ -83,8 +101,8 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
     blocks[curRef.root] = curRef
     trace "Populating block pool", key = curRef.root, val = curRef

-    if latestStateRoot.isNone() and db.containsState(blck.state_root):
-      latestStateRoot = some(blck.state_root)
+    if latestStateRoot.isNone() and db.containsState(blck.message.state_root):
+      latestStateRoot = some(blck.message.state_root)

   doAssert curRef == tailRef,
     "head block does not lead to tail, database corrupt?"
@@ -93,7 +111,7 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =

   var blocksBySlot = initTable[Slot, seq[BlockRef]]()
   for _, b in tables.pairs(blocks):
-    let slot = db.getBlock(b.root).get().slot
+    let slot = db.getBlock(b.root).get().message.slot
     blocksBySlot.mgetOrPut(slot, @[]).add(b)

   let
@@ -103,7 +121,7 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
     if latestStateRoot.isSome():
       latestStateRoot.get()
     else:
-      db.getBlock(tailRef.root).get().state_root
+      db.getBlock(tailRef.root).get().message.state_root

   # TODO right now, because we save a state at every epoch, this *should*
   #      be the latest justified state or newer, meaning it's enough for
@@ -126,7 +144,7 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
     totalBlocks = blocks.len, totalKnownSlots = blocksBySlot.len

   BlockPool(
-    pending: initTable[Eth2Digest, BeaconBlock](),
+    pending: initTable[Eth2Digest, SignedBeaconBlock](),
     missing: initTable[Eth2Digest, MissingBlock](),
     blocks: blocks,
     blocksBySlot: blocksBySlot,
@@ -158,14 +176,14 @@ proc updateStateData*(

 proc add*(
     pool: var BlockPool, state: var StateData, blockRoot: Eth2Digest,
-    blck: BeaconBlock): BlockRef {.gcsafe.}
+    signedBlock: SignedBeaconBlock): BlockRef {.gcsafe.}

 proc addResolvedBlock(
     pool: var BlockPool, state: var StateData, blockRoot: Eth2Digest,
-    blck: BeaconBlock, parent: BlockRef): BlockRef =
+    signedBlock: SignedBeaconBlock, parent: BlockRef): BlockRef =
   logScope: pcs = "block_resolution"

-  let blockRef = BlockRef.init(blockRoot, blck)
+  let blockRef = BlockRef.init(blockRoot, signedBlock.message)
   link(parent, blockRef)

   pool.blocks[blockRoot] = blockRef
@@ -174,7 +192,7 @@ proc addResolvedBlock(
   pool.addSlotMapping(blockRef)

   # Resolved blocks should be stored in database
-  pool.db.putBlock(blockRoot, blck)
+  pool.db.putBlock(blockRoot, signedBlock)

   # TODO this is a bit ugly - we update state.data outside of this function then
   #      set the rest here - need a blockRef to update it. Clean this up -
@@ -190,10 +208,12 @@ proc addResolvedBlock(

   var foundHead: Option[Head]
   for head in pool.heads.mitems():
-    if head.blck.root == blck.parent_root:
+    if head.blck.isAncestorOf(blockRef):
+      if head.justified.slot != justifiedSlot:
+        head.justified = blockRef.findAncestorBySlot(justifiedSlot)
+
       head.blck = blockRef

       foundHead = some(head)
       break

@@ -204,7 +224,7 @@ proc addResolvedBlock(
     pool.heads.add(foundHead.get())

   info "Block resolved",
-    blck = shortLog(blck),
+    blck = shortLog(signedBlock.message),
     blockRoot = shortLog(blockRoot),
     justifiedRoot = shortLog(foundHead.get().justified.blck.root),
     justifiedSlot = shortLog(foundHead.get().justified.slot),
@@ -233,12 +253,13 @@ proc addResolvedBlock(

 proc add*(
     pool: var BlockPool, state: var StateData, blockRoot: Eth2Digest,
-    blck: BeaconBlock): BlockRef {.gcsafe.} =
+    signedBlock: SignedBeaconBlock): BlockRef {.gcsafe.} =
   ## return the block, if resolved...
   ## the state parameter may be updated to include the given block, if
   ## everything checks out
   # TODO reevaluate passing the state in like this
-  doAssert blockRoot == signing_root(blck)
+  let blck = signedBlock.message
+  doAssert blockRoot == hash_tree_root(blck)

   logScope: pcs = "block_addition"

@@ -289,9 +310,13 @@ proc add*(

       return

-    return pool.addResolvedBlock(state, blockRoot, blck, parent)
+    return pool.addResolvedBlock(state, blockRoot, signedBlock, parent)

-  pool.pending[blockRoot] = blck
+  # TODO already checked hash though? main reason to keep this is because
+  #      the pending pool calls this function back later in a loop, so as long
+  #      as pool.add(...) requires a SignedBeaconBlock, easier to keep them in
+  #      pending too.
+  pool.pending[blockRoot] = signedBlock

   # TODO possibly, it makes sense to check the database - that would allow sync
   #      to simply fill up the database with random blocks the other clients
@@ -593,7 +618,7 @@ proc updateStateData*(pool: BlockPool, state: var StateData, bs: BlockSlot) =
   #      applied
   for i in countdown(ancestors.len - 2, 0):
     let ok =
-      skipAndUpdateState(state.data, ancestors[i].data, {skipValidation}) do(
+      skipAndUpdateState(state.data, ancestors[i].data.message, {skipValidation}) do(
        state: HashedBeaconState):
       pool.maybePutState(state, ancestors[i].refs)
     doAssert ok, "Blocks in database should never fail to apply.."
@@ -606,7 +631,7 @@ proc updateStateData*(pool: BlockPool, state: var StateData, bs: BlockSlot) =

 proc loadTailState*(pool: BlockPool): StateData =
   ## Load the state associated with the current tail in the pool
-  let stateRoot = pool.db.getBlock(pool.tail.root).get().state_root
+  let stateRoot = pool.db.getBlock(pool.tail.root).get().message.state_root
   StateData(
     data: HashedBeaconState(
       data: pool.db.getState(stateRoot).get(),
@@ -614,27 +639,9 @@ proc loadTailState*(pool: BlockPool): StateData =
     blck: pool.tail
   )

-func isAncestorOf*(a, b: BlockRef): bool =
-  var b = b
-  var depth = 0
-  const maxDepth = (100'i64 * 365 * 24 * 60 * 60 div SECONDS_PER_SLOT.int)
-  while true:
-    if a == b: return true
-
-    # for now, use an assert for block chain length since a chain this long
-    # indicates a circular reference here..
-    doAssert depth < maxDepth
-    depth += 1
-
-    if a.slot >= b.slot or b.parent.isNil:
-      return false
-
-    doAssert b.slot > b.parent.slot
-    b = b.parent
-
 proc delBlockAndState(pool: BlockPool, blockRoot: Eth2Digest) =
   if (let blk = pool.db.getBlock(blockRoot); blk.isSome):
-    pool.db.delState(blk.get.stateRoot)
+    pool.db.delState(blk.get.message.stateRoot)
     pool.db.delBlock(blockRoot)

 proc delFinalizedStateIfNeeded(pool: BlockPool, b: BlockRef) =
@@ -644,7 +651,7 @@ proc delFinalizedStateIfNeeded(pool: BlockPool, b: BlockRef) =
   #      so we don't need any of the finalized states, and thus remove all of them
   #      (except the most recent)
   if (let blk = pool.db.getBlock(b.root); blk.isSome):
-    pool.db.delState(blk.get.stateRoot)
+    pool.db.delState(blk.get.message.stateRoot)

 proc setTailBlock(pool: BlockPool, newTail: BlockRef) =
   ## Advance tail block, pruning all the states and blocks with older slots
@@ -713,7 +720,8 @@ proc updateHead*(pool: BlockPool, state: var StateData, blck: BlockRef) =

       # A reasonable criterion for "reorganizations of the chain"
       # TODO if multiple heads have gotten skipped, could fire at
-      #      spurious times
+      #      spurious times - for example when multiple blocks have been added between
+      #      head updates
       beacon_reorgs_total.inc()
     else:
       info "Updated head block",
@@ -768,8 +776,9 @@ proc updateHead*(pool: BlockPool, state: var StateData, blck: BlockRef) =
   let hlen = pool.heads.len
   for i in 0..<hlen:
     let n = hlen - i - 1
-    if pool.heads[n].blck.slot < pool.finalizedHead.blck.slot and
-        not pool.heads[n].blck.isAncestorOf(pool.finalizedHead.blck):
+    if pool.heads[n].blck.slot < pool.finalizedHead.blck.slot:
+      # By definition, the current head should be newer than the finalized
+      # head, so we'll never delete it here
       pool.heads.del(n)

   # Calculate new tail block and set it
@@ -796,24 +805,26 @@ func latestJustifiedBlock*(pool: BlockPool): BlockSlot =
       result = head.justified

 proc preInit*(
-    T: type BlockPool, db: BeaconChainDB, state: BeaconState, blck: BeaconBlock) =
+    T: type BlockPool, db: BeaconChainDB, state: BeaconState,
+    signedBlock: SignedBeaconBlock) =
   # write a genesis state, the way the BlockPool expects it to be stored in
   # database
   # TODO probably should just init a blockpool with the freshly written
   #      state - but there's more refactoring needed to make it nice - doing
   #      a minimal patch for now..
   let
-    blockRoot = signing_root(blck)
+    blockRoot = hash_tree_root(signedBlock.message)

   notice "New database from snapshot",
     blockRoot = shortLog(blockRoot),
-    stateRoot = shortLog(blck.state_root),
+    stateRoot = shortLog(signedBlock.message.state_root),
     fork = state.fork,
     validators = state.validators.len(),
     cat = "initialization"

   db.putState(state)
-  db.putBlock(blck)
+  db.putBlock(signedBlock)
   db.putTailBlock(blockRoot)
   db.putHeadBlock(blockRoot)
-  db.putStateRoot(blockRoot, blck.slot, blck.state_root)
+  db.putStateRoot(
+    blockRoot, signedBlock.message.slot, signedBlock.message.state_root)
beacon_chain/conf.nim

@@ -1,5 +1,6 @@
 import
-  os, options, strformat,
+  os, options, strformat, strutils,
   chronicles, confutils,
   confutils/defs, chronicles/options as chroniclesOptions,
   spec/[crypto]

@@ -251,3 +252,25 @@ proc validatorFileBaseName*(validatorIdx: int): string =

 func dumpDir*(conf: BeaconNodeConf): string =
   conf.dataDir / "dump"
+
+func localValidatorsDir*(conf: BeaconNodeConf): string =
+  conf.dataDir / "validators"
+
+func databaseDir*(conf: BeaconNodeConf): string =
+  conf.dataDir / "db"
+
+iterator validatorKeys*(conf: BeaconNodeConf): ValidatorPrivKey =
+  for validatorKeyFile in conf.validators:
+    try:
+      yield validatorKeyFile.load
+    except CatchableError as err:
+      warn "Failed to load validator private key",
+        file = validatorKeyFile.string, err = err.msg
+
+  for kind, file in walkDir(conf.localValidatorsDir):
+    if kind in {pcFile, pcLinkToFile} and
+        cmpIgnoreCase(".privkey", splitFile(file).ext) == 0:
+      try:
+        yield ValidatorPrivKey.init(readFile(file).string)
+      except CatchableError as err:
+        warn "Failed to load a validator private key", file, err = err.msg
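Note: the extension check in `validatorKeys` above is case-insensitive, so `.privkey`, `.PrivKey` and so on all match:

```nim
import std/[os, strutils]

doAssert cmpIgnoreCase(".privkey", splitFile("/tmp/v0.PrivKey").ext) == 0
doAssert cmpIgnoreCase(".privkey", splitFile("/tmp/v0.pubkey").ext) != 0
```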
beacon_chain/eth2_network.nim

@@ -56,7 +56,7 @@ proc setupNat(conf: BeaconNodeConf): tuple[ip: IpAddress,
   if extPorts.isSome:
     (result.tcpPort, result.udpPort) = extPorts.get()

-when networkBackend == rlpxBackend:
+when networkBackend == rlpx:
   import
     os,
     eth/[rlp, p2p, keys], gossipsub_protocol,
@@ -132,23 +132,36 @@ when networkBackend == rlpxBackend:
 else:
   import
     os, random,
-    stew/io, eth/async_utils, libp2p/crypto/crypto,
+    stew/io, eth/async_utils,
+    libp2p/crypto/crypto, libp2p/[multiaddress, multicodec],
     ssz

-  when networkBackend == libp2pBackend:
+  export
+    multiaddress
+
+  when networkBackend == libp2p:
     import
-      libp2p_backend
+      libp2p/standard_setup, libp2p_backend

     export
       libp2p_backend

   else:
     import
-      libp2p/daemon/daemonapi, libp2p/multiaddress, libp2p_daemon_backend
+      libp2p/daemon/daemonapi, libp2p_daemon_backend

     export
       libp2p_daemon_backend

+    var mainDaemon: DaemonAPI
+
+    proc closeDaemon() {.noconv.} =
+      if mainDaemon != nil:
+        info "Shutting down the LibP2P daemon"
+        waitFor mainDaemon.close()
+
+    addQuitProc(closeDaemon)
+
   const
     netBackendName* = "libp2p"
     networkKeyFilename = "privkey.protobuf"
@@ -188,8 +201,6 @@ else:
   template tcpEndPoint(address, port): auto =
     MultiAddress.init(address, Protocol.IPPROTO_TCP, port)

-  var mainDaemon: DaemonAPI
-
   proc allMultiAddresses(nodes: seq[BootstrapAddr]): seq[string] =
     for node in nodes:
       result.add $node
@@ -201,36 +212,42 @@ else:
       hostAddress = tcpEndPoint(globalListeningAddr, Port conf.tcpPort)
       announcedAddresses = if extIp == globalListeningAddr: @[]
                            else: @[tcpEndPoint(extIp, extTcpPort)]
-      keyFile = conf.ensureNetworkIdFile

-    info "Starting the LibP2P daemon", hostAddress, announcedAddresses,
-      keyFile, bootstrapNodes
+    info "Initializing networking", hostAddress,
+      announcedAddresses,
+      bootstrapNodes

-    var daemonFut = if bootstrapNodes.len == 0:
-      newDaemonApi({PSNoSign, DHTFull, PSFloodSub},
-                   id = keyFile,
-                   hostAddresses = @[hostAddress],
-                   announcedAddresses = announcedAddresses)
-    else:
-      newDaemonApi({PSNoSign, DHTFull, PSFloodSub, WaitBootstrap},
-                   id = keyFile,
-                   hostAddresses = @[hostAddress],
-                   announcedAddresses = announcedAddresses,
-                   bootstrapNodes = allMultiAddresses(bootstrapNodes),
-                   peersRequired = 1)
+    when networkBackend == libp2p:
+      let keys = conf.getPersistentNetIdentity
+      # TODO nim-libp2p still doesn't have support for announcing addresses
+      # that are different from the host address (this is relevant when we
+      # are running behind a NAT).
+      result = Eth2Node.init newStandardSwitch(some keys.seckey, hostAddress,
+                                               triggerSelf = true, gossip = true)
+      await result.start()
+    else:
+      let keyFile = conf.ensureNetworkIdFile

-    mainDaemon = await daemonFut
-    var identity = await mainDaemon.identity()
+      var daemonFut = if bootstrapNodes.len == 0:
+        newDaemonApi({PSNoSign, DHTFull, PSFloodSub},
+                     id = keyFile,
+                     hostAddresses = @[hostAddress],
+                     announcedAddresses = announcedAddresses)
+      else:
+        newDaemonApi({PSNoSign, DHTFull, PSFloodSub, WaitBootstrap},
+                     id = keyFile,
+                     hostAddresses = @[hostAddress],
+                     announcedAddresses = announcedAddresses,
+                     bootstrapNodes = allMultiAddresses(bootstrapNodes),
+                     peersRequired = 1)

-    info "LibP2P daemon started", peer = identity.peer.pretty(),
-      addresses = identity.addresses
+      mainDaemon = await daemonFut

-    proc closeDaemon() {.noconv.} =
-      info "Shutting down the LibP2P daemon"
-      waitFor mainDaemon.close()
-    addQuitProc(closeDaemon)
+      var identity = await mainDaemon.identity()
+      info "LibP2P daemon started", peer = identity.peer.pretty(),
+        addresses = identity.addresses

-    return await Eth2Node.init(mainDaemon)
+      result = await Eth2Node.init(mainDaemon)
@@ -247,21 +264,31 @@ else:
   proc shortForm*(id: Eth2NodeIdentity): string =
     $PeerID.init(id.pubkey)

+  proc multiAddressToPeerInfo(a: MultiAddress): PeerInfo =
+    if IPFS.match(a):
+      let
+        peerId = PeerID.init(a[2].protoAddress())
+        addresses = @[a[0] & a[1]]
+      when networkBackend == libp2p:
+        return PeerInfo.init(peerId, addresses)
+      else:
+        return PeerInfo(peer: peerId, addresses: addresses)
+
   proc connectToNetwork*(node: Eth2Node,
                          bootstrapNodes: seq[MultiAddress]) {.async.} =
     # TODO: perhaps we should do these in parallel
     var connected = false
     for bootstrapNode in bootstrapNodes:
       try:
-        if IPFS.match(bootstrapNode):
-          let pid = PeerID.init(bootstrapNode[2].protoAddress())
-          await node.daemon.connect(pid, @[bootstrapNode[0] & bootstrapNode[1]])
-          var peer = node.getPeer(pid)
-          peer.wasDialed = true
-          await initializeConnection(peer)
-          connected = true
+        let peerInfo = multiAddressToPeerInfo(bootstrapNode)
+        when networkBackend == libp2p:
+          discard await node.switch.dial(peerInfo)
         else:
-          raise newException(CatchableError, "Incorrect bootstrap address")
+          await node.daemon.connect(peerInfo.peer, peerInfo.addresses)
+          var peer = node.getPeer(peerInfo)
+          peer.wasDialed = true
+          await initializeConnection(peer)
+        connected = true
       except CatchableError as err:
         error "Failed to connect to bootstrap node",
               node = bootstrapNode, err = err.msg
@@ -271,33 +298,48 @@ else:
       quit 1

   proc saveConnectionAddressFile*(node: Eth2Node, filename: string) =
-    let id = waitFor node.daemon.identity()
-    writeFile(filename, $id.addresses[0] & "/p2p/" & id.peer.pretty)
-
-  proc loadConnectionAddressFile*(filename: string): PeerInfo =
-    Json.loadFile(filename, PeerInfo)
+    when networkBackend == libp2p:
+      writeFile(filename, $node.switch.peerInfo.addrs[0] & "/p2p/" &
+                node.switch.peerInfo.id)
+    else:
+      let id = waitFor node.daemon.identity()
+      writeFile(filename, $id.addresses[0] & "/p2p/" & id.peer.pretty)

   func peersCount*(node: Eth2Node): int =
     node.peers.len

-  proc makeMessageHandler[MsgType](msgHandler: proc(msg: MsgType) {.gcsafe.}): P2PPubSubCallback =
-    result = proc(api: DaemonAPI,
-                  ticket: PubsubTicket,
-                  msg: PubSubMessage): Future[bool] {.async, gcsafe.} =
-      inc gossip_messages_received
-      trace "Incoming gossip bytes",
-        peer = msg.peer, len = msg.data.len, tops = msg.topics
-      msgHandler SSZ.decode(msg.data, MsgType)
-      return true
-
   proc subscribe*[MsgType](node: Eth2Node,
                            topic: string,
                            msgHandler: proc(msg: MsgType) {.gcsafe.} ) {.async, gcsafe.} =
-    discard await node.daemon.pubsubSubscribe(topic, makeMessageHandler(msgHandler))
+    template execMsgHandler(gossipBytes, gossipTopic) =
+      inc gossip_messages_received
+      trace "Incoming gossip bytes",
+        peer = msg.peer, len = gossipBytes.len, topic = gossipTopic
+      msgHandler SSZ.decode(gossipBytes, MsgType)
+
+    when networkBackend == libp2p:
+      let incomingMsgHandler = proc(topic: string,
+                                    data: seq[byte]) {.async, gcsafe.} =
+        execMsgHandler data, topic
+
+      await node.switch.subscribe(topic, incomingMsgHandler)
+
+    else:
+      let incomingMsgHandler = proc(api: DaemonAPI,
+                                    ticket: PubsubTicket,
+                                    msg: PubSubMessage): Future[bool] {.async, gcsafe.} =
+        execMsgHandler msg.data, msg.topics[0]
+        return true
+
+      discard await node.daemon.pubsubSubscribe(topic, incomingMsgHandler)

   proc broadcast*(node: Eth2Node, topic: string, msg: auto) =
     inc gossip_messages_sent
-    traceAsyncErrors node.daemon.pubsubPublish(topic, SSZ.encode(msg))
+    let broadcastBytes = SSZ.encode(msg)
+    when networkBackend == libp2p:
+      traceAsyncErrors node.switch.publish(topic, broadcastBytes)
+    else:
+      traceAsyncErrors node.daemon.pubsubPublish(topic, broadcastBytes)

   # TODO:
   # At the moment, this is just a compatibility shim for the existing RLPx functionality.
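Note: this file selects its transport at compile time rather than at run time. A minimal sketch of the `when`-dispatch pattern, using a hypothetical string define in place of the real enum constant:

```nim
# Hypothetical stand-in for the real `networkBackend` enum constant;
# select with e.g. `nim c -d:networkBackend=libp2p_daemon ...`.
const networkBackend {.strdefine.} = "libp2p"

when networkBackend == "libp2p":
  proc publishSketch(topic: string) = echo "native libp2p publish to ", topic
else:
  proc publishSketch(topic: string) = echo "daemon pubsubPublish to ", topic

publishSketch("/eth2/beacon_block/ssz")
```

The branch not taken is never compiled, so each binary carries exactly one backend and its imports.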
beacon_chain/inspector.nim

@@ -8,7 +8,7 @@ import strutils, os, tables
 import confutils, chronicles, chronos, libp2p/daemon/daemonapi,
        libp2p/multiaddress
 import stew/byteutils as bu
-import spec/network
+import spec/[crypto, datatypes, network, digest], ssz

 const
   InspectorName* = "Beacon-Chain Network Inspector"
@@ -88,6 +88,11 @@ type
       abbr: "b"
       name: "bootnodes" }: seq[string]

+    decode* {.
+      desc: "Try to decode message using SSZ"
+      abbr: "d"
+      defaultValue: false }: bool
+
 proc getTopic(filter: TopicFilter): string {.inline.} =
   case filter
   of TopicFilter.Blocks:
@@ -176,6 +181,22 @@ proc run(conf: InspectorConf) {.async.} =
             mtopics = $message.topics,
             message = bu.toHex(message.data),
             zpeers = len(pubsubPeers)

+      if conf.decode:
+        try:
+          if ticket.topic.startsWith(topicBeaconBlocks):
+            info "BeaconBlock", msg = SSZ.decode(message.data, BeaconBlock)
+          elif ticket.topic.startsWith(topicAttestations):
+            info "Attestation", msg = SSZ.decode(message.data, Attestation)
+          elif ticket.topic.startsWith(topicVoluntaryExits):
+            info "VoluntaryExit", msg = SSZ.decode(message.data, VoluntaryExit)
+          elif ticket.topic.startsWith(topicProposerSlashings):
+            info "ProposerSlashing", msg = SSZ.decode(message.data, ProposerSlashing)
+          elif ticket.topic.startsWith(topicAttesterSlashings):
+            info "AttesterSlashing", msg = SSZ.decode(message.data, AttesterSlashing)
+        except CatchableError as exc:
+          info "Unable to decode message", msg = exc.msg
+
       result = true

   if len(conf.topics) > 0:
@@ -59,6 +59,6 @@ func makeDeposit*(
   if skipValidation notin flags:
     ret.data.signature =
       bls_sign(
-        privkey, signing_root(ret.data).data, compute_domain(DOMAIN_DEPOSIT))
+        privkey, hash_tree_root(ret.data).data, compute_domain(DOMAIN_DEPOSIT))

   ret
beacon_chain/libp2p_backend.nim

@@ -1,24 +1,36 @@
 import
-  algorithm,
+  algorithm, typetraits,
   stew/varints, stew/shims/[macros, tables], chronos, chronicles,
-  libp2p/daemon/daemonapi, faststreams/output_stream, serialization,
+  faststreams/output_stream, serialization,
   json_serialization/std/options, eth/p2p/p2p_protocol_dsl,
+  # TODO: create simpler to use libp2p modules that use re-exports
+  libp2p/[switch, multistream, connection,
+          base58, multiaddress, peerinfo, peer,
+          crypto/crypto, protocols/identify, protocols/protocol],
+  libp2p/muxers/mplex/[mplex, types],
+  libp2p/protocols/secure/[secure, secio],
+  libp2p/protocols/pubsub/[pubsub, floodsub],
+  libp2p/transports/[transport, tcptransport],
   libp2p_json_serialization, ssz

 export
-  daemonapi, p2pProtocol, libp2p_json_serialization, ssz
+  p2pProtocol, libp2p_json_serialization, ssz

 type
+  P2PStream = Connection
+
+  # TODO Is this really needed?
   Eth2Node* = ref object of RootObj
-    daemon*: DaemonAPI
+    switch*: Switch
     peers*: Table[PeerID, Peer]
     protocolStates*: seq[RootRef]
+    libp2pTransportLoops*: seq[Future[void]]
+
+  EthereumNode = Eth2Node # needed for the definitions in p2p_backends_helpers

   Peer* = ref object
     network*: Eth2Node
-    id*: PeerID
+    info*: PeerInfo
     wasDialed*: bool
     connectionState*: ConnectionState
     protocolStates*: seq[RootRef]
@@ -31,11 +43,6 @@ type
     Disconnecting,
     Disconnected

-  DisconnectionReason* = enum
-    ClientShutDown
-    IrrelevantNetwork
-    FaultOrError
-
   UntypedResponder = object
     peer*: Peer
     stream*: P2PStream
@@ -46,8 +53,8 @@ type
     name*: string

     # Private fields:
     thunk*: ThunkProc
-    libp2pProtocol: string
+    libp2pCodecName: string
     protocolMounter*: MounterProc
     printer*: MessageContentPrinter
     nextMsgResolver*: NextMsgResolver
@@ -65,108 +72,76 @@ type

   ProtocolInfo* = ptr ProtocolInfoObj

   ResponseCode* = enum
     Success
     InvalidRequest
     ServerError

   PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.}
   NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.}
   HandshakeStep* = proc(peer: Peer, stream: P2PStream): Future[void] {.gcsafe.}
   DisconnectionHandler* = proc(peer: Peer): Future[void] {.gcsafe.}
-  ThunkProc* = proc(daemon: DaemonAPI, stream: P2PStream): Future[void] {.gcsafe.}
+  ThunkProc* = LPProtoHandler
   MounterProc* = proc(network: Eth2Node) {.gcsafe.}
   MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.}
   NextMsgResolver* = proc(msgData: SszReader, future: FutureBase) {.gcsafe.}

   Bytes = seq[byte]

+  DisconnectionReason* = enum
+    ClientShutDown
+    IrrelevantNetwork
+    FaultOrError
+
   PeerDisconnected* = object of CatchableError
     reason*: DisconnectionReason

   TransmissionError* = object of CatchableError

 const
-  defaultIncomingReqTimeout = 5000
   HandshakeTimeout = FaultOrError

   # Spec constants
   # https://github.com/ethereum/eth2.0-specs/blob/dev/specs/networking/p2p-interface.md#eth-20-network-interaction-domains
   REQ_RESP_MAX_SIZE* = 1 * 1024 * 1024 # bytes
   GOSSIP_MAX_SIZE* = 1 * 1024 * 1024 # bytes
   TTFB_TIMEOUT* = 5.seconds
   RESP_TIMEOUT* = 10.seconds

   readTimeoutErrorMsg = "Exceeded read timeout for a request"

 logScope:
   topic = "libp2p"

-template `$`*(peer: Peer): string = $peer.id
+template `$`*(peer: Peer): string = id(peer.info)
 chronicles.formatIt(Peer): $it

 template libp2pProtocol*(name: string, version: int) {.pragma.}

+# TODO: This exists only as a compatibility layer between the daemon
+# APIs and the native LibP2P ones. It won't be necessary once the
+# daemon is removed.
+#
+template writeAllBytes(stream: P2PStream, bytes: seq[byte]): untyped =
+  write(stream, bytes)
+
+template openStream(node: Eth2Node, peer: Peer, protocolId: string): untyped =
+  dial(node.switch, peer.info, protocolId)
+
+proc peer(stream: P2PStream): PeerID =
+  # TODO: Can this be `none`?
+  stream.peerInfo.get.peerId
+#
+# End of compatibility layer

-include eth/p2p/p2p_backends_helpers
-include eth/p2p/p2p_tracing
-
-proc init*(T: type Peer, network: Eth2Node, id: PeerID): Peer {.gcsafe.}
+proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer {.gcsafe.}

-proc getPeer*(node: Eth2Node, peerId: PeerID): Peer {.gcsafe.} =
+proc getPeer*(node: Eth2Node, peerInfo: PeerInfo): Peer {.gcsafe.} =
+  let peerId = peerInfo.peerId
   result = node.peers.getOrDefault(peerId)
   if result == nil:
-    result = Peer.init(node, peerId)
+    result = Peer.init(node, peerInfo)
     node.peers[peerId] = result

-proc peerFromStream(daemon: DaemonAPI, stream: P2PStream): Peer {.gcsafe.} =
-  Eth2Node(daemon.userData).getPeer(stream.peer)
-
-proc safeClose(stream: P2PStream) {.async.} =
-  if P2PStreamFlags.Closed notin stream.flags:
-    await close(stream)
+proc peerFromStream(network: Eth2Node, stream: P2PStream): Peer {.gcsafe.} =
+  # TODO: Can this be `none`?
+  return network.getPeer(stream.peerInfo.get)

 proc disconnect*(peer: Peer, reason: DisconnectionReason, notifyOtherPeer = false) {.async.} =
   # TODO: How should we notify the other peer?
   if peer.connectionState notin {Disconnecting, Disconnected}:
     peer.connectionState = Disconnecting
-    await peer.network.daemon.disconnect(peer.id)
+    await peer.network.switch.disconnect(peer.info)
     peer.connectionState = Disconnected
-    peer.network.peers.del(peer.id)
+    peer.network.peers.del(peer.info.peerId)

-template raisePeerDisconnected(msg: string, r: DisconnectionReason) =
-  var e = newException(PeerDisconnected, msg)
-  e.reason = r
-  raise e
+proc safeClose(stream: P2PStream) {.async.} =
+  if not stream.closed:
+    await close(stream)

-proc disconnectAndRaise(peer: Peer,
-                        reason: DisconnectionReason,
-                        msg: string) {.async.} =
-  let r = reason
-  await peer.disconnect(r)
-  raisePeerDisconnected(msg, r)
+include eth/p2p/p2p_backends_helpers
+include eth/p2p/p2p_tracing
+include libp2p_backends_common

-template reraiseAsPeerDisconnected(peer: Peer, errMsgExpr: static string,
-                                   reason = FaultOrError): auto =
-  const errMsg = errMsgExpr
-  debug errMsg
-  disconnectAndRaise(peer, reason, errMsg)
-
 proc registerProtocol(protocol: ProtocolInfo) =
   # TODO: This can be done at compile-time in the future
   let pos = lowerBound(gProtocols, protocol)
   gProtocols.insert(protocol, pos)
   for i in 0 ..< gProtocols.len:
     gProtocols[i].index = i

 proc setEventHandlers(p: ProtocolInfo,
                       handshake: HandshakeStep,
                       disconnectHandler: DisconnectionHandler) =
   p.handshake = handshake
   p.disconnectHandler = disconnectHandler

-proc init*(T: type Eth2Node, daemon: DaemonAPI): Future[T] {.async.} =
+proc init*(T: type Eth2Node, switch: Switch): T =
   new result
-  result.daemon = daemon
-  result.daemon.userData = result
+  result.switch = switch
   result.peers = initTable[PeerID, Peer]()

   newSeq result.protocolStates, allProtocols.len
@@ -175,237 +150,15 @@ proc init*(T: type Eth2Node, daemon: DaemonAPI): Future[T] {.async.} =
     result.protocolStates[proto.index] = proto.networkStateInitializer(result)

   for msg in proto.messages:
-    if msg.libp2pProtocol.len > 0:
-      await daemon.addHandler(@[msg.libp2pProtocol], msg.thunk)
+    if msg.protocolMounter != nil:
+      msg.protocolMounter result

-proc readChunk(stream: P2PStream,
-               MsgType: type,
-               withResponseCode: bool,
-               deadline: Future[void]): Future[Option[MsgType]] {.gcsafe.}
+proc start*(node: Eth2Node) {.async.} =
+  node.libp2pTransportLoops = await node.switch.start()

-proc readSizePrefix(transp: StreamTransport,
-                    deadline: Future[void]): Future[int] {.async.} =
-  var parser: VarintParser[uint64, ProtoBuf]
-  while true:
-    var nextByte: byte
-    var readNextByte = transp.readExactly(addr nextByte, 1)
-    await readNextByte or deadline
-    if not readNextByte.finished:
-      return -1
-    case parser.feedByte(nextByte)
-    of Done:
-      let res = parser.getResult
-      if res > uint64(REQ_RESP_MAX_SIZE):
-        return -1
-      else:
-        return int(res)
-    of Overflow:
-      return -1
-    of Incomplete:
-      continue
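Note: the removed `readSizePrefix` implements the req/resp framing: each chunk starts with a protobuf varint length (rejected above REQ_RESP_MAX_SIZE), followed by that many SSZ bytes. A self-contained sketch of the varint decode over an in-memory buffer, rather than a stream:

```nim
func decodeVarintSketch(data: openArray[byte]): tuple[value: uint64, bytesRead: int] =
  ## Protobuf-style varint: 7 payload bits per byte, MSB set on continuation.
  var shift = 0
  for i, b in data:
    result.value = result.value or (uint64(b and 0x7f) shl shift)
    if (b and 0x80) == 0:       # continuation bit clear: last byte
      result.bytesRead = i + 1
      return
    shift += 7

doAssert decodeVarintSketch([byte 0x96, 0x01]) == (150'u64, 2)
```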
proc readMsgBytes(stream: P2PStream,
                  withResponseCode: bool,
                  deadline: Future[void]): Future[Bytes] {.async.} =
  try:
    if withResponseCode:
      var responseCode: byte
      var readResponseCode = stream.transp.readExactly(addr responseCode, 1)
      await readResponseCode or deadline
      if not readResponseCode.finished:
        return
      if responseCode > ResponseCode.high.byte: return

      logScope: responseCode = ResponseCode(responseCode)
      case ResponseCode(responseCode)
      of InvalidRequest, ServerError:
        let responseErrMsg = await readChunk(stream, string, false, deadline)
        debug "P2P request resulted in error", responseErrMsg
        return
      of Success:
        # The response is OK, the execution continues below
        discard

    var sizePrefix = await readSizePrefix(stream.transp, deadline)
    if sizePrefix == -1:
      debug "Failed to read an incoming message size prefix", peer = stream.peer
      return

    if sizePrefix == 0:
      debug "Received SSZ with zero size", peer = stream.peer
      return

    var msgBytes = newSeq[byte](sizePrefix)
    var readBody = stream.transp.readExactly(addr msgBytes[0], sizePrefix)
    await readBody or deadline
    if not readBody.finished: return

    return msgBytes
  except TransportIncompleteError:
    return @[]

proc readChunk(stream: P2PStream,
               MsgType: type,
               withResponseCode: bool,
               deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} =
  var msgBytes = await stream.readMsgBytes(withResponseCode, deadline)
  try:
    if msgBytes.len > 0:
      return some SSZ.decode(msgBytes, MsgType)
  except SerializationError as err:
    debug "Failed to decode a network message",
          msgBytes, errMsg = err.formatMsg("<msg>")
    return

proc readResponse(
       stream: P2PStream,
       MsgType: type,
       deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} =

  when MsgType is seq:
    type E = ElemType(MsgType)
    var results: MsgType
    while true:
      let nextRes = await readChunk(stream, E, true, deadline)
      if nextRes.isNone: break
      results.add nextRes.get
    if results.len > 0:
      return some(results)
  else:
    return await readChunk(stream, MsgType, true, deadline)

proc encodeErrorMsg(responseCode: ResponseCode, errMsg: string): Bytes =
  var s = init OutputStream
  s.append byte(responseCode)
  s.appendVarint errMsg.len
  s.appendValue SSZ, errMsg
  s.getOutput

proc sendErrorResponse(peer: Peer,
                       stream: P2PStream,
                       err: ref SerializationError,
                       msgName: string,
                       msgBytes: Bytes) {.async.} =
  debug "Received an invalid request",
        peer, msgName, msgBytes, errMsg = err.formatMsg("<msg>")

  let responseBytes = encodeErrorMsg(InvalidRequest, err.formatMsg("msg"))
  discard await stream.transp.write(responseBytes)
  await stream.close()

proc sendErrorResponse(peer: Peer,
                       stream: P2PStream,
                       responseCode: ResponseCode,
                       errMsg: string) {.async.} =
  debug "Error processing request", peer, responseCode, errMsg

  let responseBytes = encodeErrorMsg(ServerError, errMsg)
  discard await stream.transp.write(responseBytes)
  await stream.close()

proc writeSizePrefix(transp: StreamTransport, size: uint64) {.async.} =
  var
    varintBuf: array[10, byte]
    varintSize = vsizeof(size)
    cursor = createWriteCursor(varintBuf)
  cursor.appendVarint size
  var sent = await transp.write(varintBuf[0 ..< varintSize])
  if sent != varintSize:
    raise newException(TransmissionError, "Failed to deliver size prefix")

proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.async} =
  var deadline = sleepAsync RESP_TIMEOUT
  var streamFut = peer.network.daemon.openStream(peer.id, @[protocolId])
  await streamFut or deadline
  if not streamFut.finished:
    # TODO: we are returning here because the deadline passed, but
    # the stream can still be opened eventually a bit later. Who is
    # going to close it then?
    raise newException(TransmissionError, "Failed to open LibP2P stream")

  let stream = streamFut.read
  defer:
    await safeClose(stream)

  var s = init OutputStream
  s.appendVarint requestBytes.len.uint64
  s.append requestBytes
  let bytes = s.getOutput
  let sent = await stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver msg bytes")

# TODO There is too much duplication in the responder functions, but
# I hope to reduce this when I increase the reliance on output streams.
proc sendResponseChunkBytes(responder: UntypedResponder, payload: Bytes) {.async.} =
  var s = init OutputStream
  s.append byte(Success)
  s.appendVarint payload.len.uint64
  s.append payload
  let bytes = s.getOutput

  let sent = await responder.stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver all bytes")

proc sendResponseChunkObj(responder: UntypedResponder, val: auto) {.async.} =
  var s = init OutputStream
  s.append byte(Success)
  s.appendValue SSZ, sizePrefixed(val)
  let bytes = s.getOutput

  let sent = await responder.stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver all bytes")

proc sendResponseChunks[T](responder: UntypedResponder, chunks: seq[T]) {.async.} =
  var s = init OutputStream
  for chunk in chunks:
    s.append byte(Success)
    s.appendValue SSZ, sizePrefixed(chunk)

  let bytes = s.getOutput
  let sent = await responder.stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver all bytes")

proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes,
                     ResponseMsg: type,
                     timeout: Duration): Future[Option[ResponseMsg]] {.gcsafe, async.} =
  var deadline = sleepAsync timeout

  # Open a new LibP2P stream
  var streamFut = peer.network.daemon.openStream(peer.id, @[protocolId])
  await streamFut or deadline
  if not streamFut.finished:
    # TODO: we are returning here because the deadline passed, but
    # the stream can still be opened eventually a bit later. Who is
    # going to close it then?
    return none(ResponseMsg)

  let stream = streamFut.read
  defer:
    await safeClose(stream)

  # Send the request
  var s = init OutputStream
  s.appendVarint requestBytes.len.uint64
  s.append requestBytes
  let bytes = s.getOutput
  let sent = await stream.transp.write(bytes)
  if sent != bytes.len:
    await disconnectAndRaise(peer, FaultOrError, "Incomplete send")

  # Read the response
  return await stream.readResponse(ResponseMsg, deadline)

proc p2pStreamName(MsgType: type): string =
  mixin msgProtocol, protocolInfo, msgId
  MsgType.msgProtocol.protocolInfo.messages[MsgType.msgId].libp2pProtocol

proc init*(T: type Peer, network: Eth2Node, id: PeerID): Peer =
proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer =
  new result
  result.id = id
  result.info = info
  result.network = network
  result.connectionState = Connected
  result.maxInactivityAllowed = 15.minutes # TODO: Read this from the config
@ -415,102 +168,21 @@ proc init*(T: type Peer, network: Eth2Node, id: PeerID): Peer =
    if proto.peerStateInitializer != nil:
      result.protocolStates[i] = proto.peerStateInitializer(result)

proc performProtocolHandshakes*(peer: Peer) {.async.} =
  var subProtocolsHandshakes = newSeqOfCap[Future[void]](allProtocols.len)
  for protocol in allProtocols:
    if protocol.handshake != nil:
      subProtocolsHandshakes.add((protocol.handshake)(peer, nil))

  await all(subProtocolsHandshakes)

template initializeConnection*(peer: Peer): auto =
  performProtocolHandshakes(peer)

proc initProtocol(name: string,
                  peerInit: PeerStateInitializer,
                  networkInit: NetworkStateInitializer): ProtocolInfoObj =
  result.name = name
  result.messages = @[]
  result.peerStateInitializer = peerInit
  result.networkStateInitializer = networkInit

proc registerMsg(protocol: ProtocolInfo,
                 name: string,
                 thunk: ThunkProc,
                 libp2pProtocol: string,
                 mounter: MounterProc,
                 libp2pCodecName: string,
                 printer: MessageContentPrinter) =
  protocol.messages.add MessageInfo(name: name,
                                    thunk: thunk,
                                    libp2pProtocol: libp2pProtocol,
                                    protocolMounter: mounter,
                                    libp2pCodecName: libp2pCodecName,
                                    printer: printer)

proc getRequestProtoName(fn: NimNode): NimNode =
  # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes
  # (TODO: file as an issue)

  let pragmas = fn.pragma
  if pragmas.kind == nnkPragma and pragmas.len > 0:
    for pragma in pragmas:
      if pragma.len > 0 and $pragma[0] == "libp2pProtocol":
        let protoName = $(pragma[1])
        let protoVer = $(pragma[2].intVal)
        return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/ssz")

  return newLit("")

proc init*[MsgType](T: type Responder[MsgType],
                    peer: Peer, stream: P2PStream): T =
  T(UntypedResponder(peer: peer, stream: stream))

import
  typetraits

template write*[M](r: var Responder[M], val: auto): auto =
  mixin send
  type Msg = M
  type MsgRec = RecType(Msg)
  when MsgRec is seq|openarray:
    type E = ElemType(MsgRec)
    when val is E:
      sendResponseChunkObj(UntypedResponder(r), val)
    elif val is MsgRec:
      sendResponseChunks(UntypedResponder(r), val)
    else:
      static: echo "BAD TYPE ", name(E), " vs ", name(type(val))
      {.fatal: "bad".}
  else:
    send(r, val)

proc implementSendProcBody(sendProc: SendProc) =
  let
    msg = sendProc.msg
    UntypedResponder = bindSym "UntypedResponder"
    await = ident "await"

  proc sendCallGenerator(peer, bytes: NimNode): NimNode =
    if msg.kind != msgResponse:
      let msgProto = getRequestProtoName(msg.procDef)
      case msg.kind
      of msgRequest:
        let
          timeout = msg.timeoutParam[0]
          ResponseRecord = msg.response.recName
        quote:
          makeEth2Request(`peer`, `msgProto`, `bytes`,
                          `ResponseRecord`, `timeout`)
      else:
        quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`)
    else:
      quote: sendResponseChunkBytes(`UntypedResponder`(`peer`), `bytes`)

  sendProc.useStandardBody(nil, nil, sendCallGenerator)

proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
  var
    Format = ident "SSZ"
    Responder = bindSym "Responder"
    DaemonAPI = bindSym "DaemonAPI"
    P2PStream = ident "P2PStream"
    P2PStream = bindSym "P2PStream"
    OutputStream = bindSym "OutputStream"
    Peer = bindSym "Peer"
    Eth2Node = bindSym "Eth2Node"
@ -522,8 +194,9 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
    errVar = ident "err"
    msgVar = ident "msg"
    msgBytesVar = ident "msgBytes"
    daemonVar = ident "daemon"
    networkVar = ident "network"
    await = ident "await"
    callUserHandler = ident "callUserHandler"

  p.useRequestIds = false
  p.useSingleRecordInlining = true
@ -545,7 +218,8 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
      protocol = msg.protocol
      msgName = $msg.ident
      msgNameLit = newLit msgName
      msgRecName = msg.recName
      MsgRecName = msg.recName
      codecNameLit = getRequestProtoName(msg.procDef)

    if msg.procDef.body.kind != nnkEmpty and msg.kind == msgRequest:
      # Request procs need an extra param - the stream where the response
@ -554,53 +228,42 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
      msg.initResponderCall.add streamVar

    ##
    ## Implemenmt Thunk
    ## Implement the Thunk:
    ##
    var thunkName = ident(msgName & "_thunk")
    let awaitUserHandler = msg.genAwaitUserHandler(msgVar, [peerVar, streamVar])
    ## The protocol handlers in nim-libp2p receive only a `P2PStream`
    ## parameter and there is no way to access the wider context (such
    ## as the current `Switch`). In our handlers, we may need to list all
    ## peers in the current network, so we must keep a reference to the
    ## network object in the closure environment of the installed handlers.
    ##
    ## For this reason, we define a `protocol mounter` proc that will
    ## initialize the network object by creating handlers bound to the
    ## specific network.
    ##
    let
      protocolMounterName = ident(msgName & "_mounter")
      userHandlerCall = msg.genUserHandlerCall(msgVar, [peerVar, streamVar])

    let tracing = when tracingEnabled:
      quote: logReceivedMsg(`streamVar`.peer, `msgVar`.get)
    var mounter: NimNode
    if msg.userHandler != nil:
      protocol.outRecvProcs.add quote do:
        template `callUserHandler`(`peerVar`: `Peer`,
                                   `streamVar`: `P2PStream`,
                                   `msgVar`: `MsgRecName`): untyped =
          `userHandlerCall`

        proc `protocolMounterName`(`networkVar`: `Eth2Node`) =
          proc thunk(`streamVar`: `P2PStream`,
                     proto: string): Future[void] {.gcsafe.} =
            return handleIncomingStream(`networkVar`, `streamVar`,
                                        `MsgRecName`, `Format`)

          mount `networkVar`.switch,
                LPProtocol(codec: `codecNameLit`, handler: thunk)

      mounter = protocolMounterName
    else:
      newStmtList()

    msg.defineThunk quote do:
      proc `thunkName`(`daemonVar`: `DaemonAPI`,
                       `streamVar`: `P2PStream`) {.async, gcsafe.} =
        defer:
          `await` safeClose(`streamVar`)

        let
          `deadlineVar` = sleepAsync RESP_TIMEOUT
          `msgBytesVar` = `await` readMsgBytes(`streamVar`, false, `deadlineVar`)
          `peerVar` = peerFromStream(`daemonVar`, `streamVar`)

        if `msgBytesVar`.len == 0:
          `await` sendErrorResponse(`peerVar`, `streamVar`,
                                    ServerError, readTimeoutErrorMsg)
          return

        var `msgVar`: `msgRecName`
        try:
          `msgVar` = decode(`Format`, `msgBytesVar`, `msgRecName`)
        except SerializationError as `errVar`:
          `await` sendErrorResponse(`peerVar`, `streamVar`, `errVar`,
                                    `msgNameLit`, `msgBytesVar`)
          return
        except Exception as err:
          # TODO. This is temporary code that should be removed after interop.
          # It can be enabled only in certain diagnostic builds where it should
          # re-raise the exception.
          debug "Crash during serialization", inputBytes = toHex(`msgBytesVar`),
                msgName = `msgNameLit`,
                deserializedType = astToStr(`msgRecName`)
          `await` sendErrorResponse(`peerVar`, `streamVar`, ServerError, err.msg)

        try:
          `tracing`
          `awaitUserHandler`
        except CatchableError as `errVar`:
          `await` sendErrorResponse(`peerVar`, `streamVar`, ServerError, `errVar`.msg)
      mounter = newNilLit()

|
||||
## Implement Senders and Handshake
|
||||
@ -615,9 +278,9 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
|
||||
newCall(registerMsg,
|
||||
protocol.protocolInfoVar,
|
||||
msgNameLit,
|
||||
thunkName,
|
||||
getRequestProtoName(msg.procDef),
|
||||
newTree(nnkBracketExpr, messagePrinter, msgRecName)))
|
||||
mounter,
|
||||
codecNameLit,
|
||||
newTree(nnkBracketExpr, messagePrinter, MsgRecName)))
|
||||
|
||||
result.implementProtocolInit = proc (p: P2PProtocol): NimNode =
|
||||
return newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit)
|
||||
|
394 beacon_chain/libp2p_backends_common.nim Normal file
@ -0,0 +1,394 @@
type
  ResponseCode* = enum
    Success
    InvalidRequest
    ServerError

  Bytes = seq[byte]

const
  defaultIncomingReqTimeout = 5000
  HandshakeTimeout = FaultOrError

  # Spec constants
  # https://github.com/ethereum/eth2.0-specs/blob/dev/specs/networking/p2p-interface.md#eth-20-network-interaction-domains
  REQ_RESP_MAX_SIZE* = 1 * 1024 * 1024 # bytes
  GOSSIP_MAX_SIZE* = 1 * 1024 * 1024 # bytes
  TTFB_TIMEOUT* = 5.seconds
  RESP_TIMEOUT* = 10.seconds

  readTimeoutErrorMsg = "Exceeded read timeout for a request"

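The wire format implied by these constants and the read/write procs in this file is: for responses, one response-code byte, then a protobuf-style varint length prefix (rejected above REQ_RESP_MAX_SIZE), then the SSZ payload; requests omit the code byte. A minimal self-contained encoder sketch (illustrative only, not this file's API):

```nim
proc putVarint(buf: var seq[byte], n: uint64) =
  # Protobuf-style unsigned varint: 7 data bits per byte, MSB = continuation.
  var n = n
  while true:
    let b = byte(n and 0x7F'u64)
    n = n shr 7
    if n == 0'u64:
      buf.add b
      break
    buf.add (b or 0x80'u8)

proc encodeResponseChunk(code: byte, sszPayload: seq[byte]): seq[byte] =
  result.add code                        # Success / InvalidRequest / ServerError
  putVarint(result, uint64(sszPayload.len))
  result.add sszPayload

# A 3-byte payload framed under the Success code (0):
echo encodeResponseChunk(0'u8, @[1'u8, 2, 3])  # -> @[0, 3, 1, 2, 3]
```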
logScope:
  topics = "libp2p"

template libp2pProtocol*(name: string, version: int) {.pragma.}

proc getRequestProtoName(fn: NimNode): NimNode =
  # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes
  # (TODO: file as an issue)

  let pragmas = fn.pragma
  if pragmas.kind == nnkPragma and pragmas.len > 0:
    for pragma in pragmas:
      if pragma.len > 0 and $pragma[0] == "libp2pProtocol":
        let protoName = $(pragma[1])
        let protoVer = $(pragma[2].intVal)
        return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/ssz")

  return newLit("")

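getRequestProtoName assembles the libp2p protocol id from the `libp2pProtocol` pragma's name and version arguments. A runnable sketch of the same string scheme (the message name below is only an example):

```nim
proc reqRespId(name: string, version: int): string =
  "/eth2/beacon_chain/req/" & name & "/" & $version & "/ssz"

echo reqRespId("goodbye", 1)  # -> /eth2/beacon_chain/req/goodbye/1/ssz
```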
template raisePeerDisconnected(msg: string, r: DisconnectionReason) =
  var e = newException(PeerDisconnected, msg)
  e.reason = r
  raise e

proc disconnectAndRaise(peer: Peer,
                        reason: DisconnectionReason,
                        msg: string) {.async.} =
  let r = reason
  await peer.disconnect(r)
  raisePeerDisconnected(msg, r)

proc readChunk(stream: P2PStream,
               MsgType: type,
               withResponseCode: bool,
               deadline: Future[void]): Future[Option[MsgType]] {.gcsafe.}

proc readSizePrefix(stream: P2PStream,
                    deadline: Future[void]): Future[int] {.async.} =
  trace "about to read msg size prefix"
  var parser: VarintParser[uint64, ProtoBuf]
  while true:
    var nextByte: byte
    var readNextByte = stream.readExactly(addr nextByte, 1)
    await readNextByte or deadline
    if not readNextByte.finished:
      trace "size prefix byte not received in time"
      return -1
    case parser.feedByte(nextByte)
    of Done:
      let res = parser.getResult
      if res > uint64(REQ_RESP_MAX_SIZE):
        trace "size prefix outside of range", res
        return -1
      else:
        trace "got size prefix", res
        return int(res)
    of Overflow:
      trace "size prefix overflow"
      return -1
    of Incomplete:
      continue

proc readMsgBytes(stream: P2PStream,
                  withResponseCode: bool,
                  deadline: Future[void]): Future[Bytes] {.async.} =
  trace "about to read message bytes", withResponseCode

  try:
    if withResponseCode:
      var responseCode: byte
      trace "about to read response code"
      var readResponseCode = stream.readExactly(addr responseCode, 1)
      await readResponseCode or deadline

      if not readResponseCode.finished:
        trace "response code not received in time"
        return

      if responseCode > ResponseCode.high.byte:
        trace "invalid response code", responseCode
        return

      logScope: responseCode = ResponseCode(responseCode)
      trace "got response code"

      case ResponseCode(responseCode)
      of InvalidRequest, ServerError:
        let responseErrMsg = await readChunk(stream, string, false, deadline)
        debug "P2P request resulted in error", responseErrMsg
        return

      of Success:
        # The response is OK, the execution continues below
        discard

    var sizePrefix = await readSizePrefix(stream, deadline)
    trace "got msg size prefix", sizePrefix

    if sizePrefix == -1:
      debug "Failed to read an incoming message size prefix", peer = stream.peer
      return

    if sizePrefix == 0:
      debug "Received SSZ with zero size", peer = stream.peer
      return

    trace "about to read msg bytes"
    var msgBytes = newSeq[byte](sizePrefix)
    var readBody = stream.readExactly(addr msgBytes[0], sizePrefix)
    await readBody or deadline
    if not readBody.finished:
      trace "msg bytes not received in time"
      return

    trace "got message bytes", msgBytes
    return msgBytes

  except TransportIncompleteError:
    return @[]

proc readChunk(stream: P2PStream,
               MsgType: type,
               withResponseCode: bool,
               deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} =
  var msgBytes = await stream.readMsgBytes(withResponseCode, deadline)
  try:
    if msgBytes.len > 0:
      return some SSZ.decode(msgBytes, MsgType)
  except SerializationError as err:
    debug "Failed to decode a network message",
          msgBytes, errMsg = err.formatMsg("<msg>")
    return

proc readResponse(
       stream: P2PStream,
       MsgType: type,
       deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} =

  when MsgType is seq:
    type E = ElemType(MsgType)
    var results: MsgType
    while true:
      let nextRes = await readChunk(stream, E, true, deadline)
      if nextRes.isNone: break
      results.add nextRes.get
    if results.len > 0:
      return some(results)
  else:
    return await readChunk(stream, MsgType, true, deadline)

proc encodeErrorMsg(responseCode: ResponseCode, errMsg: string): Bytes =
  var s = init OutputStream
  s.append byte(responseCode)
  s.appendVarint errMsg.len
  s.appendValue SSZ, errMsg
  s.getOutput

proc sendErrorResponse(peer: Peer,
                       stream: P2PStream,
                       err: ref SerializationError,
                       msgName: string,
                       msgBytes: Bytes) {.async.} =
  debug "Received an invalid request",
        peer, msgName, msgBytes, errMsg = err.formatMsg("<msg>")

  let responseBytes = encodeErrorMsg(InvalidRequest, err.formatMsg("msg"))
  await stream.writeAllBytes(responseBytes)
  await stream.close()

proc sendErrorResponse(peer: Peer,
                       stream: P2PStream,
                       responseCode: ResponseCode,
                       errMsg: string) {.async.} =
  debug "Error processing request", peer, responseCode, errMsg

  let responseBytes = encodeErrorMsg(ServerError, errMsg)
  await stream.writeAllBytes(responseBytes)
  await stream.close()

proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.async} =
  var deadline = sleepAsync RESP_TIMEOUT
  var streamFut = peer.network.openStream(peer, protocolId)
  await streamFut or deadline
  if not streamFut.finished:
    # TODO: we are returning here because the deadline passed, but
    # the stream can still be opened eventually a bit later. Who is
    # going to close it then?
    raise newException(TransmissionError, "Failed to open LibP2P stream")

  let stream = streamFut.read
  defer:
    await safeClose(stream)

  var s = init OutputStream
  s.appendVarint requestBytes.len.uint64
  s.append requestBytes
  let bytes = s.getOutput
  await stream.writeAllBytes(bytes)

# TODO There is too much duplication in the responder functions, but
# I hope to reduce this when I increase the reliance on output streams.
proc sendResponseChunkBytes(responder: UntypedResponder, payload: Bytes) {.async.} =
  var s = init OutputStream
  s.append byte(Success)
  s.appendVarint payload.len.uint64
  s.append payload
  let bytes = s.getOutput
  await responder.stream.writeAllBytes(bytes)

proc sendResponseChunkObj(responder: UntypedResponder, val: auto) {.async.} =
  var s = init OutputStream
  s.append byte(Success)
  s.appendValue SSZ, sizePrefixed(val)
  let bytes = s.getOutput
  await responder.stream.writeAllBytes(bytes)

proc sendResponseChunks[T](responder: UntypedResponder, chunks: seq[T]) {.async.} =
  var s = init OutputStream
  for chunk in chunks:
    s.append byte(Success)
    s.appendValue SSZ, sizePrefixed(chunk)

  let bytes = s.getOutput
  await responder.stream.writeAllBytes(bytes)

proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes,
                     ResponseMsg: type,
                     timeout: Duration): Future[Option[ResponseMsg]] {.gcsafe, async.} =
  var deadline = sleepAsync timeout

  # Open a new LibP2P stream
  var streamFut = peer.network.openStream(peer, protocolId)
  await streamFut or deadline
  if not streamFut.finished:
    # TODO: we are returning here because the deadline passed, but
    # the stream can still be opened eventually a bit later. Who is
    # going to close it then?
    return none(ResponseMsg)

  let stream = streamFut.read
  defer:
    await safeClose(stream)

  # Send the request
  var s = init OutputStream
  s.appendVarint requestBytes.len.uint64
  s.append requestBytes
  let bytes = s.getOutput
  await stream.writeAllBytes(bytes)

  # Read the response
  return await stream.readResponse(ResponseMsg, deadline)

proc init*[MsgType](T: type Responder[MsgType],
                    peer: Peer, stream: P2PStream): T =
  T(UntypedResponder(peer: peer, stream: stream))

template write*[M](r: var Responder[M], val: auto): auto =
  mixin send
  type Msg = M
  type MsgRec = RecType(Msg)
  when MsgRec is seq|openarray:
    type E = ElemType(MsgRec)
    when val is E:
      sendResponseChunkObj(UntypedResponder(r), val)
    elif val is MsgRec:
      sendResponseChunks(UntypedResponder(r), val)
    else:
      {.fatal: "Unexpected message type".}
  else:
    send(r, val)

proc performProtocolHandshakes*(peer: Peer) {.async.} =
  var subProtocolsHandshakes = newSeqOfCap[Future[void]](allProtocols.len)
  for protocol in allProtocols:
    if protocol.handshake != nil:
      subProtocolsHandshakes.add((protocol.handshake)(peer, nil))

  await all(subProtocolsHandshakes)

template initializeConnection*(peer: Peer): auto =
  performProtocolHandshakes(peer)

proc initProtocol(name: string,
                  peerInit: PeerStateInitializer,
                  networkInit: NetworkStateInitializer): ProtocolInfoObj =
  result.name = name
  result.messages = @[]
  result.peerStateInitializer = peerInit
  result.networkStateInitializer = networkInit

proc registerProtocol(protocol: ProtocolInfo) =
  # TODO: This can be done at compile-time in the future
  let pos = lowerBound(gProtocols, protocol)
  gProtocols.insert(protocol, pos)
  for i in 0 ..< gProtocols.len:
    gProtocols[i].index = i

proc setEventHandlers(p: ProtocolInfo,
                      handshake: HandshakeStep,
                      disconnectHandler: DisconnectionHandler) =
  p.handshake = handshake
  p.disconnectHandler = disconnectHandler

proc implementSendProcBody(sendProc: SendProc) =
  let
    msg = sendProc.msg
    UntypedResponder = bindSym "UntypedResponder"
    await = ident "await"

  proc sendCallGenerator(peer, bytes: NimNode): NimNode =
    if msg.kind != msgResponse:
      let msgProto = getRequestProtoName(msg.procDef)
      case msg.kind
      of msgRequest:
        let
          timeout = msg.timeoutParam[0]
          ResponseRecord = msg.response.recName
        quote:
          makeEth2Request(`peer`, `msgProto`, `bytes`,
                          `ResponseRecord`, `timeout`)
      else:
        quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`)
    else:
      quote: sendResponseChunkBytes(`UntypedResponder`(`peer`), `bytes`)

  sendProc.useStandardBody(nil, nil, sendCallGenerator)

proc handleIncomingStream(network: Eth2Node, stream: P2PStream,
                          MsgType, Format: distinct type) {.async, gcsafe.} =
  mixin callUserHandler
  const msgName = typetraits.name(MsgType)

  ## Uncomment this to enable tracing on all incoming requests
  ## You can include `msgNameLit` in the condition to select
  ## more specific requests:
  # when chronicles.runtimeFilteringEnabled:
  #   setLogLevel(LogLevel.TRACE)
  #   defer: setLogLevel(LogLevel.DEBUG)
  #   trace "incoming " & `msgNameLit` & " stream"

  defer:
    await safeClose(stream)

  let
    deadline = sleepAsync RESP_TIMEOUT
    msgBytes = await readMsgBytes(stream, false, deadline)
    peer = peerFromStream(network, stream)

  if msgBytes.len == 0:
    await sendErrorResponse(peer, stream, ServerError, readTimeoutErrorMsg)
    return

  var msg: MsgType
  try:
    msg = decode(Format, msgBytes, MsgType)
  except SerializationError as err:
    await sendErrorResponse(peer, stream, err, msgName, msgBytes)
    return
  except Exception as err:
    # TODO. This is temporary code that should be removed after interop.
    # It can be enabled only in certain diagnostic builds where it should
    # re-raise the exception.
    debug "Crash during serialization", inputBytes = toHex(msgBytes), msgName
    await sendErrorResponse(peer, stream, ServerError, err.msg)
    raise err

  try:
    logReceivedMsg(peer, msg)
    await callUserHandler(peer, stream, msg)
  except CatchableError as err:
    await sendErrorResponse(peer, stream, ServerError, err.msg)
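handleIncomingStream is generic over the message type and resolves `callUserHandler` via `mixin`, i.e. against whatever overload the instantiating protocol generated. A toy, self-contained version of that dispatch pattern (simplified types, not the project's DSL):

```nim
type Peer = object
  id: string

proc callUserHandler(peer: Peer, msg: string) =
  echo peer.id, " handles: ", msg

proc handleIncoming[MsgType](peer: Peer, raw: seq[byte]) =
  mixin callUserHandler           # resolved at instantiation time
  var msg: MsgType                # stands in for decode(Format, msgBytes, MsgType)
  for b in raw: msg.add char(b)
  callUserHandler(peer, msg)

handleIncoming[string](Peer(id: "peer1"), @[104'u8, 105'u8])  # -> peer1 handles: hi
```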
@ -1,5 +1,5 @@
import
  algorithm,
  algorithm, typetraits,
  stew/varints, stew/shims/[macros, tables], chronos, chronicles,
  libp2p/daemon/daemonapi, faststreams/output_stream, serialization,
  json_serialization/std/options, eth/p2p/p2p_protocol_dsl,
@ -31,11 +31,6 @@ type
    Disconnecting,
    Disconnected

  DisconnectionReason* = enum
    ClientShutDown
    IrrelevantNetwork
    FaultOrError

  UntypedResponder = object
    peer*: Peer
    stream*: P2PStream
@ -65,11 +60,6 @@ type

  ProtocolInfo* = ptr ProtocolInfoObj

  ResponseCode* = enum
    Success
    InvalidRequest
    ServerError

  PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.}
  NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.}
  HandshakeStep* = proc(peer: Peer, stream: P2PStream): Future[void] {.gcsafe.}
@ -78,36 +68,36 @@ type
  MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.}
  NextMsgResolver* = proc(msgData: SszReader, future: FutureBase) {.gcsafe.}

  Bytes = seq[byte]
  DisconnectionReason* = enum
    ClientShutDown
    IrrelevantNetwork
    FaultOrError

  PeerDisconnected* = object of CatchableError
    reason*: DisconnectionReason

  TransmissionError* = object of CatchableError

const
  defaultIncomingReqTimeout = 5000
  HandshakeTimeout = FaultOrError

  # Spec constants
  # https://github.com/ethereum/eth2.0-specs/blob/dev/specs/networking/p2p-interface.md#eth-20-network-interaction-domains
  REQ_RESP_MAX_SIZE* = 1 * 1024 * 1024 # bytes
  GOSSIP_MAX_SIZE* = 1 * 1024 * 1024 # bytes
  TTFB_TIMEOUT* = 5.seconds
  RESP_TIMEOUT* = 10.seconds

  readTimeoutErrorMsg = "Exceeded read timeout for a request"

logScope:
  topic = "libp2p"

template `$`*(peer: Peer): string = $peer.id
chronicles.formatIt(Peer): $it

template libp2pProtocol*(name: string, version: int) {.pragma.}
# TODO: These exist only as a compatibility layer between the daemon
# APIs and the native LibP2P ones. They won't be necessary once the
# daemon is removed.
#
proc writeAllBytes(stream: P2PStream, bytes: seq[byte]) {.async.} =
  let sent = await stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver msg bytes")

include eth/p2p/p2p_backends_helpers
include eth/p2p/p2p_tracing
template readExactly(stream: P2PStream, dst: pointer, dstLen: int): untyped =
  readExactly(stream.transp, dst, dstLen)

template openStream(node: Eth2Node, peer: Peer, protocolId: string): untyped =
  openStream(node.daemon, peer.id, @[protocolId])

#
# End of compatibility layer

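The templates above shim the daemon's transport-based API onto the call shapes the shared backend code expects. A standalone toy of the same forwarding trick (names invented for illustration):

```nim
type
  Transp = object
  Stream = object
    transp: Transp

proc write(t: Transp, n: int): int =
  n  # stand-in for the real transport write, returns bytes "sent"

template write(s: Stream, n: int): untyped =
  # forwards the uniform stream call onto the transport-specific field
  write(s.transp, n)

echo Stream().write(4)  # -> 4
```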
proc init*(T: type Peer, network: Eth2Node, id: PeerID): Peer {.gcsafe.}

@ -117,12 +107,11 @@ proc getPeer*(node: Eth2Node, peerId: PeerID): Peer {.gcsafe.} =
    result = Peer.init(node, peerId)
    node.peers[peerId] = result

proc peerFromStream(daemon: DaemonAPI, stream: P2PStream): Peer {.gcsafe.} =
  Eth2Node(daemon.userData).getPeer(stream.peer)
proc getPeer*(node: Eth2Node, peerInfo: PeerInfo): Peer =
  node.getPeer(peerInfo.peer)

proc safeClose(stream: P2PStream) {.async.} =
  if P2PStreamFlags.Closed notin stream.flags:
    await close(stream)
proc peerFromStream(node: Eth2Node, stream: P2PStream): Peer {.gcsafe.} =
  node.getPeer(stream.peer)

proc disconnect*(peer: Peer, reason: DisconnectionReason, notifyOtherPeer = false) {.async.} =
  # TODO: How should we notify the other peer?
@ -132,30 +121,13 @@ proc disconnect*(peer: Peer, reason: DisconnectionReason, notifyOtherPeer = fals
    peer.connectionState = Disconnected
    peer.network.peers.del(peer.id)

template raisePeerDisconnected(msg: string, r: DisconnectionReason) =
  var e = newException(PeerDisconnected, msg)
  e.reason = r
  raise e
proc safeClose(stream: P2PStream) {.async.} =
  if P2PStreamFlags.Closed notin stream.flags:
    await close(stream)

proc disconnectAndRaise(peer: Peer,
                        reason: DisconnectionReason,
                        msg: string) {.async.} =
  let r = reason
  await peer.disconnect(r)
  raisePeerDisconnected(msg, r)

proc registerProtocol(protocol: ProtocolInfo) =
  # TODO: This can be done at compile-time in the future
  let pos = lowerBound(gProtocols, protocol)
  gProtocols.insert(protocol, pos)
  for i in 0 ..< gProtocols.len:
    gProtocols[i].index = i

proc setEventHandlers(p: ProtocolInfo,
                      handshake: HandshakeStep,
                      disconnectHandler: DisconnectionHandler) =
  p.handshake = handshake
  p.disconnectHandler = disconnectHandler
include eth/p2p/p2p_backends_helpers
include eth/p2p/p2p_tracing
include libp2p_backends_common

proc init*(T: type Eth2Node, daemon: DaemonAPI): Future[T] {.async.} =
  new result
@ -169,243 +141,9 @@ proc init*(T: type Eth2Node, daemon: DaemonAPI): Future[T] {.async.} =
      result.protocolStates[proto.index] = proto.networkStateInitializer(result)

      for msg in proto.messages:
        if msg.libp2pProtocol.len > 0:
        if msg.libp2pProtocol.len > 0 and msg.thunk != nil:
          await daemon.addHandler(@[msg.libp2pProtocol], msg.thunk)

proc readChunk(stream: P2PStream,
               MsgType: type,
               withResponseCode: bool,
               deadline: Future[void]): Future[Option[MsgType]] {.gcsafe.}

proc readSizePrefix(transp: StreamTransport,
                    deadline: Future[void]): Future[int] {.async.} =
  trace "about to read msg size prefix"
  var parser: VarintParser[uint64, ProtoBuf]
  while true:
    var nextByte: byte
    var readNextByte = transp.readExactly(addr nextByte, 1)
    await readNextByte or deadline
    if not readNextByte.finished:
      trace "size prefix byte not received in time"
      return -1
    case parser.feedByte(nextByte)
    of Done:
      let res = parser.getResult
      if res > uint64(REQ_RESP_MAX_SIZE):
        trace "size prefix outside of range", res
        return -1
      else:
        trace "got size prefix", res
        return int(res)
    of Overflow:
      trace "size prefix overflow"
      return -1
    of Incomplete:
      continue

proc readMsgBytes(stream: P2PStream,
                  withResponseCode: bool,
                  deadline: Future[void]): Future[Bytes] {.async.} =
  trace "about to read message bytes", withResponseCode

  try:
    if withResponseCode:
      var responseCode: byte
      trace "about to read response code"
      var readResponseCode = stream.transp.readExactly(addr responseCode, 1)
      await readResponseCode or deadline

      if not readResponseCode.finished:
        trace "response code not received in time"
        return

      if responseCode > ResponseCode.high.byte:
        trace "invalid response code", responseCode
        return

      logScope: responseCode = ResponseCode(responseCode)
      trace "got response code"

      case ResponseCode(responseCode)
      of InvalidRequest, ServerError:
        let responseErrMsg = await readChunk(stream, string, false, deadline)
        debug "P2P request resulted in error", responseErrMsg
        return

      of Success:
        # The response is OK, the execution continues below
        discard

    var sizePrefix = await readSizePrefix(stream.transp, deadline)
    trace "got msg size prefix", sizePrefix

    if sizePrefix == -1:
      debug "Failed to read an incoming message size prefix", peer = stream.peer
      return

    if sizePrefix == 0:
      debug "Received SSZ with zero size", peer = stream.peer
      return

    trace "about to read msg bytes"
    var msgBytes = newSeq[byte](sizePrefix)
    var readBody = stream.transp.readExactly(addr msgBytes[0], sizePrefix)
    await readBody or deadline
    if not readBody.finished:
      trace "msg bytes not received in time"
      return

    trace "got message bytes", msgBytes
    return msgBytes

  except TransportIncompleteError:
    return @[]

proc readChunk(stream: P2PStream,
               MsgType: type,
               withResponseCode: bool,
               deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} =
  var msgBytes = await stream.readMsgBytes(withResponseCode, deadline)
  try:
    if msgBytes.len > 0:
      return some SSZ.decode(msgBytes, MsgType)
  except SerializationError as err:
    debug "Failed to decode a network message",
          msgBytes, errMsg = err.formatMsg("<msg>")
    return

proc readResponse(
       stream: P2PStream,
       MsgType: type,
       deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} =

  when MsgType is seq:
    type E = ElemType(MsgType)
    var results: MsgType
    while true:
      let nextRes = await readChunk(stream, E, true, deadline)
      if nextRes.isNone: break
      results.add nextRes.get
    if results.len > 0:
      return some(results)
  else:
    return await readChunk(stream, MsgType, true, deadline)

proc encodeErrorMsg(responseCode: ResponseCode, errMsg: string): Bytes =
  var s = init OutputStream
  s.append byte(responseCode)
  s.appendVarint errMsg.len
  s.appendValue SSZ, errMsg
  s.getOutput

proc sendErrorResponse(peer: Peer,
                       stream: P2PStream,
                       err: ref SerializationError,
                       msgName: string,
                       msgBytes: Bytes) {.async.} =
  debug "Received an invalid request",
        peer, msgName, msgBytes, errMsg = err.formatMsg("<msg>")

  let responseBytes = encodeErrorMsg(InvalidRequest, err.formatMsg("msg"))
  discard await stream.transp.write(responseBytes)
  await stream.close()

proc sendErrorResponse(peer: Peer,
                       stream: P2PStream,
                       responseCode: ResponseCode,
                       errMsg: string) {.async.} =
  debug "Error processing request", peer, responseCode, errMsg

  let responseBytes = encodeErrorMsg(ServerError, errMsg)
  discard await stream.transp.write(responseBytes)
  await stream.close()

proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.async} =
  var deadline = sleepAsync RESP_TIMEOUT
  var streamFut = peer.network.daemon.openStream(peer.id, @[protocolId])
  await streamFut or deadline
  if not streamFut.finished:
    # TODO: we are returning here because the deadline passed, but
    # the stream can still be opened eventually a bit later. Who is
    # going to close it then?
    raise newException(TransmissionError, "Failed to open LibP2P stream")

  let stream = streamFut.read
  defer:
    await safeClose(stream)

  var s = init OutputStream
  s.appendVarint requestBytes.len.uint64
  s.append requestBytes
  let bytes = s.getOutput
  let sent = await stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver msg bytes")

# TODO There is too much duplication in the responder functions, but
# I hope to reduce this when I increase the reliance on output streams.
proc sendResponseChunkBytes(responder: UntypedResponder, payload: Bytes) {.async.} =
  var s = init OutputStream
  s.append byte(Success)
  s.appendVarint payload.len.uint64
  s.append payload
  let bytes = s.getOutput

  let sent = await responder.stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver all bytes")

proc sendResponseChunkObj(responder: UntypedResponder, val: auto) {.async.} =
  var s = init OutputStream
  s.append byte(Success)
  s.appendValue SSZ, sizePrefixed(val)
  let bytes = s.getOutput

  let sent = await responder.stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver all bytes")

proc sendResponseChunks[T](responder: UntypedResponder, chunks: seq[T]) {.async.} =
  var s = init OutputStream
  for chunk in chunks:
    s.append byte(Success)
    s.appendValue SSZ, sizePrefixed(chunk)

  let bytes = s.getOutput
  let sent = await responder.stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver all bytes")

proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes,
                     ResponseMsg: type,
                     timeout: Duration): Future[Option[ResponseMsg]] {.gcsafe, async.} =
  var deadline = sleepAsync timeout

  # Open a new LibP2P stream
  var streamFut = peer.network.daemon.openStream(peer.id, @[protocolId])
  await streamFut or deadline
  if not streamFut.finished:
    # TODO: we are returning here because the deadline passed, but
    # the stream can still be opened eventually a bit later. Who is
    # going to close it then?
    return none(ResponseMsg)

  let stream = streamFut.read
  defer:
    await safeClose(stream)

  # Send the request
  var s = init OutputStream
  s.appendVarint requestBytes.len.uint64
  s.append requestBytes
  let bytes = s.getOutput
  let sent = await stream.transp.write(bytes)
  if sent != bytes.len:
    await disconnectAndRaise(peer, FaultOrError, "Incomplete send")

  # Read the response
  return await stream.readResponse(ResponseMsg, deadline)

proc init*(T: type Peer, network: Eth2Node, id: PeerID): Peer =
  new result
  result.id = id
@ -418,25 +156,6 @@ proc init*(T: type Peer, network: Eth2Node, id: PeerID): Peer =
    if proto.peerStateInitializer != nil:
      result.protocolStates[i] = proto.peerStateInitializer(result)

proc performProtocolHandshakes*(peer: Peer) {.async.} =
  var subProtocolsHandshakes = newSeqOfCap[Future[void]](allProtocols.len)
  for protocol in allProtocols:
    if protocol.handshake != nil:
      subProtocolsHandshakes.add((protocol.handshake)(peer, nil))

  await all(subProtocolsHandshakes)

template initializeConnection*(peer: Peer): auto =
  performProtocolHandshakes(peer)

proc initProtocol(name: string,
                  peerInit: PeerStateInitializer,
                  networkInit: NetworkStateInitializer): ProtocolInfoObj =
  result.name = name
  result.messages = @[]
  result.peerStateInitializer = peerInit
  result.networkStateInitializer = networkInit

proc registerMsg(protocol: ProtocolInfo,
                 name: string,
                 thunk: ThunkProc,
@ -447,67 +166,6 @@ proc registerMsg(protocol: ProtocolInfo,
                                    libp2pProtocol: libp2pProtocol,
                                    printer: printer)

proc getRequestProtoName(fn: NimNode): NimNode =
  # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes
  # (TODO: file as an issue)

  let pragmas = fn.pragma
  if pragmas.kind == nnkPragma and pragmas.len > 0:
    for pragma in pragmas:
      if pragma.len > 0 and $pragma[0] == "libp2pProtocol":
        let protoName = $(pragma[1])
        let protoVer = $(pragma[2].intVal)
        return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/ssz")

  return newLit("")

proc init*[MsgType](T: type Responder[MsgType],
                    peer: Peer, stream: P2PStream): T =
  T(UntypedResponder(peer: peer, stream: stream))

import
  typetraits

template write*[M](r: var Responder[M], val: auto): auto =
  mixin send
  type Msg = M
  type MsgRec = RecType(Msg)
  when MsgRec is seq|openarray:
    type E = ElemType(MsgRec)
    when val is E:
      sendResponseChunkObj(UntypedResponder(r), val)
    elif val is MsgRec:
      sendResponseChunks(UntypedResponder(r), val)
    else:
      static: echo "BAD TYPE ", name(E), " vs ", name(type(val))
      {.fatal: "bad".}
  else:
    send(r, val)

proc implementSendProcBody(sendProc: SendProc) =
  let
    msg = sendProc.msg
    UntypedResponder = bindSym "UntypedResponder"
    await = ident "await"

  proc sendCallGenerator(peer, bytes: NimNode): NimNode =
    if msg.kind != msgResponse:
      let msgProto = getRequestProtoName(msg.procDef)
      case msg.kind
      of msgRequest:
        let
          timeout = msg.timeoutParam[0]
          ResponseRecord = msg.response.recName
        quote:
          makeEth2Request(`peer`, `msgProto`, `bytes`,
                          `ResponseRecord`, `timeout`)
      else:
        quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`)
    else:
      quote: sendResponseChunkBytes(`UntypedResponder`(`peer`), `bytes`)

  sendProc.useStandardBody(nil, nil, sendCallGenerator)

proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
  var
    Format = ident "SSZ"
@ -527,6 +185,7 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
    msgBytesVar = ident "msgBytes"
    daemonVar = ident "daemon"
    await = ident "await"
    callUserHandler = ident "callUserHandler"

  p.useRequestIds = false
  p.useSingleRecordInlining = true
@ -548,7 +207,7 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
      protocol = msg.protocol
      msgName = $msg.ident
      msgNameLit = newLit msgName
      msgRecName = msg.recName
      MsgRecName = msg.recName

    if msg.procDef.body.kind != nnkEmpty and msg.kind == msgRequest:
      # Request procs need an extra param - the stream where the response
@ -559,64 +218,23 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
    ##
    ## Implement Thunk
    ##
    var thunkName = ident(msgName & "_thunk")
    let awaitUserHandler = msg.genAwaitUserHandler(msgVar, [peerVar, streamVar])
    var thunkName: NimNode

    let tracing = when tracingEnabled:
      quote: logReceivedMsg(`streamVar`.peer, `msgVar`.get)
    if msg.userHandler != nil:
      thunkName = ident(msgName & "_thunk")
      let userHandlerCall = msg.genUserHandlerCall(msgVar, [peerVar, streamVar])
      msg.defineThunk quote do:
        template `callUserHandler`(`peerVar`: `Peer`,
                                   `streamVar`: `P2PStream`,
                                   `msgVar`: `MsgRecName`): untyped =
          `userHandlerCall`

        proc `thunkName`(`daemonVar`: `DaemonAPI`,
                         `streamVar`: `P2PStream`): Future[void] {.gcsafe.} =
          return handleIncomingStream(`Eth2Node`(`daemonVar`.userData), `streamVar`,
                                      `MsgRecName`, `Format`)
    else:
      newStmtList()

    msg.defineThunk quote do:
      proc `thunkName`(`daemonVar`: `DaemonAPI`,
                       `streamVar`: `P2PStream`) {.async, gcsafe.} =
        ## Uncomment this to enable tracing on all incoming requests
        ## You can include `msgNameLit` in the condition to select
        ## more specific requests:
        # when chronicles.runtimeFilteringEnabled:
        #   setLogLevel(LogLevel.TRACE)
        #   defer: setLogLevel(LogLevel.DEBUG)
        #   trace "incoming " & `msgNameLit` & " stream"

        defer:
          `await` safeClose(`streamVar`)

        let
          `deadlineVar` = sleepAsync RESP_TIMEOUT
          `msgBytesVar` = `await` readMsgBytes(`streamVar`, false, `deadlineVar`)
          `peerVar` = peerFromStream(`daemonVar`, `streamVar`)

        if `msgBytesVar`.len == 0:
          `await` sendErrorResponse(`peerVar`, `streamVar`,
                                    ServerError, readTimeoutErrorMsg)
          return

        var `msgVar`: `msgRecName`
        try:
          trace "about to decode incoming msg"
          `msgVar` = decode(`Format`, `msgBytesVar`, `msgRecName`)
        except SerializationError as `errVar`:
          `await` sendErrorResponse(`peerVar`, `streamVar`, `errVar`,
                                    `msgNameLit`, `msgBytesVar`)
          return
        except Exception as err:
          # TODO. This is temporary code that should be removed after interop.
          # It can be enabled only in certain diagnostic builds where it should
          # re-raise the exception.
          debug "Crash during serialization", inputBytes = toHex(`msgBytesVar`),
                msgName = `msgNameLit`,
                deserializedType = astToStr(`msgRecName`)
          `await` sendErrorResponse(`peerVar`, `streamVar`, ServerError, err.msg)

        try:
          `tracing`
          trace "about to execute user handler"
          `awaitUserHandler`
        except CatchableError as `errVar`:
          try:
            `await` sendErrorResponse(`peerVar`, `streamVar`, ServerError, `errVar`.msg)
          except CatchableError:
            debug "Failed to deliver error response", peer = `peerVar`
      thunkName = newNilLit()

    ##
    ## Implement Senders and Handshake
@ -633,7 +251,7 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
              msgNameLit,
              thunkName,
              getRequestProtoName(msg.procDef),
              newTree(nnkBracketExpr, messagePrinter, msgRecName)))
              newTree(nnkBracketExpr, messagePrinter, MsgRecName)))

  result.implementProtocolInit = proc (p: P2PProtocol): NimNode =
    return newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit)
@ -13,7 +13,7 @@ proc init*(T: type RequestManager, network: Eth2Node): T =
  T(network: network)

type
  FetchAncestorsResponseHandler = proc (b: BeaconBlock) {.gcsafe.}
  FetchAncestorsResponseHandler = proc (b: SignedBeaconBlock) {.gcsafe.}

proc fetchAncestorBlocksFromPeer(
  peer: Peer,
|
||||
if index == -1:
|
||||
# Verify the deposit signature (proof of possession)
|
||||
if skipValidation notin flags and not bls_verify(
|
||||
pubkey, signing_root(deposit.data).data, deposit.data.signature,
|
||||
pubkey, hash_tree_root(deposit.data).data, deposit.data.signature,
|
||||
compute_domain(DOMAIN_DEPOSIT)):
|
||||
return false
|
||||
|
||||
@ -189,7 +189,7 @@ proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex,
|
||||
increase_balance(
|
||||
state, whistleblower_index, whistleblowing_reward - proposer_reward)
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#genesis
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#genesis
|
||||
func initialize_beacon_state_from_eth1*(
|
||||
eth1_block_hash: Eth2Digest,
|
||||
eth1_timestamp: uint64,
|
||||
@ -222,9 +222,7 @@ func initialize_beacon_state_from_eth1*(
|
||||
BeaconBlockHeader(
|
||||
body_root: hash_tree_root(BeaconBlockBody(
|
||||
randao_reveal: BlsValue[Signature](kind: OpaqueBlob)
|
||||
)),
|
||||
# TODO - Pure BLSSig cannot be zero: https://github.com/status-im/nim-beacon-chain/issues/374
|
||||
signature: BlsValue[Signature](kind: OpaqueBlob)
|
||||
))
|
||||
)
|
||||
)
|
||||
|
||||
@ -265,17 +263,16 @@ func is_valid_genesis_state*(state: BeaconState): bool =
|
||||
|
||||
# TODO this is now a non-spec helper function, and it's not really accurate
|
||||
# so only usable/used in research/ and tests/
|
||||
func get_initial_beacon_block*(state: BeaconState): BeaconBlock =
|
||||
BeaconBlock(
|
||||
slot: GENESIS_SLOT,
|
||||
state_root: hash_tree_root(state),
|
||||
body: BeaconBlockBody(
|
||||
func get_initial_beacon_block*(state: BeaconState): SignedBeaconBlock =
|
||||
SignedBeaconBlock(
|
||||
message: BeaconBlock(
|
||||
slot: GENESIS_SLOT,
|
||||
state_root: hash_tree_root(state),
|
||||
body: BeaconBlockBody(
|
||||
# TODO: This shouldn't be necessary if OpaqueBlob is the default
|
||||
randao_reveal: BlsValue[Signature](kind: OpaqueBlob)),
|
||||
# TODO: This shouldn't be necessary if OpaqueBlob is the default
|
||||
signature: BlsValue[Signature](kind: OpaqueBlob))
|
||||
# parent_root, randao_reveal, eth1_data, signature, and body automatically
|
||||
# initialized to default values.
|
||||
randao_reveal: BlsValue[Signature](kind: OpaqueBlob))))
|
||||
# parent_root, randao_reveal, eth1_data, signature, and body automatically
|
||||
# initialized to default values.
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_block_root_at_slot
|
||||
func get_block_root_at_slot*(state: BeaconState,
|
||||
@ -300,7 +297,24 @@ func get_total_balance*(state: BeaconState, validators: auto): Gwei =
|
||||
)
|
||||
|
||||
# XXX: Move to state_transition_epoch.nim?
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#registry-updates
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#is_eligible_for_activation_queue
|
||||
func is_eligible_for_activation_queue(validator: Validator): bool =
|
||||
# Check if ``validator`` is eligible to be placed into the activation queue.
|
||||
validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
|
||||
validator.effective_balance == MAX_EFFECTIVE_BALANCE
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#is_eligible_for_activation
|
||||
func is_eligible_for_activation(state: BeaconState, validator: Validator):
|
||||
bool =
|
||||
# Check if ``validator`` is eligible for activation.
|
||||
|
||||
# Placement in queue is finalized
|
||||
validator.activation_eligibility_epoch <= state.finalized_checkpoint.epoch and
|
||||
# Has not yet been activated
|
||||
validator.activation_epoch == FAR_FUTURE_EPOCH
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#registry-updates
|
||||
proc process_registry_updates*(state: var BeaconState) =
|
||||
## Process activation eligibility and ejections
|
||||
## Try to avoid caching here, since this could easily become undefined
|
||||
@ -308,17 +322,16 @@ proc process_registry_updates*(state: var BeaconState) =
|
||||
# Make visible, e.g.,
|
||||
# https://github.com/status-im/nim-beacon-chain/pull/608
|
||||
# https://github.com/sigp/lighthouse/pull/657
|
||||
let epoch = get_current_epoch(state)
|
||||
let epoch {.used.} = get_current_epoch(state)
|
||||
trace "process_registry_updates validator balances",
|
||||
balances=state.balances,
|
||||
active_validator_indices=get_active_validator_indices(state, epoch),
|
||||
epoch=epoch
|
||||
|
||||
for index, validator in state.validators:
|
||||
if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
|
||||
validator.effective_balance == MAX_EFFECTIVE_BALANCE:
|
||||
if is_eligible_for_activation_queue(validator):
|
||||
state.validators[index].activation_eligibility_epoch =
|
||||
get_current_epoch(state)
|
||||
get_current_epoch(state) + 1
|
||||
|
||||
if is_active_validator(validator, get_current_epoch(state)) and
|
||||
validator.effective_balance <= EJECTION_BALANCE:
|
||||
@ -333,12 +346,9 @@ proc process_registry_updates*(state: var BeaconState) =
|
||||
initiate_validator_exit(state, index.ValidatorIndex)
|
||||
|
||||
## Queue validators eligible for activation and not dequeued for activation
|
||||
## prior to finalized epoch
|
||||
var activation_queue : seq[tuple[a: Epoch, b: int]] = @[]
|
||||
for index, validator in state.validators:
|
||||
if validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and
|
||||
validator.activation_epoch >=
|
||||
compute_activation_exit_epoch(state.finalized_checkpoint.epoch):
|
||||
if is_eligible_for_activation(state, validator):
|
||||
activation_queue.add (
|
||||
state.validators[index].activation_eligibility_epoch, index)
|
||||
|
||||
@ -353,9 +363,8 @@ proc process_registry_updates*(state: var BeaconState) =
|
||||
let
|
||||
(_, index) = epoch_and_index
|
||||
validator = addr state.validators[index]
|
||||
if validator.activation_epoch == FAR_FUTURE_EPOCH:
|
||||
validator.activation_epoch =
|
||||
compute_activation_exit_epoch(get_current_epoch(state))
|
||||
validator.activation_epoch =
|
||||
compute_activation_exit_epoch(get_current_epoch(state))
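
Editorial aside, not part of the commit: the registry-update flow above queues candidates by eligibility epoch and activates only a churn-limited prefix. A minimal, self-contained Nim sketch of that ordering - `Candidate` and `churnLimit` are illustrative stand-ins for the spec's types and `get_validator_churn_limit`:

```nim
# Illustrative stand-ins for the spec's Epoch, validator indices and churn
# limit; only the sorting/dequeue logic mirrors the code above.
import algorithm

type
  Epoch = uint64
  Candidate = tuple[eligibilityEpoch: Epoch, index: int]

var queue: seq[Candidate] = @[(Epoch(12), 7), (Epoch(10), 9), (Epoch(10), 3)]

# Sort by eligibility epoch, breaking ties by validator index.
queue.sort(proc (a, b: Candidate): int =
  result = cmp(a.eligibilityEpoch, b.eligibilityEpoch)
  if result == 0:
    result = cmp(a.index, b.index))

const churnLimit = 2  # stand-in for get_validator_churn_limit(state)
for c in queue[0 ..< min(churnLimit, queue.len)]:
  echo "activating validator ", c.index  # activates 3, then 9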

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#is_valid_indexed_attestation
proc is_valid_indexed_attestation*(

@ -9,40 +9,17 @@
# cryptography in the spec is in flux, with sizes and test vectors still being
# hashed out. This layer helps isolate those changes.

# Useful conversation about BLS signatures (TODO: condense this)
# BLS signatures can be combined such that multiple signatures are aggregated.
# Each time a new signature is added, the corresponding public key must be
# added to the verification key as well - if a key signs twice, it must be added
# twice to the verification key. Aggregated signatures can be combined
# arbitrarily (like addition) as long as public keys are aggregated in the same
# way.
#
# I can probably google this somehow, but bls signatures, anyone knows off the
# top of their head if they have to be combined one by one, or can two group
# signatures be combined? what happens to overlap then?
# Danny Ryan
# @djrtwo
# Dec 21 12:00
# Yeah, you can do any linear combination of signatures. but you have to
# remember the linear combination of pubkeys that constructed
# if you have two instances of a signature from pubkey p, then you need 2*p in
# the group pubkey
# because the attestation bitfield is only 1 bit per pubkey right now,
# attestations do not support this
# it could be extended to support N overlaps up to N times per pubkey if we
# had N bits per validator instead of 1
# We are shying away from this for the time being. If there end up being
# substantial difficulties in network layer aggregation, then adding bits
# to aid in supporting overlaps is one potential solution
# Jacek Sieka
# @arnetheduck
# Dec 21 12:02
# ah nice, you anticipated my followup question there :) so it's not a
# straight-off set union operation
# Danny Ryan
# @djrtwo
# Dec 21 12:02
# depending on the particular network level troubles we run into
# right
# aggregating sigs and pubkeys are both just ec adds
# https://github.com/ethereum/py-evm/blob/d82b10ae361cde6abbac62f171fcea7809c4e3cf/eth/_utils/bls.py#L191-L202
# subtractions work too (i suppose this is obvious). You can linearly combine
# sigs or pubs in any way

# In eth2, we use a single bit to record which keys have signed, thus we cannot
# combine overlapping aggregates - ie if we have an aggregate of signatures of
# A, B and C, and another with B, C and D, we cannot practically combine them
# even if in theory it is possible to allow this in BLS.

import
stew/[endians2, objects, byteutils], hashes, nimcrypto/utils,
@ -151,6 +128,9 @@ func init(T: type VerKey): VerKey =
func init(T: type SigKey): SigKey =
result.point.inf()

func init(T: type Signature): Signature =
result.point.inf()

func combine*[T](values: openarray[BlsValue[T]]): BlsValue[T] =
result = BlsValue[T](kind: Real, blsValue: T.init())

@ -165,6 +145,10 @@ func combine*[T](x: var BlsValue[T], other: BlsValue[T]) =
func bls_aggregate_pubkeys*(keys: openArray[ValidatorPubKey]): ValidatorPubKey =
keys.combine()

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/bls_signature.md#bls_aggregate_signatures
func bls_aggregate_signatures*(keys: openArray[ValidatorSig]): ValidatorSig =
keys.combine()

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/bls_signature.md#bls_verify
func bls_verify*(
pubkey: ValidatorPubKey, msg: openArray[byte], sig: ValidatorSig,
@ -269,7 +253,7 @@ else:
proc newPrivKey*(): ValidatorPrivKey =
SigKey.random()

when networkBackend == rlpxBackend:
when networkBackend == rlpx:
import eth/rlp

when ValidatorPubKey is BlsValue:
@ -314,4 +298,3 @@ proc toGaugeValue*(hash: Eth2Digest): int64 =

template fromSszBytes*(T: type BlsValue, bytes: openarray[byte]): auto =
fromBytes(T, bytes)
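
Editorial aside: the condensed comment above replaces the old chat log. To make the overlap restriction concrete, here is a small, self-contained Nim sketch (not from the commit) using a plain seq[bool] in place of the real aggregation bitfield:

```nim
# Why one bit per key forbids merging overlapping aggregates: a merge of
# intersecting participation sets would need to count a key twice, which a
# single bit cannot express.
type AggregationBits = seq[bool]

proc overlaps(a, b: AggregationBits): bool =
  for i in 0 ..< min(a.len, b.len):
    if a[i] and b[i]: return true
  false

proc merge(a, b: AggregationBits): AggregationBits =
  doAssert not overlaps(a, b), "cannot combine overlapping aggregates"
  result = newSeq[bool](max(a.len, b.len))
  for i in 0 ..< result.len:
    result[i] = (i < a.len and a[i]) or (i < b.len and b[i])

let
  abc = @[true, true, true, false]   # aggregate over A, B, C
  bcd = @[false, true, true, true]   # aggregate over B, C, D
  d   = @[false, false, false, true] # single attestation from D

echo overlaps(abc, bcd)  # true  -> not combinable
echo merge(abc, d)       # fine  -> @[true, true, true, true]
```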
@ -52,7 +52,7 @@ else:
{.fatal: "Preset \"" & const_preset ".nim\" is not supported.".}

const
SPEC_VERSION* = "0.9.2" ## \
SPEC_VERSION* = "0.9.3" ## \
## Spec version we're aiming to be compatible with, right now
## TODO: improve this scheme once we can negotiate versions in protocol

@ -110,8 +110,8 @@ type
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#proposerslashing
ProposerSlashing* = object
proposer_index*: uint64
header_1*: BeaconBlockHeader
header_2*: BeaconBlockHeader
signed_header_1*: SignedBeaconBlockHeader
signed_header_2*: SignedBeaconBlockHeader

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#attesterslashing
AttesterSlashing* = object
@ -157,12 +157,18 @@ type

data*: DepositData

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#depositdata
DepositMessage* = object
pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest
amount*: Gwei

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#depositdata
DepositData* = object
pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest
amount*: uint64
signature*: ValidatorSig
signature*: ValidatorSig # signing over DepositMessage

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#voluntaryexit
VoluntaryExit* = object
@ -170,7 +176,6 @@ type
## Earliest epoch when voluntary exit can be processed

validator_index*: uint64
signature*: ValidatorSig

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beaconblock
BeaconBlock* = object
@ -190,16 +195,12 @@ type

body*: BeaconBlockBody

signature*: ValidatorSig ##\
## Proposer signature

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beaconblockheader
BeaconBlockHeader* = object
slot*: Slot
parent_root*: Eth2Digest
state_root*: Eth2Digest
body_root*: Eth2Digest
signature*: ValidatorSig

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beaconblockbody
BeaconBlockBody* = object
@ -212,7 +213,7 @@ type
attester_slashings*: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
attestations*: List[Attestation, MAX_ATTESTATIONS]
deposits*: List[Deposit, MAX_DEPOSITS]
voluntary_exits*: List[VoluntaryExit, MAX_VOLUNTARY_EXITS]
voluntary_exits*: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beaconstate
BeaconState* = object
@ -320,6 +321,27 @@ type
deposit_count*: uint64
block_hash*: Eth2Digest

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#signedvoluntaryexit
SignedVoluntaryExit* = object
message*: VoluntaryExit
signature*: ValidatorSig

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object
message*: BeaconBlock
signature*: ValidatorSig

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#signedbeaconblockheader
SignedBeaconBlockHeader* = object
message*: BeaconBlockHeader
signature*: ValidatorSig
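
Editorial aside: these Signed* wrappers are the heart of the v0.9.3 change - the signature now sits beside the message rather than inside it, so a message's hash_tree_root no longer has to skip any field. A tiny sketch of the pattern with stand-in types (`fakeRoot` is only a placeholder for hash_tree_root):

```nim
# Stand-in types; `Signed` mirrors the message+signature envelope above.
import hashes

type
  Message = object
    slot: int
  Signed = object
    message: Message
    signature: string  # placeholder for ValidatorSig

# Placeholder root: in the real code this is hash_tree_root(message).
proc fakeRoot(m: Message): Hash = hash(m.slot)

let signed = Signed(message: Message(slot: 42), signature: "sig")
# The identifying root covers the message alone - signing or re-signing never
# changes it, which is what made the old signing_root helper obsolete.
echo fakeRoot(signed.message)
```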

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/validator/0_beacon-chain-validator.md#aggregateandproof
AggregateAndProof* = object
aggregator_index*: uint64
aggregate*: Attestation
selection_proof*: ValidatorSig

# TODO to be replaced with some magic hash caching
HashedBeaconState* = object
data*: BeaconState
@ -332,7 +354,7 @@ type
Table[Epoch, seq[ValidatorIndex]]
committee_count_cache*: Table[Epoch, uint64]

when networkBackend == rlpxBackend:
when networkBackend == rlpx:
import eth/rlp/bitseqs as rlpBitseqs
export read, append

@ -346,6 +368,7 @@ template foreachSpecType*(op: untyped) =
## These are all spec types that will appear in network messages
## and persistent consensus data. This helper template is useful
## for populating RTTI tables that concern them.
op AggregateAndProof
op Attestation
op AttestationData
op AttesterSlashing
@ -361,6 +384,9 @@ template foreachSpecType*(op: untyped) =
op IndexedAttestation
op PendingAttestation
op ProposerSlashing
op SignedBeaconBlock
op SignedBeaconBlockHeader
op SignedVoluntaryExit
op Validator
op VoluntaryExit

@ -439,7 +465,7 @@ template ethTimeUnit(typ: type) {.dirty.} =
proc `%`*(x: typ): JsonNode {.borrow.}

# Serialization
when networkBackend == rlpxBackend:
when networkBackend == rlpx:
proc read*(rlp: var Rlp, T: type typ): typ {.inline.} =
typ(rlp.read(uint64))

@ -547,7 +573,6 @@ func shortLog*(v: BeaconBlock): auto =
attestations_len: v.body.attestations.len(),
deposits_len: v.body.deposits.len(),
voluntary_exits_len: v.body.voluntary_exits.len(),
signature: shortLog(v.signature)
)

func shortLog*(v: AttestationData): auto =

@ -43,7 +43,7 @@ declareGauge beacon_previous_live_validators, "Number of active validators that
declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block
declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#block-header
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#block-header
proc process_block_header*(
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
stateCache: var StateCache): bool =
@ -56,13 +56,13 @@ proc process_block_header*(

# Verify that the parent matches
if skipValidation notin flags and not (blck.parent_root ==
signing_root(state.latest_block_header)):
hash_tree_root(state.latest_block_header)):
# TODO: skip validation is too strong
# can't do "invalid_parent_root" test
notice "Block header: previous block root mismatch",
latest_block_header = state.latest_block_header,
blck = shortLog(blck),
latest_block_header_root = shortLog(signing_root(state.latest_block_header))
latest_block_header_root = shortLog(hash_tree_root(state.latest_block_header))
return false

# Save current block as the new latest block
@ -71,9 +71,6 @@ proc process_block_header*(
parent_root: blck.parent_root,
# state_root: zeroed, overwritten in the next `process_slot` call
body_root: hash_tree_root(blck.body),
# signature is always zeroed
# TODO - Pure BLSSig cannot be zero: https://github.com/status-im/nim-beacon-chain/issues/374
signature: BlsValue[Signature](kind: OpaqueBlob)
)

# Verify proposer is not slashed
@ -87,18 +84,6 @@ proc process_block_header*(
notice "Block header: proposer slashed"
return false

# Verify proposer signature
if skipValidation notin flags and not bls_verify(
proposer.pubkey,
signing_root(blck).data,
blck.signature,
get_domain(state, DOMAIN_BEACON_PROPOSER)):
notice "Block header: invalid block header",
proposer_pubkey = proposer.pubkey,
block_root = shortLog(signing_root(blck)),
block_signature = blck.signature
return false

true

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#randao
@ -153,7 +138,7 @@ func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
(validator.activation_epoch <= epoch) and
(epoch < validator.withdrawable_epoch)

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#proposer-slashings
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#proposer-slashings
proc process_proposer_slashing*(
state: var BeaconState, proposer_slashing: ProposerSlashing,
flags: UpdateFlags, stateCache: var StateCache): bool =
@ -164,13 +149,14 @@ proc process_proposer_slashing*(
let proposer = state.validators[proposer_slashing.proposer_index.int]

# Verify slots match
if not (proposer_slashing.header_1.slot ==
proposer_slashing.header_2.slot):
if not (proposer_slashing.signed_header_1.message.slot ==
proposer_slashing.signed_header_2.message.slot):
notice "Proposer slashing: slot mismatch"
return false

# But the headers are different
if not (proposer_slashing.header_1 != proposer_slashing.header_2):
if not (proposer_slashing.signed_header_1.message !=
proposer_slashing.signed_header_2.message):
notice "Proposer slashing: headers not different"
return false

@ -181,13 +167,15 @@ proc process_proposer_slashing*(

# Signatures are valid
if skipValidation notin flags:
for i, header in [proposer_slashing.header_1, proposer_slashing.header_2]:
for i, signed_header in [proposer_slashing.signed_header_1,
proposer_slashing.signed_header_2]:
if not bls_verify(
proposer.pubkey,
signing_root(header).data,
header.signature,
hash_tree_root(signed_header.message).data,
signed_header.signature,
get_domain(
state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(header.slot))):
state, DOMAIN_BEACON_PROPOSER,
compute_epoch_at_slot(signed_header.message.slot))):
notice "Proposer slashing: invalid signature",
signature_index = i
return false
@ -312,20 +300,22 @@ proc processDeposits(state: var BeaconState, blck: BeaconBlock): bool =

true

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#voluntary-exits
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#voluntary-exits
proc process_voluntary_exit*(
state: var BeaconState,
exit: VoluntaryExit,
signed_voluntary_exit: SignedVoluntaryExit,
flags: UpdateFlags): bool =

let voluntary_exit = signed_voluntary_exit.message

# Not in spec. Check that validator_index is in range
if exit.validator_index.int >= state.validators.len:
if voluntary_exit.validator_index.int >= state.validators.len:
notice "Exit: invalid validator index",
index = exit.validator_index,
index = voluntary_exit.validator_index,
num_validators = state.validators.len
return false

let validator = state.validators[exit.validator_index.int]
let validator = state.validators[voluntary_exit.validator_index.int]

# Verify the validator is active
if not is_active_validator(validator, get_current_epoch(state)):
@ -339,7 +329,7 @@ proc process_voluntary_exit*(

## Exits must specify an epoch when they become valid; they are not valid
## before then
if not (get_current_epoch(state) >= exit.epoch):
if not (get_current_epoch(state) >= voluntary_exit.epoch):
notice "Exit: exit epoch not passed"
return false

@ -351,26 +341,26 @@ proc process_voluntary_exit*(

# Verify signature
if skipValidation notin flags:
let domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, exit.epoch)
let domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
if not bls_verify(
validator.pubkey,
signing_root(exit).data,
exit.signature,
hash_tree_root(voluntary_exit).data,
signed_voluntary_exit.signature,
domain):
notice "Exit: invalid signature"
return false

# Initiate exit
debug "Exit: processing voluntary exit (validator_leaving)",
index = exit.validator_index,
index = voluntary_exit.validator_index,
num_validators = state.validators.len,
epoch = exit.epoch,
epoch = voluntary_exit.epoch,
current_epoch = get_current_epoch(state),
validator_slashed = validator.slashed,
validator_withdrawable_epoch = validator.withdrawable_epoch,
validator_exit_epoch = validator.exit_epoch,
validator_effective_balance = validator.effective_balance
initiate_validator_exit(state, exit.validator_index.ValidatorIndex)
initiate_validator_exit(state, voluntary_exit.validator_index.ValidatorIndex)

true

@ -557,6 +557,8 @@ func maxChunksCount(T: type, maxLen: static int64): int64 {.compileTime.} =
func hash_tree_root*(x: auto): Eth2Digest =
trs "STARTING HASH TREE ROOT FOR TYPE ", name(type(x))
mixin toSszType
when x is SignedBeaconBlock:
doassert false
when x is TypeWithMaxLen:
const maxLen = x.maxLen
type T = type valueOf(x)
@ -593,14 +595,3 @@ iterator hash_tree_roots_prefix*[T](lst: openarray[T], limit: auto):
for i, elem in lst:
merkelizer.addChunk(hash_tree_root(elem).data)
yield mixInLength(merkelizer.getFinalHash(), i + 1)

func lastFieldName(RecordType: type): string {.compileTime.} =
enumAllSerializedFields(RecordType):
result = fieldName

func signingRoot*(obj: object): Eth2Digest =
const lastField = lastFieldName(obj.type)
merkelizeFields:
obj.enumInstanceSerializedFields(fieldName, field):
when fieldName != lastField:
addField2 field

@ -56,7 +56,7 @@ func process_slot*(state: var BeaconState) =

# Cache block root
state.block_roots[state.slot mod SLOTS_PER_HISTORICAL_ROOT] =
signing_root(state.latest_block_header)
hash_tree_root(state.latest_block_header)

func get_epoch_validator_count(state: BeaconState): int64 =
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
@ -183,7 +183,7 @@ func process_slot(state: var HashedBeaconState) =

# Cache block root
state.data.block_roots[state.data.slot mod SLOTS_PER_HISTORICAL_ROOT] =
signing_root(state.data.latest_block_header)
hash_tree_root(state.data.latest_block_header)

# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function
proc process_slots*(state: var HashedBeaconState, slot: Slot) =
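
Editorial aside: both process_slot variants cache the latest header root in `block_roots` via `slot mod SLOTS_PER_HISTORICAL_ROOT`. For readers unfamiliar with the idiom, a self-contained sketch (with a made-up length) of this ring-buffer indexing:

```nim
# A fixed array indexed by `slot mod LENGTH` keeps the most recent LENGTH
# entries in stable positions, with no shifting or allocation.
const SlotsPerHistoricalRoot = 8  # stand-in for SLOTS_PER_HISTORICAL_ROOT

var blockRoots: array[SlotsPerHistoricalRoot, string]

for slot in 0 .. 19:
  blockRoots[slot mod SlotsPerHistoricalRoot] = "root@" & $slot

echo blockRoots  # only slots 12..19 survive, each at position slot mod 8
```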

@ -4,9 +4,11 @@ import
spec/[datatypes, crypto, digest, helpers], eth/rlp,
beacon_node_types, eth2_network, block_pool, ssz

when networkBackend == rlpxBackend:
when networkBackend == rlpx:
import eth/rlp/options as rlpOptions
template libp2pProtocol*(name: string, version: int) {.pragma.}
elif networkBackend == libp2p:
import libp2p/switch

declarePublicGauge libp2p_peers, "Number of libp2p peers"

@ -25,7 +27,7 @@ type
else:
index: uint32

BeaconBlockCallback* = proc(blck: BeaconBlock) {.gcsafe.}
BeaconBlockCallback* = proc(blck: SignedBeaconBlock) {.gcsafe.}
BeaconSyncNetworkState* = ref object
blockPool*: BlockPool
forkVersion*: array[4, byte]
@ -49,7 +51,7 @@ func init*(
v.onBeaconBlock = onBeaconBlock

proc importBlocks(state: BeaconSyncNetworkState,
blocks: openarray[BeaconBlock]) {.gcsafe.} =
blocks: openarray[SignedBeaconBlock]) {.gcsafe.} =
for blk in blocks:
state.onBeaconBlock(blk)
info "Forward sync imported blocks", len = blocks.len
@ -156,12 +158,20 @@ p2pProtocol BeaconSync(version = 1,

proc beaconBlocks(
peer: Peer,
blocks: openarray[BeaconBlock])
blocks: openarray[SignedBeaconBlock])

proc handleInitialStatus(peer: Peer,
state: BeaconSyncNetworkState,
ourStatus: StatusMsg,
theirStatus: StatusMsg) {.async, gcsafe.} =
when networkBackend == libp2p:
# TODO: This doesn't seem like an appropriate place for this call,
# but it's hard to pick a better place at the moment.
# nim-libp2p plans to add a general `onPeerConnected` callback which
# will allow us to implement the subscription earlier.
# The root of the problem is that both sides must call `subscribeToPeer`
# before any GossipSub traffic will flow between them.
await peer.network.switch.subscribeToPeer(peer.info)

if theirStatus.forkVersion != state.forkVersion:
notice "Irrelevant peer",
@ -211,7 +221,7 @@ proc handleInitialStatus(peer: Peer,
break

state.importBlocks(blocks.get)
let lastSlot = blocks.get[^1].slot
let lastSlot = blocks.get[^1].message.slot
if lastSlot <= s:
info "Slot did not advance during sync", peer
break

@ -60,8 +60,6 @@ func toSlot*(c: BeaconClock, t: Time): tuple[afterGenesis: bool, slot: Slot] =
func toBeaconTime*(s: Slot, offset = chronos.seconds(0)): BeaconTime =
BeaconTime(int64(uint64(s) * SECONDS_PER_SLOT) + seconds(offset))

# TODO on Travis ARM64 CIs, this claims to have side effects, but neither Linux
# nor Mac OS x86 CIs exhibit this behavior.
proc now*(c: BeaconClock): BeaconTime =
## Current time, in slots - this may end up being less than GENESIS_SLOT(!)
toBeaconTime(c, getTime())

@ -1,15 +1,15 @@
type
NetworkBackendType* = enum
libp2pBackend
libp2pDaemonBackend
rlpxBackend
libp2p
libp2pDaemon
rlpx

const
NETWORK_TYPE {.strdefine.} = "libp2p_daemon"

networkBackend* = when NETWORK_TYPE == "rlpx": rlpxBackend
elif NETWORK_TYPE == "libp2p": libp2pBackend
elif NETWORK_TYPE == "libp2p_daemon": libp2pDaemonBackend
networkBackend* = when NETWORK_TYPE == "rlpx": rlpx
elif NETWORK_TYPE == "libp2p": libp2p
elif NETWORK_TYPE == "libp2p_daemon": libp2pDaemon
else: {.fatal: "The 'NETWORK_TYPE' should be either 'libp2p', 'libp2p_daemon' or 'rlpx'" .}
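
Editorial aside: because NETWORK_TYPE is a {.strdefine.}, the backend is fixed at compile time via Nim's -d switch. A minimal, self-contained sketch of the mechanism (the echo branches are illustrative only):

```nim
# How a strdefine constant picks up -d values at compile time.
# Compile with, e.g.:  nim c -d:NETWORK_TYPE=libp2p selector.nim
const NETWORK_TYPE {.strdefine.} = "libp2p_daemon"  # default when -d is absent

when NETWORK_TYPE == "libp2p":
  echo "libp2p backend selected"
elif NETWORK_TYPE == "libp2p_daemon":
  echo "libp2p daemon backend selected"
else:
  echo "backend: ", NETWORK_TYPE
```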

const
@ -25,9 +25,6 @@ const
# to join the same testnets.

useInsecureFeatures* = defined(insecure)
# TODO This is temporarily set to true, so it's easier for other teams to
# launch the beacon_node with metrics enabled during the interop lock-in.
# We'll disable it once the lock-in is over.

gitRevision* = staticExec("git rev-parse --short HEAD")

1
ncli/.gitignore
vendored
@ -1,4 +1,3 @@
ncli_pretty
ncli_signing_root
ncli_hash_tree_root
ncli_transition

@ -5,7 +5,7 @@
# Tools

* transition: Perform state transition given a pre-state and a block to apply (both in SSZ format)
* signing_root/hash_tree_root: Print tree root of an SSZ object
* hash_tree_root: Print tree root of an SSZ object
* pretty: Pretty-print SSZ object as JSON

# Building

@ -1,35 +0,0 @@
import
confutils, os, strutils, chronicles, json_serialization,
nimcrypto/utils,
../beacon_chain/spec/[crypto, datatypes, digest],
../beacon_chain/[ssz]

# TODO turn into arguments
cli do(kind: string, file: string):

template printit(t: untyped) {.dirty.} =
let v =
if cmpIgnoreCase(ext, ".ssz") == 0:
SSZ.loadFile(file, t)
elif cmpIgnoreCase(ext, ".json") == 0:
JSON.loadFile(file, t)
else:
echo "Unknown file type: ", ext
quit 1

echo signing_root(v).data.toHex(true)

let ext = splitFile(file).ext

case kind
of "attester_slashing": printit(AttesterSlashing)
of "attestation": printit(Attestation)
of "block": printit(BeaconBlock)
of "block_body": printit(BeaconBlockBody)
of "block_header": printit(BeaconBlockHeader)
of "deposit": printit(Deposit)
of "deposit_data": printit(DepositData)
of "eth1_data": printit(Eth1Data)
of "state": printit(BeaconState)
of "proposer_slashing": printit(ProposerSlashing)
of "voluntary_exit": printit(VoluntaryExit)

2
nim.cfg
@ -35,4 +35,4 @@
@end

-d:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9

--warning[CaseTransition]:off

@ -51,11 +51,11 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
var
attestations = initTable[Slot, seq[Attestation]]()
state = genesisState
latest_block_root = signing_root(genesisBlock)
latest_block_root = hash_tree_root(genesisBlock.message)
timers: array[Timers, RunningStat]
attesters: RunningStat
r: Rand
blck: BeaconBlock
blck: SignedBeaconBlock
cache = get_empty_per_epoch_cache()

proc maybeWrite() =
@ -90,7 +90,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
withTimer(timers[t]):
blck = addBlock(state, latest_block_root, body, flags)
latest_block_root = withTimerRet(timers[tHashBlock]):
signing_root(blck)
hash_tree_root(blck.message)

if attesterRatio > 0.0:
# attesterRatio is the fraction of attesters that actually do their

@ -66,12 +66,17 @@ cli do (testnetName {.argument.}: string):
depositContractOpt = "--deposit-contract=" & readFile(depositContractFile).strip

if system.dirExists(dataDir):
if system.fileExists(dataDir/genesisFile):
let localGenesisContent = readFile(dataDir/genesisFile)
let testnetGenesisContent = readFile(testnetDir/genesisFile)
if localGenesisContent != testnetGenesisContent:
echo "Detected testnet restart. Deleting previous database..."
rmDir dataDir
block resetDataDir:
# We reset the testnet data dir if the existing data dir is
# incomplete (it misses a genesis file) or if it has a genesis
# file from an older testnet:
if system.fileExists(dataDir/genesisFile):
let localGenesisContent = readFile(dataDir/genesisFile)
let testnetGenesisContent = readFile(testnetDir/genesisFile)
if localGenesisContent == testnetGenesisContent:
break
echo "Detected testnet restart. Deleting previous database..."
rmDir dataDir
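
Editorial aside: the rewrite above leans on Nim's labeled block/break - `break resetDataDir` exits the whole reset sequence once the genesis file is known to match. A self-contained sketch of the idiom (file name and proc are hypothetical):

```nim
import os

proc resetIfStale(dataDir, freshGenesisContent: string) =
  block resetDataDir:
    let genesisFile = dataDir / "genesis.ssz"  # hypothetical name
    if fileExists(genesisFile):
      if readFile(genesisFile) == freshGenesisContent:
        break resetDataDir  # up to date: skip the reset entirely
    echo "Detected testnet restart. Deleting previous database..."
    removeDir(dataDir)
```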

cd rootDir
exec &"""nim c {nimFlags} -d:"const_preset={preset}" -o:"{beaconNodeBinary}" beacon_chain/beacon_node.nim"""

@ -1,5 +1,5 @@
CONST_PRESET=minimal
NETWORK_TYPE=libp2p_daemon
NETWORK_TYPE=libp2p
QUICKSTART_VALIDATORS=8
RANDOM_VALIDATORS=120
BOOTSTRAP_PORT=9100

@ -17,7 +17,7 @@ import # Unit test
./test_beaconstate,
./test_block_pool,
./test_helpers,
./test_interop,
#./test_interop, TODO check zcli
./test_ssz,
./test_state_transition,
./test_sync_protocol,

@ -26,7 +26,7 @@ proc mockAttestationData(
doAssert state.slot >= slot

if slot == state.slot:
result.beacon_block_root = mockBlockForNextSlot(state).parent_root
result.beacon_block_root = mockBlockForNextSlot(state).message.parent_root
else:
result.beacon_block_root = get_block_root_at_slot(state, slot)

@ -140,11 +140,12 @@ proc fillAggregateAttestation*(state: BeaconState, attestation: var Attestation)
attestation.aggregation_bits[i] = true

proc add*(state: var BeaconState, attestation: Attestation, slot: Slot) =
var blck = mockBlockForNextSlot(state)
blck.slot = slot
blck.body.attestations.add attestation
var signedBlock = mockBlockForNextSlot(state)
signedBlock.message.slot = slot
signedBlock.message.body.attestations.add attestation
process_slots(state, slot)
signMockBlock(state, blck)
signMockBlock(state, signedBlock)

# TODO: we can skip just VerifyStateRoot
doAssert state_transition(state, blck, flags = {skipValidation})
doAssert state_transition(
state, signedBlock.message, flags = {skipValidation})

@ -19,51 +19,45 @@

proc signMockBlockImpl(
state: BeaconState,
blck: var BeaconBlock,
signedBlock: var SignedBeaconBlock,
proposer_index: ValidatorIndex
) =
doAssert state.slot <= blck.slot
let block_slot = signedBlock.message.slot
doAssert state.slot <= block_slot

let privkey = MockPrivKeys[proposer_index]

blck.body.randao_reveal = bls_sign(
signedBlock.message.body.randao_reveal = bls_sign(
key = privkey,
msg = blck.slot
msg = block_slot
.compute_epoch_at_slot()
.hash_tree_root()
.data,
domain = get_domain(
state,
DOMAIN_RANDAO,
message_epoch = blck.slot.compute_epoch_at_slot(),
message_epoch = block_slot.compute_epoch_at_slot(),
)
)

blck.signature = bls_sign(
signedBlock.signature = bls_sign(
key = privkey,
msg = blck.signing_root().data,
msg = signedBlock.message.hash_tree_root().data,
domain = get_domain(
state,
DOMAIN_BEACON_PROPOSER,
message_epoch = blck.slot.compute_epoch_at_slot(),
message_epoch = block_slot.compute_epoch_at_slot(),
)
)

proc signMockBlock*(
state: BeaconState,
blck: var BeaconBlock,
proposer_index: ValidatorIndex
) =
signMockBlockImpl(state, blck, proposer_index)

proc signMockBlock*(
state: BeaconState,
blck: var BeaconBlock
signedBlock: var SignedBeaconBlock
) =

var emptyCache = get_empty_per_epoch_cache()
let proposer_index =
if blck.slot == state.slot:
if signedBlock.message.slot == state.slot:
get_beacon_proposer_index(state, emptyCache)
else:
# Stub to get proposer index of future slot
@ -71,37 +65,38 @@ proc signMockBlock*(
# i.e. BeaconState should have value semantics
# and not contain ref objects or pointers
var stubState = state
process_slots(stub_state, blck.slot)
process_slots(stub_state, signedBlock.message.slot)
get_beacon_proposer_index(stub_state, emptyCache)

# In tests, just let this throw if appropriate
signMockBlockImpl(state, blck, proposer_index.get)
signMockBlockImpl(state, signedBlock, proposer_index.get)

proc mockBlock*(
proc mockBlock(
state: BeaconState,
slot: Slot,
flags: UpdateFlags = {}): BeaconBlock =
flags: UpdateFlags = {}): SignedBeaconBlock =
## Mock a BeaconBlock for the specific slot
## Add skipValidation if block should not be signed

result.slot = slot
result.body.eth1_data.deposit_count = state.eth1_deposit_index
result.message.slot = slot
result.message.body.eth1_data.deposit_count = state.eth1_deposit_index

var previous_block_header = state.latest_block_header
if previous_block_header.state_root == ZERO_HASH:
previous_block_header.state_root = state.hash_tree_root()
result.parent_root = previous_block_header.signing_root()
result.message.parent_root = previous_block_header.hash_tree_root()

if skipValidation notin flags:
signMockBlock(state, result)

proc mockBlockForNextSlot*(state: BeaconState, flags: UpdateFlags = {}): BeaconBlock =
proc mockBlockForNextSlot*(state: BeaconState, flags: UpdateFlags = {}):
SignedBeaconBlock =
mockBlock(state, state.slot + 1, flags)

proc applyEmptyBlock*(state: var BeaconState) =
## Do a state transition with an empty signed block
## on the current slot
let blck = mockBlock(state, state.slot, flags = {})
let signedBlock = mockBlock(state, state.slot, flags = {})
# TODO: we only need to skip verifyStateRoot validation
# processBlock validation should work
doAssert state_transition(state, blck, {skipValidation})
doAssert state_transition(state, signedBlock.message, {skipValidation})

@ -25,7 +25,7 @@ func signMockDepositData(
# No state --> Genesis
deposit_data.signature = bls_sign(
key = privkey,
msg = deposit_data.signing_root().data,
msg = deposit_data.hash_tree_root().data,
domain = compute_domain(
DOMAIN_DEPOSIT,
default(array[4, byte]) # Genesis is fork_version 0
@ -39,7 +39,7 @@ func signMockDepositData(
) =
deposit_data.signature = bls_sign(
key = privkey,
msg = deposit_data.signing_root().data,
msg = deposit_data.hash_tree_root().data,
domain = get_domain(
state,
DOMAIN_DEPOSIT

@ -1 +1 @@
Subproject commit 0a51654000c7066fa2d89105044367a748ae5db0
Subproject commit a26def415f2969d625e39bfc160c97497dfe37b3

@ -36,7 +36,7 @@ proc readValue*(r: var JsonReader, a: var seq[byte]) {.inline.} =

const
FixturesDir* = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
SszTestsDir* = FixturesDir/"tests-v0.9.2"
SszTestsDir* = FixturesDir/"tests-v0.9.3"

proc parseTest*(path: string, Format: typedesc[Json or SSZ], T: typedesc): T =
try:

@ -67,7 +67,6 @@ template runTest(identifier: untyped) =

suite "Official - Operations - Block header " & preset():
runTest(success_block_header)
runTest(invalid_sig_block_header)
runTest(invalid_slot_block_header)
when false: # skipValidation needs to be split https://github.com/status-im/nim-beacon-chain/issues/407
runTest(invalid_parent_root)

@ -41,11 +41,11 @@ template runTest(identifier: untyped) =

timedTest prefix & astToStr(identifier):
var stateRef, postRef: ref BeaconState
var voluntaryExit: ref VoluntaryExit
var voluntaryExit: ref SignedVoluntaryExit
new voluntaryExit
new stateRef

voluntaryExit[] = parseTest(testDir/"voluntary_exit.ssz", SSZ, VoluntaryExit)
voluntaryExit[] = parseTest(testDir/"voluntary_exit.ssz", SSZ, SignedVoluntaryExit)
stateRef[] = parseTest(testDir/"pre.ssz", SSZ, BeaconState)

if existsFile(testDir/"post.ssz"):
@ -65,7 +65,11 @@ template runTest(identifier: untyped) =

suite "Official - Operations - Voluntary exit " & preset():
runTest(success)
runTest(invalid_signature)

when false:
# TODO not sure how this particularly could falsely succeed
runTest(invalid_signature)

runTest(success_exit_queue)
runTest(validator_exit_in_future)
runTest(validator_invalid_validator_index)

@ -11,7 +11,7 @@ import
# Standard library
os, unittest,
# Beacon chain internals
../../beacon_chain/spec/[datatypes],
../../beacon_chain/spec/[crypto, datatypes],
../../beacon_chain/[ssz, state_transition, extras],
# Test utilities
../testutil,
@ -37,10 +37,10 @@ template runValidTest(testName: string, identifier: untyped, num_blocks: int): u
postRef[] = parseTest(testDir/"post.ssz", SSZ, BeaconState)

for i in 0 ..< num_blocks:
let blck = parseTest(testDir/"blocks_" & $i & ".ssz", SSZ, BeaconBlock)
let blck = parseTest(testDir/"blocks_" & $i & ".ssz", SSZ, SignedBeaconBlock)

# TODO: The EF is using invalid BLS keys so we can't verify them
let success = state_transition(stateRef[], blck, flags = {skipValidation})
let success = state_transition(stateRef[], blck.message, flags = {skipValidation})
doAssert success, "Failure when applying block " & $i

# Checks:
@ -56,13 +56,13 @@ suite "Official - Sanity - Blocks " & preset():
new stateRef
stateRef[] = parseTest(testDir/"pre.ssz", SSZ, BeaconState)

let blck = parseTest(testDir/"blocks_0.ssz", SSZ, BeaconBlock)
let blck = parseTest(testDir/"blocks_0.ssz", SSZ, SignedBeaconBlock)

# Check that a block built for an old slot cannot be used for state transition
expect(AssertionError):
# assert in process_slots. This should not be triggered
# for blocks from block_pool/network
discard state_transition(stateRef[], blck, flags = {skipValidation})
discard state_transition(stateRef[], blck.message, flags = {skipValidation})

runValidTest("Same slot block transition", same_slot_block_transition, 1)
runValidTest("Empty block transition", empty_block_transition, 1)

@ -14,7 +14,7 @@ import
# Third-party
yaml,
# Beacon chain internals
../../beacon_chain/spec/[datatypes, digest],
../../beacon_chain/spec/[crypto, datatypes, digest],
../../beacon_chain/ssz,
# Test utilities
../testutil
@ -26,7 +26,7 @@ import

const
FixturesDir = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
SSZDir = FixturesDir/"tests-v0.9.2"/const_preset/"phase0"/"ssz_static"
SSZDir = FixturesDir/"tests-v0.9.3"/const_preset/"phase0"/"ssz_static"

type
SSZHashTreeRoot = object
@ -39,11 +39,8 @@ type
# Make signing root optional
setDefaultValue(SSZHashTreeRoot, signing_root, "")

# Note this onyl tracks HashTreeRoot and SigningRoot
# Note this only tracks HashTreeRoot
# Checking the values against the yaml file is TODO (require more flexible Yaml parser)
const Unsupported = toHashSet([
"AggregateAndProof", # Type for signature aggregation - not implemented
])

proc checkSSZ(T: typedesc, dir: string, expectedHash: SSZHashTreeRoot) =
# Deserialize into a ref object to not fill Nim stack
@ -52,8 +49,6 @@ proc checkSSZ(T: typedesc, dir: string, expectedHash: SSZHashTreeRoot) =
deserialized[] = SSZ.loadFile(dir/"serialized.ssz", T)

check: expectedHash.root == "0x" & toLowerASCII($deserialized.hashTreeRoot())
if expectedHash.signing_root != "":
check: expectedHash.signing_root == "0x" & toLowerASCII($deserialized[].signingRoot())

# TODO check the value

@ -69,10 +64,6 @@ proc runSSZtests() =
doAssert existsDir(SSZDir), "You need to run the \"download_test_vectors.sh\" script to retrieve the official test vectors."
for pathKind, sszType in walkDir(SSZDir, relative = true):
doAssert pathKind == pcDir
if sszType in Unsupported:
timedTest &" Skipping {sszType:20} ✗✗✗":
discard
continue

timedTest &" Testing {sszType}":
let path = SSZDir/sszType
@ -84,7 +75,7 @@ proc runSSZtests() =
let hash = loadExpectedHashTreeRoot(path)

case sszType:
# of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
of "Attestation": checkSSZ(Attestation, path, hash)
of "AttestationData": checkSSZ(AttestationData, path, hash)
of "AttesterSlashing": checkSSZ(AttesterSlashing, path, hash)
@ -95,12 +86,17 @@ proc runSSZtests() =
of "Checkpoint": checkSSZ(Checkpoint, path, hash)
of "Deposit": checkSSZ(Deposit, path, hash)
of "DepositData": checkSSZ(DepositData, path, hash)
of "DepositMessage": checkSSZ(DepositMessage, path, hash)
of "Eth1Data": checkSSZ(Eth1Data, path, hash)
of "Fork": checkSSZ(Fork, path, hash)
of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
of "IndexedAttestation": checkSSZ(IndexedAttestation, path, hash)
of "PendingAttestation": checkSSZ(PendingAttestation, path, hash)
of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
of "SignedBeaconBlock": checkSSZ(SignedBeaconBlock, path, hash)
of "SignedBeaconBlockHeader":
checkSSZ(SignedBeaconBlockHeader, path, hash)
of "SignedVoluntaryExit": checkSSZ(SignedVoluntaryExit, path, hash)
of "Validator": checkSSZ(Validator, path, hash)
of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash)
else:

@ -31,8 +31,8 @@ suite "Beacon chain DB" & preset():
db = init(BeaconChainDB, newMemoryDB())

let
blck = BeaconBlock()
root = signing_root(blck)
blck = SignedBeaconBlock()
root = hash_tree_root(blck.message)

db.putBlock(blck)

@ -40,9 +40,9 @@
db.containsBlock(root)
db.getBlock(root).get() == blck

db.putStateRoot(root, blck.slot, root)
db.putStateRoot(root, blck.message.slot, root)
check:
db.getStateRoot(root, blck.slot).get() == root
db.getStateRoot(root, blck.message.slot).get() == root

timedTest "sanity check states" & preset():
var
@ -68,12 +68,14 @@
check: x == y

let
a0 = BeaconBlock(slot: GENESIS_SLOT + 0)
a0r = signing_root(a0)
a1 = BeaconBlock(slot: GENESIS_SLOT + 1, parent_root: a0r)
a1r = signing_root(a1)
a2 = BeaconBlock(slot: GENESIS_SLOT + 2, parent_root: a1r)
a2r = signing_root(a2)
a0 = SignedBeaconBlock(message: BeaconBlock(slot: GENESIS_SLOT + 0))
a0r = hash_tree_root(a0.message)
a1 = SignedBeaconBlock(message:
BeaconBlock(slot: GENESIS_SLOT + 1, parent_root: a0r))
a1r = hash_tree_root(a1.message)
a2 = SignedBeaconBlock(message:
BeaconBlock(slot: GENESIS_SLOT + 2, parent_root: a1r))
a2r = hash_tree_root(a2.message)

doAssert toSeq(db.getAncestors(a0r)) == []
doAssert toSeq(db.getAncestors(a2r)) == []

@ -8,7 +8,7 @@
{.used.}

import
options, sequtils, unittest,
options, sequtils, unittest, chronicles,
./testutil, ./testblockutil,
../beacon_chain/spec/[beaconstate, datatypes, digest],
../beacon_chain/[beacon_node_types, block_pool, beacon_chain_db, extras, ssz]
@ -42,7 +42,7 @@ suite "Block pool processing" & preset():
timedTest "Simple block add&get" & preset():
let
b1 = makeBlock(state.data.data, state.blck.root, BeaconBlockBody())
b1Root = signing_root(b1)
b1Root = hash_tree_root(b1.message)

# TODO the return value is ugly here, need to fix and test..
discard pool.add(state, b1Root, b1)
@ -57,9 +57,9 @@ suite "Block pool processing" & preset():
timedTest "Reverse order block add & get" & preset():
let
b1 = addBlock(state.data.data, state.blck.root, BeaconBlockBody(), {})
b1Root = signing_root(b1)
b1Root = hash_tree_root(b1.message)
b2 = addBlock(state.data.data, b1Root, BeaconBlockBody(), {})
b2Root = signing_root(b2)
b2Root = hash_tree_root(b2.message)

discard pool.add(state, b2Root, b2)

@ -81,11 +81,16 @@ suite "Block pool processing" & preset():

b1r.get().refs.children[0] == b2r.get().refs
b2r.get().refs.parent == b1r.get().refs
toSeq(pool.blockRootsForSlot(b1.slot)) == @[b1Root]
toSeq(pool.blockRootsForSlot(b2.slot)) == @[b2Root]
toSeq(pool.blockRootsForSlot(b1.message.slot)) == @[b1Root]
toSeq(pool.blockRootsForSlot(b2.message.slot)) == @[b2Root]

db.putHeadBlock(b2Root)

# The heads structure should have been updated to contain only the new
# b2 head
check:
pool.heads.mapIt(it.blck) == @[b2r.get().refs]

# check that init also reloads block graph
var
pool2 = BlockPool.init(db)

@ -23,7 +23,7 @@ asyncTest "connect two nodes":

echo "Node 1 persistent address: ", n1PersistentAddress

when networkBackend != rlpxBackend:
when networkBackend != rlpx:
var n1ActualAddress = await n1.daemon.identity()
echo "Node 1 actual address:", n1ActualAddress

@ -24,7 +24,7 @@ suite "Block processing" & preset():
Eth2Digest(), 0,
makeInitialDeposits(), {})
genesisBlock = get_initial_beacon_block(genesisState)
genesisRoot = signing_root(genesisBlock)
genesisRoot = hash_tree_root(genesisBlock.message)

timedTest "Passes from genesis state, no block" & preset():
var
@ -37,10 +37,10 @@ suite "Block processing" & preset():
timedTest "Passes from genesis state, empty block" & preset():
var
state = genesisState
previous_block_root = signing_root(genesisBlock)
previous_block_root = hash_tree_root(genesisBlock.message)
new_block = makeBlock(state, previous_block_root, BeaconBlockBody())

let block_ok = state_transition(state, new_block, {})
let block_ok = state_transition(state, new_block.message, {})

check:
block_ok
@ -64,12 +64,12 @@ suite "Block processing" & preset():
for i in 1..SLOTS_PER_EPOCH.int:
var new_block = makeBlock(state, previous_block_root, BeaconBlockBody())

let block_ok = state_transition(state, new_block, {})
let block_ok = state_transition(state, new_block.message, {})

check:
block_ok

previous_block_root = signing_root(new_block)
previous_block_root = hash_tree_root(new_block.message)

check:
state.slot == genesisState.slot + SLOTS_PER_EPOCH
@ -98,7 +98,7 @@ suite "Block processing" & preset():
new_block = makeBlock(state, previous_block_root, BeaconBlockBody(
attestations: @[attestation]
))
discard state_transition(state, new_block, {})
discard state_transition(state, new_block.message, {})

check:
# TODO epoch attestations can get multiplied now; clean up paths to

@ -36,9 +36,9 @@ suite "Zero signature sanity checks":

# check(zeroSIg == deserZeroSig)

timedTest "SSZ serialization roundtrip of BeaconBlockHeader":
timedTest "SSZ serialization roundtrip of SignedBeaconBlockHeader":

let defaultBlockHeader = BeaconBlockHeader(
let defaultBlockHeader = SignedBeaconBlockHeader(
signature: BlsValue[Signature](kind: OpaqueBlob)
)

@ -50,6 +50,7 @@ suite "Zero signature sanity checks":
allZeros

let sszDefaultBlockHeader = SSZ.encode(defaultBlockHeader)
let deserBlockHeader = SSZ.decode(sszDefaultBlockHeader, BeaconBlockHeader)
let deserBlockHeader =
SSZ.decode(sszDefaultBlockHeader, SignedBeaconBlockHeader)

check(defaultBlockHeader == deserBlockHeader)

@ -64,7 +64,7 @@ func makeDeposit(i: int, flags: UpdateFlags): Deposit =

if skipValidation notin flags:
result.data.signature =
bls_sign(privkey, signing_root(result.data).data,
bls_sign(privkey, hash_tree_root(result.data).data,
domain)

func makeInitialDeposits*(
@ -74,7 +74,7 @@ func makeInitialDeposits*(

proc addBlock*(
state: var BeaconState, previous_block_root: Eth2Digest,
body: BeaconBlockBody, flags: UpdateFlags = {}): BeaconBlock =
body: BeaconBlockBody, flags: UpdateFlags = {}): SignedBeaconBlock =
# Create and add a block to state - state will advance by one slot!
# This is the equivalent of running
# updateState(state, prev_block, makeBlock(...), {skipValidation})
@ -100,44 +100,46 @@ proc addBlock*(
# In order to reuse the state transition function, we first create a dummy
# block that has some fields set, and use that to generate the state as it
# would look with the new block applied.
new_block = BeaconBlock(
slot: state.slot + 1,
parent_root: previous_block_root,
state_root: Eth2Digest(), # we need the new state first
body: new_body,
signature: ValidatorSig(), # we need the rest of the block first!
new_block = SignedBeaconBlock(
message: BeaconBlock(
slot: state.slot + 1,
parent_root: previous_block_root,
state_root: Eth2Digest(), # we need the new state first
body: new_body
)
)

let block_ok = state_transition(state, new_block, {skipValidation})
let block_ok = state_transition(state, new_block.message, {skipValidation})
doAssert block_ok

# Ok, we have the new state as it would look with the block applied - now we
# can set the state root in order to be able to create a valid signature
new_block.state_root = hash_tree_root(state)
new_block.message.state_root = hash_tree_root(state)

doAssert privKey.pubKey() == proposer.pubkey,
"signature key should be derived from private key! - wrong privkey?"

if skipValidation notin flags:
let block_root = signing_root(new_block)
let block_root = hash_tree_root(new_block.message)
# We have a signature - put it in the block and we should be done!
new_block.signature =
bls_sign(privKey, block_root.data,
get_domain(state, DOMAIN_BEACON_PROPOSER,
compute_epoch_at_slot(new_block.slot)))
compute_epoch_at_slot(new_block.message.slot)))

doAssert bls_verify(
proposer.pubkey,
block_root.data, new_block.signature,
get_domain(
state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(new_block.slot))),
state, DOMAIN_BEACON_PROPOSER,
compute_epoch_at_slot(new_block.message.slot))),
"we just signed this message - it should pass verification!"

new_block
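
Editorial aside: addBlock above has a chicken-and-egg shape - the post-state root can only be computed after the block is applied, and the signature can only be made after that root is set. A self-contained sketch of the order of operations with stand-in types (`applyToState` plays state_transition, `rootOf` plays hash_tree_root):

```nim
type
  Block = object
    slot: int
    stateRoot: string
  SignedBlock = object
    message: Block
    signature: string

proc applyToState(state: var seq[string], b: Block) =
  state.add "block@" & $b.slot  # stand-in for the real state transition

proc rootOf(state: seq[string]): string = "root" & $state.len

var state: seq[string] = @[]
var blck = SignedBlock(message: Block(slot: 1, stateRoot: ""))
applyToState(state, blck.message)        # 1. apply with a dummy state root
blck.message.stateRoot = rootOf(state)   # 2. patch in the real post-state root
blck.signature = "sign(" & blck.message.stateRoot & ")"  # 3. sign the final message
echo blck
```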

proc makeBlock*(
state: BeaconState, previous_block_root: Eth2Digest,
body: BeaconBlockBody): BeaconBlock =
body: BeaconBlockBody): SignedBeaconBlock =
# Create a block for `state.slot + 1` - like a block proposer would do!
# It's a bit awkward - in order to produce a block for N+1, we need to
# calculate what the state will look like after that block has been applied,

@ -71,7 +71,7 @@ template timedTest*(name, body) =
# TODO not thread-safe as-is
testTimes.add (f, name)

proc makeTestDB*(tailState: BeaconState, tailBlock: BeaconBlock): BeaconChainDB =
proc makeTestDB*(tailState: BeaconState, tailBlock: SignedBeaconBlock): BeaconChainDB =
result = init(BeaconChainDB, newMemoryDB())
BlockPool.preInit(result, tailState, tailBlock)

2
vendor/nim-bearssl
vendored
@ -1 +1 @@
Subproject commit 993372dd78fa935e051e2d1f3c874d068d7171e6
Subproject commit 02c575c84489c88b09ff18062bd306de6dd83356

2
vendor/nim-chronicles
vendored
@ -1 +1 @@
Subproject commit a2ea339569878720571bbadabdde72b55c411d78
Subproject commit f1acc470eb01a4d9086e752d103a73e49a372eaf

2
vendor/nim-eth
vendored
@ -1 +1 @@
Subproject commit 4976bd9fb95c36df3688867a4e2fe7bbfed8f966
Subproject commit 655fc43751f203acdc525bab688115043f504b87

2
vendor/nim-libp2p
vendored
@ -1 +1 @@
Subproject commit f9eed172d4a61f142e596f891f371ecf7c21b415
Subproject commit 77e866d29a58ad6cfefaf9c8a8ee7159a43bcfe5

2
vendor/nim-nat-traversal
vendored
@ -1 +1 @@
Subproject commit e3fd23e90f661f51f028f78202eb4de1e6128ea9
Subproject commit bfc48eda54a35b7c19cbb2994470655ad7a3bdd1

2
vendor/nim-stew
vendored
@ -1 +1 @@
Subproject commit 1c4293b3e754b5ea68a188b60b192801162cd44e
Subproject commit e9d75c05f62a7a9628b28b822b5190a6682e2a7e
|