import
  std_shims/[os_shims, objects], net, sequtils, options, tables,
  chronos, chronicles, confutils, eth/[p2p, keys],
  spec/[datatypes, digest, crypto, beaconstate, helpers, validator], conf, time,
  state_transition, fork_choice, ssz, beacon_chain_db, validator_pool, extras,
  attestation_pool,
  mainchain_monitor, sync_protocol, gossipsub_protocol, trusted_state_snapshots,
  eth/trie/db, eth/trie/backends/rocksdb_backend

type
  BeaconNode* = ref object
    beaconState*: BeaconState
    network*: EthereumNode
    db*: BeaconChainDB
    config*: BeaconNodeConf
    keys*: KeyPair
    attachedValidators: ValidatorPool
    attestationPool: AttestationPool
    mainchainMonitor: MainchainMonitor
    headBlock: BeaconBlock
    headBlockRoot: Eth2Digest
    blocksChildren: Table[Eth2Digest, seq[Eth2Digest]]

const
  version = "v0.1" # TODO: read this from the nimble file
  clientId = "nimbus beacon node " & version

  topicBeaconBlocks = "ethereum/2.1/beacon_chain/blocks"
  topicAttestations = "ethereum/2.1/beacon_chain/attestations"

  stateStoragePeriod = SLOTS_PER_EPOCH.uint64 * 10 # Save states once per this number of slots. TODO: Find a good number.

func shortHash(x: auto): string =
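  ## First 8 characters of the value's string form - used to keep log output
  ## compact.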
  ($x)[0..7]

func shortValidatorKey(node: BeaconNode, validatorIdx: int): string =
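  ## Short prefix of the given validator's public key, for logging.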
  ($node.beaconState.validator_registry[validatorIdx].pubkey)[0..7]

proc ensureNetworkKeys*(dataDir: string): KeyPair =
  # TODO:
  # 1. Check if keys already exist in the data dir
  # 2. Generate new ones and save them in the directory
  #    if necessary
  return newKeyPair()

proc updateHeadBlock(node: BeaconNode, blck: BeaconBlock)
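# (Forward declaration: the implementation appears further down, after the
# state transition helpers it uses.)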

proc init*(T: type BeaconNode, conf: BeaconNodeConf): T =
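  ## Sets up a beacon node: opens the database, loads or initializes the
  ## beacon state and head block, and creates the networking stack.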
  new result
  result.config = conf

  result.attachedValidators = ValidatorPool.init
  init result.attestationPool, 42 # TODO compile failure without the dummy int??
  init result.mainchainMonitor, "", Port(0) # TODO: specify geth address and port

  let trieDB = trieDB newChainDb(string conf.dataDir)
  result.db = BeaconChainDB.init(trieDB)

  # TODO does it really make sense to load from DB if a state snapshot has been
  #      specified on command line? potentially, this should be the other way
  #      around...
  if (let head = result.db.getHead(); head.isSome()):
    info "Loading head from database",
      blockSlot = humaneSlotNum(head.get().slot)
    updateHeadBlock(result, head.get())
  else:
    result.beaconState = result.config.stateSnapshot.get()
    result.headBlock = get_initial_beacon_block(result.beaconState)
    result.headBlockRoot = hash_tree_root_final(result.headBlock)

    info "Loaded state from snapshot",
      stateSlot = humaneSlotNum(result.beaconState.slot)
    result.db.putState(result.beaconState)

    # The genesis block is special in that we have to store it at hash 0 - in
    # the genesis state, this block has not been applied..
    result.db.putBlock(result.headBlock)

  result.keys = ensureNetworkKeys(string conf.dataDir)

  var address: Address
  address.ip = parseIpAddress("127.0.0.1")
  address.tcpPort = Port(conf.tcpPort)
  address.udpPort = Port(conf.udpPort)

  result.network = newEthereumNode(result.keys, address, 0, nil, clientId, minPeers = 1)

  writeFile(string(conf.dataDir) / "beacon_node.address",
            $result.network.listeningAddress)

proc connectToNetwork(node: BeaconNode) {.async.} =
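  ## Connects to any bootstrap nodes given on the command line or in the
  ## bootstrap file; otherwise just starts listening for inbound peers.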
  var bootstrapNodes = newSeq[ENode]()

  for node in node.config.bootstrapNodes:
    bootstrapNodes.add initENode(node)

  let bootstrapFile = string node.config.bootstrapNodesFile
  if bootstrapFile.len > 0:
    for ln in lines(bootstrapFile):
      bootstrapNodes.add initENode(string ln)

  if bootstrapNodes.len > 0:
    info "Connecting to bootstrap nodes", bootstrapNodes
    await node.network.connectToNetwork(bootstrapNodes)
  else:
    info "Waiting for connections"
    node.network.startListening()

proc sync*(node: BeaconNode): Future[bool] {.async.} =
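  ## Brings the node up to date: fetches a trusted state snapshot if we are
  ## too far behind, otherwise waits for genesis time (full catch-up from
  ## peers is still a TODO below).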
  if node.beaconState.slotDistanceFromNow() > WEAK_SUBJECTVITY_PERIOD.int64:
    node.beaconState = await obtainTrustedStateSnapshot(node.db)
  else:
    var targetSlot = node.beaconState.getSlotFromTime()

    let t = now()
    if t < node.beaconState.genesisTime * 1000:
      await sleepAsync int(node.beaconState.genesisTime * 1000 - t)

    # TODO: change this to a full sync / block download
    info "Syncing state from remote peers",
      finalized_epoch = humaneEpochNum(node.beaconState.finalized_epoch),
      target_slot_epoch = humaneEpochNum(targetSlot.slot_to_epoch)

    # TODO: sync is called at the beginning of the program, but doing this kind
    #       of catching up here is wrong - if we fall behind on processing
    #       for whatever reason, we want to be safe against the damage that
    #       might cause regardless if we just started or have been running for
    #       long. A classic example where this might happen is when the
    #       computer goes to sleep - when waking up, we'll be in the middle of
    #       processing, but behind everyone else.
    # while node.beaconState.finalized_epoch < targetSlot.slot_to_epoch:
    #   var (peer, changeLog) = await node.network.getValidatorChangeLog(
    #     node.beaconState.validator_registry_delta_chain_tip)

    #   if peer == nil:
    #     error "Failed to sync with any peer"
    #     return false

    #   if applyValidatorChangeLog(changeLog, node.beaconState):
    #     node.db.persistState(node.beaconState)
    #     node.db.persistBlock(changeLog.signedBlock)
    #   else:
    #     warn "Ignoring invalid validator change log", sentFrom = peer

  return true

template findIt(s: openarray, predicate: untyped): int =
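  ## Returns the index of the first element for which `predicate` (with `it`
  ## injected) holds, or -1 if there is no such element.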
  var res = -1
  for i, it {.inject.} in s:
    if predicate:
      res = i
      break
  res

proc addLocalValidators*(node: BeaconNode) =
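  ## Attaches the validators given on the command line, skipping any whose
  ## public key is not present in the validator registry.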
  for validator in node.config.validators:
    let
      privKey = validator.privKey
      pubKey = privKey.pubKey()

    let idx = node.beaconState.validator_registry.findIt(it.pubKey == pubKey)
    if idx == -1:
      warn "Validator not in registry", pubKey
    else:
      debug "Attaching validator", validator = shortValidatorKey(node, idx),
                                   idx, pubKey
      node.attachedValidators.addLocalValidator(idx, pubKey, privKey)

  info "Local validators attached ", count = node.attachedValidators.count

proc getAttachedValidator(node: BeaconNode, idx: int): AttachedValidator =
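  ## Looks up the locally attached validator for the given registry index;
  ## callers check the result against nil for validators we don't control.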
  let validatorKey = node.beaconState.validator_registry[idx].pubkey
  return node.attachedValidators.getValidator(validatorKey)

proc makeAttestation(node: BeaconNode,
                     validator: AttachedValidator,
                     slot: Slot,
                     shard: uint64,
                     committeeLen: int,
                     indexInCommittee: int) {.async.} =
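  ## Builds, signs and broadcasts an attestation for the given slot and shard
  ## on behalf of one attached validator.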
  doAssert node != nil
  doAssert validator != nil

  var state = node.beaconState

  if state.slot < slot:
    info "Filling slot gap for attestation",
      slot = humaneSlotNum(slot),
      stateSlot = humaneSlotNum(state.slot)

    for s in state.slot ..< slot:
      let ok = updateState(
        state, node.headBlockRoot, none[BeaconBlock](), {skipValidation})
      doAssert ok

  let
    justifiedBlockRoot =
      get_block_root(state, get_epoch_start_slot(state.justified_epoch))

    attestationData = AttestationData(
      slot: slot,
      shard: shard,
      beacon_block_root: node.headBlockRoot,
      epoch_boundary_root: Eth2Digest(), # TODO
      shard_block_root: Eth2Digest(), # TODO
      latest_crosslink: Crosslink(epoch: state.latest_crosslinks[shard].epoch),
      justified_epoch: state.justified_epoch,
      justified_block_root: justifiedBlockRoot)

    validatorSignature = await validator.signAttestation(attestationData)

  var participationBitfield = repeat(0'u8, ceil_div8(committeeLen))
  bitSet(participationBitfield, indexInCommittee)

  var attestation = Attestation(
    data: attestationData,
    aggregate_signature: validatorSignature,
    aggregation_bitfield: participationBitfield,
    # Stub in phase0
    custody_bitfield: newSeq[byte](participationBitfield.len)
  )

  await node.network.broadcast(topicAttestations, attestation)

  info "Attestation sent",
    slot = humaneSlotNum(attestationData.slot),
    shard = attestationData.shard,
    validator = shortValidatorKey(node, validator.idx),
    signature = shortHash(validatorSignature),
    beaconBlockRoot = shortHash(attestationData.beacon_block_root)

proc proposeBlock(node: BeaconNode,
                  validator: AttachedValidator,
                  slot: Slot) {.async.} =
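  ## Assembles a block for the given slot on top of the current head, signs it
  ## with the attached validator's key and broadcasts it.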
  doAssert node != nil
  doAssert validator != nil
  doAssert validator.idx < node.beaconState.validator_registry.len

  var state = node.beaconState

  if state.slot + 1 < slot:
    info "Filling slot gap for block proposal",
      slot = humaneSlotNum(slot),
      stateSlot = humaneSlotNum(state.slot)

    for s in state.slot + 1 ..< slot:
      let ok = updateState(
        state, node.headBlockRoot, none[BeaconBlock](), {skipValidation})
      doAssert ok

  var blockBody = BeaconBlockBody(
    attestations: node.attestationPool.getAttestationsForBlock(state, slot))

  var newBlock = BeaconBlock(
    slot: slot,
    parent_root: node.headBlockRoot,
    randao_reveal: validator.genRandaoReveal(state, state.slot),
    eth1_data: node.mainchainMonitor.getBeaconBlockRef(),
    signature: ValidatorSig(), # we need the rest of the block first!
    body: blockBody)

  let ok =
    updateState(state, node.headBlockRoot, some(newBlock), {skipValidation})
  doAssert ok # TODO: err, could this fail somehow?

  newBlock.state_root = Eth2Digest(data: hash_tree_root(state))

  var signedData = ProposalSignedData(
    slot: slot,
    shard: BEACON_CHAIN_SHARD_NUMBER,
    blockRoot: hash_tree_root_final(newBlock))

  newBlock.signature = await validator.signBlockProposal(state.fork, signedData)

  await node.network.broadcast(topicBeaconBlocks, newBlock)

  info "Block proposed",
    slot = humaneSlotNum(slot),
    stateRoot = shortHash(newBlock.state_root),
    parentRoot = shortHash(newBlock.parent_root),
    validator = shortValidatorKey(node, validator.idx),
    idx = validator.idx

proc scheduleBlockProposal(node: BeaconNode,
                           slot: Slot,
                           validator: AttachedValidator) =
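  ## Schedules `proposeBlock` for `validator` at the wall-clock start of `slot`.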
  # TODO:
  # This function exists only to hide a bug with Nim's closures.
  # If you inline it in `scheduleEpochActions`, you'll see the
  # internal `doAssert` starting to fail.
  doAssert validator != nil

  let
    at = node.beaconState.slotStart(slot)
    now = fastEpochTime()

  if now > at:
    warn "Falling behind on block proposals", at, now, slot

  info "Scheduling block proposal",
    validator = shortValidatorKey(node, validator.idx),
    idx = validator.idx,
    slot = humaneSlotNum(slot),
    fromNow = (at - now) div 1000

  addTimer(at) do (x: pointer) {.gcsafe.}:
    # TODO timers are generally not accurate / guaranteed to fire at the right
    #      time - need to guard here against early / late firings
    doAssert validator != nil
    asyncCheck proposeBlock(node, validator, slot)

proc scheduleAttestation(node: BeaconNode,
                         validator: AttachedValidator,
                         slot: Slot,
                         shard: uint64,
                         committeeLen: int,
                         indexInCommittee: int) =
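  ## Schedules `makeAttestation` for `validator` at the wall-clock start of
  ## `slot`.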
  # TODO:
  # This function exists only to hide a bug with Nim's closures.
  # If you inline it in `scheduleEpochActions`, you'll see the
  # internal `doAssert` starting to fail.
  doAssert validator != nil

  let
    at = node.beaconState.slotStart(slot)
    now = fastEpochTime()

  if now > at:
    warn "Falling behind on attestations", at, now, slot

  debug "Scheduling attestation",
    validator = shortValidatorKey(node, validator.idx),
    fromNow = (at - now) div 1000,
    slot = humaneSlotNum(slot),
    shard

  addTimer(at) do (p: pointer) {.gcsafe.}:
    doAssert validator != nil
    asyncCheck makeAttestation(node, validator, slot,
                               shard, committeeLen, indexInCommittee)

proc scheduleEpochActions(node: BeaconNode, epoch: Epoch) =
  ## This schedules the required block proposals and
  ## attestations from our attached validators.
  doAssert node != nil
  doAssert epoch >= GENESIS_EPOCH,
    "Epoch: " & $epoch & ", humane epoch: " & $humaneEpochNum(epoch)

  debug "Scheduling epoch actions", epoch = humaneEpochNum(epoch)

  # TODO: this copy of the state shouldn't be necessary, but please
  #       see the comments in `get_beacon_proposer_index`
  var nextState = node.beaconState

  let start = if epoch == GENESIS_EPOCH: 1.uint64 else: 0.uint64

  for i in start ..< SLOTS_PER_EPOCH:
    let slot = epoch * SLOTS_PER_EPOCH + i
    nextState.slot = slot # ugly trick, see get_beacon_proposer_index

    block: # Schedule block proposals
      let proposerIdx = get_beacon_proposer_index(nextState, slot)
      let validator = node.getAttachedValidator(proposerIdx)

      if validator != nil:
        # TODO:
        # Warm-up the proposer earlier to try to obtain previous
        # missing blocks if necessary
        scheduleBlockProposal(node, slot, validator)

    block: # Schedule attestations
      for crosslink_committee in get_crosslink_committees_at_slot(
          nextState, slot):
        for i, validatorIdx in crosslink_committee.committee:
          let validator = node.getAttachedValidator(validatorIdx)
          if validator != nil:
            scheduleAttestation(
              node, validator, slot, crosslink_committee.shard,
              crosslink_committee.committee.len, i)

  let
    nextEpoch = epoch + 1
    at = node.beaconState.slotStart(nextEpoch.get_epoch_start_slot())

  info "Scheduling next epoch update",
    fromNow = (at - fastEpochTime()) div 1000,
    epoch = humaneEpochNum(nextEpoch)

  addTimer(at) do (p: pointer):
    node.scheduleEpochActions(nextEpoch)

proc stateNeedsSaving(s: BeaconState): bool =
  # TODO: Come up with a better predicate logic
  s.slot mod stateStoragePeriod == 0

proc onAttestation(node: BeaconNode, attestation: Attestation) =
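  ## Handles an attestation received over gossip: logs it, adds it to the
  ## attestation pool and notes whether the attested block is known.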
  let participants = get_attestation_participants(
    node.beaconState, attestation.data, attestation.aggregation_bitfield).
      mapIt(shortValidatorKey(node, it))

  info "Attestation received",
    slot = humaneSlotNum(attestation.data.slot),
    shard = attestation.data.shard,
    signature = shortHash(attestation.aggregate_signature),
    participants,
    beaconBlockRoot = shortHash(attestation.data.beacon_block_root)

  node.attestationPool.add(attestation, node.beaconState)

  if not node.db.containsBlock(attestation.data.beacon_block_root):
    notice "Attestation block root missing",
      beaconBlockRoot = shortHash(attestation.data.beacon_block_root)
    # TODO download...

proc skipSlots(state: var BeaconState, parentRoot: Eth2Digest, nextSlot: Slot) =
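  ## Advances `state` with empty-slot transitions until it is one slot short
  ## of `nextSlot`, so a block for `nextSlot` can be applied on top.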
  if state.slot + 1 < nextSlot:
    info "Advancing state past slot gap",
      targetSlot = humaneSlotNum(nextSlot),
      stateSlot = humaneSlotNum(state.slot)

    for slot in state.slot + 1 ..< nextSlot:
      let ok = updateState(state, parentRoot, none[BeaconBlock](), {})
      doAssert ok, "Empty block state update should never fail!"

proc skipAndUpdateState(
    state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags): bool =
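  ## Fast-forwards `state` to the block's slot and then applies the block.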
  skipSlots(state, blck.parent_root, blck.slot)
  updateState(state, blck.parent_root, some(blck), flags)

proc updateHeadBlock(node: BeaconNode, blck: BeaconBlock) =
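  ## Makes `blck` the new head block. When it does not extend the current head
  ## directly, rewinds to a stored ancestor state and replays blocks from there.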
  # To update the head block, we need to apply it to the state. When things
  # progress normally, the block we receive will be a direct child of the
  # last block we applied to the state:

  if blck.parent_root == node.headBlockRoot:
    let ok = skipAndUpdateState(node.beaconState, blck, {})
    doAssert ok, "Nobody is ever going to send a faulty block!"

    node.headBlock = blck
    node.headBlockRoot = hash_tree_root_final(blck)
    node.db.putHead(node.headBlockRoot)

    info "Updated head",
      stateRoot = shortHash(blck.state_root),
      headBlockRoot = shortHash(node.headBlockRoot),
      stateSlot = humaneSlotNum(node.beaconState.slot)

    return

  # It appears that the parent root of the proposed new block is different from
  # what we expected. We will have to rewind the state to a point along the
  # chain of ancestors of the new block. We will do this by loading each
  # successive parent block and checking if we can find the corresponding state
  # in the database.
  let
    ancestors = node.db.getAncestors(blck) do (bb: BeaconBlock) -> bool:
      node.db.containsState(bb.state_root)
    ancestor = ancestors[^1]

  # Several things can happen, but the most common one should be that we found
  # a beacon state
  if (let state = node.db.getState(ancestor.state_root); state.isSome()):
    # Got it!
    notice "Replaying state transitions",
      stateSlot = humaneSlotNum(node.beaconState.slot),
      prevStateSlot = humaneSlotNum(state.get().slot)
    node.beaconState = state.get()

  elif ancestor.slot == 0:
    # We've arrived at the genesis block and still haven't found what we're
    # looking for. This is very bad - are we receiving blocks from a different
    # chain? What's going on?
    # TODO crashing like this is the wrong thing to do, obviously, but
    #      we'll do it anyway just to see if it ever happens - if it does,
    #      it's likely a bug :)
    error "Couldn't find ancestor state",
      blockSlot = humaneSlotNum(blck.slot),
      blockRoot = shortHash(hash_tree_root_final(blck))
    doAssert false, "Oh noes, we passed big bang!"
  else:
    # We don't have the parent block. This is a bit strange, but may happen
    # if things are happening seriously out of order or if we're back after
    # a net split or restart, for example. Once the missing block arrives,
    # we should retry setting the head block..
    # TODO implement block sync here
    # TODO instead of doing block sync here, make sure we are already synced
    #      elsewhere, so as to simplify the logic of finding the block
    #      here..
    error "Parent missing! Too bad, because sync is also missing :/",
      parentRoot = shortHash(ancestor.parent_root),
      blockSlot = humaneSlotNum(ancestor.slot)
    doAssert false, "So long"

  # If we come this far, we found the state root. The last block on the stack
  # is the one that produced this particular state, so we can pop it
  # TODO it might be possible to use the latest block hashes from the state to
  #      do this more efficiently.. whatever!

  # Time to replay all the blocks between then and now. We skip the last one
  # because it's the one that we found the state with, and it has already been
  # applied.
  for i in countdown(ancestors.len - 2, 0):
    let last = ancestors[i]

    skipSlots(node.beaconState, last.parent_root, last.slot)

    # TODO technically, we should be storing states here, because we're now
    #      going down a different fork
    let ok = updateState(
      node.beaconState, last.parent_root, some(last),
      if ancestors.len == 0: {} else: {skipValidation})

    doAssert(ok)

  node.headBlock = blck
  node.headBlockRoot = hash_tree_root_final(blck)
  node.db.putHead(node.headBlockRoot)

  info "Updated head",
    stateRoot = shortHash(blck.state_root),
    headBlockRoot = shortHash(node.headBlockRoot),
    stateSlot = humaneSlotNum(node.beaconState.slot)

proc onBeaconBlock(node: BeaconNode, blck: BeaconBlock) =
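  ## Handles a block received from the network: stores it and, when it is
  ## newer than our current state, attempts to make it the new head.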
  let
    blockRoot = hash_tree_root_final(blck)
    stateSlot = node.beaconState.slot

  if node.db.containsBlock(blockRoot):
    debug "Block already seen",
      slot = humaneSlotNum(blck.slot),
      stateRoot = shortHash(blck.state_root),
      blockRoot = shortHash(blockRoot),
      stateSlot = humaneSlotNum(stateSlot)

    return

  info "Block received",
    slot = humaneSlotNum(blck.slot),
    stateRoot = shortHash(blck.state_root),
    parentRoot = shortHash(blck.parent_root),
    blockRoot = shortHash(blockRoot)

  # TODO we should now validate the block to ensure that it's sane - but the
  #      only way to do that is to apply it to the state... for now, we assume
  #      all blocks are good!

  # The block has been validated and it's not in the database yet - first, let's
  # store it there, just to be safe
  node.db.putBlock(blck)

  # Since this is a good block, we should add its attestations in case we missed
  # any. If everything checks out, this should lead to the fork choice selecting
  # this particular block as head, eventually (technically, if we have other
  # attestations, that might not be the case!)
  for attestation in blck.body.attestations:
    # TODO attestation pool needs to be taught to deal with overlapping
    #      attestations!
    discard # node.onAttestation(attestation)

  if blck.slot <= node.beaconState.slot:
    # This is some old block that we received (perhaps as the result of a sync
    # request). At this point, there's not much we can do, except maybe try to
    # update the state to the head block (this could have failed before due to
    # missing blocks!)..
    # TODO figure out what to do - for example, how to resume setting
    #      the head block...
    return

  # TODO We have a block that is newer than our latest state. What now??
  #      Here, we choose to update our state eagerly, assuming that the block
  #      is the one that the fork choice would have ended up with anyway, but
  #      is this a sane strategy? Technically, we could wait for more
  #      attestations and update the state lazily only when actually needed,
  #      such as when attesting.
  # TODO Also, should we update to the block we just got, or run the fork
  #      choice at this point??

  updateHeadBlock(node, blck)

  if stateNeedsSaving(node.beaconState):
    node.db.putState(node.beaconState)

proc run*(node: BeaconNode) =
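  ## Subscribes to the gossip topics, schedules validator duties for the
  ## current epoch and enters the event loop.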
  node.network.subscribe(topicBeaconBlocks) do (blck: BeaconBlock):
    node.onBeaconBlock(blck)

  node.network.subscribe(topicAttestations) do (attestation: Attestation):
    node.onAttestation(attestation)

  let epoch = node.beaconState.getSlotFromTime div SLOTS_PER_EPOCH
  node.scheduleEpochActions(epoch)

  runForever()

var gPidFile: string
proc createPidFile(filename: string) =
  createDir splitFile(filename).dir
  writeFile filename, $getCurrentProcessId()
  gPidFile = filename
  addQuitProc proc {.noconv.} = removeFile gPidFile

when isMainModule:
  let config = load BeaconNodeConf
  if config.logLevel != LogLevel.NONE:
    setLogLevel(config.logLevel)

  case config.cmd
  of createChain:
    createStateSnapshot(
      config.chainStartupData, config.genesisOffset,
      config.outputStateFile.string)
    quit 0

  of noCommand:
    waitFor synchronizeClock()
    createPidFile(config.dataDir.string / "beacon_node.pid")

    var node = BeaconNode.init config

    dynamicLogScope(node = node.config.tcpPort - 50000):
      # TODO: while it's nice to cheat by waiting for connections here, we
      #       actually need to make this part of normal application flow -
      #       losing all connections might happen at any time and we should be
      #       prepared to handle it.
      waitFor node.connectToNetwork()

      if not waitFor node.sync():
        quit 1

      info "Starting beacon node",
        slotsSinceFinalization = node.beaconState.slotDistanceFromNow(),
        stateSlot = humaneSlotNum(node.beaconState.slot),
        SHARD_COUNT,
        SLOTS_PER_EPOCH,
        SECONDS_PER_SLOT,
        SPEC_VERSION

      node.addLocalValidators()
      node.run()