More reliable network simulation
You'll need the latest versions of nim-eth-p2p, nim-serialization and nim-json-serialization.

Before starting the simulation script, make sure to delete any previous JSON files from the simulation folder:

```
rm tests/simulation/*.json
tests/simulation/start.sh
```

This should survive the creation of a few blocks before dying with a block validation error.
parent 98177dace3
commit 8f9a5441f1
@@ -1,7 +1,7 @@
 import
   os, json,
   chronicles, json_serialization, eth_common/eth_types_json_serialization,
-  spec/datatypes
+  spec/[datatypes, digest, crypto]

 type
   BeaconChainDB* = ref object
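The broadened import pulls in `digest` and `crypto` so their types can be serialized by the database. As a minimal, self-contained sketch of the nim-json-serialization API this commit leans on elsewhere (`Json.encode`, `Json.decode`, `Json.saveFile` all appear later in the diff; the `Snapshot` type here is a made-up stand-in, not a repo type):

```
import json_serialization

type Snapshot = object
  slot: uint64
  label: string

when isMainModule:
  let s = Snapshot(slot: 42, label: "genesis")
  let encoded = Json.encode(s)   # serialize to a JSON string
  doAssert Json.decode(encoded, Snapshot) == s
```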
@@ -1,9 +1,9 @@
 import
-  std_shims/os_shims, net, sequtils, options,
+  std_shims/[os_shims, objects], net, sequtils, options,
   asyncdispatch2, chronicles, confutils, eth_p2p, eth_keys,
-  spec/[beaconstate, datatypes, helpers, crypto, digest], conf, time,
-  fork_choice, ssz, beacon_chain_db, validator_pool, mainchain_monitor,
-  sync_protocol, gossipsub_protocol, trusted_state_snapshots
+  spec/[datatypes, digest, crypto, beaconstate, helpers], conf, time,
+  state_transition, fork_choice, ssz, beacon_chain_db, validator_pool, extras,
+  mainchain_monitor, sync_protocol, gossipsub_protocol, trusted_state_snapshots

 type
   BeaconNode* = ref object
@@ -13,9 +13,10 @@ type
     config*: BeaconNodeConf
     keys*: KeyPair
     attachedValidators: ValidatorPool
-    attestations: AttestationPool
+    attestationPool: AttestationPool
     headBlock: BeaconBlock
     mainchainMonitor: MainchainMonitor
+    lastScheduledCycle: int

 const
   version = "v0.1" # TODO: read this from the nimble file
@@ -35,15 +36,19 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): T =
   new result
   result.config = conf

   result.attachedValidators = ValidatorPool.init
+  init result.attestationPool, 0
+  init result.mainchainMonitor, "", Port(0) # TODO: specify geth address and port
+
   result.db = BeaconChainDB.init(string conf.dataDir)
   result.keys = ensureNetworkKeys(string conf.dataDir)

   var address: Address
-  address.ip = parseIpAddress("0.0.0.0")
+  address.ip = parseIpAddress("127.0.0.1")
   address.tcpPort = Port(conf.tcpPort)
   address.udpPort = Port(conf.udpPort)

-  result.network = newEthereumNode(result.keys, address, 0, nil, clientId)
+  result.network = newEthereumNode(result.keys, address, 0, nil, clientId, minPeers = 1)

   writeFile(string(conf.dataDir) / "beacon_node.address",
             $result.network.listeningAddress)
@@ -60,8 +65,10 @@ proc connectToNetwork(node: BeaconNode) {.async.} =
       bootstrapNodes.add initENode(string ln)

   if bootstrapNodes.len > 0:
+    info "Connecting to bootstrap nodes", bootstrapNodes
     await node.network.connectToNetwork(bootstrapNodes)
   else:
+    info "Waiting for connections"
     node.network.startListening()

 proc sync*(node: BeaconNode): Future[bool] {.async.} =
@@ -99,6 +106,10 @@ template findIt(s: openarray, predicate: untyped): int =
       break
   res

+template isSameKey(lhs, rhs: ValidatorPubKey): bool =
+  # TODO: operator `==` for ValidatorPubKey doesn't work properly at the moment
+  $lhs == $rhs
+
 proc addLocalValidators*(node: BeaconNode) =
   for validator in node.config.validators:
     let
@@ -106,102 +117,182 @@ proc addLocalValidators*(node: BeaconNode) =
       pubKey = privKey.pubKey()
       randao = validator.randao

-    let idx = node.beaconState.validator_registry.findIt(it.pubKey == pubKey)
+    let idx = node.beaconState.validator_registry.findIt(isSameKey(it.pubKey, pubKey))
     if idx == -1:
       warn "Validator not in registry", pubKey
     else:
       node.attachedValidators.addLocalValidator(idx, pubKey, privKey, randao)

-    info "Local validators attached ", count = node.attachedValidators.count
+  info "Local validators attached ", count = node.attachedValidators.count

 proc getAttachedValidator(node: BeaconNode, idx: int): AttachedValidator =
   let validatorKey = node.beaconState.validator_registry[idx].pubkey
   return node.attachedValidators.getValidator(validatorKey)

 proc makeAttestation(node: BeaconNode,
-                     validator: AttachedValidator) {.async.} =
-  var attestation: AttestationCandidate
-  attestation.validator = validator.idx
+                     validator: AttachedValidator,
+                     slot: uint64,
+                     shard: uint64,
+                     committeeLen: int,
+                     indexInCommittee: int) {.async.} =
+  doAssert node != nil
+  doAssert validator != nil

-  # TODO: Populate attestation.data
+  let
+    headBlockRoot = hash_tree_root_final(node.headBlock)
+
+    justifiedBlockRoot = if node.beaconState.justified_slot == node.beaconState.slot: headBlockRoot
+                         else: get_block_root(node.beaconState, node.beaconState.justified_slot)
+
+  var attestationData = AttestationData(
+    slot: slot,
+    shard: shard,
+    beacon_block_root: headBlockRoot,
+    epoch_boundary_root: Eth2Digest(), # TODO
+    shard_block_root: Eth2Digest(), # TODO
+    latest_crosslink_root: Eth2Digest(), # TODO
+    justified_slot: node.beaconState.justified_slot,
+    justified_block_root: justifiedBlockRoot)
+
+  let validatorSignature = await validator.signAttestation(attestationData)
+
+  var participationBitfield = repeat(0'u8, ceil_div8(committeeLen))
+  bitSet(participationBitfield, indexInCommittee)
+
+  var attestation = Attestation(
+    data: attestationData,
+    aggregate_signature: validatorSignature,
+    participation_bitfield: participationBitfield)

-  attestation.signature = await validator.signAttestation(attestation.data)
   await node.network.broadcast(topicAttestations, attestation)

+  info "Attestation sent", slot = slot,
+                           shard = shard,
+                           validator = validator.idx
+
 proc proposeBlock(node: BeaconNode,
                   validator: AttachedValidator,
-                  slot: int) {.async.} =
-  var proposal: BeaconBlock
+                  slot: uint64) {.async.} =
+  doAssert node != nil
+  doAssert validator != nil
+  doAssert validator.idx < node.beaconState.validator_registry.len
+
+  let
+    randaoCommitment = node.beaconState.validator_registry[validator.idx].randao_commitment
+    randaoReveal = await validator.randaoReveal(randaoCommitment)
+    headBlockRoot = hash_tree_root_final(node.headBlock)
+
+  var blockBody = BeaconBlockBody(
+    attestations: node.attestationPool.getAttestationsForBlock(node.beaconState, slot))
+
+  var newBlock = BeaconBlock(
+    slot: slot,
+    parent_root: headBlockRoot,
+    randao_reveal: randaoReveal,
+    candidate_pow_receipt_root: node.mainchainMonitor.getBeaconBlockRef(),
+    signature: ValidatorSig(), # we need the rest of the block first!
+    body: blockBody)
+
+  var state = node.beaconState

-  # TODO:
-  # 1. Produce a RANDAO reveal from attachedVadalidator.randaoSecret
-  # and its matching ValidatorRecord.
-  let randaoCommitment = node.beaconState.validator_registry[validator.idx].randao_commitment
-  proposal.randao_reveal = await validator.randaoReveal(randaoCommitment)
+  # Er, this is needed to avoid a failure in `processBlock`, but why is it necessary?
+  # Shouldn't `updateState` skip blocks automatically?
+  state.slot = slot - 1

-  # 2. Get ancestors from the beacon_db
+  let ok = updateState(state, headBlockRoot, some(newBlock), {skipValidation})
+  doAssert ok # TODO: err, could this fail somehow?

-  # 3. Calculate the correct state hash
+  newBlock.state_root = Eth2Digest(data: hash_tree_root(state))

-  proposal.candidate_pow_receipt_root =
-    node.mainchainMonitor.getBeaconBlockRef()
+  var signedData = ProposalSignedData(
+    slot: slot,
+    shard: BEACON_CHAIN_SHARD_NUMBER,
+    blockRoot: hash_tree_root_final(newBlock))

-  for a in node.attestations.each(firstSlot = node.headBlock.slot.int + 1,
-                                  lastSlot = slot - MIN_ATTESTATION_INCLUSION_DELAY.int):
-    # TODO: this is not quite right,
-    # the attestations from individual validators have to be merged.
-    # proposal.attestations.add a
-    discard
+  newBlock.signature = await validator.signBlockProposal(signedData)

-  # TODO update after spec change removed specials
-  # for r in node.mainchainMonitor.getValidatorActions(
-  #     node.headBlock.candidate_pow_receipt_root,
-  #     proposal.candidate_pow_receipt_root):
-  #   proposal.specials.add r
+  await node.network.broadcast(topicBeaconBlocks, newBlock)

-  var signedData: ProposalSignedData
-  signedData.slot = node.beaconState.slot
-  signedData.shard = BEACON_CHAIN_SHARD_NUMBER
-  signedData.blockRoot = hash_tree_root_final(proposal)
+  info "Block proposed", slot = slot,
+                         stateRoot = newBlock.state_root,
+                         blockRoot = signedData.blockRoot,
+                         validator = validator.idx

-  proposal.signature = await validator.signBlockProposal(signedData)
-  await node.network.broadcast(topicBeaconBlocks, proposal)
+proc scheduleBlockProposal(node: BeaconNode,
+                           slot: int,
+                           validator: AttachedValidator) =
+  # TODO:
+  # This function exists only to hide a bug with Nim's closures.
+  # If you inline it in `scheduleCycleActions`, you'll see the
+  # internal `doAssert` starting to fail.
+  doAssert validator != nil

-proc scheduleCycleActions(node: BeaconNode) =
+  addTimer(node.beaconState.slotStart(slot)) do (p: pointer):
+    doAssert validator != nil
+    asyncCheck proposeBlock(node, validator, slot.uint64)
+
+proc scheduleAttestation(node: BeaconNode,
+                         validator: AttachedValidator,
+                         slot: int,
+                         shard: uint64,
+                         committeeLen: int,
+                         indexInCommittee: int) =
+  # TODO:
+  # This function exists only to hide a bug with Nim's closures.
+  # If you inline it in `scheduleCycleActions`, you'll see the
+  # internal `doAssert` starting to fail.
+  doAssert validator != nil
+
+  addTimer(node.beaconState.slotMiddle(slot)) do (p: pointer):
+    doAssert validator != nil
+    asyncCheck makeAttestation(node, validator, slot.uint64,
+                               shard, committeeLen, indexInCommittee)
+
+proc scheduleCycleActions(node: BeaconNode, cycleStart: int) =
   ## This schedules the required block proposals and
   ## attestations from our attached validators.
-  let cycleStart = node.beaconState.slot.int
+  doAssert node != nil

-  for i in 0 ..< EPOCH_LENGTH:
+  # TODO: this copy of the state shouldn't be necessary, but please
+  # see the comments in `get_beacon_proposer_index`
+  var nextState = node.beaconState
+
+  for i in 1 ..< EPOCH_LENGTH:
     # Schedule block proposals
+    nextState.slot = node.beaconState.slot + i.uint64
+
     let
       slot = cycleStart + i
-      proposerIdx = get_beacon_proposer_index(node.beaconState, slot.uint64)
-      attachedValidator = node.getAttachedValidator(proposerIdx)
+      proposerIdx = get_beacon_proposer_index(nextState, nextState.slot.uint64)
+      validator = node.getAttachedValidator(proposerIdx)

-    if attachedValidator != nil:
+    if validator != nil:
       # TODO:
       # Warm-up the proposer earlier to try to obtain previous
       # missing blocks if necessary
-      addTimer(node.beaconState.slotStart(slot)) do (p: pointer):
-        asyncCheck proposeBlock(node, attachedValidator, slot)
+      scheduleBlockProposal(node, slot, validator)

     # Schedule attestations
     let
-      committeesIdx = get_shard_committees_index(node.beaconState, slot.uint64)
+      committeesIdx = get_shard_committees_index(nextState, nextState.slot.uint64)

     for shard in node.beaconState.shard_committees_at_slots[committees_idx]:
-      for validatorIdx in shard.committee:
-        let attachedValidator = node.getAttachedValidator(validatorIdx)
-        if attachedValidator != nil:
-          addTimer(node.beaconState.slotMiddle(slot)) do (p: pointer):
-            asyncCheck makeAttestation(node, attachedValidator)
+      for i, validatorIdx in shard.committee:
+        let validator = node.getAttachedValidator(validatorIdx)
+        if validator != nil:
+          scheduleAttestation(node, validator, slot, shard.shard, shard.committee.len, i)

-proc processBlocks*(node: BeaconNode) {.async.} =
-  node.scheduleCycleActions()
+  node.lastScheduledCycle = cycleStart
+  let nextCycle = cycleStart + EPOCH_LENGTH
+
+  addTimer(node.beaconState.slotMiddle(nextCycle)) do (p: pointer):
+    if node.lastScheduledCycle != nextCycle:
+      node.scheduleCycleActions(nextCycle)
+
+proc processBlocks*(node: BeaconNode) =
   node.network.subscribe(topicBeaconBlocks) do (b: BeaconBlock):
     info "Block received", slot = b.slot, stateRoot = b.state_root

     # TODO:
     #
     # 1. Check for missing blocks and obtain them
@@ -211,13 +302,21 @@ proc processBlocks*(node: BeaconNode) {.async.} =
     # 3. Peform block processing / state recalculation / etc
     #

-    if b.slot mod EPOCH_LENGTH == 0:
-      node.scheduleCycleActions()
-      node.attestations.discardHistoryToSlot(b.slot.int)
+    let slot = b.slot.int
+    if slot mod EPOCH_LENGTH == 0:
+      node.scheduleCycleActions(slot)
+      node.attestationPool.discardHistoryToSlot(slot)

   node.network.subscribe(topicAttestations) do (a: Attestation):
-    # Attestations are verified as aggregated groups
-    node.attestations.add(getAttestationCandidate a, node.beaconState)
+    info "Attestation received", slot = a.data.slot,
+                                 shard = a.data.shard
+
+    node.attestationPool.add(a, node.beaconState)
+
+  let cycleStart = node.beaconState.slot.int
+  node.scheduleCycleActions(cycleStart)
+
+  runForever()

 var gPidFile: string
 proc createPidFile(filename: string) =
@@ -227,22 +326,15 @@ proc createPidFile(filename: string) =
   addQuitProc proc {.noconv.} = removeFile gPidFile

 when isMainModule:
-  let config = BeaconNodeConf.load()
+  let config = load BeaconNodeConf
   case config.cmd
   of createChain:
-    let outfile = string config.outputStateFile
-    let initialState = get_initial_beacon_state(
-      config.chainStartupData.validatorDeposits,
-      config.chainStartupData.genesisTime,
-      Eth2Digest(), {})
-
-    Json.saveFile(outfile, initialState, pretty = true)
-    echo "Wrote ", outfile
+    createStateSnapshot(config.chainStartupData, config.outputStateFile.string)
     quit 0

   of noCommand:
-    waitFor syncrhronizeClock()
-    createPidFile(string(config.dataDir) / "beacon_node.pid")
+    createPidFile(config.dataDir.string / "beacon_node.pid")

     var node = BeaconNode.init config
     waitFor node.connectToNetwork()
@@ -251,6 +343,5 @@ when isMainModule:
       quit 1

     node.addLocalValidators()
-
-    waitFor node.processBlocks()
+    node.processBlocks()
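Two small sketches to clarify the machinery above. First, the participation bitfield that `makeAttestation` builds with `repeat(0'u8, ceil_div8(committeeLen))` and `bitSet(...)`: plausible definitions of those helpers, where the bit order inside a byte (MSB-first) is an assumption, not something the diff shows:

```
import sequtils

proc ceil_div8(n: int): int =
  # Number of bytes needed to hold n bits.
  (n + 7) div 8

proc bitSet(bitfield: var seq[uint8], index: int) =
  # Mark committee member `index` as participating (MSB-first is assumed).
  bitfield[index div 8] = bitfield[index div 8] or (128'u8 shr (index mod 8))

when isMainModule:
  var bits = repeat(0'u8, ceil_div8(10))  # a 10-member committee needs 2 bytes
  bitSet(bits, 3)
  doAssert bits[0] == 0b0001_0000'u8
```

Second, the new `lastScheduledCycle` field: each cycle arms a timer for the next one, but a block arriving on an epoch boundary may have scheduled that cycle already. A self-contained toy of just that guard, with plain calls standing in for the repo's `addTimer` and slot arithmetic:

```
type Node = object
  lastScheduledCycle: int

proc scheduleCycleActions(node: var Node, cycleStart: int) =
  node.lastScheduledCycle = cycleStart

proc onNextCycleTimer(node: var Node, nextCycle: int) =
  # Fire only if nothing else scheduled this cycle in the meantime.
  if node.lastScheduledCycle != nextCycle:
    scheduleCycleActions(node, nextCycle)

when isMainModule:
  var n = Node(lastScheduledCycle: 0)
  scheduleCycleActions(n, 64)  # a boundary block schedules cycle 64 first
  onNextCycleTimer(n, 64)      # the timer then finds the work already done
  doAssert n.lastScheduledCycle == 64
```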
@@ -90,7 +90,7 @@ template mustBeFilePath(input: TaintedString) =
 template handledAsJsonFilename(T: untyped) {.dirty.} =
   proc parseCmdArg*(_: type T, input: TaintedString): T =
     input.mustBeFilePath
-    #return Json.loadFile(string(input), T)
+    return Json.loadFile(string(input), T)

 handledAsJsonFilename BeaconState
 handledAsJsonFilename ChainStartupData
@@ -1,6 +1,7 @@
 import
-  deques,
-  spec/[datatypes, crypto]
+  deques, options,
+  milagro_crypto,
+  spec/[datatypes, crypto, helpers], extras

 type
   AttestationCandidate* = object
@@ -9,11 +10,15 @@ type
     signature*: ValidatorSig

   AttestationPool* = object
-    attestations: Deque[seq[AttestationCandidate]]
+    # The Deque below stores all outstanding attestations per slot.
+    # In each slot, we have an array of all attestations indexed by their
+    # shard number. When we haven't received an attestation for a particular
+    # shard yet, the Option value will be `none`
+    attestations: Deque[array[SHARD_COUNT, Option[Attestation]]]
     startingSlot: int

 proc init*(T: type AttestationPool, startingSlot: int): T =
-  result.attestationsPerSlot = initDeque[seq[AttestationCandidate]]()
+  result.attestations = initDeque[array[SHARD_COUNT, Option[Attestation]]]()
   result.startingSlot = startingSlot

 proc setLen*[T](d: var Deque[T], len: int) =
@@ -26,31 +31,73 @@ proc setLen*[T](d: var Deque[T], len: int) =
   else:
     d.shrink(fromLast = delta)

+proc combine*(tgt: var Attestation, src: Attestation, flags: UpdateFlags) =
+  # Combine the signature and participation bitfield, with the assumption that
+  # the same data is being signed!
+  # TODO similar code in work_pool, clean up
+
+  assert tgt.data == src.data
+
+  for i in 0 ..< tgt.participation_bitfield.len:
+    # TODO:
+    # when BLS signatures are combined, we must ensure that
+    # the same participant key is not included on both sides
+    tgt.participation_bitfield[i] =
+      tgt.participation_bitfield[i] or
+      src.participation_bitfield[i]
+
+  if skipValidation notin flags:
+    tgt.aggregate_signature.combine(src.aggregate_signature)
+
 proc add*(pool: var AttestationPool,
-          attestation: AttestationCandidate,
+          attestation: Attestation,
           beaconState: BeaconState) =
   # The caller of this function is responsible for ensuring that
   # the attestations will be given in a strictly slot increasing order:
   doAssert attestation.data.slot.int >= pool.startingSlot

+  # TODO:
+  # Validate that the attestation is authentic (it's properly signed)
+  # and make sure that the validator is supposed to make an attestation
+  # for the specific shard/slot
+
   let slotIdxInPool = attestation.data.slot.int - pool.startingSlot
   if slotIdxInPool >= pool.attestations.len:
     pool.attestations.setLen(slotIdxInPool + 1)

-  pool.attestations[slotIdxInPool].add attestation
+  let shard = attestation.data.shard

-iterator each*(pool: AttestationPool,
-               firstSlot, lastSlot: int): AttestationCandidate =
-  ## Both indices are treated inclusively
-  ## TODO: this should return a lent value
-  doAssert firstSlot <= lastSlot
-  for idx in countup(max(0, firstSlot - pool.startingSlot),
-                     min(pool.attestations.len - 1, lastSlot - pool.startingSlot)):
-    for attestation in pool.attestations[idx]:
-      yield attestation
+  if pool.attestations[slotIdxInPool][shard].isSome:
+    combine(pool.attestations[slotIdxInPool][shard].get, attestation, {})
+  else:
+    pool.attestations[slotIdxInPool][shard] = some(attestation)

+proc getAttestationsForBlock*(pool: AttestationPool,
+                              lastState: BeaconState,
+                              newBlockSlot: uint64): seq[Attestation] =
+  if newBlockSlot < MIN_ATTESTATION_INCLUSION_DELAY or pool.attestations.len == 0:
+    return
+
+  doAssert newBlockSlot > lastState.slot
+
+  var
+    firstSlot = 0.uint64
+    lastSlot = newBlockSlot - MIN_ATTESTATION_INCLUSION_DELAY
+
+  if pool.startingSlot.uint64 + MIN_ATTESTATION_INCLUSION_DELAY <= lastState.slot:
+    firstSlot = lastState.slot - MIN_ATTESTATION_INCLUSION_DELAY
+
+  for slot in firstSlot .. lastSlot:
+    let slotDequeIdx = slot.int - pool.startingSlot
+    if slotDequeIdx >= pool.attestations.len: return
+    let shardAndComittees = get_shard_committees_at_slot(lastState, slot)
+    for s in shardAndComittees:
+      if pool.attestations[slotDequeIdx][s.shard].isSome:
+        result.add pool.attestations[slotDequeIdx][s.shard].get
+
 proc discardHistoryToSlot*(pool: var AttestationPool, slot: int) =
   ## The index is treated inclusively
+  let slot = slot - MIN_ATTESTATION_INCLUSION_DELAY.int
   if slot < pool.startingSlot:
     return
   let slotIdx = int(slot - pool.startingSlot)
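A self-contained toy (simplified types; the `SHARD_COUNT` value and the `bits` field are stand-ins, not repo definitions) of the storage scheme introduced above: one `Option[Attestation]` cell per shard per slot, with a second attestation for the same (slot, shard) merged in place instead of appended:

```
import deques, options

const SHARD_COUNT = 4

type
  Attestation = object
    slot, shard: int
    bits: uint8   # stand-in participation bitfield

  Pool = object
    startingSlot: int
    slots: Deque[array[SHARD_COUNT, Option[Attestation]]]

proc initPool(startingSlot: int): Pool =
  Pool(startingSlot: startingSlot,
       slots: initDeque[array[SHARD_COUNT, Option[Attestation]]]())

proc add(pool: var Pool, a: Attestation) =
  let idx = a.slot - pool.startingSlot
  while pool.slots.len <= idx:
    var empty: array[SHARD_COUNT, Option[Attestation]]
    pool.slots.addLast empty
  if pool.slots[idx][a.shard].isSome:
    # Same (slot, shard) seen before: merge participation bits.
    var merged = pool.slots[idx][a.shard].get
    merged.bits = merged.bits or a.bits
    pool.slots[idx][a.shard] = some(merged)
  else:
    pool.slots[idx][a.shard] = some(a)

when isMainModule:
  var pool = initPool(0)
  pool.add Attestation(slot: 2, shard: 1, bits: 0b01)
  pool.add Attestation(slot: 2, shard: 1, bits: 0b10)
  doAssert pool.slots[2][1].get.bits == 0b11
```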
@@ -1,69 +1,94 @@
 import
   tables, sets, macros, base64,
-  asyncdispatch2, nimcrypto/sysrand, chronicles, rlp, eth_p2p, eth_p2p/rlpx
+  asyncdispatch2, nimcrypto/sysrand, chronicles, rlp, json_serialization,
+  eth_p2p, eth_p2p/[rlpx, peer_pool],
+  spec/[datatypes, crypto]

 type
-  TopicMsgHandler = proc(data: seq[byte]): Future[void]
+  TopicMsgHandler = proc(msg: string): Future[void]

-  GossibSubPeer = ref object
+  GossipSubPeer* = ref object
     sentMessages: HashSet[string]
     subscribedFor: HashSet[string]

-  GossipSubNetwork = ref object
+  GossipSubNetwork* = ref object
     topicSubscribers: Table[string, TopicMsgHandler]
     handledMessages: HashSet[string]

-proc initProtocolState*(network: GossipSubNetwork, node: EthereumNode) =
+proc initProtocolState*(network: GossipSubNetwork, _: EthereumNode) =
   network.topicSubscribers = initTable[string, TopicMsgHandler]()
   network.handledMessages = initSet[string]()

-proc initProtocolState*(peer: GossibSubPeer, node: EthereumNode) =
+proc initProtocolState*(peer: GossipSubPeer, _: Peer) =
   peer.sentMessages = initSet[string]()
   peer.subscribedFor = initSet[string]()

 p2pProtocol GossipSub(version = 1,
                       shortName = "gss",
-                      peerState = GossibSubPeer,
+                      peerState = GossipSubPeer,
                       networkState = GossipSubNetwork):
   # This is a very barebones emulation of the GossipSub protocol
   # available in LibP2P:

+  onPeerConnected do (peer: Peer):
+    info "GossipSub Peer connecetd", peer
+    let gossipNet = peer.networkState
+    for topic, _ in gossipNet.topicSubscribers:
+      peer.subscribeFor(topic)
+
+  onPeerDisconnected do (peer: Peer, reason: DisconnectionReason):
+    info "GossipSub Peer disconnected"
+    writeStackTrace()
+
   proc subscribeFor(peer: Peer, topic: string) =
     peer.state.subscribedFor.incl topic

-  proc emit(peer: Peer, topic: string, msgId: string, data: openarray[byte]) =
+  proc emit(peer: Peer, topic: string, msgId: string, msg: string) =
+    if msgId in peer.networkState.handledMessages:
+      debug "Ignored previously handled message", msgId
+      return
+
+    peer.networkState.handledMessages.incl msgId
+
     for p in peer.network.peers(GossipSub):
       if msgId notin p.state.sentMessages and topic in p.state.subscribedFor:
-        asyncCheck p.emit(topic, msgId, data)
         p.state.sentMessages.incl msgId
+        asyncCheck p.emit(topic, msgId, msg)

     let handler = peer.networkState.topicSubscribers.getOrDefault(topic)
     if handler != nil:
-      await handler(data)
+      await handler(msg)

 proc subscribeImpl(node: EthereumNode,
                    topic: string,
                    subscriber: TopicMsgHandler) =
   var gossipNet = node.protocolState(GossipSub)
   gossipNet.topicSubscribers[topic] = subscriber
-  for peer in node.peers(GossipSub): discard peer.subscribeFor(topic)

-proc broadcastImpl(node: EthereumNode, topic: string, msgBytes: seq[byte]): seq[Future[void]] {.gcsafe.} =
+  for peer in node.peers(GossipSub):
+    discard peer.subscribeFor(topic)
+
+proc broadcastImpl(node: EthereumNode, topic: string, msg: string): seq[Future[void]] {.gcsafe.} =
   var randBytes: array[10, byte];
   if randomBytes(randBytes) != 10:
     warn "Failed to generate random message id"

   let msgId = base64.encode(randBytes)
   debug "Sending GossipSub message", msgId

   for peer in node.peers(GossipSub):
     if topic in peer.state(GossipSub).subscribedFor:
-      result.add peer.emit(topic, msgId, msgBytes)
+      result.add peer.emit(topic, msgId, msg)

 proc makeMessageHandler[MsgType](userHandler: proc(msg: MsgType): Future[void]): TopicMsgHandler =
-  result = proc (data: seq[byte]): Future[void] =
-    userHandler rlp.decode(data, MsgType)
+  result = proc (msg: string): Future[void] =
+    userHandler Json.decode(msg, MsgType)

 macro subscribe*(node: EthereumNode, topic: string, handler: untyped): untyped =
   handler.addPragma ident"async"
   result = newCall(bindSym"subscribeImpl",
                    node, topic, newCall(bindSym"makeMessageHandler", handler))

-proc broadcast*(node: EthereumNode, topic: string, data: auto) {.async.} =
-  await all(node.broadcastImpl(topic, rlp.encode(data)))
+proc broadcast*(node: EthereumNode, topic: string, msg: auto) {.async.} =
+  await all(node.broadcastImpl(topic, Json.encode(msg)))
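The key change in `emit` above is dropping messages whose id has already been handled, so re-broadcast floods terminate. A self-contained toy of that dedup rule (plain objects standing in for the p2pProtocol machinery; `initSet` matches the Nim-era API used in the diff):

```
import sets

type Node = object
  handledMessages: HashSet[string]

proc handleOnce(node: var Node, msgId: string): bool =
  # Returns true only the first time a given msgId is seen.
  if msgId in node.handledMessages:
    return false
  node.handledMessages.incl msgId
  true

when isMainModule:
  var n = Node(handledMessages: initSet[string]())
  doAssert n.handleOnce("m1")       # fresh: handle and forward
  doAssert not n.handleOnce("m1")   # duplicate: ignored, flood stops here
```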
@@ -438,4 +438,6 @@ when true:
   proc read*(rlp: var Rlp, T: type ValidatorSig): T {.inline.} =
     discard
+
+export
+  writeValue, readValue
@@ -126,6 +126,8 @@ func get_beacon_proposer_index*(state: BeaconState, slot: uint64): Uint24 =
   # because presently, `state.slot += 1` happens before this function
   # is called - see also testutil.getNextBeaconProposerIndex
   let idx = get_shard_committees_index(state, slot)
+  doAssert idx.int < state.shard_committees_at_slots.len
+  doAssert state.shard_committees_at_slots[idx].len > 0
   state.shard_committees_at_slots[idx][0].committee.mod_get(slot)

 func integer_squareroot*(n: SomeInteger): SomeInteger =
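The two new `doAssert`s guard the indexing on the final line; `mod_get` (assumed here to be a wrapping index helper) only protects the committee index, so an empty committee would still divide by zero without the second assert. A sketch under that assumption:

```
# Assumed semantics of mod_get: index modulo the sequence length.
proc mod_get[T](s: seq[T], i: uint64): T =
  s[int(i mod s.len.uint64)]

when isMainModule:
  doAssert mod_get(@[10, 20, 30], 4'u64) == 20  # 4 mod 3 == 1
```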
@@ -337,7 +337,7 @@ proc processBlock(
   # TODO probably better to do all verification first, then apply state changes

   if not (blck.slot == state.slot):
-    warn("Unexpected block slot number")
+    warn "Unexpected block slot number"
     return false

   # Spec does not have this check explicitly, but requires that this condition
@@ -1,6 +1,6 @@
 import
   asyncdispatch2,
-  spec/datatypes, beacon_chain_db
+  spec/[datatypes, crypto, digest, beaconstate], beacon_chain_db, conf

 const
   WEAK_SUBJECTVITY_PERIOD* = 4 * 30 * 24 * 60 * 60 div SLOT_DURATION
@@ -29,3 +29,12 @@ proc obtainTrustedStateSnapshot*(db: BeaconChainDB): Future[BeaconState] {.async

   discard
+
+proc createStateSnapshot*(startup: ChainStartupData, outFile: string) =
+  let initialState = get_initial_beacon_state(startup.validatorDeposits,
+                                              startup.genesisTime,
+                                              Eth2Digest(), {})
+
+  var vr: ValidatorRecord
+  Json.saveFile(outFile, initialState, pretty = true)
+  echo "Wrote ", outFile
@@ -1,12 +1,12 @@
 import
   os, ospaths, strutils, strformat,
-  milagro_crypto, nimcrypto, json_serialization,
+  asyncdispatch2, milagro_crypto, nimcrypto, json_serialization,
   spec/[datatypes, digest, crypto], conf, randao, time, ssz,
   ../tests/testutil

 proc writeFile(filename: string, value: auto) =
   Json.saveFile(filename, value, pretty = true)
-  echo &"Wrote {filename}"
+  echo "Wrote ", filename

 proc genSingleValidator(path: string): (ValidatorPubKey,
                                         ValidatorPrivKey,
@@ -48,12 +48,18 @@ proc main() =

   for i in 1 .. totalValidators:
     let (pubKey, privKey, randaoCommitment) =
       genSingleValidator(outPath / &"validator-{i:02}.json")

-    let withdrawalCredentials = makeFakeHash(i)
-    let proofOfPossession = signMessage(privkey, hash_tree_root_final(
-      (pubKey, withdrawalCredentials, randaoCommitment)).data)
+    let
+      withdrawalCredentials = makeFakeHash(i)
+
+      proofOfPossessionData = DepositInput(
+        pubkey: pubKey,
+        withdrawal_credentials: withdrawalCredentials,
+        randao_commitment: randaoCommitment)
+
+      proofOfPossession = signMessage(
+        privkey, hash_tree_root_final(proofOfPossessionData).data)

     startupData.validatorDeposits.add Deposit(
       deposit_data: DepositData(
@@ -65,7 +71,7 @@ proc main() =
         withdrawal_credentials: withdrawalCredentials,
         randao_commitment: randaoCommitment)))

-  startupData.genesisTime = now()
+  startupData.genesisTime = now() div 1000

   writeFile(outPath / "startup.json", startupData)
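The `div 1000` suggests `now()` returns Unix time in milliseconds while `genesisTime` is expected in seconds; a one-line sanity check under that assumption (the timestamp is a hypothetical value):

```
when isMainModule:
  let nowMs = 1_546_300_800_000'u64   # hypothetical now(): 2019-01-01 in ms
  doAssert nowMs div 1000 == 1_546_300_800'u64
```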
@@ -63,8 +63,8 @@ proc signAttestation*(v: AttachedValidator,
     await sleepAsync(1)

     let attestationRoot = hash_tree_root_final(attestation)
-    # TODO: Should we use attestationRoot as data, or digest in regards to signature?
-    return signMessage(v.privKey, attestationRoot.data)
+    # TODO: Avoid the allocations belows
+    return signMessage(v.privKey, @(attestationRoot.data) & @[0'u8])
   else:
     # TODO:
     # send RPC
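The new signing call converts the digest array to a seq and appends an extra byte; the `@(...)` and `&` each allocate, which is what the TODO refers to. A minimal illustration of that conversion pattern:

```
when isMainModule:
  let digest: array[4, byte] = [1'u8, 2, 3, 4]
  # @(array) copies into a seq; `&` allocates again for the concatenation.
  let msg = @(digest) & @[0'u8]
  doAssert msg == @[1'u8, 2, 3, 4, 0]
```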
@@ -5,7 +5,7 @@ import
   milagro_crypto,
   ../tests/[testutil],
   ../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers],
-  ../beacon_chain/[extras, ssz, state_transition]
+  ../beacon_chain/[extras, ssz, state_transition, fork_choice]

 proc `%`(v: uint64): JsonNode = newJInt(v.BiggestInt)
 proc `%`(v: Eth2Digest): JsonNode = newJString($v)
@@ -17,21 +17,6 @@ proc writeJson*(prefix, slot, v: auto) =
   discard open(f, fmt"{prefix:04}-{slot:08}.json", fmWrite)
   write(f, pretty(%*(v)))

-proc combine(tgt: var Attestation, src: Attestation, flags: UpdateFlags) =
-  # Combine the signature and participation bitfield, with the assumption that
-  # the same data is being signed!
-  # TODO similar code in work_pool, clean up
-
-  assert tgt.data == src.data
-
-  for i in 0..<len(tgt.participation_bitfield):
-    tgt.participation_bitfield[i] =
-      tgt.participation_bitfield[i] or
-      src.participation_bitfield[i]
-
-  if skipValidation notin flags:
-    tgt.aggregate_signature.combine(src.aggregate_signature)
-
 proc transition(
   slots = 1945,
   validators = EPOCH_LENGTH, # One per shard is minimum
@@ -26,14 +26,27 @@ if [ ! -f $SNAPSHOT_FILE ]; then
     --out:$SNAPSHOT_FILE
 fi

+MASTER_NODE_ADDRESS_FILE="$SIMULATION_DIR/data-0/beacon_node.address"
+
+# Delete any leftover address files from a previous session
+if [ -f $MASTER_NODE_ADDRESS_FILE ]; then
+  rm $MASTER_NODE_ADDRESS_FILE
+fi
+
 for i in $(seq 0 9); do
-  DATA_DIR=$SIMULATION_DIR/data-$i
-  BOOTSTRAP_NODES_FLAG=--bootstrapNodesFile:"$DATA_DIR/beacon_node.address"
+  BOOTSTRAP_NODES_FLAG="--bootstrapNodesFile:$MASTER_NODE_ADDRESS_FILE"

   if [[ "$i" == "0" ]]; then
     BOOTSTRAP_NODES_FLAG=""
+  else
+    # Wait for the master node to write out its address file
+    while [ ! -f $MASTER_NODE_ADDRESS_FILE ]; do
+      sleep 0.1
+    done
   fi

+  DATA_DIR=$SIMULATION_DIR/data-$i
+
   beacon_chain/beacon_node \
     --dataDir:"$DATA_DIR" \
     --validator:"$SIMULATION_DIR/validator-${i}1.json" \