altair-capable beacon block creation (#2834)
* altair-capable beacon block creation
* update block_sim to use sync committees and the new block production interface
This commit is contained in:
parent 3efcdb0de5
commit 2d8a796a93
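At a glance: block production no longer returns a bare phase0.BeaconBlock. makeBeaconBlockForHeadAndSlot now yields a ForkedBlockResult (Result[ForkedBeaconBlock, string]), and callers dispatch on the fork before signing and proposing a ForkedSignedBeaconBlock. A rough sketch of the resulting call pattern, condensed from the hunks below; signing and slashing-protection details are elided, and signedPhase0Block / signedAltairBlock are placeholder names for the fork-specific signed blocks assembled in each branch:

# Sketch only - condensed from this commit's diff, not a drop-in snippet.
# makeBeaconBlockForHeadAndSlot now returns Result[ForkedBeaconBlock, string].
var newBlock = await makeBeaconBlockForHeadAndSlot(
  node, randao, validator_index, node.graffitiBytes, head, slot)
if newBlock.isErr():
  return head  # block production failed; already logged

let blck = newBlock.get()
let forked =
  case blck.kind
  of BeaconBlockFork.Phase0:
    # sign blck.phase0Block and wrap it; `signedPhase0Block` is a placeholder
    # for the phase0.SignedBeaconBlock assembled here
    ForkedSignedBeaconBlock.init(signedPhase0Block)
  of BeaconBlockFork.Altair:
    # altair blocks now carry a real sync_aggregate from the node's sync
    # committee message pool; `signedAltairBlock` is likewise a placeholder
    ForkedSignedBeaconBlock.init(signedAltairBlock)

return await node.proposeSignedBlock(head, validator, forked)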
@@ -191,19 +191,15 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
         let res = await makeBeaconBlockForHeadAndSlot(
           node, qrandao, proposer.get(), qgraffiti, qhead, qslot)
         if res.isErr():
-          return RestApiResponse.jsonError(Http400, BlockProduceError)
+          return RestApiResponse.jsonError(Http400, res.error())
         res.get()
     return
-      when message is phase0.BeaconBlock:
-        # TODO (cheatfate): This could be removed when `altair` branch will be
-        # merged.
-        RestApiResponse.jsonResponse(message)
-      else:
-        case message.kind
-        of BeaconBlockFork.Phase0:
-          RestApiResponse.jsonResponse(message.phase0Block.message)
-        of BeaconBlockFork.Altair:
-          return RestApiResponse.jsonError(Http400, BlockProduceError)
+      case message.kind
+      of BeaconBlockFork.Phase0:
+        RestApiResponse.jsonResponse(message.phase0Block)
+      else:
+        RestApiResponse.jsonError(Http400,
+          "Unable to produce block for altair fork")

   # https://ethereum.github.io/beacon-APIs/#/Validator/produceBlockV2
   router.api(MethodGet, "/api/eth/v2/validator/blocks/{slot}") do (
@@ -253,25 +249,9 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
         let res = await makeBeaconBlockForHeadAndSlot(
           node, qrandao, proposer.get(), qgraffiti, qhead, qslot)
         if res.isErr():
-          return RestApiResponse.jsonError(Http400, BlockProduceError)
+          return RestApiResponse.jsonError(Http400, res.error())
         res.get()
-    return
-      when message is phase0.BeaconBlock:
-        # TODO (cheatfate): This could be removed when `altair` branch will be
-        # merged.
-        RestApiResponse.jsonResponse(
-          (version: "phase0", data: message)
-        )
-      else:
-        case message.kind
-        of BeaconBlockFork.Phase0:
-          RestApiResponse.jsonResponse(
-            (version: "phase0", data: message.phase0Block.message)
-          )
-        of BeaconBlockFork.Altair:
-          RestApiResponse.jsonResponse(
-            (version: "altair", data: message.altairBlock.message)
-          )
+    return RestApiResponse.jsonResponsePlain(message)

   # https://ethereum.github.io/beacon-APIs/#/Validator/produceAttestationData
   router.api(MethodGet, "/api/eth/v1/validator/attestation_data") do (
@@ -411,7 +411,9 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
       # It was not integrated into the beacon node's database.
       return 202
     else:
-      let res = await proposeSignedBlock(node, head, AttachedValidator(), blck)
+      let res = await proposeSignedBlock(
+        node, head, AttachedValidator(),
+        ForkedSignedBeaconBlock.init(blck))
       if res == head:
         # TODO altair-transition, but not immediate testnet-priority
         node.network.broadcastBeaconBlock(ForkedSignedBeaconBlock.init(blck))
@@ -46,7 +46,13 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
     if message.isErr():
       raise newException(CatchableError,
                          "could not retrieve block for slot: " & $slot)
-    return message.get()
+    let blck = message.get()
+    case blck.kind
+    of BeaconBlockFork.Phase0:
+      return blck.phase0Block
+    of BeaconBlockFork.Altair:
+      raise newException(CatchableError,
+                         "could not retrieve block for altair blocks")

   rpcServer.rpc("post_v1_validator_block") do (body: phase0.SignedBeaconBlock) -> bool:
     debug "post_v1_validator_block",
@@ -57,7 +63,8 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
     if head.slot >= body.message.slot:
       raise newException(CatchableError,
                          "Proposal is for a past slot: " & $body.message.slot)
-    if head == await proposeSignedBlock(node, head, AttachedValidator(), body):
+    if head == await proposeSignedBlock(
+        node, head, AttachedValidator(), ForkedSignedBeaconBlock.init(body)):
       raise newException(CatchableError, "Could not propose block")
     return true

@@ -406,6 +406,7 @@ proc makeBeaconBlock*(
     proposerSlashings: seq[ProposerSlashing],
     attesterSlashings: seq[AttesterSlashing],
     voluntaryExits: seq[SignedVoluntaryExit],
+    sync_aggregate: SyncAggregate,
     executionPayload: ExecutionPayload,
     rollback: RollbackAltairHashedProc,
     cache: var StateCache): Result[altair.BeaconBlock, string] =
@@ -432,10 +433,7 @@ proc makeBeaconBlock*(
       deposits: List[Deposit, Limit MAX_DEPOSITS](deposits),
       voluntary_exits:
        List[SignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS](voluntaryExits),
-      sync_aggregate: SyncAggregate(sync_committee_signature:
-        default(CookedSig).toValidatorSig)))
-
-  # TODO sync committees
+      sync_aggregate: sync_aggregate))

   let res = process_block(cfg, state.data, blck, {skipBlsValidation}, cache)

@@ -69,6 +69,7 @@ logScope: topics = "beacval"
 type
   SendResult* = Result[void, cstring]
   SendBlockResult* = Result[bool, cstring]
+  ForkedBlockResult* = Result[ForkedBeaconBlock, string]

 proc findValidator(validators: auto, pubKey: ValidatorPubKey):
     Option[ValidatorIndex] =
@@ -316,7 +317,7 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
                                     validator_index: ValidatorIndex,
                                     graffiti: GraffitiBytes,
                                     head: BlockRef, slot: Slot
-                                    ): Future[Result[phase0.BeaconBlock, string]] {.async.} =
+                                    ): Future[ForkedBlockResult] {.async.} =
   # Advance state to the slot that we're proposing for

   let
@@ -330,63 +331,100 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,

   if eth1Proposal.hasMissingDeposits:
     error "Eth1 deposits not available. Skipping block proposal", slot
-    return Result[phase0.BeaconBlock, string].err("Eth1 deposits not available")
+    return ForkedBlockResult.err("Eth1 deposits not available")

-  func restore(v: var phase0.HashedBeaconState) =
-    # TODO address this ugly workaround - there should probably be a
-    # `state_transition` that takes a `StateData` instead and updates
-    # the block as well
-    doAssert v.addr == addr proposalStateAddr.data.hbsPhase0
-    assign(proposalStateAddr[], poolPtr.headState)
-
-  return makeBeaconBlock(
-    node.dag.cfg,
-    stateData.data.hbsPhase0,
-    validator_index,
-    head.root,
-    randao_reveal,
-    eth1Proposal.vote,
-    graffiti,
-    node.attestationPool[].getAttestationsForBlock(
-      stateData.data.hbsPhase0, cache),
-    eth1Proposal.deposits,
-    node.exitPool[].getProposerSlashingsForBlock(),
-    node.exitPool[].getAttesterSlashingsForBlock(),
-    node.exitPool[].getVoluntaryExitsForBlock(),
-    default(ExecutionPayload),
-    restore,
-    cache)
+  let doPhase0 = slot.epoch < node.dag.cfg.ALTAIR_FORK_EPOCH
+  return if doPhase0:
+    func restore(v: var phase0.HashedBeaconState) =
+      # TODO address this ugly workaround - there should probably be a
+      # `state_transition` that takes a `StateData` instead and updates
+      # the block as well
+      doAssert v.addr == addr proposalStateAddr.data.hbsPhase0
+      assign(proposalStateAddr[], poolPtr.headState)
+
+    makeBeaconBlock(
+      node.dag.cfg,
+      stateData.data.hbsPhase0,
+      validator_index,
+      head.root,
+      randao_reveal,
+      eth1Proposal.vote,
+      graffiti,
+      node.attestationPool[].getAttestationsForBlock(
+        stateData.data.hbsPhase0, cache),
+      eth1Proposal.deposits,
+      node.exitPool[].getProposerSlashingsForBlock(),
+      node.exitPool[].getAttesterSlashingsForBlock(),
+      node.exitPool[].getVoluntaryExitsForBlock(),
+      default(ExecutionPayload),
+      restore,
+      cache).map(proc (t: auto): auto = ForkedBeaconBlock.init(t))
+  else:
+    func restore(v: var altair.HashedBeaconState) =
+      # TODO address this ugly workaround - there should probably be a
+      # `state_transition` that takes a `StateData` instead and updates
+      # the block as well
+      doAssert v.addr == addr proposalStateAddr.data.hbsAltair
+      assign(proposalStateAddr[], poolPtr.headState)
+
+    makeBeaconBlock(
+      node.dag.cfg,
+      stateData.data.hbsAltair,
+      validator_index,
+      head.root,
+      randao_reveal,
+      eth1Proposal.vote,
+      graffiti,
+      node.attestationPool[].getAttestationsForBlock(
+        stateData.data.hbsAltair, cache),
+      eth1Proposal.deposits,
+      node.exitPool[].getProposerSlashingsForBlock(),
+      node.exitPool[].getAttesterSlashingsForBlock(),
+      node.exitPool[].getVoluntaryExitsForBlock(),
+      node.sync_committee_msg_pool[].produceSyncAggregate(head),
+      default(ExecutionPayload),
+      restore,
+      cache).map(proc (t: auto): auto = ForkedBeaconBlock.init(t))

 proc proposeSignedBlock*(node: BeaconNode,
                          head: BlockRef,
                          validator: AttachedValidator,
-                         newBlock: phase0.SignedBeaconBlock):
+                         newBlock: ForkedSignedBeaconBlock):
                          Future[BlockRef] {.async.} =
-  let newBlockRef = node.dag.addRawBlock(node.quarantine, newBlock) do (
-      blckRef: BlockRef, trustedBlock: phase0.TrustedSignedBeaconBlock,
-      epochRef: EpochRef):
-    # Callback add to fork choice if signed block valid (and becomes trusted)
-    node.attestationPool[].addForkChoice(
-      epochRef, blckRef, trustedBlock.message,
-      node.beaconClock.now().slotOrZero())
+  let newBlockRef =
+    case newBlock.kind:
+    of BeaconBlockFork.Phase0:
+      node.dag.addRawBlock(node.quarantine, newBlock.phase0Block) do (
+          blckRef: BlockRef, trustedBlock: phase0.TrustedSignedBeaconBlock,
+          epochRef: EpochRef):
+        # Callback add to fork choice if signed block valid (and becomes trusted)
+        node.attestationPool[].addForkChoice(
+          epochRef, blckRef, trustedBlock.message,
+          node.beaconClock.now().slotOrZero())
+    of BeaconBlockFork.Altair:
+      node.dag.addRawBlock(node.quarantine, newBlock.altairBlock) do (
+          blckRef: BlockRef, trustedBlock: altair.TrustedSignedBeaconBlock,
+          epochRef: EpochRef):
+        # Callback add to fork choice if signed block valid (and becomes trusted)
+        node.attestationPool[].addForkChoice(
+          epochRef, blckRef, trustedBlock.message,
+          node.beaconClock.now().slotOrZero())

   if newBlockRef.isErr:
-    warn "Unable to add proposed block to block pool",
-      newBlock = shortLog(newBlock.message),
-      blockRoot = shortLog(newBlock.root)
+    withBlck(newBlock):
+      warn "Unable to add proposed block to block pool",
+        newBlock = blck.message, root = blck.root

     return head

-  notice "Block proposed",
-    blck = shortLog(newBlock.message),
-    blockRoot = shortLog(newBlockRef[].root),
-    validator = shortLog(validator)
+  withBlck(newBlock):
+    notice "Block proposed",
+      blck = shortLog(blck.message), root = blck.root,
+      validator = shortLog(validator)

-  if node.config.dumpEnabled:
-    dump(node.config.dumpDirOutgoing, newBlock)
+    if node.config.dumpEnabled:
+      dump(node.config.dumpDirOutgoing, blck)

-  node.network.broadcast(
-    getBeaconBlocksTopic(node.dag.forkDigests.phase0), newBlock)
+  node.network.broadcastBeaconBlock(newBlock)

   beacon_blocks_proposed.inc()

@@ -412,37 +450,66 @@ proc proposeBlock(node: BeaconNode,
       getStateField(node.dag.headState.data, genesis_validators_root)
     randao = await validator.genRandaoReveal(
       fork, genesis_validators_root, slot)
-    message = await makeBeaconBlockForHeadAndSlot(
-      node, randao, validator_index, node.graffitiBytes, head, slot)
+  var newBlock = await makeBeaconBlockForHeadAndSlot(
+    node, randao, validator_index, node.graffitiBytes, head, slot)

-  if not message.isOk():
+  if newBlock.isErr():
     return head # already logged elsewhere!

-  var
-    newBlock = phase0.SignedBeaconBlock(
-      message: message.get()
-    )
-
-  newBlock.root = hash_tree_root(newBlock.message)
-
-  # TODO: recomputed in block proposal
-  let signing_root = compute_block_root(
-    fork, genesis_validators_root, slot, newBlock.root)
-  let notSlashable = node.attachedValidators
-    .slashingProtection
-    .registerBlock(validator_index, validator.pubkey, slot, signing_root)
-
-  if notSlashable.isErr:
-    warn "Slashing protection activated",
-      validator = validator.pubkey,
-      slot = slot,
-      existingProposal = notSlashable.error
-    return head
-
-  newBlock.signature = await validator.signBlockProposal(
-    fork, genesis_validators_root, slot, newBlock.root)
-
-  return await node.proposeSignedBlock(head, validator, newBlock)
+  let blck = newBlock.get()
+
+  # TODO abstract this, or move it into makeBeaconBlockForHeadAndSlot, and in
+  # general this is far too much copy/paste
+  let forked = case blck.kind:
+    of BeaconBlockFork.Phase0:
+      let root = hash_tree_root(blck.phase0Block)
+
+      # TODO: recomputed in block proposal
+      let signing_root = compute_block_root(
+        fork, genesis_validators_root, slot, root)
+      let notSlashable = node.attachedValidators
+        .slashingProtection
+        .registerBlock(validator_index, validator.pubkey, slot, signing_root)
+
+      if notSlashable.isErr:
+        warn "Slashing protection activated",
+          validator = validator.pubkey,
+          slot = slot,
+          existingProposal = notSlashable.error
+        return head
+
+      let signature = await validator.signBlockProposal(
+        fork, genesis_validators_root, slot, root)
+      ForkedSignedBeaconBlock.init(
+        phase0.SignedBeaconBlock(
+          message: blck.phase0Block, root: root, signature: signature)
+      )
+    of BeaconBlockFork.Altair:
+      let root = hash_tree_root(blck.altairBlock)
+
+      # TODO: recomputed in block proposal
+      let signing_root = compute_block_root(
+        fork, genesis_validators_root, slot, root)
+      let notSlashable = node.attachedValidators
+        .slashingProtection
+        .registerBlock(validator_index, validator.pubkey, slot, signing_root)
+
+      if notSlashable.isErr:
+        warn "Slashing protection activated",
+          validator = validator.pubkey,
+          slot = slot,
+          existingProposal = notSlashable.error
+        return head
+
+      let signature = await validator.signBlockProposal(
+        fork, genesis_validators_root, slot, root)
+
+      ForkedSignedBeaconBlock.init(
+        altair.SignedBeaconBlock(
+          message: blck.altairBlock, root: root, signature: signature)
+      )
+
+  return await node.proposeSignedBlock(head, validator, forked)

 proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
   ## Perform all attestations that the validators attached to this node should
@@ -873,16 +940,8 @@ proc sendBeaconBlock*(node: BeaconNode, forked: ForkedSignedBeaconBlock
   if head.slot >= forked.slot():
     node.network.broadcastBeaconBlock(forked)
     return SendBlockResult.ok(false)
-  let res =
-    case forked.kind
-    of BeaconBlockFork.Phase0:
-      await node.proposeSignedBlock(head, AttachedValidator(),
-                                    forked.phase0Block)
-    of BeaconBlockFork.Altair:
-      # TODO altair-transition
-      # await node.proposeSignedBlock(head, AttachedValidator(),
-      #                               forked.altairBlock)
-      head
+  let res = await node.proposeSignedBlock(head, AttachedValidator(), forked)
   if res == head:
     # `res == head` means failure, in such case we need to broadcast block
     # manually because of the specification.
@@ -16,18 +16,20 @@

 import
   math, stats, times, strformat,
-  options, random, tables, os,
+  tables, options, random, tables, os,
   confutils, chronicles, eth/db/kvstore_sqlite3,
-  eth/keys,
+  chronos/timer, eth/keys,
   ../tests/testblockutil,
   ../beacon_chain/spec/[
     beaconstate, forks, helpers, signatures, state_transition],
   ../beacon_chain/spec/datatypes/[phase0, altair],
-  ../beacon_chain/[beacon_node_types, beacon_chain_db],
+  ../beacon_chain/[beacon_node_types, beacon_chain_db, beacon_clock],
   ../beacon_chain/eth1/eth1_monitor,
   ../beacon_chain/validators/validator_pool,
+  ../beacon_chain/gossip_processing/gossip_validation,
   ../beacon_chain/consensus_object_pools/[blockchain_dag, block_quarantine,
-                                          block_clearance, attestation_pool],
+                                          block_clearance, attestation_pool,
+                                          sync_committee_msg_pool],
   ./simutils

 type Timers = enum
@@ -36,8 +38,12 @@ type Timers = enum
   tHashBlock = "Tree-hash block"
   tSignBlock = "Sign block"
   tAttest = "Have committee attest to block"
+  tSyncCommittees = "Produce sync committee actions"
   tReplay = "Replay all produced blocks"

+template seconds(x: uint64): timer.Duration =
+  timer.seconds(int(x))
+
 func gauss(r: var Rand; mu = 0.0; sigma = 1.0): float =
   # TODO This is present in Nim 1.4
   const K = sqrt(2 / E)
@@ -54,6 +60,7 @@ func gauss(r: var Rand; mu = 0.0; sigma = 1.0): float =
 cli do(slots = SLOTS_PER_EPOCH * 6,
        validators = SLOTS_PER_EPOCH * 400, # One per shard is minimum
        attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.82,
+       syncCommitteeRatio {.desc: "ratio of validators that perform sync committee actions in each round"} = 0.75,
        blockRatio {.desc: "ratio of slots with blocks"} = 1.0,
        replay = true):
   let
@@ -61,7 +68,9 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
     genesisBlock = get_initial_beacon_block(state[].data)
     genesisTime = float state[].data.genesis_time

-  var cfg = defaultRuntimeConfig
+  var
+    validatorKeyToIndex = initTable[ValidatorPubKey, int]()
+    cfg = defaultRuntimeConfig

   cfg.ALTAIR_FORK_EPOCH = 96.Slot.epoch

@@ -73,12 +82,16 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
   ChainDAGRef.preInit(db, state[].data, state[].data, genesisBlock)
   putInitialDepositContractSnapshot(db, depositContractSnapshot)

+  for i in 0 ..< state.data.validators.len:
+    validatorKeyToIndex[state.data.validators[i].pubkey] = i
+
   var
     dag = ChainDAGRef.init(cfg, db, {})
     eth1Chain = Eth1Chain.init(cfg, db)
     merkleizer = depositContractSnapshot.createMerkleizer
     quarantine = QuarantineRef.init(keys.newRng())
     attPool = AttestationPool.init(dag, quarantine)
+    syncCommitteePool = newClone SyncCommitteeMsgPool.init()
     timers: array[Timers, RunningStat]
     attesters: RunningStat
     r = initRand(1)
@@ -125,6 +138,90 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
         signature: sig.toValidatorSig()
       ), [validatorIdx], sig, data.slot)

+  proc handleSyncCommitteeActions(slot: Slot) =
+    type
+      Aggregator = object
+        committeeIdx: SyncCommitteeIndex
+        validatorIdx: int
+        selectionProof: ValidatorSig
+
+    let
+      syncCommittee = @(dag.syncCommitteeParticipants(slot + 1))
+      genesisValidatorsRoot = dag.genesisValidatorsRoot
+      fork = dag.forkAtEpoch(slot.epoch)
+      signingRoot = sync_committee_msg_signing_root(
+        fork, slot.epoch, genesisValidatorsRoot, dag.head.root)
+      messagesTime = slot.toBeaconTime(seconds(SECONDS_PER_SLOT div 3))
+      contributionsTime = slot.toBeaconTime(seconds(2 * SECONDS_PER_SLOT div 3))
+
+    var aggregators: seq[Aggregator]
+
+    for committeeIdx in allSyncCommittees():
+      for valKey in syncSubcommittee(syncCommittee, committeeIdx):
+        if rand(r, 1.0) > syncCommitteeRatio:
+          continue
+
+        let
+          validatorIdx = validatorKeyToIndex[valKey]
+          validarorPrivKey = makeFakeValidatorPrivKey(validatorIdx)
+          signature = blsSign(validarorPrivKey, signingRoot.data)
+          msg = SyncCommitteeMessage(
+            slot: slot,
+            beacon_block_root: dag.head.root,
+            validator_index: uint64 validatorIdx,
+            signature: signature.toValidatorSig)
+
+        let res = dag.validateSyncCommitteeMessage(
+          syncCommitteePool,
+          msg,
+          committeeIdx,
+          messagesTime,
+          false)
+
+        doAssert res.isOk
+
+        let
+          selectionProofSigningRoot =
+            sync_committee_selection_proof_signing_root(
+              fork, genesisValidatorsRoot, slot, uint64 committeeIdx)
+          selectionProofSig = blsSign(
+            validarorPrivKey, selectionProofSigningRoot.data).toValidatorSig
+
+        if is_sync_committee_aggregator(selectionProofSig):
+          aggregators.add Aggregator(
+            committeeIdx: committeeIdx,
+            validatorIdx: validatorIdx,
+            selectionProof: selectionProofSig)
+
+    for aggregator in aggregators:
+      var contribution: SyncCommitteeContribution
+      let contributionWasProduced = syncCommitteePool[].produceContribution(
+        slot, dag.head, aggregator.committeeIdx, contribution)
+
+      if contributionWasProduced:
+        let
+          contributionAndProof = ContributionAndProof(
+            aggregator_index: uint64 aggregator.validatorIdx,
+            contribution: contribution,
+            selection_proof: aggregator.selectionProof)
+
+          signingRoot = contribution_and_proof_signing_root(
+            fork, genesisValidatorsRoot, contributionAndProof)
+
+          validarorPrivKey = makeFakeValidatorPrivKey(aggregator.validatorIdx)
+
+          signedContributionAndProof = SignedContributionAndProof(
+            message: contributionAndProof,
+            signature: blsSign(validarorPrivKey, signingRoot.data).toValidatorSig)
+
+          res = dag.validateSignedContributionAndProof(
+            syncCommitteePool,
+            signedContributionAndProof,
+            contributionsTime,
+            false)
+
+        doAssert res.isOk
+
   proc getNewBlock[T](
       stateData: var StateData, slot: Slot, cache: var StateCache): T =
     let
@@ -144,25 +241,56 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
           addr stateData.data.hbsAltair
         else:
           static: doAssert false
-      message = makeBeaconBlock(
-        cfg,
-        hashedState[],
-        proposerIdx,
-        dag.head.root,
-        privKey.genRandaoReveal(
-          getStateField(stateData.data, fork),
-          getStateField(stateData.data, genesis_validators_root),
-          slot).toValidatorSig(),
-        eth1ProposalData.vote,
-        default(GraffitiBytes),
-        attPool.getAttestationsForTestBlock(stateData, cache),
-        eth1ProposalData.deposits,
-        @[],
-        @[],
-        @[],
-        ExecutionPayload(),
-        noRollback,
-        cache)
+
+      # TODO this is ugly, to need to almost-but-not-quite-identical calls to
+      # makeBeaconBlock. Add a quasi-dummy SyncAggregate param to the phase 0
+      # makeBeaconBlock, to avoid code duplication.
+      #
+      # One could combine these "when"s, but this "when" should disappear.
+      message =
+        when T is phase0.SignedBeaconBlock:
+          makeBeaconBlock(
+            cfg,
+            hashedState[],
+            proposerIdx,
+            dag.head.root,
+            privKey.genRandaoReveal(
+              getStateField(stateData.data, fork),
+              getStateField(stateData.data, genesis_validators_root),
+              slot).toValidatorSig(),
+            eth1ProposalData.vote,
+            default(GraffitiBytes),
+            attPool.getAttestationsForTestBlock(stateData, cache),
+            eth1ProposalData.deposits,
+            @[],
+            @[],
+            @[],
+            ExecutionPayload(),
+            noRollback,
+            cache)
+        elif T is altair.SignedBeaconBlock:
+          makeBeaconBlock(
+            cfg,
+            hashedState[],
+            proposerIdx,
+            dag.head.root,
+            privKey.genRandaoReveal(
+              getStateField(stateData.data, fork),
+              getStateField(stateData.data, genesis_validators_root),
+              slot).toValidatorSig(),
+            eth1ProposalData.vote,
+            default(GraffitiBytes),
+            attPool.getAttestationsForTestBlock(stateData, cache),
+            eth1ProposalData.deposits,
+            @[],
+            @[],
+            @[],
+            syncCommitteePool[].produceSyncAggregate(dag.head),
+            ExecutionPayload(),
+            noRollback,
+            cache)
+        else:
+          static: doAssert false

     var
       newBlock = T(
@@ -249,7 +377,9 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

     let newDeposits = int clamp(gauss(r, 5.0, 8.0), 0.0, 1000.0)
     for i in 0 ..< newDeposits:
-      let d = makeDeposit(merkleizer.getChunkCount.int, {skipBLSValidation})
+      let validatorIdx = merkleizer.getChunkCount.int
+      let d = makeDeposit(validatorIdx, {skipBLSValidation})
+      validatorKeyToIndex[d.pubkey] = validatorIdx
       eth1Block.deposits.add d
       merkleizer.addChunk hash_tree_root(d).data

@@ -268,6 +398,11 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
     if attesterRatio > 0.0:
       withTimer(timers[tAttest]):
         handleAttestations(slot)
+    if syncCommitteeRatio > 0.0:
+      withTimer(timers[tSyncCommittees]):
+        handleSyncCommitteeActions(slot)
+
+    syncCommitteePool[].clearPerSlotData()

     # TODO if attestation pool was smarter, it would include older attestations
     # too!
@@ -14,7 +14,7 @@ import
   ../beacon_chain/spec/[helpers, signatures, state_transition, forks],
   ../beacon_chain/consensus_object_pools/attestation_pool

-func makeFakeValidatorPrivKey(i: int): ValidatorPrivKey =
+func makeFakeValidatorPrivKey*(i: int): ValidatorPrivKey =
   # 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,
   # lighthouse.
   # TODO: switch to https://github.com/ethereum/eth2.0-pm/issues/60