altair-capable beacon block creation (#2834)
* altair-capable beacon block creation
* update block_sim to use sync committees and the new block production interface
parent 3efcdb0de5
commit 2d8a796a93
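The diff below switches block production from returning a phase0.BeaconBlock to a fork-agnostic ForkedBeaconBlock. As a rough caller-side sketch of the new interface (proc and type names are taken from the diff; the imports, module layout and BeaconNode setup are assumptions, not part of this commit):

# Illustrative sketch only: assumes the nimbus-eth2 modules defining
# BeaconNode, ForkedBeaconBlock, BeaconBlockFork and
# makeBeaconBlockForHeadAndSlot are available on the import path.
import chronos

proc produceExample(node: BeaconNode, randao: ValidatorSig,
                    validator_index: ValidatorIndex,
                    graffiti: GraffitiBytes,
                    head: BlockRef, slot: Slot) {.async.} =
  # makeBeaconBlockForHeadAndSlot now returns a ForkedBlockResult
  # (Result[ForkedBeaconBlock, string]), so the caller dispatches on the
  # fork of the block it was handed instead of assuming phase0.
  let res = await makeBeaconBlockForHeadAndSlot(
    node, randao, validator_index, graffiti, head, slot)
  if res.isErr():
    return                      # production failure, already logged upstream
  let blck = res.get()
  case blck.kind
  of BeaconBlockFork.Phase0:
    discard blck.phase0Block    # a phase0.BeaconBlock
  of BeaconBlockFork.Altair:
    discard blck.altairBlock    # an altair.BeaconBlock carrying a sync_aggregate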
@@ -191,19 +191,15 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
let res = await makeBeaconBlockForHeadAndSlot(
node, qrandao, proposer.get(), qgraffiti, qhead, qslot)
if res.isErr():
return RestApiResponse.jsonError(Http400, BlockProduceError)
return RestApiResponse.jsonError(Http400, res.error())
res.get()
return
when message is phase0.BeaconBlock:
# TODO (cheatfate): This could be removed when `altair` branch will be
# merged.
RestApiResponse.jsonResponse(message)
else:
case message.kind
of BeaconBlockFork.Phase0:
RestApiResponse.jsonResponse(message.phase0Block.message)
of BeaconBlockFork.Altair:
return RestApiResponse.jsonError(Http400, BlockProduceError)
RestApiResponse.jsonResponse(message.phase0Block)
else:
RestApiResponse.jsonError(Http400,
"Unable to produce block for altair fork")

# https://ethereum.github.io/beacon-APIs/#/Validator/produceBlockV2
router.api(MethodGet, "/api/eth/v2/validator/blocks/{slot}") do (
@@ -253,25 +249,9 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
let res = await makeBeaconBlockForHeadAndSlot(
node, qrandao, proposer.get(), qgraffiti, qhead, qslot)
if res.isErr():
return RestApiResponse.jsonError(Http400, BlockProduceError)
return RestApiResponse.jsonError(Http400, res.error())
res.get()
return
when message is phase0.BeaconBlock:
# TODO (cheatfate): This could be removed when `altair` branch will be
# merged.
RestApiResponse.jsonResponse(
(version: "phase0", data: message)
)
else:
case message.kind
of BeaconBlockFork.Phase0:
RestApiResponse.jsonResponse(
(version: "phase0", data: message.phase0Block.message)
)
of BeaconBlockFork.Altair:
RestApiResponse.jsonResponse(
(version: "altair", data: message.altairBlock.message)
)
return RestApiResponse.jsonResponsePlain(message)

# https://ethereum.github.io/beacon-APIs/#/Validator/produceAttestationData
router.api(MethodGet, "/api/eth/v1/validator/attestation_data") do (
@@ -411,7 +411,9 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
# It was not integrated into the beacon node's database.
return 202
else:
let res = await proposeSignedBlock(node, head, AttachedValidator(), blck)
let res = await proposeSignedBlock(
node, head, AttachedValidator(),
ForkedSignedBeaconBlock.init(blck))
if res == head:
# TODO altair-transition, but not immediate testnet-priority
node.network.broadcastBeaconBlock(ForkedSignedBeaconBlock.init(blck))
@@ -46,7 +46,13 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
if message.isErr():
raise newException(CatchableError,
"could not retrieve block for slot: " & $slot)
return message.get()
let blck = message.get()
case blck.kind
of BeaconBlockFork.Phase0:
return blck.phase0Block
of BeaconBlockFork.Altair:
raise newException(CatchableError,
"could not retrieve block for altair blocks")

rpcServer.rpc("post_v1_validator_block") do (body: phase0.SignedBeaconBlock) -> bool:
debug "post_v1_validator_block",

@@ -57,7 +63,8 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
if head.slot >= body.message.slot:
raise newException(CatchableError,
"Proposal is for a past slot: " & $body.message.slot)
if head == await proposeSignedBlock(node, head, AttachedValidator(), body):
if head == await proposeSignedBlock(
node, head, AttachedValidator(), ForkedSignedBeaconBlock.init(body)):
raise newException(CatchableError, "Could not propose block")
return true
@@ -406,6 +406,7 @@ proc makeBeaconBlock*(
proposerSlashings: seq[ProposerSlashing],
attesterSlashings: seq[AttesterSlashing],
voluntaryExits: seq[SignedVoluntaryExit],
sync_aggregate: SyncAggregate,
executionPayload: ExecutionPayload,
rollback: RollbackAltairHashedProc,
cache: var StateCache): Result[altair.BeaconBlock, string] =

@@ -432,10 +433,7 @@ proc makeBeaconBlock*(
deposits: List[Deposit, Limit MAX_DEPOSITS](deposits),
voluntary_exits:
List[SignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS](voluntaryExits),
sync_aggregate: SyncAggregate(sync_committee_signature:
default(CookedSig).toValidatorSig)))

# TODO sync committees
sync_aggregate: sync_aggregate))

let res = process_block(cfg, state.data, blck, {skipBlsValidation}, cache)
@@ -69,6 +69,7 @@ logScope: topics = "beacval"
type
SendResult* = Result[void, cstring]
SendBlockResult* = Result[bool, cstring]
ForkedBlockResult* = Result[ForkedBeaconBlock, string]

proc findValidator(validators: auto, pubKey: ValidatorPubKey):
Option[ValidatorIndex] =

@@ -316,7 +317,7 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
validator_index: ValidatorIndex,
graffiti: GraffitiBytes,
head: BlockRef, slot: Slot
): Future[Result[phase0.BeaconBlock, string]] {.async.} =
): Future[ForkedBlockResult] {.async.} =
# Advance state to the slot that we're proposing for

let

@@ -330,8 +331,10 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,

if eth1Proposal.hasMissingDeposits:
error "Eth1 deposits not available. Skipping block proposal", slot
return Result[phase0.BeaconBlock, string].err("Eth1 deposits not available")
return ForkedBlockResult.err("Eth1 deposits not available")

let doPhase0 = slot.epoch < node.dag.cfg.ALTAIR_FORK_EPOCH
return if doPhase0:
func restore(v: var phase0.HashedBeaconState) =
# TODO address this ugly workaround - there should probably be a
# `state_transition` that takes a `StateData` instead and updates

@@ -339,7 +342,7 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
doAssert v.addr == addr proposalStateAddr.data.hbsPhase0
assign(proposalStateAddr[], poolPtr.headState)

return makeBeaconBlock(
makeBeaconBlock(
node.dag.cfg,
stateData.data.hbsPhase0,
validator_index,
@@ -355,38 +358,73 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
node.exitPool[].getVoluntaryExitsForBlock(),
default(ExecutionPayload),
restore,
cache)
cache).map(proc (t: auto): auto = ForkedBeaconBlock.init(t))
else:
func restore(v: var altair.HashedBeaconState) =
# TODO address this ugly workaround - there should probably be a
# `state_transition` that takes a `StateData` instead and updates
# the block as well
doAssert v.addr == addr proposalStateAddr.data.hbsAltair
assign(proposalStateAddr[], poolPtr.headState)

makeBeaconBlock(
node.dag.cfg,
stateData.data.hbsAltair,
validator_index,
head.root,
randao_reveal,
eth1Proposal.vote,
graffiti,
node.attestationPool[].getAttestationsForBlock(
stateData.data.hbsAltair, cache),
eth1Proposal.deposits,
node.exitPool[].getProposerSlashingsForBlock(),
node.exitPool[].getAttesterSlashingsForBlock(),
node.exitPool[].getVoluntaryExitsForBlock(),
node.sync_committee_msg_pool[].produceSyncAggregate(head),
default(ExecutionPayload),
restore,
cache).map(proc (t: auto): auto = ForkedBeaconBlock.init(t))

proc proposeSignedBlock*(node: BeaconNode,
head: BlockRef,
validator: AttachedValidator,
newBlock: phase0.SignedBeaconBlock):
newBlock: ForkedSignedBeaconBlock):
Future[BlockRef] {.async.} =
let newBlockRef = node.dag.addRawBlock(node.quarantine, newBlock) do (
let newBlockRef =
case newBlock.kind:
of BeaconBlockFork.Phase0:
node.dag.addRawBlock(node.quarantine, newBlock.phase0Block) do (
blckRef: BlockRef, trustedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef):
# Callback add to fork choice if signed block valid (and becomes trusted)
node.attestationPool[].addForkChoice(
epochRef, blckRef, trustedBlock.message,
node.beaconClock.now().slotOrZero())
of BeaconBlockFork.Altair:
node.dag.addRawBlock(node.quarantine, newBlock.altairBlock) do (
blckRef: BlockRef, trustedBlock: altair.TrustedSignedBeaconBlock,
epochRef: EpochRef):
# Callback add to fork choice if signed block valid (and becomes trusted)
node.attestationPool[].addForkChoice(
epochRef, blckRef, trustedBlock.message,
node.beaconClock.now().slotOrZero())

if newBlockRef.isErr:
withBlck(newBlock):
warn "Unable to add proposed block to block pool",
newBlock = shortLog(newBlock.message),
blockRoot = shortLog(newBlock.root)

newBlock = blck.message, root = blck.root
return head

withBlck(newBlock):
notice "Block proposed",
blck = shortLog(newBlock.message),
blockRoot = shortLog(newBlockRef[].root),
blck = shortLog(blck.message), root = blck.root,
validator = shortLog(validator)

if node.config.dumpEnabled:
dump(node.config.dumpDirOutgoing, newBlock)
dump(node.config.dumpDirOutgoing, blck)

node.network.broadcast(
getBeaconBlocksTopic(node.dag.forkDigests.phase0), newBlock)
node.network.broadcastBeaconBlock(newBlock)

beacon_blocks_proposed.inc()
@@ -412,22 +450,23 @@ proc proposeBlock(node: BeaconNode,
getStateField(node.dag.headState.data, genesis_validators_root)
randao = await validator.genRandaoReveal(
fork, genesis_validators_root, slot)
message = await makeBeaconBlockForHeadAndSlot(
var newBlock = await makeBeaconBlockForHeadAndSlot(
node, randao, validator_index, node.graffitiBytes, head, slot)

if not message.isOk():
if newBlock.isErr():
return head # already logged elsewhere!

var
newBlock = phase0.SignedBeaconBlock(
message: message.get()
)
let blck = newBlock.get()

newBlock.root = hash_tree_root(newBlock.message)
# TODO abstract this, or move it into makeBeaconBlockForHeadAndSlot, and in
# general this is far too much copy/paste
let forked = case blck.kind:
of BeaconBlockFork.Phase0:
let root = hash_tree_root(blck.phase0Block)

# TODO: recomputed in block proposal
let signing_root = compute_block_root(
fork, genesis_validators_root, slot, newBlock.root)
fork, genesis_validators_root, slot, root)
let notSlashable = node.attachedValidators
.slashingProtection
.registerBlock(validator_index, validator.pubkey, slot, signing_root)
@@ -439,10 +478,38 @@ proc proposeBlock(node: BeaconNode,
existingProposal = notSlashable.error
return head

newBlock.signature = await validator.signBlockProposal(
fork, genesis_validators_root, slot, newBlock.root)
let signature = await validator.signBlockProposal(
fork, genesis_validators_root, slot, root)
ForkedSignedBeaconBlock.init(
phase0.SignedBeaconBlock(
message: blck.phase0Block, root: root, signature: signature)
)
of BeaconBlockFork.Altair:
let root = hash_tree_root(blck.altairBlock)

return await node.proposeSignedBlock(head, validator, newBlock)
# TODO: recomputed in block proposal
let signing_root = compute_block_root(
fork, genesis_validators_root, slot, root)
let notSlashable = node.attachedValidators
.slashingProtection
.registerBlock(validator_index, validator.pubkey, slot, signing_root)

if notSlashable.isErr:
warn "Slashing protection activated",
validator = validator.pubkey,
slot = slot,
existingProposal = notSlashable.error
return head

let signature = await validator.signBlockProposal(
fork, genesis_validators_root, slot, root)

ForkedSignedBeaconBlock.init(
altair.SignedBeaconBlock(
message: blck.altairBlock, root: root, signature: signature)
)

return await node.proposeSignedBlock(head, validator, forked)

proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
## Perform all attestations that the validators attached to this node should
@@ -873,16 +940,8 @@ proc sendBeaconBlock*(node: BeaconNode, forked: ForkedSignedBeaconBlock
if head.slot >= forked.slot():
node.network.broadcastBeaconBlock(forked)
return SendBlockResult.ok(false)
let res =
case forked.kind
of BeaconBlockFork.Phase0:
await node.proposeSignedBlock(head, AttachedValidator(),
forked.phase0Block)
of BeaconBlockFork.Altair:
# TODO altair-transition
# await node.proposeSignedBlock(head, AttachedValidator(),
# forked.altairBlock)
head

let res = await node.proposeSignedBlock(head, AttachedValidator(), forked)
if res == head:
# `res == head` means failure, in such case we need to broadcast block
# manually because of the specification.
@@ -16,18 +16,20 @@

import
math, stats, times, strformat,
options, random, tables, os,
tables, options, random, tables, os,
confutils, chronicles, eth/db/kvstore_sqlite3,
eth/keys,
chronos/timer, eth/keys,
../tests/testblockutil,
../beacon_chain/spec/[
beaconstate, forks, helpers, signatures, state_transition],
../beacon_chain/spec/datatypes/[phase0, altair],
../beacon_chain/[beacon_node_types, beacon_chain_db],
../beacon_chain/[beacon_node_types, beacon_chain_db, beacon_clock],
../beacon_chain/eth1/eth1_monitor,
../beacon_chain/validators/validator_pool,
../beacon_chain/gossip_processing/gossip_validation,
../beacon_chain/consensus_object_pools/[blockchain_dag, block_quarantine,
block_clearance, attestation_pool],
block_clearance, attestation_pool,
sync_committee_msg_pool],
./simutils

type Timers = enum

@@ -36,8 +38,12 @@ type Timers = enum
tHashBlock = "Tree-hash block"
tSignBlock = "Sign block"
tAttest = "Have committee attest to block"
tSyncCommittees = "Produce sync committee actions"
tReplay = "Replay all produced blocks"

template seconds(x: uint64): timer.Duration =
timer.seconds(int(x))

func gauss(r: var Rand; mu = 0.0; sigma = 1.0): float =
# TODO This is present in Nim 1.4
const K = sqrt(2 / E)

@@ -54,6 +60,7 @@ func gauss(r: var Rand; mu = 0.0; sigma = 1.0): float =
cli do(slots = SLOTS_PER_EPOCH * 6,
validators = SLOTS_PER_EPOCH * 400, # One per shard is minimum
attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.82,
syncCommitteeRatio {.desc: "ratio of validators that perform sync committee actions in each round"} = 0.75,
blockRatio {.desc: "ratio of slots with blocks"} = 1.0,
replay = true):
let

@@ -61,7 +68,9 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
genesisBlock = get_initial_beacon_block(state[].data)
genesisTime = float state[].data.genesis_time

var cfg = defaultRuntimeConfig
var
validatorKeyToIndex = initTable[ValidatorPubKey, int]()
cfg = defaultRuntimeConfig

cfg.ALTAIR_FORK_EPOCH = 96.Slot.epoch

@@ -73,12 +82,16 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
ChainDAGRef.preInit(db, state[].data, state[].data, genesisBlock)
putInitialDepositContractSnapshot(db, depositContractSnapshot)

for i in 0 ..< state.data.validators.len:
validatorKeyToIndex[state.data.validators[i].pubkey] = i

var
dag = ChainDAGRef.init(cfg, db, {})
eth1Chain = Eth1Chain.init(cfg, db)
merkleizer = depositContractSnapshot.createMerkleizer
quarantine = QuarantineRef.init(keys.newRng())
attPool = AttestationPool.init(dag, quarantine)
syncCommitteePool = newClone SyncCommitteeMsgPool.init()
timers: array[Timers, RunningStat]
attesters: RunningStat
r = initRand(1)
@@ -125,6 +138,90 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
signature: sig.toValidatorSig()
), [validatorIdx], sig, data.slot)

proc handleSyncCommitteeActions(slot: Slot) =
type
Aggregator = object
committeeIdx: SyncCommitteeIndex
validatorIdx: int
selectionProof: ValidatorSig

let
syncCommittee = @(dag.syncCommitteeParticipants(slot + 1))
genesisValidatorsRoot = dag.genesisValidatorsRoot
fork = dag.forkAtEpoch(slot.epoch)
signingRoot = sync_committee_msg_signing_root(
fork, slot.epoch, genesisValidatorsRoot, dag.head.root)
messagesTime = slot.toBeaconTime(seconds(SECONDS_PER_SLOT div 3))
contributionsTime = slot.toBeaconTime(seconds(2 * SECONDS_PER_SLOT div 3))

var aggregators: seq[Aggregator]

for committeeIdx in allSyncCommittees():
for valKey in syncSubcommittee(syncCommittee, committeeIdx):
if rand(r, 1.0) > syncCommitteeRatio:
continue

let
validatorIdx = validatorKeyToIndex[valKey]
validarorPrivKey = makeFakeValidatorPrivKey(validatorIdx)
signature = blsSign(validarorPrivKey, signingRoot.data)
msg = SyncCommitteeMessage(
slot: slot,
beacon_block_root: dag.head.root,
validator_index: uint64 validatorIdx,
signature: signature.toValidatorSig)

let res = dag.validateSyncCommitteeMessage(
syncCommitteePool,
msg,
committeeIdx,
messagesTime,
false)

doAssert res.isOk

let
selectionProofSigningRoot =
sync_committee_selection_proof_signing_root(
fork, genesisValidatorsRoot, slot, uint64 committeeIdx)
selectionProofSig = blsSign(
validarorPrivKey, selectionProofSigningRoot.data).toValidatorSig

if is_sync_committee_aggregator(selectionProofSig):
aggregators.add Aggregator(
committeeIdx: committeeIdx,
validatorIdx: validatorIdx,
selectionProof: selectionProofSig)

for aggregator in aggregators:
var contribution: SyncCommitteeContribution
let contributionWasProduced = syncCommitteePool[].produceContribution(
slot, dag.head, aggregator.committeeIdx, contribution)

if contributionWasProduced:
let
contributionAndProof = ContributionAndProof(
aggregator_index: uint64 aggregator.validatorIdx,
contribution: contribution,
selection_proof: aggregator.selectionProof)

signingRoot = contribution_and_proof_signing_root(
fork, genesisValidatorsRoot, contributionAndProof)

validarorPrivKey = makeFakeValidatorPrivKey(aggregator.validatorIdx)

signedContributionAndProof = SignedContributionAndProof(
message: contributionAndProof,
signature: blsSign(validarorPrivKey, signingRoot.data).toValidatorSig)

res = dag.validateSignedContributionAndProof(
syncCommitteePool,
signedContributionAndProof,
contributionsTime,
false)

doAssert res.isOk

proc getNewBlock[T](
stateData: var StateData, slot: Slot, cache: var StateCache): T =
let
@@ -144,7 +241,15 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
addr stateData.data.hbsAltair
else:
static: doAssert false
message = makeBeaconBlock(

# TODO this is ugly, to need to almost-but-not-quite-identical calls to
# makeBeaconBlock. Add a quasi-dummy SyncAggregate param to the phase 0
# makeBeaconBlock, to avoid code duplication.
#
# One could combine these "when"s, but this "when" should disappear.
message =
when T is phase0.SignedBeaconBlock:
makeBeaconBlock(
cfg,
hashedState[],
proposerIdx,
@@ -163,6 +268,29 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
ExecutionPayload(),
noRollback,
cache)
elif T is altair.SignedBeaconBlock:
makeBeaconBlock(
cfg,
hashedState[],
proposerIdx,
dag.head.root,
privKey.genRandaoReveal(
getStateField(stateData.data, fork),
getStateField(stateData.data, genesis_validators_root),
slot).toValidatorSig(),
eth1ProposalData.vote,
default(GraffitiBytes),
attPool.getAttestationsForTestBlock(stateData, cache),
eth1ProposalData.deposits,
@[],
@[],
@[],
syncCommitteePool[].produceSyncAggregate(dag.head),
ExecutionPayload(),
noRollback,
cache)
else:
static: doAssert false

var
newBlock = T(
@@ -249,7 +377,9 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

let newDeposits = int clamp(gauss(r, 5.0, 8.0), 0.0, 1000.0)
for i in 0 ..< newDeposits:
let d = makeDeposit(merkleizer.getChunkCount.int, {skipBLSValidation})
let validatorIdx = merkleizer.getChunkCount.int
let d = makeDeposit(validatorIdx, {skipBLSValidation})
validatorKeyToIndex[d.pubkey] = validatorIdx
eth1Block.deposits.add d
merkleizer.addChunk hash_tree_root(d).data
@@ -268,6 +398,11 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
if attesterRatio > 0.0:
withTimer(timers[tAttest]):
handleAttestations(slot)
if syncCommitteeRatio > 0.0:
withTimer(timers[tSyncCommittees]):
handleSyncCommitteeActions(slot)

syncCommitteePool[].clearPerSlotData()

# TODO if attestation pool was smarter, it would include older attestations
# too!
@@ -14,7 +14,7 @@ import
../beacon_chain/spec/[helpers, signatures, state_transition, forks],
../beacon_chain/consensus_object_pools/attestation_pool

func makeFakeValidatorPrivKey(i: int): ValidatorPrivKey =
func makeFakeValidatorPrivKey*(i: int): ValidatorPrivKey =
# 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,
# lighthouse.
# TODO: switch to https://github.com/ethereum/eth2.0-pm/issues/60