mirror of https://github.com/status-im/nimbus-eth2.git (synced 2025-01-11 14:54:12 +00:00)
cleanups (#3819)

* avoid circular panda imports
* move deposit merkleization helpers to spec/
* normalize validator signature helpers to spec names / params
* remove redundant functions for remote signing
This commit is contained in:
parent f08f9e2bd7
commit c145916414
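The renames in the last two bullets follow one convention: each validator-pool helper is named after the spec signature it produces and takes its parameters in the spec's order (fork, genesis_validators_root, then the message being signed). A minimal Nim sketch of the new call shape at a hypothetical call site (validator, fork, genesisRoot, and slot are assumed to be in scope):

    # Sketch, not part of the diff: the randao reveal is now requested via
    # getEpochSignature, mirroring the spec function get_epoch_signature.
    let res = await validator.getEpochSignature(
      fork, genesisRoot, slot.epoch)  # was: genRandaoReveal(fork, genesisRoot, slot)
    if res.isErr():
      warn "Unable to generate randao reveal", error_msg = res.error()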
@ -21,7 +21,6 @@ import
  eth/common/eth_types as commonEthTypes, eth/net/nat,
  eth/p2p/discoveryv5/enr,
  json_serialization, web3/[ethtypes, confutils_defs],

  ./spec/[keystore, network, crypto],
  ./spec/datatypes/base,
  ./networking/network_metadata,
@ -13,7 +13,6 @@
import
  # Status libraries
  stew/[bitops2, objects],
  chronos,
  # Beacon chain internals
  ../spec/datatypes/[phase0, altair, bellatrix],
  "."/[block_pools_types, blockchain_dag]
@ -8,7 +8,6 @@
{.push raises: [Defect].}

import chronicles
from ".."/".."/conf import StdoutLogKind

type
  VanityLogs* = object
@ -17,22 +16,6 @@ type

# Created by http://beatscribe.com/ (beatscribe#1008 on Discord)
# These need to be the main body of the log not to be reformatted or escaped.
proc mono🐼() = notice "\n" & "text-version.txt".staticRead
proc color🐼() = notice "\n" & "color-version.ans".staticRead
proc blink🐼() = notice "\n" & "blink-version.ans".staticRead

func getPandas*(stdoutKind: StdoutLogKind): VanityLogs =
  case stdoutKind
  of StdoutLogKind.Auto: raiseAssert "inadmissable here"
  of StdoutLogKind.Colors:
    VanityLogs(
      onMergeTransitionBlock: color🐼,
      onFinalizedMergeTransitionBlock: blink🐼)
  of StdoutLogKind.NoColors:
    VanityLogs(
      onMergeTransitionBlock: mono🐼,
      onFinalizedMergeTransitionBlock: mono🐼)
  of StdoutLogKind.Json, StdoutLogKind.None:
    VanityLogs(
      onMergeTransitionBlock: (proc() = notice "🐼 Proof of Stake Activated 🐼"),
      onFinalizedMergeTransitionBlock: (proc() = notice "🐼 Proof of Stake Finalized 🐼"))
proc mono🐼*() = notice "\n" & "text-version.txt".staticRead
proc color🐼*() = notice "\n" & "color-version.ans".staticRead
proc blink🐼*() = notice "\n" & "blink-version.ans".staticRead
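Moving getPandas out of pandas.nim is what breaks the import cycle: the module now only needs chronicles and conf, while the beacon node builds the VanityLogs table itself (see the nimbus_beacon_node.nim hunk further down). A minimal usage sketch, assuming the exported procs above:

    # Nim sketch, not part of the diff: pick the logs for a stdout kind and
    # fire one when the merge transition block is observed.
    let logs = getPandas(StdoutLogKind.NoColors)
    logs.onMergeTransitionBlock()  # prints the monochrome panda via notice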
@ -74,8 +74,6 @@ type
    when hasGenesisDetection:
      activeValidatorsCount*: uint64

  DepositsMerkleizer* = SszMerkleizer[depositContractLimit]

  Eth1Chain* = object
    db: BeaconChainDB
    cfg: RuntimeConfig
@ -180,12 +178,6 @@ declareGauge eth1_finalized_deposits,
declareGauge eth1_chain_len,
  "The length of the in-memory chain of Eth1 blocks"

func depositCountU64(s: DepositContractState): uint64 =
  for i in 0 .. 23:
    doAssert s.deposit_count[i] == 0

  uint64.fromBytesBE s.deposit_count.toOpenArray(24, 31)

template cfg(m: Eth1Monitor): auto =
  m.depositsChain.cfg

@ -687,19 +679,6 @@ proc onBlockHeaders(p: Web3DataProviderRef,
func getDepositsRoot*(m: DepositsMerkleizer): Eth2Digest =
  mixInLength(m.getFinalHash, int m.totalChunks)

func toDepositContractState*(merkleizer: DepositsMerkleizer): DepositContractState =
  # TODO There is an off by one discrepancy in the size of the arrays here that
  #      need to be investigated. It shouldn't matter as long as the tree is
  #      not populated to its maximum size.
  result.branch[0..31] = merkleizer.getCombinedChunks[0..31]
  result.deposit_count[24..31] = merkleizer.getChunkCount().toBytesBE

func createMerkleizer(s: DepositContractState): DepositsMerkleizer =
  DepositsMerkleizer.init(s.branch, s.depositCountU64)

func createMerkleizer*(s: DepositContractSnapshot): DepositsMerkleizer =
  createMerkleizer(s.depositContractState)

func eth1DataFromMerkleizer(eth1Block: Eth2Digest,
                            merkleizer: DepositsMerkleizer): Eth1Data =
  Eth1Data(
@ -960,13 +939,14 @@ template getOrDefault[T, E](r: Result[T, E]): T =
  get(r, default(TT))

proc init*(T: type Eth1Chain, cfg: RuntimeConfig, db: BeaconChainDB): T =
  let finalizedDeposits = db.getEth2FinalizedTo().getOrDefault()
  let m = finalizedDeposits.createMerkleizer
  let
    finalizedDeposits = db.getEth2FinalizedTo().getOrDefault()
    m = DepositsMerkleizer.init(finalizedDeposits.depositContractState)

  T(db: db,
    cfg: cfg,
    finalizedBlockHash: finalizedDeposits.eth1Block,
    finalizedDepositsMerkleizer: finalizedDeposits.createMerkleizer)
    finalizedDepositsMerkleizer: m)

proc createInitialDepositSnapshot*(
    depositContractAddress: Eth1Address,
@ -19,12 +19,10 @@ import
  ../spec/[eth2_merkleization, digest],
  ../spec/datatypes/base

const depositContractLimit* = Limit(1'u64 shl DEPOSIT_CONTRACT_TREE_DEPTH)

func attachMerkleProofs*(deposits: var openArray[Deposit]) =
  let depositsRoots = mapIt(deposits, hash_tree_root(it.data))

  var incrementalMerkleProofs = createMerkleizer(depositContractLimit)
  var incrementalMerkleProofs = createMerkleizer(DEPOSIT_CONTRACT_LIMIT)

  for i in 0 ..< depositsRoots.len:
    incrementalMerkleProofs.addChunkAndGenMerkleProof(depositsRoots[i], deposits[i].proof)
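With the file-local depositContractLimit gone, attachMerkleProofs sizes its merkleizer with DEPOSIT_CONTRACT_LIMIT from spec/datatypes/base (added in a hunk below), so every merkleizer in the tree is sized the same way. A minimal usage sketch, assuming deposits whose data fields are already filled in:

    # Nim sketch, not part of the diff: generate inclusion proofs in place.
    var deposits: seq[Deposit] = getTestDeposits()  # hypothetical helper
    attachMerkleProofs(deposits)                    # fills deposits[i].proof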
@ -14,6 +14,7 @@ import
  stew/[byteutils, io2],
  eth/p2p/discoveryv5/[enr, random2],
  eth/keys,
  ./consensus_object_pools/vanity_logs/pandas,
  ./rpc/[rest_api, state_ttl_cache],
  ./spec/datatypes/[altair, bellatrix, phase0],
  ./spec/[engine_authentication, weak_subjectivity],
@ -30,8 +31,6 @@ from
import
  TopicParams, validateParameters, init

from "."/consensus_object_pools/vanity_logs/pandas import getPandas

when defined(windows):
  import winlean

@ -145,6 +144,22 @@ versionGauge.set(1, labelValues=[fullVersionStr, gitRevision])

logScope: topics = "beacnde"

func getPandas(stdoutKind: StdoutLogKind): VanityLogs =
  case stdoutKind
  of StdoutLogKind.Auto: raiseAssert "inadmissable here"
  of StdoutLogKind.Colors:
    VanityLogs(
      onMergeTransitionBlock: color🐼,
      onFinalizedMergeTransitionBlock: blink🐼)
  of StdoutLogKind.NoColors:
    VanityLogs(
      onMergeTransitionBlock: mono🐼,
      onFinalizedMergeTransitionBlock: mono🐼)
  of StdoutLogKind.Json, StdoutLogKind.None:
    VanityLogs(
      onMergeTransitionBlock: (proc() = notice "🐼 Proof of Stake Activated 🐼"),
      onFinalizedMergeTransitionBlock: (proc() = notice "🐼 Proof of Stake Finalized 🐼"))

proc loadChainDag(
    config: BeaconNodeConf,
    cfg: RuntimeConfig,
@ -94,6 +94,8 @@ const
  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/validator.md#misc
  ATTESTATION_SUBNET_COUNT* = 64

  DEPOSIT_CONTRACT_LIMIT* = Limit(1'u64 shl DEPOSIT_CONTRACT_TREE_DEPTH)

template maxSize*(n: int) {.pragma.}

# Block validation flow
@ -861,6 +863,23 @@ func init*(T: type GraffitiBytes, input: string): GraffitiBytes
    raise newException(ValueError, "The graffiti value should be 32 characters or less")
  distinctBase(result)[0 ..< input.len] = toBytes(input)

func init*(
    T: type Attestation,
    indices_in_committee: openArray[uint64],
    committee_len: int,
    data: AttestationData,
    signature: ValidatorSig): Result[T, cstring] =
  var bits = CommitteeValidatorsBits.init(committee_len)
  for index_in_committee in indices_in_committee:
    if index_in_committee >= committee_len.uint64: return err("Invalid index for committee")
    bits.setBit index_in_committee

  ok Attestation(
    aggregation_bits: bits,
    data: data,
    signature: signature
  )

func defaultGraffitiBytes*(): GraffitiBytes =
  const graffitiBytes =
    toBytes("Nimbus/" & fullVersionStr)
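The new Attestation.init centralizes the bit-setting that callers previously hand-rolled, and reports a bad committee index as an error instead of corrupting the bitfield. A minimal sketch with hypothetical data and signature values:

    # Nim sketch, not part of the diff: one attester at position 3 of a
    # 16-member committee; init errs if an index >= committee_len is passed.
    let att = Attestation.init([3'u64], 16, data, signature).expect("valid index")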
@ -28,21 +28,32 @@ import
  chronicles,
  nimcrypto/[sha2, hash],
  stew/[byteutils, endians2, objects],
  json_serialization,
  blscurve
  json_serialization

export
  # Exports from sha2 / hash are explicit to avoid exporting upper-case `$` and
  # constant-time `==`
  sha2.update, hash.fromHex, json_serialization
  hash.fromHex, json_serialization

type
  Eth2Digest* = MDigest[32 * 8] ## `hash32` from spec

when BLS_BACKEND == BLST:
const PREFER_BLST_SHA256* {.booldefine.} = true

when PREFER_BLST_SHA256:
  import blscurve
  when BLS_BACKEND == BLST:
    const USE_BLST_SHA256 = true
  else:
    const USE_BLST_SHA256 = false
else:
  const USE_BLST_SHA256 = false

when USE_BLST_SHA256:
  export blscurve.update
  type Eth2DigestCtx* = BLST_SHA256_CTX
else:
  export sha2.update
  type Eth2DigestCtx* = sha2.sha256

func `$`*(x: Eth2Digest): string =
@ -60,13 +71,13 @@ chronicles.formatIt Eth2Digest:
func eth2digest*(v: openArray[byte]): Eth2Digest {.noinit.} =
  ## Apply the Eth2 Hash function
  ## Do NOT use for secret data.
  when BLS_BACKEND == BLST:
  when USE_BLST_SHA256:
    # BLST has a fast assembly optimized SHA256
    result.data.bls_sha256_digest(v)
  else:
    # We use the init-update-finish interface to avoid
    # the expensive burning/clearing memory (20~30% perf)
    let ctx: Eth2DigestCtx
    var ctx {.noinit.}: Eth2DigestCtx
    ctx.init()
    ctx.update(v)
    ctx.finish()
@ -83,9 +94,9 @@ template withEth2Hash*(body: untyped): Eth2Digest =
      body
      finish(h)
  else:
    when BLS_BACKEND == BLST:
    when USE_BLST_SHA256:
      block:
        var h {.inject, noinit.}: Eth2DigestCtx
        init(h)
        body
        var res {.noinit.}: Eth2Digest
@ -93,7 +104,7 @@ template withEth2Hash*(body: untyped): Eth2Digest =
        res
    else:
      block:
        let h {.inject, noinit.}: Eth2DigestCtx
        var h {.inject, noinit.}: Eth2DigestCtx
        init(h)
        body
        finish(h)
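The rewritten when-block collapses the separate BLS_BACKEND and PREFER_BLST_SHA256 checks into one USE_BLST_SHA256 constant, so blscurve is only imported when its SHA256 is actually the hash backend; callers are unaffected. A minimal sketch of the unchanged API, assuming one-shot and streaming hashing agree:

    # Nim sketch, not part of the diff: both forms hash with whichever
    # backend USE_BLST_SHA256 selected at compile time.
    let oneShot = eth2digest([byte 1, 2, 3])
    let streamed = withEth2Hash:
      h.update([byte 1, 2, 3])
    doAssert oneShot == streamed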
@ -121,7 +121,7 @@ type
    slot*: Slot
    beacon_block_root*: Eth2Digest
    subcommittee_index*: uint64
    aggregation_bits*: SyncCommitteeAggregationBits ##\
    aggregation_bits*: SyncCommitteeAggregationBits
    signature*: ValidatorSig

  RestContributionAndProof* = object
@ -16,8 +16,27 @@ import

export ssz_codec, merkleization, proofs

type
  DepositsMerkleizer* = SszMerkleizer[DEPOSIT_CONTRACT_LIMIT]

func hash_tree_root*(x: phase0.HashedBeaconState | altair.HashedBeaconState) {.
  error: "HashedBeaconState should not be hashed".}

func hash_tree_root*(x: phase0.SomeSignedBeaconBlock | altair.SomeSignedBeaconBlock) {.
  error: "SignedBeaconBlock should not be hashed".}

func depositCountU64(s: DepositContractState): uint64 =
  for i in 0 .. 23:
    doAssert s.deposit_count[i] == 0

  uint64.fromBytesBE s.deposit_count.toOpenArray(24, 31)

func init*(T: type DepositsMerkleizer, s: DepositContractState): DepositsMerkleizer =
  DepositsMerkleizer.init(s.branch, s.depositCountU64)

func toDepositContractState*(merkleizer: DepositsMerkleizer): DepositContractState =
  # TODO There is an off by one discrepancy in the size of the arrays here that
  #      need to be investigated. It shouldn't matter as long as the tree is
  #      not populated to its maximum size.
  result.branch[0..31] = merkleizer.getCombinedChunks[0..31]
  result.deposit_count[24..31] = merkleizer.getChunkCount().toBytesBE
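With these helpers relocated to spec/eth2_merkleization, a deposit snapshot round-trips entirely within this module; the new test added at the bottom of this diff exercises exactly this pair of calls. A minimal sketch:

    # Nim sketch, mirroring the new "merklizer state roundtrip" test below.
    let
      dcs = DepositContractState()
      merkleizer = DepositsMerkleizer.init(dcs)
    doAssert dcs == merkleizer.toDepositContractState()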
@ -28,7 +28,7 @@ proc serveAttestation(service: AttestationServiceRef, adata: AttestationData,
      res.get()
  let fork = vc.forkAtEpoch(adata.slot.epoch)

  # TODO: signing_root is recomputed in signBlockProposal just after,
  # TODO: signing_root is recomputed in getAttestationSignature just after,
  # but not for locally attached validators.
  let signingRoot =
    compute_attestation_signing_root(
@ -47,18 +47,21 @@ proc serveAttestation(service: AttestationServiceRef, adata: AttestationData,
      validator_index = vindex, badVoteDetails = $notSlashable.error
    return false

  let attestation =
    block:
      let res = await validator.produceAndSignAttestation(adata,
        int(duty.data.committee_length),
        Natural(duty.data.validator_committee_index),
        fork, vc.beaconGenesis.genesis_validators_root)
  let attestation = block:
    let signature = block:
      let res = await validator.getAttestationSignature(
        fork, vc.beaconGenesis.genesis_validators_root, adata)
      if res.isErr():
        error "Unable to sign attestation", validator = shortLog(validator),
              error_msg = res.error()
        return false
      res.get()

    Attestation.init(
      [duty.data.validator_committee_index],
      int(duty.data.committee_length), adata, signature).expect(
        "data validity checked earlier")

  debug "Sending attestation", attestation = shortLog(attestation),
        validator = shortLog(validator), validator_index = vindex,
        attestation_root = shortLog(attestationRoot),
@ -110,8 +113,8 @@ proc serveAggregateAndProof*(service: AttestationServiceRef,

  let signature =
    block:
      let res = await signAggregateAndProof(validator, proof, fork,
                                            genesisRoot)
      let res = await getAggregateAndProofSignature(
        validator, fork, genesisRoot, proof)
      if res.isErr():
        error "Unable to sign aggregate and proof using remote signer",
              validator = shortLog(validator),
@ -24,7 +24,8 @@ proc publishBlock(vc: ValidatorClientRef, currentSlot, slot: Slot,
  try:
    let randaoReveal =
      block:
        let res = await validator.genRandaoReveal(fork, genesisRoot, slot)
        let res = await validator.getEpochSignature(
          fork, genesisRoot, slot.epoch)
        if res.isErr():
          error "Unable to generate randao reveal usint remote signer",
                validator = shortLog(validator), error_msg = res.error()
@ -44,7 +45,7 @@ proc publishBlock(vc: ValidatorClientRef, currentSlot, slot: Slot,
      return

    let blockRoot = withBlck(beaconBlock): hash_tree_root(blck)
    # TODO: signing_root is recomputed in signBlockProposal just after
    # TODO: signing_root is recomputed in getBlockSignature just after
    let signing_root = compute_block_signing_root(fork, genesisRoot, slot,
                                                  blockRoot)
    let notSlashable = vc.attachedValidators
@ -55,7 +56,7 @@ proc publishBlock(vc: ValidatorClientRef, currentSlot, slot: Slot,
    if notSlashable.isOk():
      let signature =
        block:
          let res = await validator.signBlockProposal(fork, genesisRoot,
          let res = await validator.getBlockSignature(fork, genesisRoot,
                                                      slot, blockRoot,
                                                      beaconBlock)
          if res.isErr():
@ -17,8 +17,8 @@ chronicles.formatIt(DutiesServiceLoop):

proc checkDuty(duty: RestAttesterDuty): bool =
  (duty.committee_length <= MAX_VALIDATORS_PER_COMMITTEE) and
  (uint64(duty.committee_index) <= MAX_COMMITTEES_PER_SLOT) and
  (uint64(duty.validator_committee_index) <= duty.committee_length) and
  (uint64(duty.committee_index) < MAX_COMMITTEES_PER_SLOT) and
  (uint64(duty.validator_committee_index) < duty.committee_length) and
  (uint64(duty.validator_index) <= VALIDATOR_REGISTRY_LIMIT)

proc checkSyncDuty(duty: RestSyncCommitteeDuty): bool =
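The checkDuty change is an off-by-one fix: committee_index and validator_committee_index are zero-based positions, so a value equal to the count is out of range and must be rejected. A minimal illustration:

    # Nim sketch, not part of the diff: for a length-n collection, valid
    # zero-based indices are 0 .. n-1, so the guard must be `<`, not `<=`.
    let committee_length = 4'u64
    doAssert 3'u64 < committee_length        # last valid position passes
    doAssert not (4'u64 < committee_length)  # rejected; `<=` would accept it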
@ -159,7 +159,8 @@ proc pollForAttesterDuties*(vc: ValidatorClientRef,
    for item in addOrReplaceItems:
      let validator = vc.attachedValidators.getValidator(item.duty.pubkey)
      let fork = vc.forkAtEpoch(item.duty.slot.epoch)
      let future = validator.getSlotSig(fork, genesisRoot, item.duty.slot)
      let future = validator.getSlotSignature(
        fork, genesisRoot, item.duty.slot)
      pending.add(future)
      validators.add(validator)
@ -40,9 +40,9 @@ proc serveSyncCommitteeMessage*(service: SyncCommitteeServiceRef,

    message =
      block:
        let res = await signSyncCommitteeMessage(validator, fork,
                                                 genesisValidatorsRoot,
                                                 slot, beaconBlockRoot)
        let res = await getSyncCommitteeMessage(validator, fork,
                                                genesisValidatorsRoot,
                                                slot, beaconBlockRoot)
        if res.isErr():
          error "Unable to sign committee message using remote signer",
                validator = shortLog(validator), slot = slot,
@ -139,12 +139,11 @@ proc serveContributionAndProof*(service: SyncCommitteeServiceRef,
    validatorIdx = validator.index.get()
    genesisRoot = vc.beaconGenesis.genesis_validators_root
    fork = vc.forkAtEpoch(slot.epoch)
    signedProof = (ref SignedContributionAndProof)(
      message: proof)

  let signature =
    block:
      let res = await validator.sign(signedProof, fork, genesisRoot)
      let res = await validator.getContributionAndProofSignature(
        fork, genesisRoot, proof)
      if res.isErr():
        error "Unable to sign sync committee contribution using remote signer",
              validator = shortLog(validator),
@ -152,28 +151,27 @@ proc serveContributionAndProof*(service: SyncCommitteeServiceRef,
              error_msg = res.error()
        return false
      res.get()

  debug "Sending sync contribution",
        contribution = shortLog(signedProof.message.contribution),
        contribution = shortLog(proof.contribution),
        validator = shortLog(validator), validator_index = validatorIdx,
        delay = vc.getDelay(slot.sync_contribution_deadline())

  let restSignedProof = RestSignedContributionAndProof.init(
    signedProof.message, signedProof.signature)
    proof, signature)

  let res =
    try:
      await vc.publishContributionAndProofs(@[restSignedProof])
    except ValidatorApiError as err:
      error "Unable to publish sync contribution",
            contribution = shortLog(signedProof.message.contribution),
            contribution = shortLog(proof.contribution),
            validator = shortLog(validator),
            validator_index = validatorIdx,
            err_msg = err.msg
      false
    except CatchableError as err:
      error "Unexpected error occurred while publishing sync contribution",
            contribution = shortLog(signedProof.message.contribution),
            contribution = shortLog(proof.contribution),
            validator = shortLog(validator),
            err_name = err.name, err_msg = err.msg
      false
@ -184,7 +182,7 @@ proc serveContributionAndProof*(service: SyncCommitteeServiceRef,
          validator_index = validatorIdx
  else:
    warn "Sync contribution was not accepted by beacon node",
         contribution = shortLog(signedProof.message.contribution),
         contribution = shortLog(proof.contribution),
         validator = shortLog(validator),
         validator_index = validatorIdx
  return res
@ -441,21 +441,24 @@ proc createAndSendAttestation(node: BeaconNode,
                              fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              validator: AttachedValidator,
                              attestationData: AttestationData,
                              data: AttestationData,
                              committeeLen: int,
                              indexInCommittee: int,
                              subnet_id: SubnetId) {.async.} =
  try:
    var attestation =
      block:
        let res = await validator.produceAndSignAttestation(
          attestationData, committeeLen, indexInCommittee, fork,
          genesis_validators_root)
    let
      signature = block:
        let res = await validator.getAttestationSignature(
          fork, genesis_validators_root, data)
        if res.isErr():
          error "Unable to sign attestation", validator = shortLog(validator),
                error_msg = res.error()
          warn "Unable to sign attestation", validator = shortLog(validator),
               data = shortLog(data), error_msg = res.error()
          return
        res.get()
      attestation =
        Attestation.init(
          [uint64 indexInCommittee], committeeLen, data, signature).expect(
            "valid data")

    let res = await node.sendAttestation(
      attestation, subnet_id, checkSignature = false)
@ -467,12 +470,11 @@ proc createAndSendAttestation(node: BeaconNode,
      return

    if node.config.dumpEnabled:
      dump(node.config.dumpDirOutgoing, attestation.data,
           validator.pubkey)
      dump(node.config.dumpDirOutgoing, attestation.data, validator.pubkey)

    let
      wallTime = node.beaconClock.now()
      delay = wallTime - attestationData.slot.attestation_deadline()
      delay = wallTime - data.slot.attestation_deadline()

    notice "Attestation sent",
      attestation = shortLog(attestation), validator = shortLog(validator),
@ -482,7 +484,7 @@ proc createAndSendAttestation(node: BeaconNode,
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    notice "Error sending attestation", err = exc.msg
    warn "Error sending attestation", err = exc.msg

proc getBlockProposalEth1Data*(node: BeaconNode,
                               state: ForkedHashedBeaconState):
@ -724,15 +726,14 @@ proc proposeBlock(node: BeaconNode,

  let
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root =
      getStateField(node.dag.headState, genesis_validators_root)
    genesis_validators_root = node.dag.genesis_validators_root
    randao =
      block:
        let res = await validator.genRandaoReveal(
          fork, genesis_validators_root, slot)
        let res = await validator.getEpochSignature(
          fork, genesis_validators_root, slot.epoch)
        if res.isErr():
          error "Unable to generate randao reveal",
                validator = shortLog(validator), error_msg = res.error()
          warn "Unable to generate randao reveal",
               validator = shortLog(validator), error_msg = res.error()
          return head
        res.get()

@ -764,11 +765,11 @@ proc proposeBlock(node: BeaconNode,
  let
    signature =
      block:
        let res = await validator.signBlockProposal(
        let res = await validator.getBlockSignature(
          fork, genesis_validators_root, slot, blockRoot, forkedBlck)
        if res.isErr():
          error "Unable to sign block proposal",
                validator = shortLog(validator), error_msg = res.error()
          warn "Unable to sign block",
               validator = shortLog(validator), error_msg = res.error()
          return head
        res.get()
    signedBlock =
@ -874,8 +875,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
      tmp.get()
    committees_per_slot = get_committee_count_per_slot(epochRef)
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root =
      getStateField(node.dag.headState, genesis_validators_root)
    genesis_validators_root = node.dag.genesis_validators_root

  for committee_index in get_committee_indices(committees_per_slot):
    let committee = get_beacon_committee(epochRef, slot, committee_index)
@ -920,11 +920,10 @@ proc createAndSendSyncCommitteeMessage(node: BeaconNode,
      genesis_validators_root = node.dag.genesis_validators_root
      msg =
        block:
          let res = await signSyncCommitteeMessage(validator, fork,
                                                   genesis_validators_root,
                                                   slot, head.root)
          let res = await validator.getSyncCommitteeMessage(
            fork, genesis_validators_root, slot, head.root)
          if res.isErr():
            error "Unable to sign committee message using remote signer",
            warn "Unable to sign committee message",
                 validator = shortLog(validator), slot = slot,
                 block_root = shortLog(head.root)
            return
@ -973,20 +972,24 @@ proc signAndSendContribution(node: BeaconNode,
                             contribution: SyncCommitteeContribution,
                             selectionProof: ValidatorSig) {.async.} =
  try:
    let msg = (ref SignedContributionAndProof)(
      message: ContributionAndProof(
        aggregator_index: uint64 validator.index.get,
        contribution: contribution,
        selection_proof: selectionProof))
    let
      fork = node.dag.forkAtEpoch(contribution.slot.epoch)
      genesis_validators_root = node.dag.genesis_validators_root
      msg = (ref SignedContributionAndProof)(
        message: ContributionAndProof(
          aggregator_index: uint64 validator.index.get,
          contribution: contribution,
          selection_proof: selectionProof))

    let res = await validator.sign(
      msg, node.dag.forkAtEpoch(contribution.slot.epoch),
      node.dag.genesis_validators_root)
    msg[].signature = block:
      let res = await validator.getContributionAndProofSignature(
        fork, genesis_validators_root, msg[].message)

    if res.isErr():
      error "Unable to sign sync committee contribution usign remote signer",
            validator = shortLog(validator), error_msg = res.error()
      return
      if res.isErr():
        warn "Unable to sign sync committee contribution",
             validator = shortLog(validator), error_msg = res.error()
        return
      res.get()

    # Failures logged in sendSyncCommitteeContribution
    discard await node.sendSyncCommitteeContribution(msg[], false)
@ -994,7 +997,7 @@ proc signAndSendContribution(node: BeaconNode,
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    notice "Error sending sync committee contribution", err = exc.msg
    warn "Error sending sync committee contribution", err = exc.msg

proc handleSyncCommitteeContributions(node: BeaconNode,
                                      head: BlockRef, slot: Slot) {.async.} =
@ -1043,7 +1046,7 @@ proc handleSyncCommitteeContributions(node: BeaconNode,

    let selectionProofRes = proof.read()
    if selectionProofRes.isErr():
      error "Unable to sign selection proof using remote signer",
      warn "Unable to generate selection proof",
           validator = shortLog(candidateAggregators[i].validator),
           slot, head, subnet_id = candidateAggregators[i].subcommitteeIdx
      continue
@ -1135,8 +1138,7 @@ proc sendAggregatedAttestations(
      tmp.get()

    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root =
      getStateField(node.dag.headState, genesis_validators_root)
    genesis_validators_root = node.dag.genesis_validators_root
    committees_per_slot = get_committee_count_per_slot(epochRef)

  var
@ -1152,8 +1154,8 @@ proc sendAggregatedAttestations(
      let validator = node.getAttachedValidator(epochRef, validator_index)
      if validator != nil:
        # the validator index and private key pair.
        slotSigs.add getSlotSig(validator, fork,
          genesis_validators_root, slot)
        slotSigs.add validator.getSlotSignature(
          fork, genesis_validators_root, slot)
        slotSigsData.add (committee_index, validator_index, validator)

  await allFutures(slotSigs)
@ -1163,7 +1165,7 @@ proc sendAggregatedAttestations(
    let
      data = slotSigsData[i]
      slotSig = slotSigs[i].read().valueOr:
        error "Unable to create slot signature using remote signer",
        warn "Unable to create slot signature",
             validator = shortLog(data.v),
             slot, error = error
        continue
@ -1174,10 +1176,10 @@ proc sendAggregatedAttestations(
        continue

      sig = block:
        let res = await signAggregateAndProof(data.v,
          aggregateAndProof, fork, genesis_validators_root)
        let res = await getAggregateAndProofSignature(data.v,
          fork, genesis_validators_root, aggregateAndProof)
        if res.isErr():
          error "Unable to sign aggregated attestation using remote signer",
          warn "Unable to sign aggregate",
               validator = shortLog(data.v), error_msg = res.error()
          return
        res.get()
@ -1542,8 +1544,7 @@ proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
    return

  let
    genesis_validators_root =
      getStateField(node.dag.headState, genesis_validators_root)
    genesis_validators_root = node.dag.genesis_validators_root
    head = node.dag.head

  # Getting the slot signature is expensive but cached - in "normal" cases we'll
@ -1570,10 +1571,10 @@ proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
        let
          subnet_id = compute_subnet_for_attestation(
            committees_per_slot, slot, committee_index)
        let slotSigRes = await getSlotSig(validator, fork,
                                          genesis_validators_root, slot)
        let slotSigRes = await validator.getSlotSignature(
          fork, genesis_validators_root, slot)
        if slotSigRes.isErr():
          error "Unable to create slot signature using remote signer",
          error "Unable to create slot signature",
                validator = shortLog(validator),
                error_msg = slotSigRes.error()
          continue
@ -56,7 +56,6 @@ type
  SignResponse* = Web3SignerDataResponse

  SignatureResult* = Result[ValidatorSig, string]
  AttestationResult* = Result[Attestation, string]
  SyncCommitteeMessageResult* = Result[SyncCommitteeMessage, string]

  ValidatorPool* = object
@ -189,173 +188,74 @@ proc signWithSingleKey(v: AttachedValidator,
    return SignatureResult.ok res.get.toValidatorSig

proc signData(v: AttachedValidator,
              request: Web3SignerRequest): Future[SignatureResult]
             {.async.} =
  return
    case v.kind
    of ValidatorKind.Local:
      SignatureResult.err "Invalid validator kind"
    of ValidatorKind.Remote:
      if v.clients.len == 1:
        await v.signWithSingleKey(request)
      else:
        await v.signWithDistributedKey(request)

proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              blck: ForkedBeaconBlock): Future[SignatureResult]
                             {.async.} =
  let request = Web3SignerRequest.init(fork, genesis_validators_root, blck.Web3SignerForkedBeaconBlock)
  debug "Signing block proposal using remote signer",
    validator = shortLog(v)
  return await v.signData(request)

proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              adata: AttestationData): Future[SignatureResult]
                             {.async.} =
  let request = Web3SignerRequest.init(fork, genesis_validators_root, adata)
  debug "Signing block proposal using remote signer",
    validator = shortLog(v)
  return await v.signData(request)

proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              epoch: Epoch): Future[SignatureResult]
                             {.async.} =
  let request = Web3SignerRequest.init(fork, genesis_validators_root, epoch)
  debug "Generating randao reveal signature using remote signer",
    validator = shortLog(v)
  return await v.signData(request)

proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              proof: AggregateAndProof): Future[SignatureResult]
                             {.async.} =
  let request = Web3SignerRequest.init(fork, genesis_validators_root, proof)
  debug "Signing aggregate and proof using remote signer",
    validator = shortLog(v)
  return await v.signData(request)

proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              slot: Slot): Future[SignatureResult]
                             {.async.} =
  let request = Web3SignerRequest.init(fork, genesis_validators_root, slot)
  debug "Signing aggregate slot using remote signer",
    validator = shortLog(v)
  return await v.signData(request)

proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              slot: Slot,
                              blockRoot: Eth2Digest): Future[SignatureResult]
                             {.async.} =
  let request = Web3SignerRequest.init(fork, genesis_validators_root, blockRoot,
                                       slot)
  debug "Signing sync committee message using remote signer",
    validator = shortLog(v)
  return await v.signData(request)

proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              slot: Slot,
                              subcommittee: SyncSubcommitteeIndex): Future[SignatureResult]
                             {.async.} =
  let request = Web3SignerRequest.init(
    fork, genesis_validators_root,
    SyncAggregatorSelectionData(slot: slot, subcommittee_index: uint64 subcommittee)
  )
  debug "Signing sync aggregator selection data using remote signer",
    validator = shortLog(v)
  return await v.signData(request)

proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              contribution: ContributionAndProof
                             ): Future[SignatureResult] {.async.} =
  let request = Web3SignerRequest.init(
    fork, genesis_validators_root, contribution
  )
  debug "Signing sync contribution and proof message using remote signer",
    validator = shortLog(v)
  return await v.signData(request)
              request: Web3SignerRequest): Future[SignatureResult] =
  doAssert v.kind == ValidatorKind.Remote
  debug "Signing request with remote signer",
    validator = shortLog(v), kind = request.kind
  if v.clients.len == 1:
    v.signWithSingleKey(request)
  else:
    v.signWithDistributedKey(request)

# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/validator.md#signature
proc signBlockProposal*(v: AttachedValidator, fork: Fork,
proc getBlockSignature*(v: AttachedValidator, fork: Fork,
                        genesis_validators_root: Eth2Digest, slot: Slot,
                        blockRoot: Eth2Digest, blck: ForkedBeaconBlock
                        block_root: Eth2Digest, blck: ForkedBeaconBlock
                       ): Future[SignatureResult] {.async.} =
  return
    case v.kind
    of ValidatorKind.Local:
      SignatureResult.ok(
        get_block_signature(fork, genesis_validators_root, slot, blockRoot,
                            v.data.privateKey).toValidatorSig()
      )
        get_block_signature(
          fork, genesis_validators_root, slot, block_root,
          v.data.privateKey).toValidatorSig())
    of ValidatorKind.Remote:
      await signWithRemoteValidator(v, fork, genesis_validators_root,
                                    blck)
      let request = Web3SignerRequest.init(
        fork, genesis_validators_root, blck.Web3SignerForkedBeaconBlock)
      await v.signData(request)

proc signAttestation*(v: AttachedValidator,
                      data: AttestationData,
                      fork: Fork, genesis_validators_root: Eth2Digest
                     ): Future[SignatureResult] {.async.} =
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/validator.md#aggregate-signature
proc getAttestationSignature*(v: AttachedValidator, fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              data: AttestationData
                             ): Future[SignatureResult] {.async.} =
  return
    case v.kind
    of ValidatorKind.Local:
      SignatureResult.ok(
        get_attestation_signature(fork, genesis_validators_root, data,
                                  v.data.privateKey).toValidatorSig()
      )
        get_attestation_signature(
          fork, genesis_validators_root, data,
          v.data.privateKey).toValidatorSig())
    of ValidatorKind.Remote:
      await signWithRemoteValidator(v, fork, genesis_validators_root, data)
      let request = Web3SignerRequest.init(fork, genesis_validators_root, data)
      await v.signData(request)

proc produceAndSignAttestation*(validator: AttachedValidator,
                                attestationData: AttestationData,
                                committeeLen: int, indexInCommittee: Natural,
                                fork: Fork,
                                genesis_validators_root: Eth2Digest):
                                Future[AttestationResult] {.async.} =
  let validatorSignature =
    block:
      let res = await validator.signAttestation(attestationData, fork,
                                                genesis_validators_root)
      if res.isErr():
        return AttestationResult.err(res.error())
      res.get()

  var aggregationBits = CommitteeValidatorsBits.init(committeeLen)
  aggregationBits.setBit indexInCommittee

  return AttestationResult.ok(
    Attestation(data: attestationData, signature: validatorSignature,
                aggregation_bits: aggregationBits)
  )

proc signAggregateAndProof*(v: AttachedValidator,
                            aggregate_and_proof: AggregateAndProof,
                            fork: Fork, genesis_validators_root: Eth2Digest):
                            Future[SignatureResult] {.async.} =
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/validator.md#broadcast-aggregate
proc getAggregateAndProofSignature*(v: AttachedValidator,
                                    fork: Fork,
                                    genesis_validators_root: Eth2Digest,
                                    aggregate_and_proof: AggregateAndProof
                                   ): Future[SignatureResult] {.async.} =
  return
    case v.kind
    of ValidatorKind.Local:
      SignatureResult.ok(
        get_aggregate_and_proof_signature(fork, genesis_validators_root,
                                          aggregate_and_proof,
                                          v.data.privateKey).toValidatorSig()
        get_aggregate_and_proof_signature(
          fork, genesis_validators_root, aggregate_and_proof,
          v.data.privateKey).toValidatorSig()
      )
    of ValidatorKind.Remote:
      await signWithRemoteValidator(v, fork, genesis_validators_root,
                                    aggregate_and_proof)
      let request = Web3SignerRequest.init(
        fork, genesis_validators_root, aggregate_and_proof)
      await v.signData(request)

# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/altair/validator.md#prepare-sync-committee-message
proc signSyncCommitteeMessage*(v: AttachedValidator,
                               fork: Fork,
                               genesis_validators_root: Eth2Digest,
                               slot: Slot,
                               beacon_block_root: Eth2Digest
                              ): Future[SyncCommitteeMessageResult] {.async.} =
proc getSyncCommitteeMessage*(v: AttachedValidator,
                              fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              slot: Slot,
                              beacon_block_root: Eth2Digest
                             ): Future[SyncCommitteeMessageResult] {.async.} =
  let signature =
    case v.kind
    of ValidatorKind.Local:
@ -363,8 +263,9 @@ proc signSyncCommitteeMessage*(v: AttachedValidator,
        fork, genesis_validators_root, slot, beacon_block_root,
        v.data.privateKey).toValidatorSig())
    of ValidatorKind.Remote:
      await signWithRemoteValidator(v, fork, genesis_validators_root,
                                    slot, beacon_block_root)
      let request = Web3SignerRequest.init(
        fork, genesis_validators_root, beacon_block_root, slot)
      await v.signData(request)

  if signature.isErr:
    return SyncCommitteeMessageResult.err("Failed to obtain signature")
@ -380,8 +281,7 @@ proc signSyncCommitteeMessage*(v: AttachedValidator,
  )

# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/altair/validator.md#aggregation-selection
proc getSyncCommitteeSelectionProof*(v: AttachedValidator,
                                     fork: Fork,
proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork,
                                     genesis_validators_root: Eth2Digest,
                                     slot: Slot,
                                     subcommittee_index: SyncSubcommitteeIndex
@ -393,59 +293,60 @@ proc getSyncCommitteeSelectionProof*(v: AttachedValidator,
        fork, genesis_validators_root, slot, subcommittee_index,
        v.data.privateKey).toValidatorSig())
    of ValidatorKind.Remote:
      await signWithRemoteValidator(v, fork, genesis_validators_root,
                                    slot, subcommittee_index)
      let request = Web3SignerRequest.init(
        fork, genesis_validators_root,
        SyncAggregatorSelectionData(
          slot: slot, subcommittee_index: uint64 subcommittee_index)
      )
      await v.signData(request)

# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/altair/validator.md#signature
proc sign*(v: AttachedValidator, msg: ref SignedContributionAndProof,
           fork: Fork, genesis_validators_root: Eth2Digest
          ): Future[SignatureResult] {.async.} =
  let signature =
    case v.kind
    of ValidatorKind.Local:
      SignatureResult.ok(get_contribution_and_proof_signature(
        fork, genesis_validators_root, msg.message, v.data.privateKey).toValidatorSig())
    of ValidatorKind.Remote:
      await signWithRemoteValidator(v, fork, genesis_validators_root,
                                    msg.message)

  if signature.isOk:
    msg.signature = signature.get()

  return signature

# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/validator.md#randao-reveal
func genRandaoReveal*(k: ValidatorPrivKey, fork: Fork,
                      genesis_validators_root: Eth2Digest,
                      slot: Slot): CookedSig =
  get_epoch_signature(fork, genesis_validators_root, slot.epoch, k)

proc genRandaoReveal*(v: AttachedValidator, fork: Fork,
                      genesis_validators_root: Eth2Digest, slot: Slot):
                      Future[SignatureResult] {.async.} =
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/altair/validator.md#broadcast-sync-committee-contribution
proc getContributionAndProofSignature*(v: AttachedValidator, fork: Fork,
                                       genesis_validators_root: Eth2Digest,
                                       contribution_and_proof: ContributionAndProof
                                      ): Future[SignatureResult] {.async.} =
  return
    case v.kind
    of ValidatorKind.Local:
      SignatureResult.ok(genRandaoReveal(v.data.privateKey, fork,
                                         genesis_validators_root,
                                         slot).toValidatorSig())
      SignatureResult.ok(get_contribution_and_proof_signature(
        fork, genesis_validators_root, contribution_and_proof,
        v.data.privateKey).toValidatorSig())
    of ValidatorKind.Remote:
      await signWithRemoteValidator(v, fork, genesis_validators_root,
                                    slot.epoch())
      let request = Web3SignerRequest.init(
        fork, genesis_validators_root, contribution_and_proof)
      await v.signData(request)

proc getSlotSig*(v: AttachedValidator, fork: Fork,
                 genesis_validators_root: Eth2Digest, slot: Slot
                ): Future[SignatureResult] {.async.} =
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/validator.md#randao-reveal
proc getEpochSignature*(v: AttachedValidator, fork: Fork,
                        genesis_validators_root: Eth2Digest, epoch: Epoch
                       ): Future[SignatureResult] {.async.} =
  return
    case v.kind
    of ValidatorKind.Local:
      SignatureResult.ok(get_epoch_signature(
        fork, genesis_validators_root, epoch,
        v.data.privateKey).toValidatorSig())
    of ValidatorKind.Remote:
      let request = Web3SignerRequest.init(
        fork, genesis_validators_root, epoch)
      await v.signData(request)

# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/validator.md#aggregation-selection
proc getSlotSignature*(v: AttachedValidator, fork: Fork,
                       genesis_validators_root: Eth2Digest, slot: Slot
                      ): Future[SignatureResult] {.async.} =
  if v.slotSignature.isSome and v.slotSignature.get.slot == slot:
    return SignatureResult.ok(v.slotSignature.get.signature)

  let signature =
    case v.kind
    of ValidatorKind.Local:
      SignatureResult.ok(get_slot_signature(fork, genesis_validators_root, slot,
                                            v.data.privateKey).toValidatorSig())
      SignatureResult.ok(get_slot_signature(
        fork, genesis_validators_root, slot,
        v.data.privateKey).toValidatorSig())
    of ValidatorKind.Remote:
      await signWithRemoteValidator(v, fork, genesis_validators_root, slot)
      let request = Web3SignerRequest.init(fork, genesis_validators_root, slot)
      await v.signData(request)

  if signature.isErr:
    return signature
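After the cleanup, every public helper above shares one dispatch shape: Local validators call the spec signing function directly, and Remote validators build the matching Web3SignerRequest and defer to signData. A condensed sketch of that shape (getExampleSignature is hypothetical; the real procs are shown above):

    # Nim sketch, not part of the diff: the common Local/Remote dispatch.
    proc getExampleSignature(v: AttachedValidator, fork: Fork,
                             genesis_validators_root: Eth2Digest, slot: Slot
                            ): Future[SignatureResult] {.async.} =
      return
        case v.kind
        of ValidatorKind.Local:
          SignatureResult.ok(get_slot_signature(
            fork, genesis_validators_root, slot,
            v.data.privateKey).toValidatorSig())
        of ValidatorKind.Remote:
          let request = Web3SignerRequest.init(fork, genesis_validators_root, slot)
          await v.signData(request)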
@ -86,7 +86,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
    validatorMonitor = newClone(ValidatorMonitor.init())
    dag = ChainDAGRef.init(cfg, db, validatorMonitor, {})
    eth1Chain = Eth1Chain.init(cfg, db)
    merkleizer = depositContractSnapshot.createMerkleizer
    merkleizer = DepositsMerkleizer.init(depositContractSnapshot.depositContractState)
    taskpool = Taskpool.new()
    verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)
    quarantine = newClone(Quarantine.init())
@ -113,8 +113,11 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
      attestationHead = dag.head.atSlot(slot)

    dag.withUpdatedState(tmpState[], attestationHead.toBlockSlotId.expect("not nil")) do:
      let committees_per_slot =
        get_committee_count_per_slot(state, slot.epoch, cache)
      let
        fork = getStateField(state, fork)
        genesis_validators_root = getStateField(state, genesis_validators_root)
        committees_per_slot =
          get_committee_count_per_slot(state, slot.epoch, cache)

      for committee_index in get_committee_indices(committees_per_slot):
        let committee = get_beacon_committee(
@ -126,18 +129,15 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
            data = makeAttestationData(
              state, slot, committee_index, bid.root)
            sig =
              get_attestation_signature(getStateField(state, fork),
                getStateField(state, genesis_validators_root),
                data, MockPrivKeys[validator_index])
          var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
          aggregation_bits.setBit index_in_committee
              get_attestation_signature(
                fork, genesis_validators_root, data,
                MockPrivKeys[validator_index])
            attestation = Attestation.init(
              [uint64 index_in_committee], committee.len, data,
              sig.toValidatorSig()).expect("valid data")

          attPool.addAttestation(
            Attestation(
              data: data,
              aggregation_bits: aggregation_bits,
              signature: sig.toValidatorSig()
            ), [validator_index], sig, data.slot.start_beacon_time)
            attestation, [validator_index], sig, data.slot.start_beacon_time)
    do:
      raiseAssert "withUpdatedState failed"

@ -267,10 +267,10 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
        cfg,
        hashedState[],
        proposerIdx,
        privKey.genRandaoReveal(
        get_epoch_signature(
          getStateField(state, fork),
          getStateField(state, genesis_validators_root),
          slot).toValidatorSig(),
          slot.epoch, privKey).toValidatorSig(),
        eth1ProposalData.vote,
        default(GraffitiBytes),
        attPool.getAttestationsForBlock(state, cache),
@ -12,8 +12,7 @@ import
  ../beacon_chain/beacon_chain_db,
  ../beacon_chain/spec/datatypes/[phase0, altair],
  ../beacon_chain/spec/[beaconstate, forks, helpers],
  ../beacon_chain/consensus_object_pools/[blockchain_dag, block_pools_types],
  ../beacon_chain/eth1/eth1_monitor
  ../beacon_chain/consensus_object_pools/[blockchain_dag, block_pools_types]

template withTimer*(stats: var RunningStat, body: untyped) =
  # TODO unify timing somehow

@ -144,9 +144,9 @@ cli do(validatorsDir: string, secretsDir: string,
      blockAggregates = aggregates.filterIt(
        it.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= slot and
          slot <= it.data.slot + SLOTS_PER_EPOCH)
      randao_reveal =
        validators[proposer].genRandaoReveal(
          fork, genesis_validators_root, slot).toValidatorSig()
      randao_reveal = get_epoch_signature(
        fork, genesis_validators_root, slot.epoch,
        validators[proposer]).toValidatorSig()
      message = makeBeaconBlock(
        cfg,
        state[],
@ -222,7 +222,7 @@ suite "Gossip validation - Extra": # Not based on preset config
          privateKey: MockPrivKeys[index])
        validator = AttachedValidator(pubkey: pubkey,
          kind: ValidatorKind.Local, data: keystoreData, index: some(index))
        resMsg = waitFor signSyncCommitteeMessage(
        resMsg = waitFor getSyncCommitteeMessage(
          validator, state[].data.fork, state[].data.genesis_validators_root, slot,
          state[].root)
        msg = resMsg.get()
@ -249,9 +249,11 @@ suite "Gossip validation - Extra": # Not based on preset config
            contribution.message.contribution)
          syncCommitteeMsgPool[].addContribution(
            contribution[], contribution.message.contribution.signature.load.get)
          let signRes = waitFor validator.sign(
            contribution, state[].data.fork, state[].data.genesis_validators_root)
          let signRes = waitFor validator.getContributionAndProofSignature(
            state[].data.fork, state[].data.genesis_validators_root,
            contribution[].message)
          doAssert(signRes.isOk())
          contribution[].signature = signRes.get()
          contribution
        aggregate = syncCommitteeMsgPool[].produceSyncAggregate(state[].root)
@ -122,3 +122,11 @@ suite "Beacon state" & preset():
      state[].phase0Data.dependent_root(Epoch(1)) ==
        state[].phase0Data.data.get_block_root_at_slot(Epoch(1).start_slot - 1)
      state[].phase0Data.dependent_root(Epoch(0)) == genBlock.root

  test "merklizer state roundtrip":
    let
      dcs = DepositContractState()
      merkleizer = DepositsMerkleizer.init(dcs)

    check:
      dcs == merkleizer.toDepositContractState()
@ -11,7 +11,6 @@ import
  eth/keys,
  stew/endians2,
  ../beacon_chain/consensus_object_pools/sync_committee_msg_pool,
  ../beacon_chain/validators/validator_pool,
  ../beacon_chain/spec/datatypes/bellatrix,
  ../beacon_chain/spec/[
    beaconstate, helpers, keystore, signatures, state_transition, validator]
@ -99,10 +98,10 @@ proc addTestBlock*(
    privKey = MockPrivKeys[proposer_index.get]
    randao_reveal =
      if skipBlsValidation notin flags:
        privKey.genRandaoReveal(
        get_epoch_signature(
          getStateField(state, fork),
          getStateField(state, genesis_validators_root),
          getStateField(state, slot)).toValidatorSig()
          getStateField(state, slot).epoch, privKey).toValidatorSig()
      else:
        ValidatorSig()

@ -228,13 +227,13 @@ func makeAttestation*(
  # monotonic enumerable index, is wasteful and slow. Most test callers
  # want ValidatorIndex, so that's supported too.
  let
    sac_index = committee.find(validator_index)
    index_in_committee = committee.find(validator_index)
    data = makeAttestationData(state, slot, committee_index, beacon_block_root)

  doAssert sac_index != -1, "find_beacon_committee should guarantee this"
  doAssert index_in_committee != -1, "find_beacon_committee should guarantee this"

  var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
  aggregation_bits.setBit sac_index
  aggregation_bits.setBit index_in_committee

  let sig = if skipBlsValidation in flags:
    ValidatorSig()