fix some todo (#1645)
* remove some superfluous gcsafes
* remove getTailState (unused)
* don't store old epochrefs in blocks
* document attestation pool a bit
* remove `pcs =` cruft from log
Parent: 804b152d1d
Commit: 20dd38fdf9
@@ -65,14 +65,17 @@ proc init*(T: type AttestationPool, chainDag: ChainDAGRef, quarantine: Quarantin
     forkChoice: forkChoice
   )

-proc processAttestation(
+proc addForkChoiceVotes(
     pool: var AttestationPool, slot: Slot, participants: HashSet[ValidatorIndex],
     block_root: Eth2Digest, wallSlot: Slot) =
   # Add attestation votes to fork choice
   if (let v = pool.forkChoice.on_attestation(
     pool.chainDag, slot, block_root, participants, wallSlot);
     v.isErr):
-      warn "Couldn't process attestation", err = v.error()
+      # This indicates that the fork choice and the chain dag are out of sync -
+      # this is most likely the result of a bug, but we'll try to keep going -
+      # hopefully the fork choice will heal itself over time.
+      error "Couldn't add attestation to fork choice, bug?", err = v.error()

 func candidateIdx(pool: AttestationPool, slot: Slot): Option[uint64] =
   if slot >= pool.startingSlot and
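The `if (let v = ...; v.isErr):` form used above is a statement-list expression that binds the Result and tests it in one place, keeping the error branch next to the call site. A minimal, self-contained sketch of the idiom, assuming the `stew/results` package and a string error type (the helper name is invented, not the pool's API):

```nim
import stew/results

proc mightFail(x: int): Result[int, string] =
  # hypothetical helper, not part of the attestation pool
  if x < 0:
    return err("negative input")
  ok(x * 2)

when isMainModule:
  # bind the Result and test it in a single statement - same shape as the
  # on_attestation call in the hunk above
  if (let v = mightFail(-1); v.isErr):
    echo "call failed: ", v.error()
```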
@@ -103,7 +106,15 @@ proc addAttestation*(pool: var AttestationPool,
                      attestation: Attestation,
                      participants: HashSet[ValidatorIndex],
                      wallSlot: Slot) =
-  # Add an attestation whose parent we know
+  ## Add an attestation to the pool, assuming it's been validated already.
+  ## Attestations may be either aggregated or not - we're pursuing an eager
+  ## strategy where we'll drop validations we already knew about and combine
+  ## the new attestation with an existing one if possible.
+  ##
+  ## This strategy is not optimal in the sense that it would be possible to find
+  ## a better packing of attestations by delaying the aggregation, but because
+  ## it's possible to include more than one aggregate in a block we can be
+  ## somewhat lazy instead of looking for a perfect packing.
   logScope:
     attestation = shortLog(attestation)

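The new doc comment describes an eager strategy: an incoming validation that is a subset of one already stored is dropped, and stored validations that are subsets of the newcomer are evicted. An illustrative sketch of that bookkeeping, using `HashSet[int]` as a stand-in for the real aggregation bitfield (names and types below are invented for the example):

```nim
import sets, sequtils

type Validation = object
  bits: HashSet[int]   # stand-in for aggregation_bits

proc addEagerly(validations: var seq[Validation], incoming: HashSet[int]) =
  for v in validations:
    if incoming <= v.bits:
      return                                # subset of a known validation - drop it
  # the newcomer supersedes any stored validation that is a subset of it
  validations.keepItIf(not (it.bits <= incoming))
  validations.add Validation(bits: incoming)

when isMainModule:
  var vals: seq[Validation]
  vals.addEagerly(toHashSet([1, 2]))
  vals.addEagerly(toHashSet([1]))           # ignored - subset
  vals.addEagerly(toHashSet([1, 2, 3]))     # replaces the first entry
  doAssert vals.len == 1
```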
@@ -111,7 +122,7 @@ proc addAttestation*(pool: var AttestationPool,

   let candidateIdx = pool.candidateIdx(attestation.data.slot)
   if candidateIdx.isNone:
-    debug "Attestation slot out of range",
+    debug "Skipping old attestation for block production",
       startingSlot = pool.startingSlot
     return

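`candidateIdx` only yields an index for slots inside the pool's bounded window starting at `startingSlot`, which is why anything outside it is now logged as a skipped old attestation rather than a generic out-of-range condition. A hedged sketch of that window lookup with made-up numbers (the real pool derives its window size from spec constants):

```nim
import options

const windowSlots = 64'u64            # assumed size, purely illustrative

func candidateIdx(startingSlot, slot: uint64): Option[uint64] =
  # slots before startingSlot (too old) or beyond the window map to none
  if slot >= startingSlot and slot < startingSlot + windowSlots:
    some(slot - startingSlot)
  else:
    none(uint64)

when isMainModule:
  doAssert candidateIdx(100, 130).get() == 30
  doAssert candidateIdx(100, 99).isNone    # old attestation - skipped
```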
@@ -129,11 +140,7 @@ proc addAttestation*(pool: var AttestationPool,
           # The validations in the new attestation are a subset of one of the
           # attestations that we already have on file - no need to add this
           # attestation to the database
-          # TODO what if the new attestation is useful for creating bigger
-          # sets by virtue of not overlapping with some other attestation
-          # and therefore being useful after all?
-          trace "Ignoring subset attestation",
-            newParticipants = participants
+          trace "Ignoring subset attestation", newParticipants = participants
           found = true
           break

@@ -141,14 +148,13 @@ proc addAttestation*(pool: var AttestationPool,
           # Attestations in the pool that are a subset of the new attestation
           # can now be removed per same logic as above

-          trace "Removing subset attestations",
-            newParticipants = participants
+          trace "Removing subset attestations", newParticipants = participants

           a.validations.keepItIf(
             not it.aggregation_bits.isSubsetOf(validation.aggregation_bits))

           a.validations.add(validation)
-          pool.processAttestation(
+          pool.addForkChoiceVotes(
             attestation.data.slot, participants, attestation.data.beacon_block_root,
             wallSlot)

@@ -165,7 +171,7 @@ proc addAttestation*(pool: var AttestationPool,
       data: attestation.data,
       validations: @[validation]
     ))
-    pool.processAttestation(
+    pool.addForkChoiceVotes(
       attestation.data.slot, participants, attestation.data.beacon_block_root,
       wallSlot)

@@ -183,18 +189,16 @@ proc addForkChoice*(pool: var AttestationPool,
     pool.chainDag, epochRef, blckRef, blck, wallSlot)

   if state.isErr:
-    # TODO If this happens, it is effectively a bug - the BlockRef structure
-    # guarantees that the DAG is valid and the state transition should
-    # guarantee that the justified and finalized epochs are ok! However,
-    # we'll log it for now to avoid crashes
-    error "Unexpected error when applying block",
+    # This indicates that the fork choice and the chain dag are out of sync -
+    # this is most likely the result of a bug, but we'll try to keep going -
+    # hopefully the fork choice will heal itself over time.
+    error "Couldn't add block to fork choice, bug?",
       blck = shortLog(blck), err = state.error

 proc getAttestationsForSlot*(pool: AttestationPool, newBlockSlot: Slot):
     Option[AttestationsSeen] =
   if newBlockSlot < (GENESIS_SLOT + MIN_ATTESTATION_INCLUSION_DELAY):
-    debug "Too early for attestations",
-      newBlockSlot = shortLog(newBlockSlot)
+    debug "Too early for attestations", newBlockSlot = shortLog(newBlockSlot)
     return none(AttestationsSeen)

   let
@@ -210,13 +214,10 @@ proc getAttestationsForSlot*(pool: AttestationPool, newBlockSlot: Slot):
     some(pool.candidates[candidateIdx.get()])

 proc getAttestationsForBlock*(pool: AttestationPool,
-                              state: BeaconState): seq[Attestation] =
+                              state: BeaconState,
+                              cache: var StateCache): seq[Attestation] =
   ## Retrieve attestations that may be added to a new block at the slot of the
   ## given state
-  logScope: pcs = "retrieve_attestation"
-
-  # TODO this shouldn't really need state -- it's to recheck/validate, but that
-  # should be refactored
   let newBlockSlot = state.slot
   var attestations: seq[AttestationEntry]

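Taking the `StateCache` as a `var` parameter (instead of building a fresh one inside `getAttestationsForBlock`, as the next hunk removes) lets the caller reuse shuffling work across all the helpers involved in producing a block. A simplified sketch of that cache-threading pattern with invented stand-in types, not the real StateCache API:

```nim
import tables

type ShuffleCache = object
  committees: Table[uint64, seq[int]]

proc getCommittee(cache: var ShuffleCache, slot: uint64): seq[int] =
  # pretend-expensive shuffle computation, memoized per slot
  if slot notin cache.committees:
    cache.committees[slot] = @[int(slot mod 7), int(slot mod 11)]
  cache.committees[slot]

when isMainModule:
  var cache = ShuffleCache()
  discard cache.getCommittee(42)   # computed once
  discard cache.getCommittee(42)   # served from the cache on reuse
```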
@@ -236,7 +237,6 @@ proc getAttestationsForBlock*(pool: AttestationPool,
   if attestations.len == 0:
     return

-  var cache = StateCache()
   for a in attestations:
     var
       # https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#construct-attestation
@@ -249,14 +249,10 @@ proc getAttestationsForBlock*(pool: AttestationPool,
       agg {.noInit.}: AggregateSignature
     agg.init(a.validations[0].aggregate_signature)

-    # TODO what's going on here is that when producing a block, we need to
-    # include only such attestations that will not cause block validation
-    # to fail. How this interacts with voting and the acceptance of
-    # attestations into the pool in general is an open question that needs
-    # revisiting - for example, when attestations are added, against which
-    # state should they be validated, if at all?
-    # TODO we're checking signatures here every time which is very slow and we don't want
-    # to include a broken attestation
+    # Signature verification here is more of a sanity check - it could
+    # be optimized away, though for now it's easier to reuse the logic from
+    # the state transition function to ensure that the new block will not
+    # fail application.
     if (let v = check_attestation(state, attestation, {}, cache); v.isErr):
       warn "Attestation no longer validates...",
         attestation = shortLog(attestation),
@@ -265,13 +261,6 @@ proc getAttestationsForBlock*(pool: AttestationPool,
       continue

     for i in 1..a.validations.high:
-      # TODO We need to select a set of attestations that maximise profit by
-      # adding the largest combined attestation set that we can find - this
-      # unfortunately looks an awful lot like
-      # https://en.wikipedia.org/wiki/Set_packing - here we just iterate
-      # and naively add as much as possible in one go, by we could also
-      # add the same attestation data twice, as long as there's at least
-      # one new attestation in there
       if not attestation.aggregation_bits.overlaps(
           a.validations[i].aggregation_bits):
         attestation.aggregation_bits.combine(a.validations[i].aggregation_bits)
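The loop kept above is the naive strategy the removed TODO described: walk the remaining validations and fold in every one whose bits don't overlap what has been aggregated so far. A small sketch of that greedy packing, again modelling the bitfield as a `HashSet[int]` rather than the real type:

```nim
import sets

proc greedyCombine(validations: seq[HashSet[int]]): HashSet[int] =
  # start from the first validation and merge every later, non-overlapping one
  doAssert validations.len > 0
  result = validations[0]
  for i in 1 ..< validations.len:
    if disjoint(result, validations[i]):
      result.incl validations[i]

when isMainModule:
  let packed = greedyCombine(
    @[toHashSet([1, 2]), toHashSet([2, 3]), toHashSet([4])])
  doAssert packed == toHashSet([1, 2, 4])   # [2, 3] overlaps and is skipped
```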
@@ -333,4 +322,6 @@ proc selectHead*(pool: var AttestationPool, wallSlot: Slot): BlockRef =

 proc prune*(pool: var AttestationPool) =
   if (let v = pool.forkChoice.prune(); v.isErr):
-    error "Pruning failed", err = v.error() # TODO should never happen
+    # If pruning fails, it's likely the result of a bug - this shouldn't happen
+    # but we'll keep running hoping that the fork choice will recover eventually
+    error "Couldn't prune fork choice, bug?", err = v.error()
@@ -404,15 +404,12 @@ proc addMessageHandlers(node: BeaconNode): Future[void] =
     node.getAttestationHandlers()
   )

-proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, async.} =
+proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.async.} =
   ## Called at the beginning of a slot - usually every slot, but sometimes might
   ## skip a few in case we're running late.
   ## lastSlot: the last slot that we successfully processed, so we know where to
   ##           start work from
   ## scheduledSlot: the slot that we were aiming for, in terms of timing
-
-  logScope: pcs = "slot_start"

   let
     # The slot we should be at, according to the clock
     beaconTime = node.beaconClock.now()
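The dropped `{.gcsafe.}` pragmas were superfluous because GC-safety is an inferred, checked effect: the compiler already proves it for procs that don't touch global GC'd memory, so spelling it out on definitions adds nothing. The pragma still earns its keep on proc *types* (callbacks), where it states a requirement on whatever gets passed in. A minimal sketch of that distinction, with names invented for the example:

```nim
var counter {.threadvar.}: int        # thread-local int: touching it is GC-safe

proc bump() =                         # gcsafe is inferred - no pragma needed
  inc counter

proc runCallback(cb: proc () {.gcsafe.}) =
  # here {.gcsafe.} is a requirement the callback must satisfy
  cb()

when isMainModule:
  runCallback(bump)                   # accepted: bump is provably gcsafe
```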
@@ -598,12 +595,12 @@ proc startSyncManager(node: BeaconNode) =
   func getLocalHeadSlot(): Slot =
     node.chainDag.head.slot

-  proc getLocalWallSlot(): Slot {.gcsafe.} =
+  proc getLocalWallSlot(): Slot =
     let epoch = node.beaconClock.now().slotOrZero.compute_epoch_at_slot() +
       1'u64
     epoch.compute_start_slot_at_epoch()

-  func getFirstSlotAtFinalizedEpoch(): Slot {.gcsafe.} =
+  func getFirstSlotAtFinalizedEpoch(): Slot =
     let fepoch = node.chainDag.headState.data.data.finalized_checkpoint.epoch
     compute_start_slot_at_epoch(fepoch)

@@ -631,7 +628,7 @@ proc currentSlot(node: BeaconNode): Slot =
   node.beaconClock.now.slotOrZero

 proc connectedPeersCount(node: BeaconNode): int =
-  nbc_peers.value.int
+  len(node.network.peerPool)

 proc installBeaconApiHandlers(rpcServer: RpcServer, node: BeaconNode) =
   rpcServer.rpc("getBeaconHead") do () -> Slot:
@@ -893,8 +890,7 @@ proc start(node: BeaconNode) =
     SLOTS_PER_EPOCH,
     SECONDS_PER_SLOT,
     SPEC_VERSION,
-    dataDir = node.config.dataDir.string,
-    pcs = "start_beacon_node"
+    dataDir = node.config.dataDir.string

   if genesisTime.inFuture:
     notice "Waiting for genesis", genesisIn = genesisTime.offset
@@ -1035,7 +1031,7 @@ when hasPrompt:

     when compiles(defaultChroniclesStream.output.writer):
       defaultChroniclesStream.output.writer =
-        proc (logLevel: LogLevel, msg: LogOutputStr) {.gcsafe, raises: [Defect].} =
+        proc (logLevel: LogLevel, msg: LogOutputStr) {.raises: [Defect].} =
           try:
             # p.hidePrompt
             erase statusBar
@@ -338,9 +338,6 @@ proc init*(T: type ChainDAGRef,
     while cur.blck != nil:
       let root = db.getStateRoot(cur.blck.root, cur.slot)
       if root.isSome():
-        # TODO load StateData from BeaconChainDB
-        # We save state root separately for empty slots which means we might
-        # sometimes not find a state even though we saved its state root
         if db.getState(root.get(), tmpState.data.data, noRollback):
           tmpState.data.root = root.get()
           tmpState.blck = cur.blck
@@ -422,24 +419,25 @@ proc getEpochRef*(dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef =
       newEpochRef = EpochRef.init(state, cache, prevEpochRef)

-      # TODO consider constraining the number of epochrefs per state
-      ancestor.blck.epochRefs.add newEpochRef
-      newEpochRef.updateKeyStores(blck.parent, dag.finalizedHead.blck)
+      if ancestor.blck.slot >= dag.finalizedHead.blck.slot:
+        # Only cache epoch information for unfinalized blocks - earlier states
+        # are seldom used (ie RPC), so no need to cache
+        ancestor.blck.epochRefs.add newEpochRef
+        newEpochRef.updateKeyStores(blck.parent, dag.finalizedHead.blck)
       newEpochRef

 proc getState(
     dag: ChainDAGRef, state: var StateData, stateRoot: Eth2Digest,
     blck: BlockRef): bool =
-  let stateAddr = unsafeAddr state # local scope
-  func restore(v: var BeaconState) =
-    if stateAddr == (unsafeAddr dag.headState):
-      # TODO seeing the headState in the restore shouldn't happen - we load
-      # head states only when updating the head position, and by that time
-      # the database will have gone through enough sanity checks that
-      # SSZ exceptions shouldn't happen, which is when restore happens.
-      # Nonetheless, this is an ugly workaround that needs to go away
-      doAssert false, "Cannot alias headState"
+  let restoreAddr =
+    # Any restore point will do as long as it's not the object being updated
+    if unsafeAddr(state) == unsafeAddr(dag.headState):
+      unsafeAddr dag.tmpState
+    else:
+      unsafeAddr dag.headState

-    assign(stateAddr[], dag.headState)
+  func restore(v: var BeaconState) =
+    assign(v, restoreAddr[].data.data)

   if not dag.db.getState(stateRoot, state.data.data, restore):
     return false
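The `getState` rewrite above swaps the "never alias headState" assertion for a restore point: before loading, pick some in-memory state that is *not* the one being overwritten, and copy it back if the load fails midway. A simplified, hedged sketch of that pattern with stand-in types (the real code restores a full BeaconState via `assign`):

```nim
type
  State = object
    data: seq[int]
  Dag = object
    headState, tmpState: State

proc loadInto(dag: var Dag, target: var State, newData: seq[int]): bool =
  # any restore point will do as long as it's not the object being updated
  let restoreAddr =
    if unsafeAddr(target) == unsafeAddr(dag.headState):
      unsafeAddr dag.tmpState
    else:
      unsafeAddr dag.headState

  target.data = newData                # pretend this is the partial DB load
  if newData.len == 0:                 # simulate a failed load
    target = restoreAddr[]             # roll back from the restore point
    return false
  true

when isMainModule:
  var dag = Dag(headState: State(data: @[1]), tmpState: State(data: @[1]))
  doAssert not dag.loadInto(dag.headState, newSeq[int]())
  doAssert dag.headState.data == @[1]  # failed load was rolled back
```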
@@ -471,7 +469,6 @@ proc putState*(dag: ChainDAGRef, state: StateData) =
   # TODO we save state at every epoch start but never remove them - we also
   # potentially save multiple states per slot if reorgs happen, meaning
   # we could easily see a state explosion
-  logScope: pcs = "save_state_at_epoch_start"

   # As a policy, we only store epoch boundary states without the epoch block
   # (if it exists) applied - the rest can be reconstructed by loading an epoch
@@ -689,13 +686,6 @@ proc updateStateData*(
     startSlot,
     blck = shortLog(bs)

-proc loadTailState*(dag: ChainDAGRef): StateData =
-  ## Load the state associated with the current tail in the dag
-  let stateRoot = dag.db.getBlock(dag.tail.root).get().message.state_root
-  let found = dag.getState(result, stateRoot, dag.tail)
-  # TODO turn into regular error, this can happen
-  doAssert found, "Failed to load tail state, database corrupt?"
-
 proc delState(dag: ChainDAGRef, bs: BlockSlot) =
   # Delete state and mapping for a particular block+slot
   if not bs.slot.isEpoch:
@@ -716,7 +706,6 @@ proc updateHead*(
   doAssert not newHead.parent.isNil() or newHead.slot == 0
   logScope:
     newHead = shortLog(newHead)
-    pcs = "fork_choice"

   if dag.head == newHead:
     debug "No head block update"
@@ -38,7 +38,7 @@ func getOrResolve*(dag: ChainDAGRef, quarantine: var QuarantineRef, root: Eth2Di
 proc addRawBlock*(
       dag: var ChainDAGRef, quarantine: var QuarantineRef,
       signedBlock: SignedBeaconBlock, onBlockAdded: OnBlockAdded
-     ): Result[BlockRef, BlockError]
+     ): Result[BlockRef, BlockError] {.gcsafe.}

 proc addResolvedBlock(
        dag: var ChainDAGRef, quarantine: var QuarantineRef,
@@ -47,7 +47,6 @@ proc addResolvedBlock(
        onBlockAdded: OnBlockAdded
      ) =
   # TODO move quarantine processing out of here
-  logScope: pcs = "block_resolution"
   doAssert state.data.data.slot == signedBlock.message.slot,
     "state must match block"
   doAssert state.blck.root == signedBlock.message.parent_root,
@@ -249,7 +249,7 @@ proc main() {.async.} =
       quit 1

   if cfg.maxDelay > 0.0:
-    delayGenerator = proc (): chronos.Duration {.gcsafe.} =
+    delayGenerator = proc (): chronos.Duration =
       chronos.milliseconds (rand(cfg.minDelay..cfg.maxDelay)*1000).int

   await sendDeposits(deposits, cfg.web3Url, cfg.privateKey,
@@ -288,7 +288,7 @@ proc openStream(node: Eth2Node,

 proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer {.gcsafe.}

-proc getPeer*(node: Eth2Node, peerId: PeerID): Peer {.gcsafe.} =
+proc getPeer*(node: Eth2Node, peerId: PeerID): Peer =
   node.peers.withValue(peerId, peer) do:
     return peer[]
   do:
@@ -493,7 +493,7 @@ else:
 proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes,
                      ResponseMsg: type,
                      timeout: Duration): Future[NetRes[ResponseMsg]]
-                     {.gcsafe, async.} =
+                     {.async.} =
   var deadline = sleepAsync timeout

   let stream = awaitWithTimeout(peer.network.openStream(peer, protocolId),
@@ -578,7 +578,7 @@ proc implementSendProcBody(sendProc: SendProc) =

 proc handleIncomingStream(network: Eth2Node,
                           conn: Connection,
-                          MsgType: type) {.async, gcsafe.} =
+                          MsgType: type) {.async.} =
   mixin callUserHandler, RecType

   type MsgRec = RecType(MsgType)
@@ -679,7 +679,7 @@ proc handleIncomingStream(network: Eth2Node,
 proc handleOutgoingPeer(peer: Peer): Future[bool] {.async.} =
   let network = peer.network

-  proc onPeerClosed(udata: pointer) {.gcsafe.} =
+  proc onPeerClosed(udata: pointer) =
     debug "Peer (outgoing) lost", peer
     nbc_peers.set int64(len(network.peerPool))

@@ -695,7 +695,7 @@ proc handleOutgoingPeer(peer: Peer): Future[bool] {.async.} =
 proc handleIncomingPeer(peer: Peer): Future[bool] {.async.} =
   let network = peer.network

-  proc onPeerClosed(udata: pointer) {.gcsafe.} =
+  proc onPeerClosed(udata: pointer) =
     debug "Peer (incoming) lost", peer
     nbc_peers.set int64(len(network.peerPool))

@@ -1104,7 +1104,7 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =

 proc setupNat(conf: BeaconNodeConf): tuple[ip: Option[ValidIpAddress],
                                            tcpPort: Port,
-                                           udpPort: Port] {.gcsafe.} =
+                                           udpPort: Port] =
   # defaults
   result.tcpPort = conf.tcpPort
   result.udpPort = conf.udpPort
@@ -1185,7 +1185,9 @@ func gossipId(data: openArray[byte]): string =
 func msgIdProvider(m: messages.Message): string =
   gossipId(m.data)

-proc createEth2Node*(rng: ref BrHmacDrbgContext, conf: BeaconNodeConf, enrForkId: ENRForkID): Eth2Node {.gcsafe.} =
+proc createEth2Node*(
+    rng: ref BrHmacDrbgContext, conf: BeaconNodeConf,
+    enrForkId: ENRForkID): Eth2Node =
   var
     (extIp, extTcpPort, extUdpPort) = setupNat(conf)
     hostAddress = tcpEndPoint(conf.libp2pAddress, conf.tcpPort)
@@ -1252,8 +1254,8 @@ func peersCount*(node: Eth2Node): int =

 proc subscribe*[MsgType](node: Eth2Node,
                          topic: string,
-                         msgHandler: proc(msg: MsgType) {.gcsafe.} ) {.async, gcsafe.} =
-  proc execMsgHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+                         msgHandler: proc(msg: MsgType) {.gcsafe.} ) {.async.} =
+  proc execMsgHandler(topic: string, data: seq[byte]) {.async.} =
     inc nbc_gossip_messages_received
     trace "Incoming pubsub message received",
       len = data.len, topic, msgId = gossipId(data)
@@ -1270,8 +1272,8 @@ proc subscribe*[MsgType](node: Eth2Node,

   await node.pubsub.subscribe(topic & "_snappy", execMsgHandler)

-proc subscribe*(node: Eth2Node, topic: string) {.async, gcsafe.} =
-  proc dummyMsgHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+proc subscribe*(node: Eth2Node, topic: string) {.async.} =
+  proc dummyMsgHandler(topic: string, data: seq[byte]) {.async.} =
     discard

   await node.pubsub.subscribe(topic & "_snappy", dummyMsgHandler)
@@ -1281,7 +1283,7 @@ proc addValidator*[MsgType](node: Eth2Node,
                             msgValidator: proc(msg: MsgType): bool {.gcsafe.} ) =
   # Validate messages as soon as subscribed
   proc execValidator(
-      topic: string, message: GossipMsg): Future[bool] {.async, gcsafe.} =
+      topic: string, message: GossipMsg): Future[bool] {.async.} =
     trace "Validating incoming gossip message",
       len = message.data.len, topic, msgId = gossipId(message.data)
     try:
@@ -127,13 +127,12 @@ proc storeBlock(
     start = Moment.now()
     attestationPool = self.attestationPool

-  {.gcsafe.}: # TODO: fork choice and quarantine should sync via messages instead of callbacks
-    let blck = self.chainDag.addRawBlock(self.quarantine, signedBlock) do (
-        blckRef: BlockRef, signedBlock: SignedBeaconBlock,
-        epochRef: EpochRef, state: HashedBeaconState):
-      # Callback add to fork choice if valid
-      attestationPool[].addForkChoice(
-        epochRef, blckRef, signedBlock.message, wallSlot)
+  let blck = self.chainDag.addRawBlock(self.quarantine, signedBlock) do (
+      blckRef: BlockRef, signedBlock: SignedBeaconBlock,
+      epochRef: EpochRef, state: HashedBeaconState):
+    # Callback add to fork choice if valid
+    attestationPool[].addForkChoice(
+      epochRef, blckRef, signedBlock.message, wallSlot)

   self.dumpBlock(signedBlock, blck)

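The `addRawBlock(...) do (...)` form kept above is Nim's do notation: the indented block becomes the trailing proc-typed argument, here the fork-choice callback. A tiny self-contained sketch of the syntax with invented names:

```nim
proc withEach(xs: seq[int], onItem: proc (x: int)) =
  # calls the supplied callback once per element
  for x in xs:
    onItem(x)

when isMainModule:
  # the do block is passed as the `onItem` argument
  withEach(@[1, 2, 3]) do (x: int):
    echo "got ", x
```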
@@ -254,12 +253,6 @@ proc blockValidator*(
     # already-seen data, but it is fairly aggressive about forgetting about
     # what it has seen already
     debug "Dropping already-seen gossip block", delay
-    # TODO:
-    # Potentially use a fast exit here. We only need to check that
-    # the contents of the incoming message match our previously seen
-    # version of the block. We don't need to use HTR for this - for
-    # better efficiency we can use vanilla SHA256 or direct comparison
-    # if we still have the previous block in memory.
     return false

   # Start of block processing - in reality, we have already gone through SSZ
@@ -326,8 +326,7 @@ func prune*(
     )

   trace "Pruning blocks from fork choice",
-    finalizedRoot = shortlog(finalized_root),
-    pcs = "prune"
+    finalizedRoot = shortlog(finalized_root)

   let final_phys_index = finalized_index-self.nodes.offset
   for node_index in 0 ..< final_phys_index:
@@ -490,7 +490,7 @@ func init*(p: typedesc[PeerInfo],

 proc pubsubLogger(conf: InspectorConf, switch: Switch,
                   resolveQueue: AsyncQueue[PeerID], topic: string,
-                  data: seq[byte]): Future[void] {.async, gcsafe.} =
+                  data: seq[byte]): Future[void] {.async.} =
   info "Received pubsub message", size = len(data),
                                   topic = topic,
                                   message = bu.toHex(data)
@@ -155,7 +155,7 @@ proc readResponseChunk(conn: Connection, peer: Peer,
       return neterr UnexpectedEOF

 proc readResponse(conn: Connection, peer: Peer,
-                  MsgType: type, timeout: Duration): Future[NetRes[MsgType]] {.gcsafe, async.} =
+                  MsgType: type, timeout: Duration): Future[NetRes[MsgType]] {.async.} =
   when MsgType is seq:
     type E = ElemType(MsgType)
     var results: MsgType
@@ -101,9 +101,6 @@ func get_attesting_balance(
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/beacon-chain.md#justification-and-finalization
 proc process_justification_and_finalization*(state: var BeaconState,
     cache: var StateCache, updateFlags: UpdateFlags = {}) {.nbench.} =
-
-  logScope: pcs = "process_justification_and_finalization"
-
   if get_current_epoch(state) <= GENESIS_EPOCH + 1:
     return

@@ -129,9 +129,7 @@ proc isSynced*(node: BeaconNode, head: BlockRef): bool =
     true

 proc sendAttestation*(
     node: BeaconNode, attestation: Attestation, num_active_validators: uint64) =
-  logScope: pcs = "send_attestation"
-
   node.network.broadcast(
     getAttestationTopic(node.forkDigest, attestation, num_active_validators),
     attestation)
@@ -159,8 +157,6 @@ proc createAndSendAttestation(node: BeaconNode,
                               committeeLen: int,
                               indexInCommittee: int,
                               num_active_validators: uint64) {.async.} =
-  logScope: pcs = "send_attestation"
-
   var attestation = await validator.produceAndSignAttestation(
     attestationData, committeeLen, indexInCommittee, fork,
     genesis_validators_root)
@@ -234,7 +230,7 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
       await getRandaoReveal(val_info),
       eth1data,
       graffiti,
-      node.attestationPool[].getAttestationsForBlock(state),
+      node.attestationPool[].getAttestationsForBlock(state, cache),
       deposits,
       restore,
       cache)
@@ -253,15 +249,14 @@ proc proposeSignedBlock*(node: BeaconNode,
                          validator: AttachedValidator,
                          newBlock: SignedBeaconBlock): Future[BlockRef] {.async.} =

-  {.gcsafe.}: # TODO: fork choice and quarantine should sync via messages instead of callbacks
-    let newBlockRef = node.chainDag.addRawBlock(node.quarantine,
-                                                newBlock) do (
-        blckRef: BlockRef, signedBlock: SignedBeaconBlock,
-        epochRef: EpochRef, state: HashedBeaconState):
-      # Callback add to fork choice if valid
-      node.attestationPool[].addForkChoice(
-        epochRef, blckRef, signedBlock.message,
-        node.beaconClock.now().slotOrZero())
+  let newBlockRef = node.chainDag.addRawBlock(node.quarantine,
+                                              newBlock) do (
+      blckRef: BlockRef, signedBlock: SignedBeaconBlock,
+      epochRef: EpochRef, state: HashedBeaconState):
+    # Callback add to fork choice if valid
+    node.attestationPool[].addForkChoice(
+      epochRef, blckRef, signedBlock.message,
+      node.beaconClock.now().slotOrZero())

   if newBlockRef.isErr:
     warn "Unable to add proposed block to block pool",
@@ -289,8 +284,6 @@ proc proposeBlock(node: BeaconNode,
                   validator_index: ValidatorIndex,
                   head: BlockRef,
                   slot: Slot): Future[BlockRef] {.async.} =
-  logScope: pcs = "block_proposal"
-
   if head.slot >= slot:
     # We should normally not have a head newer than the slot we're proposing for
     # but this can happen if block proposal is delayed
@@ -319,8 +312,6 @@ proc proposeBlock(node: BeaconNode,
 proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
   ## Perform all attestations that the validators attached to this node should
   ## perform during the given slot
-  logScope: pcs = "handleAttestations"
-
   if slot + SLOTS_PER_EPOCH < head.slot:
     # The latest block we know about is a lot newer than the slot we're being
     # asked to attest to - this makes it unlikely that it will be included
@@ -404,8 +395,7 @@ proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
       headRoot = shortLog(head.root),
       slot = shortLog(slot),
       proposer_index = proposer.get()[0],
-      proposer = shortLog(proposer.get()[1].initPubKey()),
-      pcs = "wait_for_proposal"
+      proposer = shortLog(proposer.get()[1].initPubKey())

     return head

@@ -424,13 +414,13 @@ proc broadcastAggregatedAttestations(
   let
     committees_per_slot =
       get_committee_count_per_slot(state, aggregationSlot.epoch, cache)

   var
     slotSigs: seq[Future[ValidatorSig]] = @[]
     slotSigsData: seq[tuple[committee_index: uint64,
                             validator_idx: ValidatorIndex,
                             v: AttachedValidator]] = @[]

   for committee_index in 0'u64..<committees_per_slot:
     let committee = get_beacon_committee(
       state, aggregationSlot, committee_index.CommitteeIndex, cache)
@@ -455,7 +445,7 @@ proc broadcastAggregatedAttestations(
     # Don't broadcast when, e.g., this node isn't aggregator
     # TODO verify there is only one isSome() with test.
     if aggregateAndProof.isSome:
-        let sig = await signAggregateAndProof(curr[0].v,
+      let sig = await signAggregateAndProof(curr[0].v,
         aggregateAndProof.get, state.fork,
         state.genesis_validators_root)
       var signedAP = SignedAggregateAndProof(
@@ -116,7 +116,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
       privKey.genRandaoReveal(state.fork, state.genesis_validators_root, slot),
       eth1data,
       default(GraffitiBytes),
-      attPool.getAttestationsForBlock(state),
+      attPool.getAttestationsForBlock(state, cache),
       @[],
       noRollback,
       cache)
@@ -78,7 +78,7 @@ suiteReport "Attestation pool processing" & preset():
     check:
       process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)

-    let attestations = pool[].getAttestationsForBlock(state.data.data)
+    let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

     check:
       attestations.len == 1
@@ -110,7 +110,7 @@ suiteReport "Attestation pool processing" & preset():
     discard process_slots(
       state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)

-    let attestations = pool[].getAttestationsForBlock(state.data.data)
+    let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

     check:
       attestations.len == 1
@@ -134,7 +134,7 @@ suiteReport "Attestation pool processing" & preset():
     check:
       process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)

-    let attestations = pool[].getAttestationsForBlock(state.data.data)
+    let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

     check:
       attestations.len == 1
@@ -161,7 +161,7 @@ suiteReport "Attestation pool processing" & preset():
     check:
       process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)

-    let attestations = pool[].getAttestationsForBlock(state.data.data)
+    let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

     check:
       attestations.len == 1
@@ -187,7 +187,7 @@ suiteReport "Attestation pool processing" & preset():
     check:
       process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)

-    let attestations = pool[].getAttestationsForBlock(state.data.data)
+    let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

     check:
       attestations.len == 1
@@ -398,7 +398,7 @@ suiteReport "Attestation validation " & preset():
     chainDag = init(ChainDAGRef, defaultRuntimePreset, makeTestDB(SLOTS_PER_EPOCH * 3))
     quarantine = QuarantineRef()
     pool = newClone(AttestationPool.init(chainDag, quarantine))
-    state = newClone(loadTailState(chainDag))
+    state = newClone(chainDag.headState)
     cache = StateCache()
   # Slot 0 is a finalized slot - won't be making attestations for it..
   check:
@@ -130,7 +130,7 @@ suiteReport "Block pool processing" & preset():
     check:
       dag.getRef(default Eth2Digest) == nil

-  wrappedTimedTest "loadTailState gets genesis block on first load" & preset():
+  wrappedTimedTest "loading tail block works" & preset():
     let
       b0 = dag.get(dag.tail.root)
