diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md index 3a11f2793..494cccb35 100644 --- a/AllTests-mainnet.md +++ b/AllTests-mainnet.md @@ -6,13 +6,15 @@ AllTests-mainnet + Attestations may overlap, bigger first [Preset: mainnet] OK + Attestations may overlap, smaller first [Preset: mainnet] OK + Attestations should be combined [Preset: mainnet] OK -+ Can add and retrieve simple attestation [Preset: mainnet] OK ++ Can add and retrieve simple attestations [Preset: mainnet] OK ++ Everyone voting for something different [Preset: mainnet] OK + Fork choice returns block with attestation OK + Fork choice returns latest block with no attestations OK + Trying to add a block twice tags the second as an error OK + Trying to add a duplicate block from an old pruned epoch is tagged as an error OK ++ Working with aggregates [Preset: mainnet] OK ``` -OK: 9/9 Fail: 0/9 Skip: 0/9 +OK: 11/11 Fail: 0/11 Skip: 0/11 ## Attestation validation [Preset: mainnet] ```diff + Validation sanity OK @@ -290,4 +292,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 OK: 1/1 Fail: 0/1 Skip: 0/1 ---TOTAL--- -OK: 155/164 Fail: 0/164 Skip: 9/164 +OK: 157/166 Fail: 0/166 Skip: 9/166 diff --git a/beacon_chain/beacon_node_types.nim b/beacon_chain/beacon_node_types.nim index 4fee777f2..5e93c0562 100644 --- a/beacon_chain/beacon_node_types.nim +++ b/beacon_chain/beacon_node_types.nim @@ -10,14 +10,12 @@ import std/[deques, intsets, streams, tables], stew/endians2, - spec/[datatypes, digest, crypto], - consensus_object_pools/block_pools_types, - fork_choice/fork_choice_types, - validators/slashing_protection + ./spec/[datatypes, digest, crypto], + ./consensus_object_pools/block_pools_types, + ./fork_choice/fork_choice_types, + ./validators/slashing_protection -from libp2p/protocols/pubsub/pubsub import ValidationResult - -export block_pools_types, ValidationResult +export tables, block_pools_types const ATTESTATION_LOOKBACK* = @@ -37,27 +35,22 @@ type ## added to the aggregate meaning that only non-overlapping aggregates may ## be further combined. aggregation_bits*: CommitteeValidatorsBits - aggregate_signature*: CookedSig - aggregate_signature_raw*: ValidatorSig + aggregate_signature*: AggregateSignature AttestationEntry* = object ## Each entry holds the known signatures for a particular, distinct vote data*: AttestationData - blck*: BlockRef - aggregation_bits*: CommitteeValidatorsBits - validations*: seq[Validation] + committee_len*: int + singles*: Table[int, CookedSig] ## \ + ## On the attestation subnets, only attestations with a single vote are + ## allowed - these can be collected separately to top up aggregates with - + ## here we collect them by mapping index in committee to a vote + aggregates*: seq[Validation] - AttestationsSeen* = object - attestations*: seq[AttestationEntry] ## \ + AttestationTable* = Table[Eth2Digest, AttestationEntry] ## Depending on the world view of the various validators, they may have - ## voted on different states - here we collect all the different - ## combinations that validators have come up with so that later, we can - ## count how popular each world view is (fork choice) - ## TODO this could be a Table[AttestationData, seq[Validation] or something - ## less naive - - # These provide types for attestation pool's cache attestations. 
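The reworked `AttestationEntry` above splits incoming votes into `singles` (attestations carrying exactly one vote, as seen on the attestation subnets, keyed by index in committee) and `aggregates`. A rough standalone model of that split, with bool-seqs standing in for `CommitteeValidatorsBits` and strings for signatures (illustrative only, not the pool's code):

```nim
import std/tables

type
  ToyValidation = object
    bits: seq[bool]              # stands in for aggregation_bits
    sig: string                  # stands in for the aggregate signature

  ToyEntry = object
    committeeLen: int
    singles: Table[int, string]  # index in committee -> single signature
    aggregates: seq[ToyValidation]

proc addVote(entry: var ToyEntry, bits: seq[bool], sig: string) =
  # Exactly one bit set -> the vote is collected in `singles`,
  # otherwise it is stored as an aggregate of its own.
  var setIdx = -1
  var count = 0
  for i, b in bits:
    if b:
      inc count
      setIdx = i
  if count == 1:
    entry.singles[setIdx] = sig
  else:
    entry.aggregates.add ToyValidation(bits: bits, sig: sig)
```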
- AttestationDataKey* = (Slot, uint64, Epoch, Epoch) + ## voted on different states - this map keeps track of each vote keyed by + ## hash_tree_root(AttestationData) AttestationPool* = object ## The attestation pool keeps track of all attestations that potentially @@ -66,11 +59,7 @@ type ## "free" attestations with those found in past blocks - these votes ## are tracked separately in the fork choice. - attestationAggregates*: Table[Slot, Table[Eth2Digest, Attestation]] - ## An up-to-date aggregate of each (htr-ed) attestation_data we see for - ## each slot. We keep aggregates up to 32 slots back from the current slot. - - candidates*: array[ATTESTATION_LOOKBACK, AttestationsSeen] ## \ + candidates*: array[ATTESTATION_LOOKBACK, AttestationTable] ## \ ## We keep one item per slot such that indexing matches slot number ## together with startingSlot @@ -86,21 +75,6 @@ type nextAttestationEpoch*: seq[tuple[subnet: Epoch, aggregate: Epoch]] ## \ ## sequence based on validator indices - attestedValidators*: - Table[AttestationDataKey, CommitteeValidatorsBits] ## \ - ## Cache for quick lookup during beacon block construction of attestations - ## which have already been included, and therefore should be skipped. This - ## isn't that useful for a couple validators per node, but pays off when a - ## larger number of local validators is attached. - - lastPreviousEpochAttestationsLen*: int - lastCurrentEpochAttestationsLen*: int ## \ - lastPreviousEpochAttestation*: PendingAttestation - lastCurrentEpochAttestation*: PendingAttestation - ## Used to detect and incorporate new attestations since the last block - ## created. Defaults are fine as initial values and don't need explicit - ## initialization. - ExitPool* = object ## The exit pool tracks attester slashings, proposer slashings, and ## voluntary exits that could be added to a proposed block. 
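In the pool itself, `candidates` acts as a ring buffer over the last `ATTESTATION_LOOKBACK` slots, with `startingSlot` marking the oldest retained slot. A minimal sketch of the slot-to-index mapping (a simplified stand-in for the pool's `candidateIdx`, assuming a 32-slot window):

```nim
import std/options

const Lookback = 32'u64          # stands in for ATTESTATION_LOOKBACK

func candidateIdx(startingSlot, slot: uint64): Option[uint64] =
  # Only slots in [startingSlot, startingSlot + Lookback) are retained;
  # the index simply wraps around the fixed-size array.
  if slot >= startingSlot and slot < startingSlot + Lookback:
    some(slot mod Lookback)
  else:
    none(uint64)

when isMainModule:
  doAssert candidateIdx(100'u64, 120'u64) == some(120'u64 mod Lookback)
  doAssert candidateIdx(100'u64, 99'u64).isNone()   # too old, already pruned
  doAssert candidateIdx(100'u64, 132'u64).isNone()  # beyond the window
```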
diff --git a/beacon_chain/consensus_object_pools/attestation_pool.nim b/beacon_chain/consensus_object_pools/attestation_pool.nim index a786f09bd..a2bde7e45 100644 --- a/beacon_chain/consensus_object_pools/attestation_pool.nim +++ b/beacon_chain/consensus_object_pools/attestation_pool.nim @@ -13,10 +13,10 @@ import # Status libraries chronicles, stew/byteutils, json_serialization/std/sets as jsonSets, # Internal - ../spec/[beaconstate, datatypes, crypto, digest], + ../spec/[beaconstate, datatypes, crypto, digest, validator], ../ssz/merkleization, "."/[spec_cache, blockchain_dag, block_quarantine], - ../beacon_node_types, + ../beacon_node_types, ../extras, ../fork_choice/fork_choice export beacon_node_types @@ -111,44 +111,104 @@ func candidateIdx(pool: AttestationPool, slot: Slot): Option[uint64] = proc updateCurrent(pool: var AttestationPool, wallSlot: Slot) = if wallSlot + 1 < pool.candidates.lenu64: - return + return # Genesis - if pool.startingSlot + pool.candidates.lenu64 - 1 > wallSlot: + let + newStartingSlot = wallSlot + 1 - pool.candidates.lenu64 + + if newStartingSlot < pool.startingSlot: error "Current slot older than attestation pool view, clock reset?", - poolSlot = pool.startingSlot, wallSlot + startingSlot = pool.startingSlot, newStartingSlot, wallSlot return # As time passes we'll clear out any old attestations as they are no longer # viable to be included in blocks - let newWallSlot = wallSlot + 1 - pool.candidates.lenu64 - for i in pool.startingSlot..newWallSlot: - pool.candidates[i.uint64 mod pool.candidates.lenu64] = AttestationsSeen() + if newStartingSlot - pool.startingSlot >= pool.candidates.lenu64(): + # In case many slots passed since the last update, avoid iterating over + # the same indices over and over + pool.candidates = default(type(pool.candidates)) + else: + for i in pool.startingSlot..newStartingSlot: + pool.candidates[i.uint64 mod pool.candidates.lenu64] = AttestationTable() - pool.startingSlot = newWallSlot + pool.startingSlot = newStartingSlot - # now also clear old aggregated attestations - var keysToRemove: seq[Slot] = @[] - for k, v in pool.attestationAggregates.pairs: - if k < pool.startingSlot: - keysToRemove.add k - for k in keysToRemove: - pool.attestationAggregates.del k +proc oneIndex(bits: CommitteeValidatorsBits): Option[int] = + # Find the index of the set bit, iff one bit is set + var res = none(int) + var idx = 0 + for idx in 0.. aggregate! 
- if not aggregated_attestation.aggregation_bits.overlaps(attestation.aggregation_bits): - var agg {.noInit.}: AggregateSignature - agg.init(aggregated_attestation.signature) - aggregated_attestation.aggregation_bits.combine(attestation.aggregation_bits) - agg.aggregate(attestation.signature) - aggregated_attestation.signature = agg.finish() +func toAttestation(entry: AttestationEntry, validation: Validation): Attestation = + Attestation( + aggregation_bits: validation.aggregation_bits, + data: entry.data, + signature: validation.aggregate_signature.finish().exportRaw() + ) + +func updateAggregates(entry: var AttestationEntry) = + # Upgrade the list of aggregates to ensure that there is at least one + # aggregate (assuming there are singles) and all aggregates have all + # singles incorporated + if entry.singles.len() == 0: + return + + if entry.aggregates.len() == 0: + # If there are singles, we can create an aggregate from them that will + # represent our best knowledge about the current votes + for index_in_committee, signature in entry.singles: + if entry.aggregates.len() == 0: + # Create aggregate on first iteration.. + entry.aggregates.add( + Validation( + aggregation_bits: CommitteeValidatorsBits.init(entry.committee_len), + aggregate_signature: AggregateSignature.init(signature) + )) + else: + entry.aggregates[0].aggregate_signature.aggregate(signature) + + entry.aggregates[0].aggregation_bits.setBit(index_in_committee) + else: + # There already exist aggregates - we'll try to top them up by adding + # singles to them - for example, it may happen that we're being asked to + # produce a block 4s after creating an aggregate and new information may + # have arrived by then. + # In theory, also aggregates could be combined but finding the best + # combination is hard, so we'll pragmatically use singles only here + var updated = false + for index_in_committee, signature in entry.singles: + for v in entry.aggregates.mitems(): + if not v.aggregation_bits[index_in_committee]: + v.aggregation_bits.setBit(index_in_committee) + v.aggregate_signature.aggregate(signature) + updated = true + + if updated: + # One or more aggregates were updated - time to remove the ones that are + # pure subsets of the others. This may lead to quadratic behaviour, but + # the number of aggregates for the entry is limited by the number of + # aggregators on the topic which is capped `is_aggregator` and + # TARGET_AGGREGATORS_PER_COMMITTEE + var i = 0 + while i < entry.aggregates.len(): + var j = 0 + while j < entry.aggregates.len(): + if i != j and entry.aggregates[i].aggregation_bits.isSubsetOf( + entry.aggregates[j].aggregation_bits): + entry.aggregates[i] = entry.aggregates[j] + entry.aggregates.del(j) + dec i # Rerun checks on the new `i` item + break + else: + inc j + inc i proc addAttestation*(pool: var AttestationPool, attestation: Attestation, @@ -156,17 +216,16 @@ proc addAttestation*(pool: var AttestationPool, signature: CookedSig, wallSlot: Slot) = ## Add an attestation to the pool, assuming it's been validated already. - ## Attestations may be either agggregated or not - we're pursuing an eager - ## strategy where we'll drop validations we already knew about and combine - ## the new attestation with an existing one if possible. 
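The subset pruning performed at the end of `updateAggregates` above can be pictured on plain bool-seqs: once an aggregate's bits are fully covered by another aggregate, it carries no extra information and is dropped. A sketch under the assumption that all bit vectors have the same (committee) length; the real code works on `CommitteeValidatorsBits` and keeps the aggregate signatures alongside:

```nim
func isSubsetOf(a, b: seq[bool]): bool =
  # True iff every bit set in `a` is also set in `b`
  doAssert a.len == b.len
  for i in 0 ..< a.len:
    if a[i] and not b[i]:
      return false
  true

proc pruneSubsets(aggregates: var seq[seq[bool]]) =
  # Drop any aggregate whose votes are fully covered by another one
  var i = 0
  while i < aggregates.len:
    var removed = false
    var j = 0
    while j < aggregates.len:
      if i != j and aggregates[i].isSubsetOf(aggregates[j]):
        aggregates.del(i)        # unordered removal; re-check the new item at i
        removed = true
        break
      inc j
    if not removed:
      inc i
```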
## - ## This strategy is not optimal in the sense that it would be possible to find - ## a better packing of attestations by delaying the aggregation, but because - ## it's possible to include more than one aggregate in a block we can be - ## somewhat lazy instead of looking for a perfect packing. + ## Assuming the votes in the attestation have not already been seen, the + ## attestation will be added to the fork choice and lazily added to a list of + ## attestations for future aggregation and block production. logScope: attestation = shortLog(attestation) + doAssert attestation.signature == signature.exportRaw(), + "Deserialized signature must match the one in the attestation" + updateCurrent(pool, wallSlot) let candidateIdx = pool.candidateIdx(attestation.data.slot) @@ -175,66 +234,54 @@ proc addAttestation*(pool: var AttestationPool, startingSlot = pool.startingSlot return - pool.addToAggregates(attestation) - let - attestationsSeen = addr pool.candidates[candidateIdx.get] - # Only attestestions with valid signatures get here + singleIndex = oneIndex(attestation.aggregation_bits) + root = hash_tree_root(attestation.data) + # Careful with pointer, candidate table must not be touched after here + entry = addr pool.candidates[candidateIdx.get].mGetOrPut( + root, + AttestationEntry( + data: attestation.data, + committee_len: attestation.aggregation_bits.len())) - template getValidation(): auto = - doAssert attestation.signature == signature.exportRaw - Validation( - aggregation_bits: attestation.aggregation_bits, - aggregate_signature: signature, - aggregate_signature_raw: attestation.signature) + if singleIndex.isSome(): + if singleIndex.get() in entry[].singles: + trace "Attestation already seen", + singles = entry[].singles.len(), + aggregates = entry[].aggregates.len() - var found = false - for a in attestationsSeen.attestations.mitems(): - if a.data == attestation.data: - for v in a.validations: - if attestation.aggregation_bits.isSubsetOf(v.aggregation_bits): - # The validations in the new attestation are a subset of one of the - # attestations that we already have on file - no need to add this - # attestation to the database - trace "Ignoring subset attestation", newParticipants = participants - found = true - break - - if not found: - # Attestations in the pool that are a subset of the new attestation - # can now be removed per same logic as above - - trace "Removing subset attestations", newParticipants = participants - - a.validations.keepItIf( - not it.aggregation_bits.isSubsetOf(attestation.aggregation_bits)) - - a.validations.add(getValidation()) - pool.addForkChoiceVotes( - attestation.data.slot, participants, attestation.data.beacon_block_root, - wallSlot) - - debug "Attestation resolved", - attestation = shortLog(attestation), - validations = a.validations.len() - - found = true - - break - - if not found: - attestationsSeen.attestations.add(AttestationEntry( - data: attestation.data, - validations: @[getValidation()], - aggregation_bits: attestation.aggregation_bits - )) - pool.addForkChoiceVotes( - attestation.data.slot, participants, attestation.data.beacon_block_root, - wallSlot) + return debug "Attestation resolved", - attestation = shortLog(attestation), - validations = 1 + singles = entry[].singles.len(), + aggregates = entry[].aggregates.len() + + entry[].singles[singleIndex.get()] = signature + else: + # More than one vote in this attestation + for i in 0.. 
0: - pool.lastPreviousEpochAttestation = - state.previous_epoch_attestations[pool.lastPreviousEpochAttestationsLen - 1] - if pool.lastCurrentEpochAttestationsLen > 0: - pool.lastCurrentEpochAttestation = - state.current_epoch_attestations[pool.lastCurrentEpochAttestationsLen - 1] +type + AttestationCacheKey* = (Slot, uint64) + AttestationCache = Table[AttestationCacheKey, CommitteeValidatorsBits] ##\ + ## Cache for quick lookup during beacon block construction of attestations + ## which have already been included, and therefore should be skipped. + +func getAttestationCacheKey(ad: AttestationData): AttestationCacheKey = + # The committee is unique per slot and committee index which means we can use + # it as key for a participation cache - this is checked in `check_attestation` + (ad.slot, ad.index) + +func add( + attCache: var AttestationCache, data: AttestationData, + aggregation_bits: CommitteeValidatorsBits) = + let key = data.getAttestationCacheKey() + attCache.withValue(key, v) do: + v[].incl(aggregation_bits) + do: + attCache[key] = aggregation_bits + +func init(T: type AttestationCache, state: BeaconState): T = + # Load attestations that are scheduled for being given rewards for + for i in 0.. maxAttestationSlot: # Around genesis.. + break - agg {.noInit.}: AggregateSignature - agg.init(a.validations[0].aggregate_signature) + let + slot = Slot(maxAttestationSlot - i) + candidateIdx = pool.candidateIdx(slot) - # Signature verification here is more of a sanity check - it could - # be optimized away, though for now it's easier to reuse the logic from - # the state transition function to ensure that the new block will not - # fail application. - if (let v = check_attestation(state, attestation, {}, cache); v.isErr): - warn "Attestation no longer validates...", - attestation = shortLog(attestation), - err = v.error + if candidateIdx.isNone(): + # Passed the collection horizon - shouldn't happen because it's based on + # ATTESTATION_LOOKBACK + break - continue + for _, entry in pool.candidates[candidateIdx.get()].mpairs(): + entry.updateAggregates() - for i in 1..a.validations.high: - if not attestation.aggregation_bits.overlaps( - a.validations[i].aggregation_bits): - attestation.aggregation_bits.combine(a.validations[i].aggregation_bits) - agg.aggregate(a.validations[i].aggregate_signature) + for j in 0..= MAX_ATTESTATIONS: - debug "getAttestationsForBlock: returning early after hitting MAX_ATTESTATIONS", - attestationSlot = newBlockSlot - 1 - return + # Careful, must not update the attestation table for the pointer to + # remain valid + candidates.add((score, slot, addr entry, j)) -func getAggregatedAttestation*(pool: AttestationPool, + # Using a greedy algorithm, select as many attestations as possible that will + # fit in the block. + # + # For each round, we'll look for the best attestation and add it to the result + # then re-score the other candidates. + # + # A possible improvement here would be to use a maximum cover algorithm. 
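The comments above summarise the packing strategy of `getAttestationsForBlock`: each candidate is scored by how many not-yet-covered votes it adds, the best one is taken, and the rest are re-scored. A toy version of that loop restricted to a single committee (equal-length bool-seqs); the real code scores per `(slot, index)` through the `AttestationCache` and `countOverlap`, and also handles the previous-epoch space limit:

```nim
import std/sequtils

func newVotes(covered, bits: seq[bool]): int =
  # Number of votes in `bits` not yet present in `covered`
  for i in 0 ..< bits.len:
    if bits[i] and not covered[i]:
      inc result

proc selectGreedy(candidates: seq[seq[bool]], maxCount: int): seq[int] =
  # Returns the indices of the selected candidates, best-first
  var covered =
    newSeq[bool](if candidates.len > 0: candidates[0].len else: 0)
  var remaining = toSeq(0 ..< candidates.len)
  while result.len < maxCount and remaining.len > 0:
    var bestPos = 0
    for p in 1 ..< remaining.len:
      if newVotes(covered, candidates[remaining[p]]) >
          newVotes(covered, candidates[remaining[bestPos]]):
        bestPos = p
    let choice = remaining[bestPos]
    if newVotes(covered, candidates[choice]) == 0:
      break                      # nothing left adds coverage
    for i in 0 ..< candidates[choice].len:
      if candidates[choice][i]:
        covered[i] = true
    result.add choice
    remaining.del(bestPos)
```

As the diff's own comment notes, this greedy approach is not optimal; a maximum-coverage algorithm could pack strictly better in some cases.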
+ var + prevEpoch = state.get_previous_epoch() + prevEpochSpace = + state.previous_epoch_attestations.maxLen - state.previous_epoch_attestations.len() + + var res: seq[Attestation] + + while candidates.len > 0 and res.lenu64() < MAX_ATTESTATIONS: + block: + # Find the candidate with the highest score - slot is used as a + # tie-breaker so that more recent attestations are added first + let + candidate = + # Fast path for when all remaining candidates fit + if candidates.lenu64 < MAX_ATTESTATIONS: candidates.len - 1 + else: maxIndex(candidates) + (_, slot, entry, j) = candidates[candidate] + + candidates.del(candidate) # careful, `del` reorders candidates + + if entry[].data.target.epoch == prevEpoch: + if prevEpochSpace < 1: + continue # No need to rescore since we didn't add the attestation + + prevEpochSpace -= 1 + + res.add(entry[].toAttestation(entry[].aggregates[j])) + + # Update cache so that the new votes are taken into account when updating + # the score below + attCache.add(entry[].data, entry[].aggregates[j].aggregation_bits) + + block: + # Because we added some votes, it's quite possible that some candidates + # are no longer interesting - update the scores of the existing candidates + for it in candidates.mitems(): + it.score = attCache.score( + it.entry[].data, + it.entry[].aggregates[it.validation].aggregation_bits) + + candidates.keepItIf: + # Only keep candidates that might add coverage + it.score > 0 + + res + +func bestValidation(aggregates: openArray[Validation]): (int, int) = + # Look for best validation based on number of votes in the aggregate + doAssert aggregates.len() > 0, + "updateAggregates should have created at least one aggregate" + var + bestIndex = 0 + best = aggregates[bestIndex].aggregation_bits.countOnes() + + for i in 1.. 
best: + best = count + bestIndex = i + (bestIndex, best) + +func getAggregatedAttestation*(pool: var AttestationPool, slot: Slot, - ad_htr: Eth2Digest): Option[Attestation] = - try: - if pool.attestationAggregates.contains(slot) and - pool.attestationAggregates[slot].contains(ad_htr): - return some pool.attestationAggregates[slot][ad_htr] - except KeyError: - doAssert(false) # shouldn't be possible because we check with `contains` - none(Attestation) - -proc getAggregatedAttestation*(pool: AttestationPool, - slot: Slot, - index: CommitteeIndex): Option[Attestation] = - let attestations = pool.getAttestationsForSlot( - slot + MIN_ATTESTATION_INCLUSION_DELAY) - if attestations.isNone: + attestation_data_root: Eth2Digest): Option[Attestation] = + let + candidateIdx = pool.candidateIdx(slot) + if candidateIdx.isNone: return none(Attestation) - for a in attestations.get.attestations: - doAssert a.data.slot == slot - if index.uint64 != a.data.index: - continue + pool.candidates[candidateIdx.get].withValue(attestation_data_root, entry): + entry[].updateAggregates() - var - # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#construct-attestation - attestation = Attestation( - aggregation_bits: a.validations[0].aggregation_bits, - data: a.data, - signature: a.validations[0].aggregate_signature_raw - ) + let (bestIndex, _) = bestValidation(entry[].aggregates) - agg {.noInit.}: AggregateSignature - - agg.init(a.validations[0].aggregate_signature) - for v in a.validations[1..^1]: - if not attestation.aggregation_bits.overlaps(v.aggregation_bits): - attestation.aggregation_bits.combine(v.aggregation_bits) - agg.aggregate(v.aggregate_signature) - - attestation.signature = agg.finish() - - return some(attestation) + # Found the right hash, no need to look further + return some(entry[].toAttestation(entry[].aggregates[bestIndex])) none(Attestation) +proc getAggregatedAttestation*(pool: var AttestationPool, + slot: Slot, + index: CommitteeIndex): Option[Attestation] = + ## Select the attestation that has the most votes going for it in the given + ## slot/index + ## https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#construct-aggregate + let + candidateIdx = pool.candidateIdx(slot) + if candidateIdx.isNone: + return none(Attestation) + + var res: Option[Attestation] + for _, entry in pool.candidates[candidateIdx.get].mpairs(): + doAssert entry.data.slot == slot + if index.uint64 != entry.data.index: + continue + + entry.updateAggregates() + + let (bestIndex, best) = bestValidation(entry.aggregates) + + if res.isNone() or best > res.get().aggregation_bits.countOnes(): + res = some(entry.toAttestation(entry.aggregates[bestIndex])) + + res + proc selectHead*(pool: var AttestationPool, wallSlot: Slot): BlockRef = ## Trigger fork choice and returns the new head block. 
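Both `getAggregatedAttestation` overloads ultimately pick the validation with the most set bits via `bestValidation`. A standalone equivalent on bool-seqs, for illustration only:

```nim
func countOnes(bits: seq[bool]): int =
  for b in bits:
    if b: inc result

func bestAggregate(aggregates: seq[seq[bool]]): int =
  # Index of the aggregate carrying the most votes
  doAssert aggregates.len > 0
  result = 0
  for i in 1 ..< aggregates.len:
    if aggregates[i].countOnes() > aggregates[result].countOnes():
      result = i
```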
## Can return `nil` diff --git a/beacon_chain/consensus_object_pools/block_clearance.nim b/beacon_chain/consensus_object_pools/block_clearance.nim index 7f6caec2a..99ae6922c 100644 --- a/beacon_chain/consensus_object_pools/block_clearance.nim +++ b/beacon_chain/consensus_object_pools/block_clearance.nim @@ -16,7 +16,9 @@ import ../spec/[crypto, datatypes, digest, helpers, signatures, signatures_batch, state_transition], ./block_pools_types, ./blockchain_dag, ./block_quarantine -export results +from libp2p/protocols/pubsub/pubsub import ValidationResult + +export results, ValidationResult # Clearance # --------------------------------------------- diff --git a/beacon_chain/consensus_object_pools/block_pools_types.nim b/beacon_chain/consensus_object_pools/block_pools_types.nim index 1e001f6e0..e4f381664 100644 --- a/beacon_chain/consensus_object_pools/block_pools_types.nim +++ b/beacon_chain/consensus_object_pools/block_pools_types.nim @@ -17,9 +17,7 @@ import ../spec/[datatypes, crypto, digest, signatures_batch], ../beacon_chain_db, ../extras -from libp2p/protocols/pubsub/pubsub import ValidationResult -export ValidationResult, sets, tables - +export sets, tables # ############################################# # diff --git a/beacon_chain/gossip_processing/gossip_validation.nim b/beacon_chain/gossip_processing/gossip_validation.nim index 11c22c115..7b6dc3966 100644 --- a/beacon_chain/gossip_processing/gossip_validation.nim +++ b/beacon_chain/gossip_processing/gossip_validation.nim @@ -26,6 +26,9 @@ import ../extras, ./batch_validation +from libp2p/protocols/pubsub/pubsub import ValidationResult +export ValidationResult + logScope: topics = "gossip_checks" @@ -118,25 +121,14 @@ func check_beacon_and_target_block( func check_aggregation_count( attestation: Attestation, singular: bool): Result[void, (ValidationResult, cstring)] = - var onesCount = 0 - # TODO a cleverer algorithm, along the lines of countOnes() in nim-stew - # But that belongs in nim-stew, since it'd break abstraction layers, to - # use details of its representation from nimbus-eth2. 
- for aggregation_bit in attestation.aggregation_bits: - if not aggregation_bit: - continue - onesCount += 1 - if singular: # More than one ok - if onesCount > 1: - return err((ValidationResult.Reject, cstring( - "Attestation has too many aggregation bits"))) - else: - break # Found the one we needed - - if onesCount < 1: + let ones = attestation.aggregation_bits.countOnes() + if singular and ones != 1: return err((ValidationResult.Reject, cstring( - "Attestation has too few aggregation bits"))) + "Attestation must have a single attestation bit set"))) + elif not singular and ones < 1: + return err((ValidationResult.Reject, cstring( + "Attestation must have at least one attestation bit set"))) ok() diff --git a/beacon_chain/spec/crypto.nim b/beacon_chain/spec/crypto.nim index b45a05f84..b73091498 100644 --- a/beacon_chain/spec/crypto.nim +++ b/beacon_chain/spec/crypto.nim @@ -125,6 +125,9 @@ func init*(agg: var AggregateSignature, sig: CookedSig) {.inline.}= ## Initializes an aggregate signature context agg.init(blscurve.Signature(sig)) +func init*(T: type AggregateSignature, sig: CookedSig | ValidatorSig): T = + result.init(sig) + proc aggregate*(agg: var AggregateSignature, sig: ValidatorSig) {.inline.}= ## Aggregate two Validator Signatures ## Both signatures must be valid @@ -134,11 +137,11 @@ proc aggregate*(agg: var AggregateSignature, sig: CookedSig) {.inline.}= ## Aggregate two Validator Signatures agg.aggregate(blscurve.Signature(sig)) -func finish*(agg: AggregateSignature): ValidatorSig {.inline.}= +func finish*(agg: AggregateSignature): CookedSig {.inline.}= ## Canonicalize an AggregateSignature into a signature var sig: blscurve.Signature sig.finish(agg) - ValidatorSig(blob: sig.exportRaw()) + CookedSig(sig) # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#bls-signatures proc blsVerify*( diff --git a/beacon_chain/ssz/bitseqs.nim b/beacon_chain/ssz/bitseqs.nim index 75e4d785a..3412d76c3 100644 --- a/beacon_chain/ssz/bitseqs.nim +++ b/beacon_chain/ssz/bitseqs.nim @@ -235,7 +235,8 @@ func `$`*(a: BitSeq): string = for i in countdown(length - 1, 0): result.add if a[i]: '1' else: '0' -func combine*(tgt: var BitSeq, src: BitSeq) = +func incl*(tgt: var BitSeq, src: BitSeq) = + # Update `tgt` to include the bits of `src`, as if applying `or` to each bit doAssert tgt.len == src.len for tgtWord, srcWord in words(tgt, src): tgtWord = tgtWord or srcWord @@ -245,6 +246,12 @@ func overlaps*(a, b: BitSeq): bool = if (wa and wb) != 0: return true +func countOverlap*(a, b: BitSeq): int = + var res = 0 + for wa, wb in words(a, b): + res += countOnes(wa and wb) + res + func isSubsetOf*(a, b: BitSeq): bool = let alen = a.len doAssert b.len == alen @@ -253,10 +260,24 @@ func isSubsetOf*(a, b: BitSeq): bool = return false true -proc isZeros*(x: BitSeq): bool = +func isZeros*(x: BitSeq): bool = for w in words(x): if w != 0: return false return true +func countOnes*(x: BitSeq): int = + # Count the number of set bits + var res = 0 + for w in words(x): + res += w.countOnes() + res + +func clear*(x: var BitSeq) = + for w in words(x): + w = 0 + +func countZeros*(x: BitSeq): int = + x.len() - x.countOnes() + template bytes*(x: BitSeq): untyped = seq[byte](x) diff --git a/beacon_chain/ssz/types.nim b/beacon_chain/ssz/types.nim index e231765b5..7abf41893 100644 --- a/beacon_chain/ssz/types.nim +++ b/beacon_chain/ssz/types.nim @@ -183,9 +183,12 @@ template `==`*(a, b: BitList): bool = BitSeq(a) == BitSeq(b) template setBit*(x: var BitList, idx: Natural) = setBit(BitSeq(x), 
idx) template clearBit*(x: var BitList, idx: Natural) = clearBit(BitSeq(x), idx) template overlaps*(a, b: BitList): bool = overlaps(BitSeq(a), BitSeq(b)) -template combine*(a: var BitList, b: BitList) = combine(BitSeq(a), BitSeq(b)) +template incl*(a: var BitList, b: BitList) = incl(BitSeq(a), BitSeq(b)) template isSubsetOf*(a, b: BitList): bool = isSubsetOf(BitSeq(a), BitSeq(b)) template isZeros*(x: BitList): bool = isZeros(BitSeq(x)) +template countOnes*(x: BitList): int = countOnes(BitSeq(x)) +template countZeros*(x: BitList): int = countZeros(BitSeq(x)) +template countOverlap*(x, y: BitList): int = countOverlap(BitSeq(x), BitSeq(y)) template `$`*(a: BitList): string = $(BitSeq(a)) iterator items*(x: BitList): bool = diff --git a/beacon_chain/validators/attestation_aggregation.nim b/beacon_chain/validators/attestation_aggregation.nim index 8a9ecceca..0fa3c6133 100644 --- a/beacon_chain/validators/attestation_aggregation.nim +++ b/beacon_chain/validators/attestation_aggregation.nim @@ -26,7 +26,7 @@ func is_aggregator*(epochRef: EpochRef, slot: Slot, index: CommitteeIndex, return is_aggregator(committee_len, slot_signature) proc aggregate_attestations*( - pool: AttestationPool, epochRef: EpochRef, slot: Slot, index: CommitteeIndex, + pool: var AttestationPool, epochRef: EpochRef, slot: Slot, index: CommitteeIndex, validatorIndex: ValidatorIndex, slot_signature: ValidatorSig): Option[AggregateAndProof] = doAssert validatorIndex in get_beacon_committee(epochRef, slot, index) doAssert index.uint64 < get_committee_count_per_slot(epochRef) diff --git a/research/state_sim.nim b/research/state_sim.nim index 92a731ae9..afa9af801 100644 --- a/research/state_sim.nim +++ b/research/state_sim.nim @@ -138,10 +138,10 @@ cli do(slots = SLOTS_PER_EPOCH * 5, makeAttestation(state[].data, latest_block_root, scas, target_slot, i.CommitteeIndex, v, cache, flags) if not att2.aggregation_bits.overlaps(attestation.aggregation_bits): - attestation.aggregation_bits.combine(att2.aggregation_bits) + attestation.aggregation_bits.incl(att2.aggregation_bits) if skipBlsValidation notin flags: agg.aggregate(att2.signature) - attestation.signature = agg.finish() + attestation.signature = agg.finish().exportRaw() if not first: # add the attestation if any of the validators attested, as given diff --git a/tests/mocking/mock_attestations.nim b/tests/mocking/mock_attestations.nim index 58750a8af..a5740d10c 100644 --- a/tests/mocking/mock_attestations.nim +++ b/tests/mocking/mock_attestations.nim @@ -78,7 +78,7 @@ proc signMockAttestation*(state: BeaconState, attestation: var Attestation) = agg.aggregate(sig) if first_iter != true: - attestation.signature = agg.finish() + attestation.signature = agg.finish().exportRaw() # Otherwise no participants so zero sig proc mockAttestationImpl( diff --git a/tests/test_attestation_pool.nim b/tests/test_attestation_pool.nim index b87278afa..29bf9a9ab 100644 --- a/tests/test_attestation_pool.nim +++ b/tests/test_attestation_pool.nim @@ -8,19 +8,21 @@ {.used.} import - # Standard library - std/unittest, + std/sequtils, # Status lib + unittest2, chronicles, chronos, stew/byteutils, eth/keys, # Internal - ../beacon_chain/spec/[crypto, datatypes, digest, validator, state_transition, - helpers, beaconstate, presets, network], ../beacon_chain/[beacon_node_types, extras, beacon_clock], ../beacon_chain/gossip_processing/[gossip_validation, batch_validation], ../beacon_chain/fork_choice/[fork_choice_types, fork_choice], - ../beacon_chain/consensus_object_pools/[block_quarantine, blockchain_dag, 
block_clearance, attestation_pool], + ../beacon_chain/consensus_object_pools/[ + block_quarantine, blockchain_dag, block_clearance, attestation_pool], + ../beacon_chain/ssz/merkleization, + ../beacon_chain/spec/[crypto, datatypes, digest, validator, state_transition, + helpers, beaconstate, presets, network], # Test utilities ./testutil, ./testblockutil @@ -34,27 +36,18 @@ func combine(tgt: var Attestation, src: Attestation) = # In a BLS aggregate signature, one needs to count how many times a # particular public key has been added - since we use a single bit per key, we # can only it once, thus we can never combine signatures that overlap already! - if not tgt.aggregation_bits.overlaps(src.aggregation_bits): - tgt.aggregation_bits.combine(src.aggregation_bits) + doAssert not tgt.aggregation_bits.overlaps(src.aggregation_bits) - var agg {.noInit.}: AggregateSignature - agg.init(tgt.signature) - agg.aggregate(src.signature) - tgt.signature = agg.finish() + tgt.aggregation_bits.incl(src.aggregation_bits) + + var agg {.noInit.}: AggregateSignature + agg.init(tgt.signature) + agg.aggregate(src.signature) + tgt.signature = agg.finish().exportRaw() func loadSig(a: Attestation): CookedSig = a.signature.load.get().CookedSig -template wrappedTimedTest(name: string, body: untyped) = - # `check` macro takes a copy of whatever it's checking, on the stack! - # This leads to stack overflow - # We can mitigate that by wrapping checks in proc - block: # Symbol namespacing - proc wrappedTest() = - timedTest name: - body - wrappedTest() - proc pruneAtFinalization(dag: ChainDAGRef, attPool: AttestationPool) = if dag.needStateCachesAndForkChoicePruning(): dag.pruneStateCachesDAG() @@ -65,9 +58,9 @@ suiteReport "Attestation pool processing" & preset(): ## mock data. setup: - # Genesis state that results in 3 members per committee + # Genesis state that results in 6 members per committee var - chainDag = init(ChainDAGRef, defaultRuntimePreset, makeTestDB(SLOTS_PER_EPOCH * 3)) + chainDag = init(ChainDAGRef, defaultRuntimePreset, makeTestDB(SLOTS_PER_EPOCH * 6)) quarantine = QuarantineRef.init(keys.newRng()) pool = newClone(AttestationPool.init(chainDag, quarantine)) state = newClone(chainDag.headState) @@ -76,27 +69,187 @@ suiteReport "Attestation pool processing" & preset(): check: process_slots(state.data, state.data.data.slot + 1, cache) - wrappedTimedTest "Can add and retrieve simple attestation" & preset(): + timedTest "Can add and retrieve simple attestations" & preset(): let # Create an attestation for slot 1! 
- beacon_committee = get_beacon_committee( + bc0 = get_beacon_committee( state.data.data, state.data.data.slot, 0.CommitteeIndex, cache) attestation = makeAttestation( - state.data.data, state.blck.root, beacon_committee[0], cache) + state.data.data, state.blck.root, bc0[0], cache) pool[].addAttestation( - attestation, @[beacon_committee[0]], attestation.loadSig, + attestation, @[bc0[0]], attestation.loadSig, attestation.data.slot) check: - process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache) + # Added attestation, should get it back + toSeq(pool[].attestations(none(Slot), none(CommitteeIndex))) == + @[attestation] + toSeq(pool[].attestations( + some(attestation.data.slot), none(CommitteeIndex))) == @[attestation] + toSeq(pool[].attestations( + some(attestation.data.slot), some(attestation.data.index.CommitteeIndex))) == + @[attestation] + toSeq(pool[].attestations(none(Slot), some(attestation.data.index.CommitteeIndex))) == + @[attestation] + toSeq(pool[].attestations(some( + attestation.data.slot + 1), none(CommitteeIndex))) == [] + toSeq(pool[].attestations( + none(Slot), some(CommitteeIndex(attestation.data.index + 1)))) == [] + + process_slots( + state.data, state.data.data.slot + MIN_ATTESTATION_INCLUSION_DELAY, cache) let attestations = pool[].getAttestationsForBlock(state.data.data, cache) check: attestations.len == 1 + pool[].getAggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome() - wrappedTimedTest "Attestations may arrive in any order" & preset(): + let + root1 = addTestBlock( + state.data, state.blck.root, + cache, attestations = attestations, nextSlot = false).root + bc1 = get_beacon_committee( + state.data.data, state.data.data.slot, 0.CommitteeIndex, cache) + att1 = makeAttestation( + state.data.data, root1, bc1[0], cache) + + check: + process_slots( + state.data, state.data.data.slot + MIN_ATTESTATION_INCLUSION_DELAY, cache) + + check: + # shouldn't include already-included attestations + pool[].getAttestationsForBlock(state.data.data, cache) == [] + + pool[].addAttestation( + att1, @[bc1[0]], att1.loadSig, att1.data.slot) + + check: + # but new ones should go in + pool[].getAttestationsForBlock(state.data.data, cache).len() == 1 + + let + att2 = makeAttestation( + state.data.data, root1, bc1[1], cache) + pool[].addAttestation( + att2, @[bc1[1]], att2.loadSig, att2.data.slot) + + let + combined = pool[].getAttestationsForBlock(state.data.data, cache) + + check: + # New attestations should be combined with old attestations + combined.len() == 1 + combined[0].aggregation_bits.countOnes() == 2 + + pool[].addAttestation( + combined[0], @[bc1[1], bc1[0]], combined[0].loadSig, combined[0].data.slot) + + check: + # readding the combined attestation shouldn't have an effect + pool[].getAttestationsForBlock(state.data.data, cache).len() == 1 + + let + # Someone votes for a different root + att3 = makeAttestation(state.data.data, Eth2Digest(), bc1[2], cache) + pool[].addAttestation( + att3, @[bc1[2]], att3.loadSig, att3.data.slot) + + check: + # We should now get both attestations for the block, but the aggregate + # should be the one with the most votes + pool[].getAttestationsForBlock(state.data.data, cache).len() == 2 + pool[].getAggregatedAttestation(2.Slot, 0.CommitteeIndex). + get().aggregation_bits.countOnes() == 2 + pool[].getAggregatedAttestation(2.Slot, hash_tree_root(att2.data)). 
+ get().aggregation_bits.countOnes() == 2 + + let + # Someone votes for a different root + att4 = makeAttestation(state.data.data, Eth2Digest(), bc1[2], cache) + pool[].addAttestation( + att4, @[bc1[2]], att3.loadSig, att3.data.slot) + + timedTest "Working with aggregates" & preset(): + let + # Create an attestation for slot 1! + bc0 = get_beacon_committee( + state.data.data, state.data.data.slot, 0.CommitteeIndex, cache) + + var + att0 = makeAttestation(state.data.data, state.blck.root, bc0[0], cache) + att0x = att0 + att1 = makeAttestation(state.data.data, state.blck.root, bc0[1], cache) + att2 = makeAttestation(state.data.data, state.blck.root, bc0[2], cache) + att3 = makeAttestation(state.data.data, state.blck.root, bc0[3], cache) + + # Both attestations include member 2 but neither is a subset of the other + att0.combine(att2) + att1.combine(att2) + + pool[].addAttestation(att0, @[bc0[0], bc0[2]], att0.loadSig, att0.data.slot) + pool[].addAttestation(att1, @[bc0[1], bc0[2]], att1.loadSig, att1.data.slot) + + check: + process_slots( + state.data, state.data.data.slot + MIN_ATTESTATION_INCLUSION_DELAY, cache) + + check: + pool[].getAttestationsForBlock(state.data.data, cache).len() == 2 + # Can get either aggregate here, random! + pool[].getAggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome() + + # Add in attestation 3 - both aggregates should now have it added + pool[].addAttestation(att3, @[bc0[3]], att3.loadSig, att3.data.slot) + + block: + let attestations = pool[].getAttestationsForBlock(state.data.data, cache) + check: + attestations.len() == 2 + attestations[0].aggregation_bits.countOnes() == 3 + # Can get either aggregate here, random! + pool[].getAggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome() + + # Add in attestation 0 as single - attestation 1 is now a superset of the + # aggregates in the pool, so everything else should be removed + pool[].addAttestation(att0x, @[bc0[0]], att0x.loadSig, att0x.data.slot) + + block: + let attestations = pool[].getAttestationsForBlock(state.data.data, cache) + check: + attestations.len() == 1 + attestations[0].aggregation_bits.countOnes() == 4 + pool[].getAggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome() + + timedTest "Everyone voting for something different" & preset(): + var attestations: int + for i in 0.. MAX_ATTESTATIONS, + "6*SLOTS_PER_EPOCH validators > 128 mainnet MAX_ATTESTATIONS" + check: + # Fill block with attestations + pool[].getAttestationsForBlock(state.data.data, cache).lenu64() == + MAX_ATTESTATIONS + pool[].getAggregatedAttestation(state.data.data.slot - 1, 0.CommitteeIndex).isSome() + + timedTest "Attestations may arrive in any order" & preset(): var cache = StateCache() let # Create an attestation for slot 1! @@ -118,7 +271,7 @@ suiteReport "Attestation pool processing" & preset(): pool[].addAttestation( attestation1, @[bc1[0]], attestation1.loadSig, attestation1.data.slot) pool[].addAttestation( - attestation0, @[bc0[0]], attestation0.loadSig, attestation1.data.slot) + attestation0, @[bc0[0]], attestation0.loadSig, attestation0.data.slot) discard process_slots( state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache) @@ -128,7 +281,7 @@ suiteReport "Attestation pool processing" & preset(): check: attestations.len == 1 - wrappedTimedTest "Attestations should be combined" & preset(): + timedTest "Attestations should be combined" & preset(): var cache = StateCache() let # Create an attestation for slot 1! 
@@ -152,7 +305,7 @@ suiteReport "Attestation pool processing" & preset(): check: attestations.len == 1 - wrappedTimedTest "Attestations may overlap, bigger first" & preset(): + timedTest "Attestations may overlap, bigger first" & preset(): var cache = StateCache() var @@ -179,7 +332,7 @@ suiteReport "Attestation pool processing" & preset(): check: attestations.len == 1 - wrappedTimedTest "Attestations may overlap, smaller first" & preset(): + timedTest "Attestations may overlap, smaller first" & preset(): var cache = StateCache() var # Create an attestation for slot 1! @@ -205,7 +358,7 @@ suiteReport "Attestation pool processing" & preset(): check: attestations.len == 1 - wrappedTimedTest "Fork choice returns latest block with no attestations": + timedTest "Fork choice returns latest block with no attestations": var cache = StateCache() let b1 = addTestBlock(state.data, chainDag.tail.root, cache) @@ -233,7 +386,7 @@ suiteReport "Attestation pool processing" & preset(): check: head2 == b2Add[] - wrappedTimedTest "Fork choice returns block with attestation": + timedTest "Fork choice returns block with attestation": var cache = StateCache() let b10 = makeTestBlock(state.data, chainDag.tail.root, cache) @@ -293,7 +446,7 @@ suiteReport "Attestation pool processing" & preset(): # Two votes for b11 head4 == b11Add[] - wrappedTimedTest "Trying to add a block twice tags the second as an error": + timedTest "Trying to add a block twice tags the second as an error": var cache = StateCache() let b10 = makeTestBlock(state.data, chainDag.tail.root, cache) @@ -319,7 +472,7 @@ suiteReport "Attestation pool processing" & preset(): doAssert: b10Add_clone.error == (ValidationResult.Ignore, Duplicate) - wrappedTimedTest "Trying to add a duplicate block from an old pruned epoch is tagged as an error": + timedTest "Trying to add a duplicate block from an old pruned epoch is tagged as an error": # Note: very sensitive to stack usage chainDag.updateFlags.incl {skipBLSValidation} @@ -361,7 +514,7 @@ suiteReport "Attestation pool processing" & preset(): pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot) let head = pool[].selectHead(blockRef[].slot) - doassert: head == blockRef[] + doAssert: head == blockRef[] chainDag.updateHead(head, quarantine) pruneAtFinalization(chainDag, pool[]) @@ -418,7 +571,7 @@ suiteReport "Attestation validation " & preset(): check: process_slots(state.data, state.data.data.slot + 1, cache) - wrappedTimedTest "Validation sanity": + timedTest "Validation sanity": # TODO: refactor tests to avoid skipping BLS validation chainDag.updateFlags.incl {skipBLSValidation} diff --git a/tests/test_bitseqs.nim b/tests/test_bitseqs.nim index ddb49b4ae..66ab1cbca 100644 --- a/tests/test_bitseqs.nim +++ b/tests/test_bitseqs.nim @@ -12,21 +12,33 @@ suite "Bit fields": check: not a[0] + a.isZeros() a.setBit 1 check: not a[0] a[1] + a.countOnes() == 1 + a.countZeros() == 99 + not a.isZeros() + a.countOverlap(a) == 1 b.setBit 2 - a.combine(b) + a.incl(b) check: not a[0] a[1] a[2] + a.countOverlap(a) == 2 + a.countOverlap(b) == 1 + b.countOverlap(a) == 1 + b.countOverlap(b) == 1 + a.clear() + check: + not a[1] test "iterating words": for bitCount in [8, 3, 7, 8, 14, 15, 16, 19, 260]: @@ -73,4 +85,4 @@ suite "Bit fields": check: not a.overlaps(b) not b.overlaps(a) - + a.countOverlap(b) == 0 diff --git a/tests/test_block_pool.nim b/tests/test_block_pool.nim index 92efb0b05..ec3070490 100644 --- a/tests/test_block_pool.nim +++ b/tests/test_block_pool.nim @@ -133,9 +133,7 @@ suiteReport 
"Block pool processing" & preset(): stateData = newClone(dag.headState) cache = StateCache() b1 = addTestBlock(stateData.data, dag.tail.root, cache) - b1Root = hash_tree_root(b1.message) - b2 = addTestBlock(stateData.data, b1Root, cache) - b2Root {.used.} = hash_tree_root(b2.message) + b2 = addTestBlock(stateData.data, b1.root, cache) wrappedTimedTest "getRef returns nil for missing blocks": check: dag.getRef(default Eth2Digest) == nil @@ -154,7 +152,7 @@ suiteReport "Block pool processing" & preset(): check: b1Get.isSome() - b1Get.get().refs.root == b1Root + b1Get.get().refs.root == b1.root b1Add[].root == b1Get.get().refs.root dag.heads.len == 1 dag.heads[0] == b1Add[] @@ -263,12 +261,12 @@ suiteReport "Block pool processing" & preset(): check: # ensure we loaded the correct head state - dag2.head.root == b2Root + dag2.head.root == b2.root hash_tree_root(dag2.headState.data.data) == b2.message.state_root - dag2.get(b1Root).isSome() - dag2.get(b2Root).isSome() + dag2.get(b1.root).isSome() + dag2.get(b2.root).isSome() dag2.heads.len == 1 - dag2.heads[0].root == b2Root + dag2.heads[0].root == b2.root wrappedTimedTest "Adding the same block twice returns a Duplicate error" & preset(): let diff --git a/tests/testblockutil.nim b/tests/testblockutil.nim index 136e8d1b3..ee9edd4b9 100644 --- a/tests/testblockutil.nim +++ b/tests/testblockutil.nim @@ -81,9 +81,11 @@ proc addTestBlock*( attestations = newSeq[Attestation](), deposits = newSeq[Deposit](), graffiti = default(GraffitiBytes), - flags: set[UpdateFlag] = {}): SignedBeaconBlock = + flags: set[UpdateFlag] = {}, + nextSlot = true): SignedBeaconBlock = # Create and add a block to state - state will advance by one slot! - doAssert process_slots(state, state.data.slot + 1, cache, flags) + if nextSlot: + doAssert process_slots(state, state.data.slot + 1, cache, flags) let proposer_index = get_beacon_proposer_index(state.data, cache) @@ -235,7 +237,7 @@ proc makeFullAttestations*( hackPrivKey(state.validators[committee[j]]) )) - attestation.signature = agg.finish() + attestation.signature = agg.finish().exportRaw() result.add attestation iterator makeTestBlocks*( diff --git a/vendor/nim-stew b/vendor/nim-stew index ee78822e0..7d2790fdf 160000 --- a/vendor/nim-stew +++ b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit ee78822e057ac5f39804ecb6ac1096734be13ef8 +Subproject commit 7d2790fdf493dd0869be5ed1b2ecea768eb008c6