only deserialize attestation and aggregation gossiped signatures once (#2472)

* only deserialize attestation and aggregation gossiped signatures once

* re-indent some aggregate checks into block scope

* spelling

* remove debugging assertion

* put part of gossip validation back into block context

* attestation pool test signature loading isn't so unsafe, and exportRaw isn't free

* remove more development doAsserts; don't exportRaw in loops
tersec 2021-04-09 12:59:24 +00:00 committed by GitHub
parent 9776fbfe17
commit 79bb0d5379
9 changed files with 194 additions and 131 deletions
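
The thread running through the diff below: a gossiped signature arrives as raw bytes, and each consumer used to deserialize it again. After this change, gossip validation deserializes once into a CookedSig, which then travels with the attestation through the verification queues and into the attestation pool. A minimal sketch of the pattern, with stand-in types rather than the real nimbus-eth2 API:

type
  ValidatorSig = array[4, byte]   # stand-in for the raw 96-byte wire signature
  CookedSig = object              # stand-in for the deserialized BLS signature
    point: int

proc load(raw: ValidatorSig): CookedSig =
  # models the expensive BLS deserialization that now happens only once
  CookedSig(point: int(raw[0]))

proc process(raw: ValidatorSig) =
  let sig = raw.load()  # once, at gossip validation
  # the pool, fork choice, and batch verifier all reuse `sig`
  doAssert sig.point == 1

process([1.byte, 0, 0, 0])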

View File

@@ -38,6 +38,7 @@ type
## be further combined.
aggregation_bits*: CommitteeValidatorsBits
aggregate_signature*: CookedSig
aggregate_signature_raw*: ValidatorSig
AttestationEntry* = object
## Each entry holds the known signatures for a particular, distinct vote
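
The new aggregate_signature_raw field caches the original wire bytes next to the deserialized form, trading a little memory so that code which re-emits an Attestation (see the iterator and block-production changes below) can skip exportRaw. A sketch of the shape, with placeholder types:

type
  RawSig = array[4, byte]   # placeholder for the ValidatorSig wire bytes
  ParsedSig = object        # placeholder for CookedSig
  ValidationSketch = object
    aggregate_signature: ParsedSig    # kept for cheap further aggregation
    aggregate_signature_raw: RawSig   # kept so re-serialization is a copy

var v: ValidationSketch   # both forms travel together in the pool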

View File

@@ -153,6 +153,7 @@ func addToAggregates(pool: var AttestationPool, attestation: Attestation) =
proc addAttestation*(pool: var AttestationPool,
attestation: Attestation,
participants: seq[ValidatorIndex],
signature: CookedSig,
wallSlot: Slot) =
## Add an attestation to the pool, assuming it's been validated already.
## Attestations may be either aggregated or not - we're pursuing an eager
@@ -179,15 +180,19 @@ proc addAttestation*(pool: var AttestationPool,
let
attestationsSeen = addr pool.candidates[candidateIdx.get]
# Only attestations with valid signatures get here
validation = Validation(
template getValidation(): auto =
doAssert attestation.signature == signature.exportRaw
Validation(
aggregation_bits: attestation.aggregation_bits,
aggregate_signature: load(attestation.signature).get.CookedSig)
aggregate_signature: signature,
aggregate_signature_raw: attestation.signature)
var found = false
for a in attestationsSeen.attestations.mitems():
if a.data == attestation.data:
for v in a.validations:
if validation.aggregation_bits.isSubsetOf(v.aggregation_bits):
if attestation.aggregation_bits.isSubsetOf(v.aggregation_bits):
# The validations in the new attestation are a subset of one of the
# attestations that we already have on file - no need to add this
# attestation to the database
@@ -202,9 +207,9 @@ proc addAttestation*(pool: var AttestationPool,
trace "Removing subset attestations", newParticipants = participants
a.validations.keepItIf(
not it.aggregation_bits.isSubsetOf(validation.aggregation_bits))
not it.aggregation_bits.isSubsetOf(attestation.aggregation_bits))
a.validations.add(validation)
a.validations.add(getValidation())
pool.addForkChoiceVotes(
attestation.data.slot, participants, attestation.data.beacon_block_root,
wallSlot)
@@ -220,7 +225,7 @@ proc addAttestation*(pool: var AttestationPool,
if not found:
attestationsSeen.attestations.add(AttestationEntry(
data: attestation.data,
validations: @[validation],
validations: @[getValidation()],
aggregation_bits: attestation.aggregation_bits
))
pool.addForkChoiceVotes(
@@ -284,7 +289,7 @@ iterator attestations*(pool: AttestationPool, slot: Option[Slot],
yield Attestation(
aggregation_bits: validation.aggregation_bits,
data: entry.data,
signature: validation.aggregate_signature.exportRaw
signature: validation.aggregate_signature_raw
)
func getAttestationDataKey(ad: AttestationData): AttestationDataKey =
@@ -380,7 +385,7 @@ proc getAttestationsForBlock*(pool: var AttestationPool,
attestation = Attestation(
aggregation_bits: a.validations[0].aggregation_bits,
data: a.data,
signature: a.validations[0].aggregate_signature.exportRaw
signature: a.validations[0].aggregate_signature_raw
)
agg {.noInit.}: AggregateSignature
@@ -451,7 +456,7 @@ proc getAggregatedAttestation*(pool: AttestationPool,
attestation = Attestation(
aggregation_bits: a.validations[0].aggregation_bits,
data: a.data,
signature: a.validations[0].aggregate_signature.exportRaw
signature: a.validations[0].aggregate_signature_raw
)
agg {.noInit.}: AggregateSignature
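
The subset checks above implement the pool's eager-aggregation policy: an incoming validation whose aggregation bits are covered by an existing one carries no new information and is dropped, while existing validations covered by the newcomer are pruned. A self-contained sketch of that rule over plain bool seqs (the real code uses CommitteeValidatorsBits):

proc isSubsetOf(a, b: seq[bool]): bool =
  # true when every participant set in `a` is also set in `b`
  for i in 0 ..< a.len:
    if a[i] and not b[i]:
      return false
  true

let existing = @[true, true, false, true]
let incoming = @[true, false, false, true]
doAssert incoming.isSubsetOf(existing)      # redundant: not added
doAssert not existing.isSubsetOf(incoming)  # existing entry is kept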

View File

@@ -171,7 +171,7 @@ proc scheduleAttestationCheck*(
fork: Fork, genesis_validators_root: Eth2Digest,
epochRef: EpochRef,
attestation: Attestation
): Option[Future[Result[void, cstring]]] =
): Option[(Future[Result[void, cstring]], CookedSig)] =
## Schedule crypto verification of an attestation
##
## The buffer is processed:
@@ -183,14 +183,14 @@ proc scheduleAttestationCheck*(
## and a future with the deferred attestation check otherwise.
doAssert batchCrypto.pendingBuffer.len < BatchedCryptoSize
let sanity = batchCrypto
.pendingBuffer
.addAttestation(
fork, genesis_validators_root, epochRef,
attestation
)
let (sanity, sig) = batchCrypto
.pendingBuffer
.addAttestation(
fork, genesis_validators_root, epochRef,
attestation
)
if not sanity:
return none(Future[Result[void, cstring]])
return none((Future[Result[void, cstring]], CookedSig))
let fut = newFuture[Result[void, cstring]](
"batch_validation.scheduleAttestationCheck"
@@ -198,14 +198,17 @@ proc scheduleAttestationCheck*(
batchCrypto.schedule(fut)
return some(fut)
return some((fut, sig))
proc scheduleAggregateChecks*(
batchCrypto: ref BatchCrypto,
fork: Fork, genesis_validators_root: Eth2Digest,
epochRef: EpochRef,
signedAggregateAndProof: SignedAggregateAndProof
): Option[tuple[slotCheck, aggregatorCheck, aggregateCheck: Future[Result[void, cstring]]]] =
): Option[(
tuple[slotCheck, aggregatorCheck, aggregateCheck:
Future[Result[void, cstring]]],
CookedSig)] =
## Schedule crypto verification of an aggregate
##
## This involves 3 checks:
@@ -225,7 +228,10 @@ proc scheduleAggregateChecks*(
template aggregate_and_proof: untyped = signedAggregateAndProof.message
template aggregate: untyped = aggregate_and_proof.aggregate
type R = tuple[slotCheck, aggregatorCheck, aggregateCheck: Future[Result[void, cstring]]]
type R = (
tuple[slotCheck, aggregatorCheck, aggregateCheck:
Future[Result[void, cstring]]],
CookedSig)
# Enqueue in the buffer
# ------------------------------------------------------
@@ -254,15 +260,14 @@ proc scheduleAggregateChecks*(
if not sanity:
return none(R)
block:
let sanity = batchCrypto
.pendingBuffer
.addAttestation(
fork, genesis_validators_root, epochRef,
aggregate
)
if not sanity:
return none(R)
let (sanity, sig) = batchCrypto
.pendingBuffer
.addAttestation(
fork, genesis_validators_root, epochRef,
aggregate
)
if not sanity:
return none(R)
let futSlot = newFuture[Result[void, cstring]](
"batch_validation.scheduleAggregateChecks.slotCheck"
@@ -279,4 +284,4 @@ proc scheduleAggregateChecks*(
batchCrypto.schedule(futAggregator, checkThreshold = false)
batchCrypto.schedule(futAggregate)
return some((futSlot, futAggregator, futAggregate))
return some(((futSlot, futAggregator, futAggregate), sig))
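
Both schedulers now return the CookedSig they produced while filling the batch buffer, paired with the pending future(s), so the caller can await verification and still reuse the already-parsed signature. A rough model of the new return shape using std/options (stand-in types, no chronos):

import std/options

type
  CookedSigSketch = distinct int
  Scheduled = tuple[pending: bool, sig: CookedSigSketch]

proc scheduleCheckSketch(sane: bool): Option[Scheduled] =
  # models scheduleAttestationCheck: none() when sanity checks fail,
  # otherwise the pending check (a Future in the real code) plus the
  # signature that was cooked while filling the batch buffer
  if sane:
    some((pending: true, sig: CookedSigSketch(7)))
  else:
    none(Scheduled)

let deferred = scheduleCheckSketch(true)
doAssert deferred.isSome
let (pending, sig) = deferred.get()
doAssert pending   # real code: `await fut` and inspect the Result
discard sig        # reused downstream; no second load()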

View File

@@ -222,10 +222,12 @@ proc attestationValidator*(
beacon_attestations_received.inc()
beacon_attestation_delay.observe(delay.toFloatSeconds())
self[].checkForPotentialDoppelganger(attestation.data, v.value, wallSlot)
self[].checkForPotentialDoppelganger(
attestation.data, v.value.attestingIndices, wallSlot)
trace "Attestation validated"
self.verifQueues[].addAttestation(attestation, v.get())
let (attestingIndices, sig) = v.get()
self.verifQueues[].addAttestation(attestation, attestingIndices, sig)
return ValidationResult.Accept
@@ -266,14 +268,17 @@ proc aggregateValidator*(
beacon_aggregate_delay.observe(delay.toFloatSeconds())
self[].checkForPotentialDoppelganger(
signedAggregateAndProof.message.aggregate.data, v.value, wallSlot)
signedAggregateAndProof.message.aggregate.data, v.value.attestingIndices,
wallSlot)
trace "Aggregate validated",
aggregator_index = signedAggregateAndProof.message.aggregator_index,
selection_proof = signedAggregateAndProof.message.selection_proof,
wallSlot
self.verifQueues[].addAggregate(signedAggregateAndProof, v.get())
let (attestingIndices, sig) = v.get()
self.verifQueues[].addAggregate(
signedAggregateAndProof, attestingIndices, sig)
return ValidationResult.Accept

View File

@@ -40,6 +40,7 @@ type
AttestationEntry = object
v: Attestation
attesting_indices: seq[ValidatorIndex]
sig: CookedSig
AggregateEntry = AttestationEntry
@@ -160,7 +161,9 @@ proc addBlock*(self: var VerifQueueManager, syncBlock: SyncBlock) =
# addLast doesn't fail
asyncSpawn(self.blocksQueue.addLast(BlockEntry(v: syncBlock)))
proc addAttestation*(self: var VerifQueueManager, att: Attestation, att_indices: seq[ValidatorIndex]) =
proc addAttestation*(
self: var VerifQueueManager, att: Attestation,
att_indices: seq[ValidatorIndex], sig: CookedSig) =
## Enqueue a Gossip-validated attestation for consensus verification
# Backpressure:
# If buffer is full, the oldest attestation is dropped and the newest is enqueued
@@ -185,11 +188,13 @@ proc addAttestation*(self: var VerifQueueManager, att: Attestation, att_indices:
try:
self.attestationsQueue.addLastNoWait(
AttestationEntry(v: att, attesting_indices: att_indices))
AttestationEntry(v: att, attesting_indices: att_indices, sig: sig))
except AsyncQueueFullError as exc:
raiseAssert "We just checked that queue is not full! " & exc.msg
proc addAggregate*(self: var VerifQueueManager, agg: SignedAggregateAndProof, att_indices: seq[ValidatorIndex]) =
proc addAggregate*(
self: var VerifQueueManager, agg: SignedAggregateAndProof,
att_indices: seq[ValidatorIndex], sig: CookedSig) =
## Enqueue a Gossip-validated aggregate attestation for consensus verification
# Backpressure:
# If buffer is full, the oldest aggregate is dropped and the newest is enqueued
@@ -216,7 +221,8 @@ proc addAggregate*(self: var VerifQueueManager, agg: SignedAggregateAndProof, at
try:
self.aggregatesQueue.addLastNoWait(AggregateEntry(
v: agg.message.aggregate,
attesting_indices: att_indices))
attesting_indices: att_indices,
sig: sig))
except AsyncQueueFullError as exc:
raiseAssert "We just checked that queue is not full! " & exc.msg
@@ -281,7 +287,7 @@ proc processAttestation(
trace "Processing attestation"
self.consensusManager.attestationPool[].addAttestation(
entry.v, entry.attesting_indices, wallSlot)
entry.v, entry.attesting_indices, entry.sig, wallSlot)
proc processAggregate(
self: var VerifQueueManager, entry: AggregateEntry) =
@@ -298,7 +304,7 @@ proc processAggregate(
trace "Processing aggregate"
self.consensusManager.attestationPool[].addAttestation(
entry.v, entry.attesting_indices, wallSlot)
entry.v, entry.attesting_indices, entry.sig, wallSlot)
proc processBlock(self: var VerifQueueManager, entry: BlockEntry) =
logScope:
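
Queue entries now carry the CookedSig alongside the attestation and attesting indices. The backpressure comments above describe a drop-oldest policy; here is a self-contained sketch of that policy using std/deques (the real code uses a chronos AsyncQueue):

import std/deques

const cap = 2
var queue = initDeque[int]()

proc enqueue(q: var Deque[int], x: int) =
  if q.len >= cap:
    discard q.popFirst()  # drop the oldest entry to make room
  q.addLast(x)            # the newest entry always gets in

for x in [1, 2, 3]:
  queue.enqueue(x)
doAssert queue.peekFirst() == 2  # 1 was dropped under pressure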

View File

@@ -166,7 +166,8 @@ proc validateAttestation*(
attestation: Attestation,
wallTime: BeaconTime,
topicCommitteeIndex: uint64, checksExpensive: bool):
Future[Result[seq[ValidatorIndex], (ValidationResult, cstring)]] {.async.} =
Future[Result[tuple[attestingIndices: seq[ValidatorIndex], sig: CookedSig],
(ValidationResult, cstring)]] {.async.} =
# Some of the checks below have been reordered compared to the spec, to
# perform the cheap checks first - in particular, we want to avoid loading
# an `EpochRef` and checking signatures. This reordering might lead to
@@ -274,7 +275,11 @@ proc validateAttestation*(
"Validator has already voted in epoch")))
if not checksExpensive:
return ok(attesting_indices)
# Only sendAttestation, which discards result, doesn't use checksExpensive
# TODO this means that (a) this becomes an "expensive" check and (b) it is
# doing in-principle unnecessary work, since this should be known from the
# attestation creation.
return ok((attesting_indices, attestation.signature.load.get().CookedSig))
# The signature of attestation is valid.
block:
@@ -286,20 +291,22 @@ proc validateAttestation*(
if v.isErr():
return err((ValidationResult.Reject, v.error))
# Buffer crypto checks
let deferredCrypto = batchCrypto
.scheduleAttestationCheck(
fork, genesis_validators_root, epochRef,
attestation
)
if deferredCrypto.isNone():
return err((ValidationResult.Reject,
cstring("validateAttestation: crypto sanity checks failure")))
# Buffer crypto checks
let deferredCrypto = batchCrypto
.scheduleAttestationCheck(
fork, genesis_validators_root, epochRef,
attestation
)
if deferredCrypto.isNone():
return err((ValidationResult.Reject,
cstring("validateAttestation: crypto sanity checks failure")))
# Await the crypto check
let cryptoChecked = await deferredCrypto.get()
if cryptoChecked.isErr():
return err((ValidationResult.Reject, cryptoChecked.error))
# Await the crypto check
let
(cryptoFut, sig) = deferredCrypto.get()
cryptoChecked = await cryptoFut
if cryptoChecked.isErr():
return err((ValidationResult.Reject, cryptoChecked.error))
# Only valid attestations go in the list, which keeps validator_index
# in range
@@ -308,7 +315,7 @@ proc validateAttestation*(
pool.nextAttestationEpoch[validator_index].subnet =
attestation.data.target.epoch + 1
return ok(attesting_indices)
return ok((attesting_indices, sig))
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
proc validateAggregate*(
@@ -316,7 +323,8 @@ proc validateAggregate*(
batchCrypto: ref BatchCrypto,
signedAggregateAndProof: SignedAggregateAndProof,
wallTime: BeaconTime):
Future[Result[seq[ValidatorIndex], (ValidationResult, cstring)]] {.async.} =
Future[Result[tuple[attestingIndices: seq[ValidatorIndex], sig: CookedSig],
(ValidationResult, cstring)]] {.async.} =
# Some of the checks below have been reordered compared to the spec, to
# perform the cheap checks first - in particular, we want to avoid loading
# an `EpochRef` and checking signatures. This reordering might lead to
@@ -419,34 +427,37 @@ proc validateAggregate*(
if aggregate_and_proof.aggregator_index >= epochRef.validator_keys.lenu64:
return err((ValidationResult.Reject, cstring("Invalid aggregator_index")))
let
fork = getStateField(pool.chainDag.headState, fork)
genesis_validators_root =
getStateField(pool.chainDag.headState, genesis_validators_root)
let
fork = getStateField(pool.chainDag.headState, fork)
genesis_validators_root =
getStateField(pool.chainDag.headState, genesis_validators_root)
let deferredCrypto = batchCrypto
.scheduleAggregateChecks(
fork, genesis_validators_root, epochRef,
signed_aggregate_and_proof
)
if deferredCrypto.isNone():
return err((ValidationResult.Reject,
cstring("validateAttestation: crypto sanity checks failure")))
let deferredCrypto = batchCrypto
.scheduleAggregateChecks(
fork, genesis_validators_root, epochRef,
signed_aggregate_and_proof
)
if deferredCrypto.isNone():
return err((ValidationResult.Reject,
cstring("validateAttestation: crypto sanity checks failure")))
# [REJECT] aggregate_and_proof.selection_proof
let slotChecked = await deferredCrypto.get().slotCheck
if slotChecked.isErr():
return err((ValidationResult.Reject, cstring(
"Selection_proof signature verification failed")))
# [REJECT] aggregate_and_proof.selection_proof
let
(cryptoFuts, sig) = deferredCrypto.get()
slotChecked = await cryptoFuts.slotCheck
if slotChecked.isErr():
return err((ValidationResult.Reject, cstring(
"Selection_proof signature verification failed")))
block:
# [REJECT] The aggregator signature, signed_aggregate_and_proof.signature, is valid.
let aggregatorChecked = await deferredCrypto.get().aggregatorCheck
let aggregatorChecked = await cryptoFuts.aggregatorCheck
if aggregatorChecked.isErr():
return err((ValidationResult.Reject, cstring(
"signed_aggregate_and_proof aggregator signature verification failed")))
# [REJECT] The aggregator signature, signed_aggregate_and_proof.signature, is valid.
let aggregateChecked = await deferredCrypto.get().aggregateCheck
let aggregateChecked = await cryptoFuts.aggregateCheck
if aggregateChecked.isErr():
return err((ValidationResult.Reject, cstring(
"signed_aggregate_and_proof aggregate attester signatures verification failed")))
@@ -470,7 +481,7 @@ proc validateAggregate*(
let attesting_indices = get_attesting_indices(
epochRef, aggregate.data, aggregate.aggregation_bits)
return ok(attesting_indices)
return ok((attesting_indices, sig))
{.push raises: [Defect].}
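
Both validators now return the attesting indices together with the signature they already deserialized - via the batch scheduler on the expensive path, or via a direct load on the cheap checksExpensive == false path. A compact model of the result shape (stand-in types, not the real Result or ValidationResult):

import std/options

type
  CookedSigSketch = distinct int
  Validated = tuple[attestingIndices: seq[int], sig: CookedSigSketch]

proc loadSketch(raw: int): Option[CookedSigSketch] =
  # stand-in for ValidatorSig.load(), which fails on malformed bytes
  if raw >= 0: some(CookedSigSketch(raw)) else: none(CookedSigSketch)

proc validateSketch(raw: int, checksExpensive: bool): Option[Validated] =
  # cheap path (`not checksExpensive`): deserialize right here;
  # expensive path: the batch scheduler hands the CookedSig back instead
  let sig = loadSketch(raw)
  if sig.isNone:
    return none(Validated)
  some((attestingIndices: @[3, 5], sig: sig.get()))

doAssert validateSketch(7, checksExpensive = false).isSome
doAssert validateSketch(-1, checksExpensive = true).isNone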

View File

@@ -29,27 +29,29 @@ func `$`*(s: SignatureSet): string =
# there is no guarantee that pubkeys and signatures received are valid
# unlike when Nimbus did eager loading which ensured they were correct beforehand
template loadOrExitFalse(signature: ValidatorSig): blscurve.Signature =
template loadOrExit(signature: ValidatorSig, failReturn: auto):
blscurve.Signature =
## Load a BLS signature from a raw signature
## Exists the **caller** with false if the signature is invalid
## Exits the **caller** with false if the signature is invalid
let sig = signature.load()
if sig.isNone:
return false # this exits the calling scope, as templates are inlined.
return failReturn # this exits the calling scope, as templates are inlined.
sig.unsafeGet()
template loadWithCacheOrExitFalse(pubkey: ValidatorPubKey): blscurve.PublicKey =
template loadWithCacheOrExit(pubkey: ValidatorPubKey, failReturn: auto):
blscurve.PublicKey =
## Load a BLS public key from a raw public key
## Exists the **caller** with false if the public key is invalid
## Exits the **caller** with false if the public key is invalid
let pk = pubkey.loadWithCache()
if pk.isNone:
return false # this exits the calling scope, as templates are inlined.
return failReturn # this exits the calling scope, as templates are inlined.
pk.unsafeGet()
func addSignatureSet[T](
sigs: var seq[SignatureSet],
pubkey: blscurve.PublicKey,
sszObj: T,
signature: ValidatorSig,
signature: ValidatorSig | blscurve.Signature,
genesis_validators_root: Eth2Digest,
fork: Fork,
epoch: Epoch,
@@ -66,11 +68,18 @@ func addSignatureSet[T](
)
).data
sigs.add((
pubkey,
signing_root,
signature.loadOrExitFalse()
))
when signature is ValidatorSig:
sigs.add((
pubkey,
signing_root,
signature.loadOrExit(false)
))
else:
sigs.add((
pubkey,
signing_root,
signature
))
return true
@@ -82,10 +91,10 @@ proc aggregateAttesters(
doAssert attestation.attesting_indices.len > 0
var attestersAgg{.noInit.}: AggregatePublicKey
attestersAgg.init(state.validators[attestation.attesting_indices[0]]
.pubkey.loadWithCacheOrExitFalse())
.pubkey.loadWithCacheOrExit(false))
for i in 1 ..< attestation.attesting_indices.len:
attestersAgg.aggregate(state.validators[attestation.attesting_indices[i]]
.pubkey.loadWithCacheOrExitFalse())
.pubkey.loadWithCacheOrExit(false))
aggPK.finish(attestersAgg)
return true
@@ -156,14 +165,14 @@ proc addAttestation(
):
if not result: # first iteration
attestersAgg.init(state.validators[valIndex]
.pubkey.loadWithCacheOrExitFalse())
.pubkey.loadWithCacheOrExit(false))
result = true
else:
attestersAgg.aggregate(state.validators[valIndex]
.pubkey.loadWithCacheOrExitFalse())
.pubkey.loadWithCacheOrExit(false))
if not result:
# There was no attesters
# There were no attesters
return false
var attesters{.noinit.}: blscurve.PublicKey
@@ -188,45 +197,51 @@ proc addAttestation*(
fork: Fork, genesis_validators_root: Eth2Digest,
epochRef: auto,
attestation: Attestation
): bool =
): tuple[valid: bool, sig: CookedSig] =
## Add an attestation for batched BLS verification
## purposes
## This only verifies cryptography
##
## Returns true if the attestation was added to the batching buffer
## Returns false if saniy checks failed (non-empty, keys are valid)
## Returns false if sanity checks failed (non-empty, keys are valid)
## In that case the seq[SignatureSet] is unmodified
mixin get_attesting_indices, validator_keys, pubkey
result = false
let defaultFail = (false, default(CookedSig))
result = defaultFail
var attestersAgg{.noInit.}: AggregatePublicKey
for valIndex in epochRef.get_attesting_indices(
attestation.data,
attestation.aggregation_bits):
if not result: # first iteration
if not result.valid: # first iteration
attestersAgg.init(epochRef.validator_keys[valIndex]
.loadWithCacheOrExitFalse())
result = true
.loadWithCacheOrExit(defaultFail))
result.valid = true
else:
attestersAgg.aggregate(epochRef.validator_keys[valIndex]
.loadWithCacheOrExitFalse())
.loadWithCacheOrExit(defaultFail))
if not result:
# There was no attesters
return false
if not result.valid:
# There were no attesters
return defaultFail
var attesters{.noinit.}: blscurve.PublicKey
attesters.finish(attestersAgg)
return sigs.addSignatureSet(
attesters,
attestation.data,
attestation.signature,
genesis_validators_root,
fork,
attestation.data.target.epoch,
DOMAIN_BEACON_ATTESTER)
let cookedSig = attestation.signature.loadOrExit(defaultFail)
return (
sigs.addSignatureSet(
attesters,
attestation.data,
cookedSig,
genesis_validators_root,
fork,
attestation.data.target.epoch,
DOMAIN_BEACON_ATTESTER),
CookedSig(cookedSig))
proc addIndexedAttestation*(
sigs: var seq[SignatureSet],
@@ -271,7 +286,7 @@ proc addSlotSignature*(
let epoch = compute_epoch_at_slot(slot)
return sigs.addSignatureSet(
pubkey.loadWithCacheOrExitFalse(),
pubkey.loadWithCacheOrExit(false),
sszObj = slot,
signature,
genesis_validators_root,
@@ -290,7 +305,7 @@ proc addAggregateAndProofSignature*(
let epoch = compute_epoch_at_slot(aggregate_and_proof.aggregate.data.slot)
return sigs.addSignatureSet(
pubkey.loadWithCacheOrExitFalse(),
pubkey.loadWithCacheOrExit(false),
sszObj = aggregate_and_proof,
signature,
genesis_validators_root,
@@ -325,7 +340,7 @@ proc collectSignatureSets*(
return false
let pubkey = state.validators[proposer_index]
.pubkey.loadWithCacheOrExitFalse()
.pubkey.loadWithCacheOrExit(false)
let epoch = signed_block.message.slot.compute_epoch_at_slot()
# 1. Block proposer
@@ -372,7 +387,7 @@ proc collectSignatureSets*(
let proposer1 = state.validators[header_1.message.proposer_index]
let epoch1 = header_1.message.slot.compute_epoch_at_slot()
if not sigs.addSignatureSet(
proposer1.pubkey.loadWithCacheOrExitFalse(),
proposer1.pubkey.loadWithCacheOrExit(false),
header_1.message,
header_1.signature,
state.genesis_validators_root,
@@ -388,7 +403,7 @@ proc collectSignatureSets*(
let proposer2 = state.validators[header_2.message.proposer_index]
let epoch2 = header_2.message.slot.compute_epoch_at_slot()
if not sigs.addSignatureSet(
proposer2.pubkey.loadWithCacheOrExitFalse(),
proposer2.pubkey.loadWithCacheOrExit(false),
header_2.message,
header_2.signature,
state.genesis_validators_root,
@@ -453,7 +468,7 @@ proc collectSignatureSets*(
if not sigs.addSignatureSet(
state.validators[volex.message.validator_index]
.pubkey.loadWithCacheOrExitFalse(),
.pubkey.loadWithCacheOrExit(false),
volex.message,
volex.signature,
state.genesis_validators_root,
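
The loadOrExit and loadWithCacheOrExit templates lean on a Nim property worth spelling out: templates are inlined, so a return inside the template body exits the calling proc, and the new failReturn parameter lets each call site pick that caller's failure value. A self-contained illustration of the idiom (getOrExit is a hypothetical stand-in, not part of the codebase):

import std/options

template getOrExit(opt: Option[int], failReturn: untyped): int =
  if opt.isNone:
    return failReturn  # inlined: this returns from the *caller*
  opt.get()

proc describe(x: Option[int]): string =
  let v = x.getOrExit("invalid")  # on none, `describe` returns "invalid"
  "value: " & $v

doAssert describe(some(3)) == "value: 3"
doAssert describe(none(int)) == "invalid"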

View File

@@ -118,7 +118,7 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
data: data,
aggregation_bits: aggregation_bits,
signature: sig
), @[validatorIdx], data.slot)
), @[validatorIdx], sig.load.get().CookedSig, data.slot)
proc proposeBlock(slot: Slot) =
if rand(r, 1.0) > blockRatio:

View File

@@ -42,6 +42,9 @@ func combine(tgt: var Attestation, src: Attestation) =
agg.aggregate(src.signature)
tgt.signature = agg.finish()
func loadSig(a: Attestation): CookedSig =
a.signature.load.get().CookedSig
template wrappedTimedTest(name: string, body: untyped) =
# `check` macro takes a copy of whatever it's checking, on the stack!
# This leads to stack overflow
@@ -82,7 +85,8 @@ suiteReport "Attestation pool processing" & preset():
state.data.data, state.blck.root, beacon_committee[0], cache)
pool[].addAttestation(
attestation, @[beacon_committee[0]], attestation.data.slot)
attestation, @[beacon_committee[0]], attestation.loadSig,
attestation.data.slot)
check:
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)
@@ -111,8 +115,10 @@ suiteReport "Attestation pool processing" & preset():
state.data.data, state.blck.root, bc1[0], cache)
# test reverse order
pool[].addAttestation(attestation1, @[bc1[0]], attestation1.data.slot)
pool[].addAttestation(attestation0, @[bc0[0]], attestation1.data.slot)
pool[].addAttestation(
attestation1, @[bc1[0]], attestation1.loadSig, attestation1.data.slot)
pool[].addAttestation(
attestation0, @[bc0[0]], attestation0.loadSig, attestation1.data.slot)
discard process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)
@@ -133,8 +139,10 @@ suiteReport "Attestation pool processing" & preset():
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1], cache)
pool[].addAttestation(attestation0, @[bc0[0]], attestation0.data.slot)
pool[].addAttestation(attestation1, @[bc0[1]], attestation1.data.slot)
pool[].addAttestation(
attestation0, @[bc0[0]], attestation0.loadSig, attestation0.data.slot)
pool[].addAttestation(
attestation1, @[bc0[1]], attestation1.loadSig, attestation1.data.slot)
check:
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)
@@ -158,8 +166,10 @@ suiteReport "Attestation pool processing" & preset():
attestation0.combine(attestation1)
pool[].addAttestation(attestation0, @[bc0[0]], attestation0.data.slot)
pool[].addAttestation(attestation1, @[bc0[1]], attestation1.data.slot)
pool[].addAttestation(
attestation0, @[bc0[0]], attestation0.loadSig, attestation0.data.slot)
pool[].addAttestation(
attestation1, @[bc0[1]], attestation1.loadSig, attestation1.data.slot)
check:
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)
@@ -182,8 +192,10 @@ suiteReport "Attestation pool processing" & preset():
attestation0.combine(attestation1)
pool[].addAttestation(attestation1, @[bc0[1]], attestation1.data.slot)
pool[].addAttestation(attestation0, @[bc0[0]], attestation0.data.slot)
pool[].addAttestation(
attestation1, @[bc0[1]], attestation1.loadSig, attestation1.data.slot)
pool[].addAttestation(
attestation0, @[bc0[0]], attestation0.loadSig, attestation0.data.slot)
check:
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)
@@ -250,7 +262,8 @@ suiteReport "Attestation pool processing" & preset():
state.data.data, state.data.data.slot - 1, 1.CommitteeIndex, cache)
attestation0 = makeAttestation(state.data.data, b10.root, bc1[0], cache)
pool[].addAttestation(attestation0, @[bc1[0]], attestation0.data.slot)
pool[].addAttestation(
attestation0, @[bc1[0]], attestation0.loadSig, attestation0.data.slot)
let head2 = pool[].selectHead(b10Add[].slot)
@@ -261,7 +274,8 @@ suiteReport "Attestation pool processing" & preset():
let
attestation1 = makeAttestation(state.data.data, b11.root, bc1[1], cache)
attestation2 = makeAttestation(state.data.data, b11.root, bc1[2], cache)
pool[].addAttestation(attestation1, @[bc1[1]], attestation1.data.slot)
pool[].addAttestation(
attestation1, @[bc1[1]], attestation1.loadSig, attestation1.data.slot)
let head3 = pool[].selectHead(b10Add[].slot)
let bigger = if b11.root.data < b10.root.data: b10Add else: b11Add
@@ -270,7 +284,8 @@ suiteReport "Attestation pool processing" & preset():
# Ties broken lexicographically in spec -> ?
head3 == bigger[]
pool[].addAttestation(attestation2, @[bc1[2]], attestation2.data.slot)
pool[].addAttestation(
attestation2, @[bc1[2]], attestation2.loadSig, attestation2.data.slot)
let head4 = pool[].selectHead(b11Add[].slot)