# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  std/unittest,
  chronicles, chronos,
  stew/byteutils,
  ./testutil, ./testblockutil,
  ../beacon_chain/spec/[crypto, datatypes, digest, validator, state_transition,
    helpers, beaconstate, presets, network],
  ../beacon_chain/[
    beacon_node_types, attestation_pool, attestation_aggregation, extras, time],
  ../beacon_chain/fork_choice/[fork_choice_types, fork_choice],
  ../beacon_chain/block_pools/[chain_dag, clearance]

func combine(tgt: var Attestation, src: Attestation, flags: UpdateFlags) =
  ## Combine the signature and participation bitfield, with the assumption that
  ## the same data is being signed - if the signatures overlap, they are not
  ## combined.

  doAssert tgt.data == src.data

  # In a BLS aggregate signature, one needs to count how many times a
  # particular public key has been added - since we use a single bit per key,
  # we can only add it once, thus we can never combine signatures that already
  # overlap!
  if not tgt.aggregation_bits.overlaps(src.aggregation_bits):
    tgt.aggregation_bits.combine(src.aggregation_bits)

    if skipBlsValidation notin flags:
      var agg {.noInit.}: AggregateSignature
      agg.init(tgt.signature)
      agg.aggregate(src.signature)
      tgt.signature = agg.finish()
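
# A minimal usage sketch for `combine` (illustration only; `bc`, `state` and
# `cache` stand in for the committee, state and cache set up in the tests
# below):
#
#   var att0 = makeAttestation(state.data.data, state.blck.root, bc[0], cache)
#   let att1 = makeAttestation(state.data.data, state.blck.root, bc[1], cache)
#   att0.combine(att1, {})
#   # att0.aggregation_bits now covers both validators and, since
#   # skipBlsValidation was not passed, att0.signature is the BLS aggregate.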

template wrappedTimedTest(name: string, body: untyped) =
  # The `check` macro takes a copy of whatever it's checking, on the stack -
  # for large objects this can lead to stack overflow.
  # We mitigate that by wrapping each test body in a proc.
  block: # Symbol namespacing
    proc wrappedTest() =
      timedTest name:
        body
    wrappedTest()
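
# Usage sketch - the tests below invoke the template as
#   wrappedTimedTest "some test name" & preset():
#     check: ...
# which expands to a `timedTest` whose body runs inside its own proc frame.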

suiteReport "Attestation pool processing" & preset():
  ## For now just test that we can compile and execute block processing with
  ## mock data.

  setup:
    # Genesis state that results in 3 members per committee
    var
      chainDag = init(ChainDAGRef, defaultRuntimePreset, makeTestDB(SLOTS_PER_EPOCH * 3))
      quarantine = QuarantineRef()
      pool = newClone(AttestationPool.init(chainDag, quarantine))
      state = newClone(chainDag.headState)
      cache = StateCache()

    # Slot 0 is a finalized slot - won't be making attestations for it..
    check:
      process_slots(state.data, state.data.data.slot + 1, cache)
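    # `process_slots` returns a bool, so running it under `check` both advances
    # `state` past the finalized genesis slot and asserts that the slot
    # transition succeeded.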

  wrappedTimedTest "Can add and retrieve simple attestation" & preset():
    let
      # Create an attestation for slot 1!
      beacon_committee = get_beacon_committee(
        state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
      attestation = makeAttestation(
        state.data.data, state.blck.root, beacon_committee[0], cache)

    pool[].addAttestation(
      attestation, [beacon_committee[0]].toHashSet(), attestation.data.slot)

    check:
      process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)

    let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

    check:
      attestations.len == 1

  wrappedTimedTest "Attestations may arrive in any order" & preset():
    var cache = StateCache()
    let
      # Create an attestation for slot 1!
      bc0 = get_beacon_committee(
        state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
      attestation0 = makeAttestation(
        state.data.data, state.blck.root, bc0[0], cache)

    check:
      process_slots(state.data, state.data.data.slot + 1, cache)

    let
      bc1 = get_beacon_committee(state.data.data,
        state.data.data.slot, 0.CommitteeIndex, cache)
      attestation1 = makeAttestation(
        state.data.data, state.blck.root, bc1[0], cache)

    # test reverse order
    pool[].addAttestation(
      attestation1, [bc1[0]].toHashSet, attestation1.data.slot)
    pool[].addAttestation(
      attestation0, [bc0[0]].toHashSet, attestation1.data.slot)

    discard process_slots(
      state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)

    let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

    check:
      attestations.len == 1

  wrappedTimedTest "Attestations should be combined" & preset():
    var cache = StateCache()
    let
      # Create an attestation for slot 1!
      bc0 = get_beacon_committee(
        state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
      attestation0 = makeAttestation(
        state.data.data, state.blck.root, bc0[0], cache)
      attestation1 = makeAttestation(
        state.data.data, state.blck.root, bc0[1], cache)

    pool[].addAttestation(
      attestation0, [bc0[0]].toHashSet, attestation0.data.slot)
    pool[].addAttestation(
      attestation1, [bc0[1]].toHashSet, attestation1.data.slot)

    check:
      process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)

    let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

    check:
      attestations.len == 1

  wrappedTimedTest "Attestations may overlap, bigger first" & preset():
    var cache = StateCache()

    var
      # Create an attestation for slot 1!
      bc0 = get_beacon_committee(
        state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
      attestation0 = makeAttestation(
        state.data.data, state.blck.root, bc0[0], cache)
      attestation1 = makeAttestation(
        state.data.data, state.blck.root, bc0[1], cache)

    attestation0.combine(attestation1, {})

    pool[].addAttestation(
      attestation0, [bc0[0]].toHashSet, attestation0.data.slot)
    pool[].addAttestation(
      attestation1, [bc0[1]].toHashSet, attestation1.data.slot)

    check:
      process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)

    let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

    check:
      attestations.len == 1

  wrappedTimedTest "Attestations may overlap, smaller first" & preset():
    var cache = StateCache()
    var
      # Create an attestation for slot 1!
      bc0 = get_beacon_committee(state.data.data,
        state.data.data.slot, 0.CommitteeIndex, cache)
      attestation0 = makeAttestation(
        state.data.data, state.blck.root, bc0[0], cache)
      attestation1 = makeAttestation(
        state.data.data, state.blck.root, bc0[1], cache)

    attestation0.combine(attestation1, {})

    pool[].addAttestation(
      attestation1, [bc0[1]].toHashSet, attestation1.data.slot)
    pool[].addAttestation(
      attestation0, [bc0[0]].toHashSet, attestation0.data.slot)

    check:
      process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)

    let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

    check:
      attestations.len == 1

  wrappedTimedTest "Fork choice returns latest block with no attestations":
    var cache = StateCache()
    let
      b1 = addTestBlock(state.data, chainDag.tail.root, cache)
      b1Add = chainDag.addRawBlock(quarantine, b1) do (
          blckRef: BlockRef, signedBlock: SignedBeaconBlock,
          epochRef: EpochRef, state: HashedBeaconState):
        # Callback: add the block to fork choice if valid
        pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

    let head = pool[].selectHead(b1Add[].slot)

    check:
      head == b1Add[]

    let
      b2 = addTestBlock(state.data, b1.root, cache)
      b2Add = chainDag.addRawBlock(quarantine, b2) do (
          blckRef: BlockRef, signedBlock: SignedBeaconBlock,
          epochRef: EpochRef, state: HashedBeaconState):
        # Callback: add the block to fork choice if valid
        pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

    let head2 = pool[].selectHead(b2Add[].slot)

    check:
      head2 == b2Add[]

  wrappedTimedTest "Fork choice returns block with attestation":
    var cache = StateCache()
    let
      b10 = makeTestBlock(state.data, chainDag.tail.root, cache)
      b10Add = chainDag.addRawBlock(quarantine, b10) do (
          blckRef: BlockRef, signedBlock: SignedBeaconBlock,
          epochRef: EpochRef, state: HashedBeaconState):
        # Callback: add the block to fork choice if valid
        pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

    let head = pool[].selectHead(b10Add[].slot)

    check:
      head == b10Add[]

    let
      b11 = makeTestBlock(state.data, chainDag.tail.root, cache,
        graffiti = GraffitiBytes [1'u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
      )
      b11Add = chainDag.addRawBlock(quarantine, b11) do (
          blckRef: BlockRef, signedBlock: SignedBeaconBlock,
          epochRef: EpochRef, state: HashedBeaconState):
        # Callback: add the block to fork choice if valid
        pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

      bc1 = get_beacon_committee(
        state.data.data, state.data.data.slot - 1, 1.CommitteeIndex, cache)
      attestation0 = makeAttestation(state.data.data, b10.root, bc1[0], cache)

    pool[].addAttestation(
      attestation0, [bc1[0]].toHashSet, attestation0.data.slot)

    let head2 = pool[].selectHead(b10Add[].slot)

    check:
      # Single vote for b10 and no votes for b11
      head2 == b10Add[]

    let
      attestation1 = makeAttestation(state.data.data, b11.root, bc1[1], cache)
      attestation2 = makeAttestation(state.data.data, b11.root, bc1[2], cache)
    pool[].addAttestation(
      attestation1, [bc1[1]].toHashSet, attestation1.data.slot)

    let head3 = pool[].selectHead(b10Add[].slot)
    let bigger = if b11.root.data < b10.root.data: b10Add else: b11Add

    check:
      # Ties are broken lexicographically in the spec - the higher block root wins
      head3 == bigger[]

    pool[].addAttestation(
      attestation2, [bc1[2]].toHashSet, attestation2.data.slot)

    let head4 = pool[].selectHead(b11Add[].slot)

    check:
      # Two votes for b11
      head4 == b11Add[]

  wrappedTimedTest "Trying to add a block twice tags the second as an error":
    var cache = StateCache()
    let
      b10 = makeTestBlock(state.data, chainDag.tail.root, cache)
      b10Add = chainDag.addRawBlock(quarantine, b10) do (
          blckRef: BlockRef, signedBlock: SignedBeaconBlock,
          epochRef: EpochRef, state: HashedBeaconState):
        # Callback: add the block to fork choice if valid
        pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

    let head = pool[].selectHead(b10Add[].slot)

    check:
      head == b10Add[]

    # -------------------------------------------------------------
    # Add back the old block to ensure we have a duplicate error
    let b10_clone = b10 # Assumes deep copy
    let b10Add_clone = chainDag.addRawBlock(quarantine, b10_clone) do (
        blckRef: BlockRef, signedBlock: SignedBeaconBlock,
        epochRef: EpochRef, state: HashedBeaconState):
      # Callback: add the block to fork choice if valid
      pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

    doAssert: b10Add_clone.error == (ValidationResult.Ignore, Duplicate)
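    # Re-adding a block the DAG already knows about is classified as
    # (Ignore, Duplicate) rather than Reject - the block is not invalid,
    # merely redundant.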

  wrappedTimedTest "Trying to add a duplicate block from an old pruned epoch is tagged as an error":
    # Note: very sensitive to stack usage

    chainDag.updateFlags.incl {skipBLSValidation}
    var cache = StateCache()
    let
      b10 = addTestBlock(state.data, chainDag.tail.root, cache)
      b10Add = chainDag.addRawBlock(quarantine, b10) do (
          blckRef: BlockRef, signedBlock: SignedBeaconBlock,
          epochRef: EpochRef, state: HashedBeaconState):
        # Callback: add the block to fork choice if valid
        pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

    let head = pool[].selectHead(b10Add[].slot)

    doAssert: head == b10Add[]

    # -------------------------------------------------------------
    let b10_clone = b10 # Assumes deep copy

    # -------------------------------------------------------------
    # Pass an epoch
    var block_root = b10.root

    var attestations: seq[Attestation]

    for epoch in 0 ..< 5:
      let start_slot = compute_start_slot_at_epoch(Epoch epoch)
      let committees_per_slot =
        get_committee_count_per_slot(state.data.data, Epoch epoch, cache)
      for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
        let new_block = addTestBlock(
          state.data, block_root, cache, attestations = attestations)

        block_root = new_block.root
        let blockRef = chainDag.addRawBlock(quarantine, new_block) do (
            blckRef: BlockRef, signedBlock: SignedBeaconBlock,
            epochRef: EpochRef, state: HashedBeaconState):
          # Callback: add the block to fork choice if valid
          pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

        let head = pool[].selectHead(blockRef[].slot)
        doAssert: head == blockRef[]
        chainDag.updateHead(head, quarantine)

        attestations.setLen(0)
        for index in 0'u64 ..< committees_per_slot:
          let committee = get_beacon_committee(
            state.data.data, state.data.data.slot, index.CommitteeIndex, cache)

          # Create a bitfield with two thirds of the committee plus one set,
          # placed on the right-most part of the committee field.
          var aggregation_bits = init(CommitteeValidatorsBits, committee.len)
          for v in 0 ..< committee.len * 2 div 3 + 1:
            aggregation_bits[v] = true
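          # Two thirds of the committee (plus one) is the supermajority needed
          # for justification, so with this level of participation every slot
          # the chain finalizes - the `finalizedHead.slot != 0` assertion below
          # relies on that.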

          attestations.add Attestation(
            aggregation_bits: aggregation_bits,
            data: makeAttestationData(
              state.data.data, state.data.data.slot,
              index.CommitteeIndex, block_root)
            # signature: ValidatorSig()
          )

      cache = StateCache()

    # -------------------------------------------------------------
    # Prune

    doAssert: chainDag.finalizedHead.slot != 0

    pool[].prune()
    doAssert: b10.root notin pool.forkChoice.backend
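    # b10 now sits behind the finalized checkpoint, so pruning removes it from
    # the fork choice backend.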

    # Add back the old block to ensure we have a duplicate error
    let b10Add_clone = chainDag.addRawBlock(quarantine, b10_clone) do (
        blckRef: BlockRef, signedBlock: SignedBeaconBlock,
        epochRef: EpochRef, state: HashedBeaconState):
      # Callback: add the block to fork choice if valid
      pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

    doAssert: b10Add_clone.error == (ValidationResult.Ignore, Duplicate)

suiteReport "Attestation validation " & preset():
  setup:
    # Genesis state that results in 3 members per committee
    var
      chainDag = init(ChainDAGRef, defaultRuntimePreset, makeTestDB(SLOTS_PER_EPOCH * 3))
      quarantine = QuarantineRef()
      pool = newClone(AttestationPool.init(chainDag, quarantine))
      state = newClone(chainDag.headState)
      cache = StateCache()

    # Slot 0 is a finalized slot - won't be making attestations for it..
    check:
      process_slots(state.data, state.data.data.slot + 1, cache)

  wrappedTimedTest "Validation sanity":
    chainDag.updateFlags.incl {skipBLSValidation}

    var
      cache: StateCache
    for blck in makeTestBlocks(
        chainDag.headState.data, chainDag.head.root, cache,
        int(SLOTS_PER_EPOCH * 5), false):
      let added = chainDag.addRawBlock(quarantine, blck) do (
          blckRef: BlockRef, signedBlock: SignedBeaconBlock,
          epochRef: EpochRef, state: HashedBeaconState):
        # Callback: add the block to fork choice if valid
        pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

      check: added.isOk()
      chainDag.updateHead(added[], quarantine)

    var
      # Create an attestation for slot 1!
      beacon_committee = get_beacon_committee(
        chainDag.headState.data.data, chainDag.head.slot, 0.CommitteeIndex, cache)
      attestation = makeAttestation(
        chainDag.headState.data.data, chainDag.head.root, beacon_committee[0], cache)

      committees_per_slot =
        get_committee_count_per_slot(chainDag.headState.data.data,
          attestation.data.slot.epoch, cache)

      subnet = compute_subnet_for_attestation(
        committees_per_slot,
        attestation.data.slot, attestation.data.index.CommitteeIndex)
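      # Note (per the phase 0 p2p spec at the time): the subnet works out to
      # roughly (committees_per_slot * (slot mod SLOTS_PER_EPOCH) +
      # committee_index) mod ATTESTATION_SUBNET_COUNT, so the `subnet + 1`
      # used below is a subnet this attestation is not assigned to.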

      beaconTime = attestation.data.slot.toBeaconTime()

    check:
      validateAttestation(pool[], attestation, beaconTime, subnet).isOk

      # Same validator again
      validateAttestation(pool[], attestation, beaconTime, subnet).error()[0] ==
        ValidationResult.Ignore

    pool[].nextAttestationEpoch.setLen(0) # reset for test
    check:
      # Wrong subnet
      validateAttestation(pool[], attestation, beaconTime, subnet + 1).isErr

    pool[].nextAttestationEpoch.setLen(0) # reset for test
    check:
      # Too far in the future
      validateAttestation(
        pool[], attestation, beaconTime - 1.seconds, subnet + 1).isErr

    pool[].nextAttestationEpoch.setLen(0) # reset for test
    check:
      # Too far in the past
      validateAttestation(
        pool[], attestation,
        beaconTime - (SECONDS_PER_SLOT * SLOTS_PER_EPOCH - 1).int.seconds,
        subnet + 1).isErr