Mirror of https://github.com/status-im/nimbus-eth2.git, synced 2025-02-13 15:07:01 +00:00
* reorganize ssz dependencies

This PR continues the work in https://github.com/status-im/nimbus-eth2/pull/2646 and https://github.com/status-im/nimbus-eth2/pull/2779, as well as past issues with serialization and types, to disentangle SSZ from eth2 and at the same time simplify imports and exports with a structured approach.

The principal idea here is that when a library wants to introduce SSZ support, it does so via 3 files:

* `ssz_codecs`, which imports and reexports `codecs` - this covers the basic byte conversions and ensures no overloads get lost
* `xxx_merkleization` imports and exports `merkleization` to specialize and get access to `hash_tree_root` and friends
* `xxx_ssz_serialization` imports and exports `ssz_serialization` to specialize ssz for a specific library

Those that need to interact with SSZ always import the `xxx_` versions of the modules and never `ssz` itself, so as to keep imports simple and safe. This is similar to how the REST / JSON-RPC serializers are structured, in that someone wanting to serialize spec types to REST-JSON will import `eth2_rest_serialization` and nothing else.

* split up ssz into a core library that is independent of eth2 types
* rename `bytes_reader` to `codec` to highlight that it contains coding and decoding of bytes and native ssz types
* remove tricky List init overload that causes compile issues
* get rid of top-level ssz import
* reenable merkleization tests
* move some "standard" json serializers to spec
* remove `ValidatorIndex` serialization for now
* remove test_ssz_merkleization
* add tests for over/underlong byte sequences
* fix broken seq[byte] test - seq[byte] is not an SSZ type

There are a few things this PR doesn't solve:

* like #2646, this PR is weak on how to handle root and other dontSerialize fields that "sometimes" should be computed - the same problem appears in REST / JSON-RPC etc

* Fix a build problem on macOS
* Another way to fix the macOS builds

Co-authored-by: Zahary Karadjov <zahary@gmail.com>
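For illustration, a minimal sketch of that layering - the file layout, module paths, and the `foo` library name are assumptions for the example, not code from this PR:

```nim
# foo_ssz_codecs.nim - imports and reexports the codecs so that
# byte-conversion overloads are not lost for downstream importers.
import ssz/codec
export codec

# foo_merkleization.nim - specializes merkleization, giving foo's
# types access to `hash_tree_root` and friends.
import ssz/merkleization
export merkleization

# foo_ssz_serialization.nim - specializes full SSZ
# encoding/decoding for the foo library.
import ssz/ssz_serialization
export ssz_serialization

# Consumers of foo import the `foo_`-prefixed wrappers above and
# never the underlying ssz modules directly, mirroring how
# `eth2_rest_serialization` wraps the REST/JSON serializers.
```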
153 lines · 5.6 KiB · Nim
# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  # Status lib
  unittest2,
  chronicles, chronos,
  eth/keys,
  # Internal
  ../beacon_chain/[beacon_node_types, beacon_clock],
  ../beacon_chain/gossip_processing/[gossip_validation, batch_validation],
  ../beacon_chain/fork_choice/[fork_choice_types, fork_choice],
  ../beacon_chain/consensus_object_pools/[
    block_quarantine, blockchain_dag, block_clearance, attestation_pool],
  ../beacon_chain/spec/datatypes/phase0,
  ../beacon_chain/spec/[forks, state_transition, helpers, network],
  # Test utilities
  ./testutil, ./testdbutil, ./testblockutil

proc pruneAtFinalization(dag: ChainDAGRef, attPool: AttestationPool) =
  if dag.needStateCachesAndForkChoicePruning():
    dag.pruneStateCachesDAG()
    # pool[].prune() # We test logic without attestation pool / fork choice pruning

suite "Gossip validation " & preset():
|
|
setup:
|
|
# Genesis state that results in 3 members per committee
|
|
var
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, makeTestDB(SLOTS_PER_EPOCH * 3), {})
|
|
quarantine = QuarantineRef.init(keys.newRng())
|
|
pool = newClone(AttestationPool.init(dag, quarantine))
|
|
state = newClone(dag.headState)
|
|
cache = StateCache()
|
|
rewards = RewardInfo()
|
|
batchCrypto = BatchCrypto.new(keys.newRng(), eager = proc(): bool = false)
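      # `eager` returning false keeps signature checks queued for batched
      # verification rather than verifying each one immediately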
    # Slot 0 is a finalized slot - won't be making attestations for it.
    check:
      process_slots(
        defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1,
        cache, rewards, {})

test "Validation sanity":
|
|
# TODO: refactor tests to avoid skipping BLS validation
|
|
dag.updateFlags.incl {skipBLSValidation}
|
|
|
|
var
|
|
cache: StateCache
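    # Produce and import five epochs' worth of blocks to advance the head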
    for blck in makeTestBlocks(
        dag.headState.data, dag.head.root, cache,
        int(SLOTS_PER_EPOCH * 5), false):
      let added = dag.addRawBlock(quarantine, blck) do (
          blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
          epochRef: EpochRef):
        # Callback: add the block to fork choice if it turns out to be valid
        pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

      check: added.isOk()
      dag.updateHead(added[], quarantine)
      pruneAtFinalization(dag, pool[])

    var
      # Create attestations for slot 1
      beacon_committee = get_beacon_committee(
        dag.headState.data, dag.head.slot, 0.CommitteeIndex, cache)
      att_1_0 = makeAttestation(
        dag.headState.data, dag.head.root, beacon_committee[0], cache)
      att_1_1 = makeAttestation(
        dag.headState.data, dag.head.root, beacon_committee[1], cache)

      committees_per_slot =
        get_committee_count_per_slot(dag.headState.data,
          att_1_0.data.slot.epoch, cache)

      subnet = compute_subnet_for_attestation(
        committees_per_slot,
        att_1_0.data.slot, att_1_0.data.index.CommitteeIndex)

      beaconTime = att_1_0.data.slot.toBeaconTime()
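      # Both attestations vote for the same slot and committee, so they
      # map to the same gossip subnet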

    check:
      validateAttestation(pool, batchCrypto, att_1_0, beaconTime, subnet, true).waitFor().isOk

      # Same validator again
      validateAttestation(pool, batchCrypto, att_1_0, beaconTime, subnet, true).waitFor().error()[0] ==
        ValidationResult.Ignore
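      # Ignore drops the message without penalizing the sending peer;
      # Reject, tested further below, also lowers the peer's gossip score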

    pool[].nextAttestationEpoch.setLen(0) # reset for test
    check:
      # Wrong subnet
      validateAttestation(
        pool, batchCrypto, att_1_0, beaconTime, SubnetId(subnet.uint8 + 1), true).waitFor().isErr

    pool[].nextAttestationEpoch.setLen(0) # reset for test
    check:
      # Too far in the future
      validateAttestation(
        pool, batchCrypto, att_1_0, beaconTime - 1.seconds, subnet, true).waitFor().isErr

    pool[].nextAttestationEpoch.setLen(0) # reset for test
    check:
      # Too far in the past
      validateAttestation(
        pool, batchCrypto, att_1_0,
        beaconTime - (SECONDS_PER_SLOT * SLOTS_PER_EPOCH - 1).int.seconds,
        subnet, true).waitFor().isErr

    block:
      var broken = att_1_0
      broken.signature.blob[0] += 1
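      # Flipping a byte makes the signature cryptographically invalid,
      # which must yield Reject rather than Ignore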
      pool[].nextAttestationEpoch.setLen(0) # reset for test
      check:
        # Invalid signature
        validateAttestation(
          pool, batchCrypto, broken, beaconTime, subnet, true).waitFor().
            error()[0] == ValidationResult.Reject

    block:
      var broken = att_1_0
      broken.signature.blob[5] += 1
      pool[].nextAttestationEpoch.setLen(0) # reset for test
      # One invalid, one valid (batched)
      let
        fut_1_0 = validateAttestation(
          pool, batchCrypto, broken, beaconTime, subnet, true)
        fut_1_1 = validateAttestation(
          pool, batchCrypto, att_1_1, beaconTime, subnet, true)

      check:
        fut_1_0.waitFor().error()[0] == ValidationResult.Reject
        fut_1_1.waitFor().isOk()

    block:
      var broken = att_1_0
      # This shouldn't deserialize, which is a different way to break it
      broken.signature.blob = default(type broken.signature.blob)
      pool[].nextAttestationEpoch.setLen(0) # reset for test
      # One invalid, one valid (batched)
      let
        fut_1_0 = validateAttestation(
          pool, batchCrypto, broken, beaconTime, subnet, true)
        fut_1_1 = validateAttestation(
          pool, batchCrypto, att_1_1, beaconTime, subnet, true)

      check:
        fut_1_0.waitFor().error()[0] == ValidationResult.Reject
        fut_1_1.waitFor().isOk()