nimbus-eth1/tests/test_clique.nim
Jordan Hrycaj cfe955c962
Feature/implement poa processing (#748)
* re-shuffled Clique functions

why:
  Due to the port from the go sources, the interface logic is not
  optimal for nimbus. The main visible function is currently snapshot(),
  and most of the machinery that produces its result has been moved to
  a sub-directory.

* run eip-225 Clique test against p2p/chain.persistBlocks()

why:
  Previously, loading the test block chains was fudged with the sole
  purpose of filling the database. As it is now clear how nimbus works
  on Goerli, the same can be achieved with a more realistic scenario.

details:
  Eventually these tests will be a precursor to the replay tests for the
  Goerli chain, supporting a TDD approach with simpler cases.

* fix exception annotations for executor module

why:
  needed for exception tracking

details:
  the main annoyance are the vmState methods (in state.nim) which can
  potentially throw a base level Exception (a plain proc would only
  throw CatchableError)
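
  For illustration only (this is not code from this commit): Nim tracks
  exceptions via the raises pragma, which the compiler verifies at
  compile time, e.g.

    proc fetchHeader(n: int): string {.raises: [ValueError].} =
      # may raise ValueError only; raising anything else, or calling a
      # proc that might, is rejected at compile time
      if n < 0:
        raise newException(ValueError, "negative block number")
      "header#" & $n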

* split p2p/chain into sub-modules and fix exception annotations

why:
  make space for implementing PoA stuff

* provide over-loadable Clique PRNG

why:
  There is a PRNG provided for generating reproducible number sequences.
  The functions which employ the PRNG to generate time slots were ported
  from the go-implementation. They are currently unused.
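
  A minimal sketch of what an over-loadable, reproducible PRNG can look
  like (the names here are made up for illustration, not the actual
  clique API):

    import std/random

    type
      TimeSlotPrng = object
        rand: proc(max: int): int {.gcsafe.}   # swappable generator

    proc newTimeSlotPrng(seed = 42): TimeSlotPrng =
      var rng = initRand(seed)     # fixed seed => reproducible sequence
      result.rand = proc(max: int): int =
        rng.rand(max - 1)

    # a test can overload the generator with a deterministic stub
    var prng = newTimeSlotPrng()
    prng.rand = proc(max: int): int = 0   # always pick the first slot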

* implement trusted signer assembly in p2p/chain.persistBlocks()

details:
  * PoA processing moved there, at the end of a transaction. Currently,
    there is no action (eg. transaction rollback) if this fails (see the
    sketch after this list).
  * The unit tests with staged blocks work OK. In particular, tests with
    to-be-rejected blocks are still missing.
  * TODO: 1. optimise throughput/cache handling; 2. verify headers
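
  A toy model of the intended control flow (all types and names below
  are made up for illustration, not the actual p2p/chain API):

    type
      ValidationResult = enum Ok, Error
      Tx = object
      Chain = object

    proc beginTransaction(c: var Chain): Tx = Tx()
    proc rollback(tx: Tx) = discard
    proc commit(tx: Tx) = discard
    proc processBlock(c: var Chain; blk: int): ValidationResult = Ok
    proc assembleTrustedSigners(c: var Chain; blks: seq[int]): bool = true

    proc persistBlocks(c: var Chain; blks: seq[int]): ValidationResult =
      let tx = c.beginTransaction()
      for blk in blks:
        if c.processBlock(blk) != Ok:
          tx.rollback()
          return Error
      # PoA trusted signer assembly at the end of the transaction; note
      # that there is currently no rollback if this step fails
      discard c.assembleTrustedSigners(blks)
      tx.commit()
      Ok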

* fix statement cast in pool.nim

* added table features to LRU cache

why:
  Clique uses a mixture of volatile online items from the LRU cache and
  database checkpoints for hard synchronisation. For performance, Clique
  needs more table-like features on that cache.

details:
  First key, last key, and key query were added, as well as an efficient
  random delete. A key-item pair iterator was also added for debugging.
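
  Not the actual cache module, but std/tables can illustrate the flavour
  of the added interface (an ordered table keeps insertion order, much
  like the cache keeps its eviction order):

    import std/tables

    var cache = initOrderedTable[string, int]()
    cache["a"] = 1
    cache["b"] = 2
    cache["c"] = 3

    var keys: seq[string]
    for k in cache.keys: keys.add k
    echo "first=", keys[0], " last=", keys[^1]   # oldest/newest entry
    echo "has b: ", cache.hasKey("b")            # query key
    cache.del("b")                               # random delete
    for key, item in cache.pairs:                # key-item pair iterator
      echo key, " -> ", item                     # handy for debugging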

* re-factored LRU snapshot caching

why:
  Caching was sub-optimal (aka. bonkers) in that it skipped over memory
  caches in many cases and so mostly rebuilt the snapshot from the
  last on-disk checkpoint.

details:
  The LRU snapshot toValue() handler has been moved into the module
  clique_snapshot. This reflects the fact that toValue() is not supposed
  to see the whole LRU cache database, so there must be a higher layer
  working with the whole LRU cache and the on-disk checkpoint database.
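
  The resulting lookup layering, as a self-contained toy model (types
  and names are illustrative, not the clique_snapshot code):

    import std/tables

    type
      Snapshot = object
        blockNumber: int
        signers: seq[string]

    var memCache = initTable[int, Snapshot]()      # volatile LRU layer
    var checkpoints = initTable[int, Snapshot]()   # on-disk layer

    proc findSnapshot(number: int): Snapshot =
      if number in memCache:
        return memCache[number]         # 1. memory cache hit
      if number in checkpoints:
        return checkpoints[number]      # 2. on-disk checkpoint hit
      # 3. otherwise rebuild from the parent snapshot and remember the
      #    result, so later lookups no longer skip the memory cache
      result = findSnapshot(number - 1)
      result.blockNumber = number
      memCache[number] = result

    checkpoints[0] = Snapshot(blockNumber: 0, signers: @["signer1"])
    echo findSnapshot(5).blockNumber    # rebuilds and caches 1..5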

also:
  some clean up

todo:
  The code still assumes that the block headers are valid in themselves.
  This is particularly important when an epoch header (aka re-sync
  header) is processed, as it must contain the PoA result of all
  previous headers.

  So blocks need to be verified when they come in, before being used
  for PoA processing.

* fix some snapshot cache fringe cases

why:
  Must not index empty sequences in clique_snapshot module
2021-07-14 16:13:27 +01:00


# Nimbus
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

import
  std/[algorithm, os, sequtils, strformat, strutils],
  ../nimbus/db/db_chain,
  ../nimbus/p2p/[chain, clique],
  ./test_clique/[pool, undump],
  eth/[common, keys],
  stint,
  unittest2

let
  goerliCapture = "test_clique" / "goerli51840.txt.gz"

# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------

proc getBlockHeader(ap: TesterPool; number: BlockNumber): BlockHeader =
  ## Shortcut => db/db_chain.getBlockHeader()
  doAssert ap.db.getBlockHeader(number, result)

# ------------------------------------------------------------------------------
# Test Runners
# ------------------------------------------------------------------------------

# clique/snapshot_test.go(99): func TestClique(t *testing.T) {
proc runCliqueSnapshot(noisy = true) =
  ## Clique PoA Snapshot
  ## ::
  ##    Tests that Clique signer voting is evaluated correctly for various
  ##    simple and complex scenarios, as well as that a few special corner
  ##    cases fail correctly.
  ##
  suite "Clique PoA Snapshot":
    var
      pool = newVoterPool()
    const
      skipSet = {999}
      testSet = {0 .. 999}

    pool.debug = noisy

    # clique/snapshot_test.go(379): for i, tt := range tests {
    for tt in voterSamples.filterIt(it.id in testSet):
      test &"Snapshots {tt.id:2}: {tt.info.substr(0,50)}...":
        pool.say "\n"

        if tt.id in skipSet:
          skip()
        else:
          # Assemble a chain of headers from the cast votes
          # see clique/snapshot_test.go(407): config := *params.TestChainConfig
          pool
            .resetVoterChain(tt.signers, tt.epoch)
            # see clique/snapshot_test.go(425): for j, block := range blocks {
            .appendVoter(tt.votes)
            .commitVoterChain

          # see clique/snapshot_test.go(477): if err != nil {
          if pool.error != cliqueNoError:
            # Note that clique/snapshot_test.go does not verify _here_ against
            # the scheduled test error -- rather this voting error is supposed
            # to happen earlier (processed at clique/snapshot_test.go(467)) when
            # assembling the block chain (sounds counter intuitive to the author
            # of this source file as the scheduled errors are _clique_ related).
            check pool.error[0] == tt.failure
          else:
            let
              expected = tt.results.mapIt("@" & it).sorted
              snapResult = pool.pp(pool.cliqueSigners).sorted

            pool.say "*** snap state=", pool.snapshot.pp(16)
            pool.say "        result=[", snapResult.join(",") & "]"
            pool.say "      expected=[", expected.join(",") & "]"

            # Verify the final list of signers against the expected ones
            check snapResult == expected

proc runGoerliReplay(noisy = true; dir = "tests"; stopAfterBlock = 0u64) =
  var
    pool = newVoterPool()
    cache: array[7,(seq[BlockHeader],seq[BlockBody])]
    cInx = 0
    stoppedOk = false

  pool.debug = noisy

  let stopThreshold = if stopAfterBlock == 0u64: uint64.high.u256
                      else: stopAfterBlock.u256

  suite "Replay Goerli Chain":

    for w in (dir / goerliCapture).undumpNextGroup:

      if w[0][0].blockNumber == 0.u256:
        # Verify Genesis
        doAssert w[0][0] == pool.getBlockHeader(0.u256)

      else:
        # Condense in cache
        cache[cInx] = w
        cInx.inc

        # Handy for partial tests
        if stopThreshold < cache[cInx-1][0][0].blockNumber:
          stoppedOk = true
          break

        # Run from cache if complete set
        if cache.len <= cInx:
          cInx = 0
          let
            first = cache[0][0][0].blockNumber
            last = cache[^1][0][^1].blockNumber
          test &"Goerli Blocks #{first}..#{last} ({cache.len} transactions)":
            for (headers,bodies) in cache:
              let addedPersistBlocks = pool.chain.persistBlocks(headers,bodies)
              check addedPersistBlocks == ValidationResult.Ok
              if addedPersistBlocks != ValidationResult.Ok: return
    # Rest from cache
    if 0 < cInx:
      let
        first = cache[0][0][0].blockNumber
        last = cache[cInx-1][0][^1].blockNumber
      test &"Goerli Blocks #{first}..#{last} ({cInx} transactions)":
        # Only the first `cInx` cache entries are valid here, so do not
        # iterate over the whole (partially filled) array.
        for (headers,bodies) in cache[0 ..< cInx]:
          let addedPersistBlocks = pool.chain.persistBlocks(headers,bodies)
          check addedPersistBlocks == ValidationResult.Ok
          if addedPersistBlocks != ValidationResult.Ok: return
    if stoppedOk:
      test &"Runner stopped after reaching #{stopThreshold}":
        discard


proc runGoerliBaybySteps(noisy = true; dir = "tests"; stopAfterBlock = 20u64) =
  var
    pool = newVoterPool()
    stoppedOk = false

  pool.debug = noisy

  let stopThreshold = if stopAfterBlock == 0u64: uint64.high.u256
                      else: stopAfterBlock.u256

  suite "Replay Goerli Chain Transactions Single Blockwise":

    for w in (dir / goerliCapture).undumpNextGroup:
      if w[0][0].blockNumber == 0.u256:
        # Verify Genesis
        doAssert w[0][0] == pool.getBlockHeader(0.u256)
      else:
        for n in 0 ..< w[0].len:
          let
            header = w[0][n]
            body = w[1][n]
            parents = w[0][0 ..< n]

          # Handy for partial tests
          if stopThreshold < header.blockNumber:
            stoppedOk = true
            break

          test &"Goerli Block #{header.blockNumber} + {parents.len} parents":
            check pool.chain.clique.cliqueSnapshot(header,parents).isOk
            let addedPersistBlocks = pool.chain.persistBlocks(@[header],@[body])
            check addedPersistBlocks == ValidationResult.Ok
            if addedPersistBlocks != ValidationResult.Ok: return

    if stoppedOk:
      test &"Runner stopped after reaching #{stopThreshold}":
        discard

# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------

proc cliqueMain*(noisy = defined(debug)) =
  noisy.runCliqueSnapshot
  noisy.runGoerliBaybySteps
  noisy.runGoerliReplay

when isMainModule:
  let noisy = defined(debug)

  #noisy.runCliqueSnapshot
  noisy.runGoerliBaybySteps(dir = ".")
  #noisy.runGoerliReplay(dir = ".", stopAfterBlock = 0)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------