Feature/poa clique tuning (#765)

* Provide API

details:
  API is bundled via clique.nim.

* Set extraValidation as default for PoA chains

why:
  This triggers consensus verification and an update of the list
  of authorised signers. These signers are integral part of the
  PoA block chain.

todo:
  Option argument to control validation for the nimbus binary.

* Fix snapshot state block number

why:
  Using sub-sequence here, so the len() function was wrong.

* Optional start where block verification begins

why:
  Can speed up loading of the initial parts of the block chain. For
  PoA, this allows to prove & test that authorised signers can be
  (correctly) calculated starting at any point on the block chain.

todo:
  On Goerli around blocks #193537..#197568, processing time increases
  disproportionately -- needs to be understood

* For Clique test, get old grouping back (7 transactions per log entry)

why:
  Forgot to change back after troubleshooting

* Fix field/function/module-name misunderstanding

why:
  Make compilation work

* Use eth_types.blockHash() rather than utils.hash() in Clique modules

why:
  Prefer lib module

* Dissolve snapshot_misc.nim

details:
  .. into clique_verify.nim (the other source file clique_unused.nim
  is inactive)

* Hide unused AsyncLock in Clique descriptor

details:
  Unused here but was part of the Go reference implementation

* Remove fakeDiff flag from Clique descriptor

details:
  This flag was a kludge in the Go reference implementation used for the
  canonical tests. The tests have been adapted so there is no need for
  the fakeDiff flag and its implementation.

* Not observing minimum distance from epoch sync point

why:
  For compiling PoA state, the go implementation will walk back to the
  epoch header with at least 90000 blocks apart from the current header
  in the absence of other synchronisation points.

  Here just the nearest epoch header is used. The assumption is that all
  the checkpoints before have been vetted already regardless of the
  current branch.

details:
  The behaviour of using the nearest vs the minimum distance epoch is
  controlled by a flag and can be changed at run time.

* Analysing processing time (patch adds some debugging/visualisation support)

why:
  At the first half million blocks of the Goerli replay, blocks on the
  interval #194854..#196224 take exceptionally long to process, but not
  due to PoA processing.

details:
  It turns out that much time is spent in p2p/executor.processBlock()
  where the elapsed transaction execution time is significantly greater
  for many of these blocks.

  Between the 1371 blocks #194854..#196224 there are 223 blocks with more
  than 1/2 seconds execution time whereas there are only 4 such blocks
  before and 13 such after this range up to #504192.

* fix debugging symbol in clique_desc (causes CI failing)

* Fixing canonical reference tests

why:
  Two errors were introduced earlier but overlooked:
   1. "Remove fakeDiff flag .." patch was incomplete
   2. "Not observing minimum distance .." introduced problem w/tests 23/24

details:
  Fixing 2. needed to revert the behaviour by setting the
  applySnapsMinBacklog flag for the Clique descriptor. Also a new
  test was added to lock the new behaviour.

* Remove cruft

why:
  Clique/PoA processing was intended to take place somewhere in
  executor/process_block.processBlock() but was decided later to run
  from chain/persist_block.persistBlock() instead.

* Update API comment

* ditto
This commit is contained in:
Jordan Hrycaj 2021-07-30 15:06:51 +01:00 committed by GitHub
parent e9bfcedcaf
commit ca07c40a48
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 639 additions and 363 deletions

View File

@ -134,6 +134,8 @@ type
rng*: ref BrHmacDrbgContext
accounts*: Table[EthAddress, NimbusAccount]
importFile*: string
verifyFromOk*: bool ## activate `verifyFrom` setting
verifyFrom*: uint64 ## verification start block, 0 for disable
const
# these are public network id
@ -269,6 +271,14 @@ proc processInteger*(v: string, o: var int): ConfigStatus =
except ValueError:
result = ErrorParseOption
proc processUInt64*(v: string, o: var uint64): ConfigStatus =
  ## Parse the decimal string `v` as an unsigned 64 bit integer.
  ##
  ## On success the parsed value is stored in `o` and `Success` is returned.
  ## A malformed or out-of-range string leaves `o` untouched and yields
  ## `ErrorParseOption`.
  result = Success
  try:
    o = parseBiggestUInt(v).uint64
  except ValueError:
    result = ErrorParseOption
proc processFloat*(v: string, o: var float): ConfigStatus =
## Convert string to float.
try:
@ -376,6 +386,11 @@ proc processEthArguments(key, value: string): ConfigStatus =
result = processPruneList(value, config.prune)
of "import":
config.importFile = value
of "verifyfrom":
  # Parse the command line value into a local before committing it to the
  # configuration.
  var res = 0u64
  result = processUInt64(value, res)
  # BUG FIX: the original stored `uint64(result)` -- i.e. the numeric value
  # of the `ConfigStatus` return code (0 or 1) -- instead of the parsed
  # block number, so `--verifyfrom` was effectively always 0/1. Store the
  # parsed value `res` instead.
  config.verifyFrom = res
  config.verifyFromOk = true
else:
result = EmptyOption

View File

@ -120,7 +120,12 @@ proc start(nimbus: NimbusNode) =
if ProtocolFlags.Les in conf.net.protocols:
nimbus.ethNode.addCapability les
nimbus.ethNode.chain = newChain(chainDB)
# chainRef: some name to avoid module-name/filed/function misunderstandings
let chainRef = newChain(chainDB)
nimbus.ethNode.chain = chainRef
if conf.verifyFromOk:
chainRef.extraValidation = 0 < conf.verifyFrom
chainRef.verifyFrom = conf.verifyFrom
## Creating RPC Server
if RpcFlags.Enabled in conf.rpc.flags:

View File

@ -43,10 +43,15 @@ type
blockZeroHash: KeccakHash
extraValidation: bool ##\
## Trigger extra validation, currently with `persistBlocksin()` only.
## Trigger extra validation, currently within `persistBlocks()`
## function only.
verifyFrom: BlockNumber ##\
## First block to when `extraValidation` will be applied (only
## effective if `extraValidation` is true.)
cacheByEpoch: EpochHashCache ##\
## Objects cache to speed up lookup in validation functions.
## Objects cache to speed up hash lookup in validation functions.
poa: Clique ##\
## For non-PoA networks (when `db.config.poaEngine` is `false`),
@ -110,41 +115,63 @@ func calculateForkIds(c: ChainConfig,
prevCRC = result[fork].crc
# ------------------------------------------------------------------------------
# Public constructor
# Private constructor helper
# ------------------------------------------------------------------------------
proc newChain*(db: BaseChainDB; poa: Clique; extraValidation = false):
Chain {.gcsafe, raises: [Defect,CatchableError].} =
proc initChain(c: Chain; db: BaseChainDB; poa: Clique; extraValidation: bool)
{.gcsafe, raises: [Defect,CatchableError].} =
## Constructor for the `Chain` descriptor object. For most applications,
## the `poa` argument is transparent and should be initilaised on the fly
## which is available below.
result.new
result.db = db
c.db = db
if not db.config.daoForkSupport:
db.config.daoForkBlock = db.config.homesteadBlock
let g = defaultGenesisBlockForNetwork(db.networkId)
result.blockZeroHash = g.toBlock.blockHash
let genesisCRC = crc32(0, result.blockZeroHash.data)
result.forkIds = calculateForkIds(db.config, genesisCRC)
result.extraValidation = extraValidation
c.blockZeroHash = g.toBlock.blockHash
let genesisCRC = crc32(0, c.blockZeroHash.data)
c.forkIds = calculateForkIds(db.config, genesisCRC)
c.extraValidation = extraValidation
# Initalise the PoA state regardless of whether it is needed on the current
# network. For non-PoA networks (when `db.config.poaEngine` is `false`),
# this descriptor is ignored.
result.poa = db.newCliqueCfg.newClique
c.poa = db.newClique
# Always initialise the epoch cache even though it migh no be used
# unless `extraValidation` is set `true`.
result.cacheByEpoch.initEpochHashCache
c.cacheByEpoch.initEpochHashCache
# ------------------------------------------------------------------------------
# Public constructors
# ------------------------------------------------------------------------------
proc newChain*(db: BaseChainDB; poa: Clique; extraValidation: bool): Chain
    {.gcsafe, raises: [Defect,CatchableError].} =
  ## Constructor for a `Chain` descriptor with an explicitly supplied `poa`
  ## (Clique) state descriptor. Most applications can use one of the other
  ## `newChain()` overloads below, which initialise the PoA state on the
  ## fly. The argument `extraValidation` enables extra block chain
  ## validation when set `true`.
  new result
  result.initChain(db, poa, extraValidation)
proc newChain*(db: BaseChainDB, extraValidation = false):
Chain {.gcsafe, raises: [Defect,CatchableError].} =
proc newChain*(db: BaseChainDB, extraValidation: bool): Chain
{.gcsafe, raises: [Defect,CatchableError].} =
## Constructor for the `Chain` descriptor object with default initialisation
## for the PoA handling. PoA handling is applicable on PoA networks only and
## the initialisation (takes place but) is ignored, otherwise.
db.newChain(db.newCliqueCfg.newClique, extraValidation)
## for the PoA handling. The argument `extraValidation` enables extra block
## chain validation if set `true`.
new result
result.initChain(db, db.newClique, extraValidation)
proc newChain*(db: BaseChainDB): Chain
    {.gcsafe, raises: [Defect,CatchableError].} =
  ## Constructor for a `Chain` descriptor where all sub-object descriptors
  ## are initialised with defaults. In particular, extra block chain
  ## validation is
  ## * enabled for PoA networks (such as Goerli)
  ## * disabled for non-PoA networks
  new result
  result.initChain(db, db.newClique, db.config.poaEngine)
# ------------------------------------------------------------------------------
# Public `AbstractChainDB` getter overload methods
@ -187,6 +214,25 @@ proc forkIds*(c: Chain): auto {.inline.} =
## Getter
c.forkIds
proc verifyFrom*(c: Chain): auto {.inline.} =
## Getter
c.verifyFrom
# ------------------------------------------------------------------------------
# Public `Chain` setters
# ------------------------------------------------------------------------------
proc `extraValidation=`*(c: Chain; extraValidation: bool) {.inline.} =
## Setter. If set `true`, the assignment value `extraValidation` enables
## extra block chain validation.
c.extraValidation = extraValidation
proc `verifyFrom=`*(c: Chain; verifyFrom: uint64) {.inline.} =
## Setter. The assignment value `verifyFrom` defines the first block where
## validation should start if the `Clique` field `extraValidation` was set
## `true`.
c.verifyFrom = verifyFrom.u256
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -10,7 +10,6 @@
import
../../db/db_chain,
../../utils,
../../vm_state,
../clique,
../executor,
@ -19,12 +18,13 @@ import
./chain_helpers,
chronicles,
eth/[common, trie/db],
nimcrypto,
stew/endians2,
stint
when not defined(release):
import ../../tracer
import
../../tracer,
../../utils
{.push raises: [Defect].}
@ -63,28 +63,28 @@ proc persistBlocksImpl(c: Chain; headers: openarray[BlockHeader];
if validationResult != ValidationResult.OK:
return validationResult
if c.extraValidation and not c.db.config.poaEngine:
let res = c.db.validateHeaderAndKinship(
header,
body,
checkSealOK = false, # TODO: how to checkseal from here
c.cacheByEpoch)
if res.isErr:
debug "block validation error",
msg = res.error
return ValidationResult.Error
if c.extraValidation and c.db.config.poaEngine:
var parent = if 0 < i: @[headers[i-1]] else: @[]
let rc = c.clique.cliqueVerify(header,parent)
if rc.isOK:
# mark it off so it woul not auto-restore previous state
c.clique.cliqueDispose(cliqueState)
if c.extraValidation and c.verifyFrom <= header.blockNumber:
if c.db.config.poaEngine:
var parent = if 0 < i: @[headers[i-1]] else: @[]
let rc = c.clique.cliqueVerify(header,parent)
if rc.isOK:
# mark it off so it would not auto-restore previous state
c.clique.cliqueDispose(cliqueState)
else:
debug "PoA header verification failed",
blockNumber = header.blockNumber,
msg = $rc.error
return ValidationResult.Error
else:
debug "PoA header verification failed",
blockNumber = header.blockNumber,
msg = $rc.error
return ValidationResult.Error
let res = c.db.validateHeaderAndKinship(
header,
body,
checkSealOK = false, # TODO: how to checkseal from here
c.cacheByEpoch)
if res.isErr:
debug "block validation error",
msg = res.error
return ValidationResult.Error
discard c.db.persistHeaderToDb(header)
discard c.db.persistTransactions(header.blockNumber, body.transactions)

View File

@ -20,6 +20,7 @@
import
std/[sequtils],
../db/db_chain,
./clique/[clique_cfg, clique_defs, clique_desc, clique_verify],
./clique/snapshot/[ballot, snapshot_desc],
eth/common,
@ -27,11 +28,12 @@ import
{.push raises: [Defect].}
# note that mining is unsupported, so the `clique_mining` module is ignored
# Note that mining is unsupported. Unused code ported from the Go
# implementation is stashed into the `clique_unused` module.
export
clique_cfg,
clique_defs,
clique_desc
clique_desc.Clique
type
CliqueState* = ##\
@ -44,21 +46,33 @@ type
# Public
# ------------------------------------------------------------------------------
proc newClique*(db: BaseChainDB): Clique =
  ## Create a Clique proof-of-authority consensus engine descriptor on top
  ## of the database `db`, using a default configuration. The engine starts
  ## in the `empty` state, i.e. with no authorised signers.
  db.newCliqueCfg.newClique
proc cliqueSave*(c: var Clique): CliqueState =
## Save current `Clique` state.
## Save current `Clique` state. This state snapshot saves the internal
## data that make up the list of authorised signers (see `cliqueSigners()`
## below.)
ok(c.snapshot)
proc cliqueRestore*(c: var Clique; state: var CliqueState) =
## Restore current `Clique` state from a saved snapshot.
##
## For the particular `state` argument this fuction is disabled with
## `cliqueDispose()`. So it can be savely handled in a `defer:` statement.
## `cliqueDispose()`. So it can be savely wrapped in a `defer:` statement.
## In transaction lingo, this would then be the rollback function.
if state.isOk:
c.snapshot = state.value
proc cliqueDispose*(c: var Clique; state: var CliqueState) =
## Disable the function `cliqueDispose()` for the particular `state`
## argument
## argument.
##
## In transaction lingo, this would be the commit function if
## `cliqueRestore()` was wrapped in a `defer:` statement.
state = err(CliqueState)
@ -67,12 +81,20 @@ proc cliqueVerify*(c: Clique; header: BlockHeader;
{.gcsafe, raises: [Defect,CatchableError].} =
## Check whether a header conforms to the consensus rules. The caller may
## optionally pass on a batch of parents (ascending order) to avoid looking
## those up from the database. This might be useful for concurrently
## verifying a batch of new headers.
## those up from the database. This function updates the list of authorised
## signers (see `cliqueSigners()` below.)
##
## On success, the latest authorised signers list is available via the
## fucntion `c.cliqueSigners()`. Otherwise, the latest error is also stored
## in the `Clique` descriptor
## in the `Clique` descriptor and is accessible as `c.failed`.
##
## This function is not transaction-save, that is the internal state of
## the authorised signers list has the state of the last update after a
## successful header verification. The hash of the failing header together
## with the error message is then accessible as `c.failed`.
##
## Use the directives `cliqueSave()`, `cliqueDispose()`, and/or
## `cliqueRestore()` for transaction.
var list = toSeq(parents)
c.cliqueVerifySeq(header, list)
@ -87,16 +109,21 @@ proc cliqueVerify*(c: Clique;
headers: openArray[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
## This function verifies a batch of headers checking each header for
## consensus rules conformance. The `headers` list is supposed to contain a
## chain of headers, i e. `headers[i]` is parent to `headers[i+1]`.
## consensus rules conformance (see also the other `cliqueVerify()` function
## instance.) The `headers` list is supposed to contain a chain of headers,
## i.e. `headers[i]` is parent to `headers[i+1]`.
##
## On success, the latest authorised signers list is available via the
## fucntion `c.cliqueSigners()`. Otherwise, the latest error is also stored
## in the `Clique` descriptor
## in the `Clique` descriptor and is accessible as `c.failed`.
##
## If there is an error, this error is also stored within the `Clique`
## descriptor and can be retrieved via `c.failed` along with the hash/ID of
## the failed block header.
## This function is not transaction-save, that is the internal state of
## the authorised signers list has the state of the last update after a
## successful header verification. The hash of the failing header together
## with the error message is then accessible as `c.failed`.
##
## Use the directives `cliqueSave()`, `cliqueDispose()`, and/or
## `cliqueRestore()` for transaction.
var list = toSeq(headers)
c.cliqueVerifySeq(list)
@ -105,10 +132,15 @@ proc cliqueSigners*(c: Clique): seq[EthAddress] {.inline.} =
## Retrieve the sorted list of authorized signers for the current state
## of the `Clique` descriptor.
##
## Note the the returned list is sorted on-the-fly each time this function
## is invoked.
## Note that the return argument list is sorted on-the-fly each time this
## function is invoked.
c.snapshot.ballot.authSigners
proc cliqueSignersLen*(c: Clique): int {.inline.} =
## Get the number of authorized signers for the current state of the
## `Clique` descriptor. The result is equivalent to `c.cliqueSigners.len`.
c.snapshot.ballot.authSignersLen
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -19,7 +19,6 @@
##
import
../../../utils,
../../../utils/lru_cache,
../clique_defs,
../clique_helpers,
@ -46,7 +45,7 @@ proc initEcRecover*(cache: var EcRecover) =
proc(header:BlockHeader): EcKey32 =
## If the signature's already cached, return that
# clique/clique.go(148): hash := header.Hash()
header.hash.data
header.blockHash.data
var toValue: LruValue[BlockHeader,EthAddress,CliqueError] =

View File

@ -26,10 +26,20 @@ import
./clique_defs,
./snapshot/[lru_snaps, snapshot_desc],
chronicles,
chronos,
eth/[common, keys, rlp],
stew/results
const
enableCliqueAsyncLock* = ##\
## Async locks are currently unused by `Clique` but were part of the Go
## reference implementation. The unused code fragment from the reference
## implementation are buried in the file `clique_unused.nim` and not used
## otherwise.
defined(clique_async_lock)
when enableCliqueAsyncLock:
import chronos
type
# clique/clique.go(142): type SignerFn func(signer [..]
CliqueSignerFn* = ## Hashes and signs the data to be signed by
@ -71,11 +81,22 @@ type
proposals: Proposals ##\
## Cu1rrent list of proposals we are pushing
asyncLock: AsyncLock ##\
## Protects the signer fields
applySnapsMinBacklog: bool ##\
## Epoch is a restart and sync point. Eip-225 requires that the epoch
## header contains the full list of currently authorised signers.
##
## If this flag is set `true`, then the `cliqueSnapshot()` function will
## walk back to the `epoch` header with at least `cfg.roThreshold` blocks
## apart from the current header. This is how it is done in the reference
## implementation.
##
## Leving the flag `false`, the assumption is that all the checkponts
## before have been vetted already regardless of the current branch. So
## the nearest `epoch` header is used.
fakeDiff: bool ##\
## Testing/debugging only: skip difficulty verifications
when enableCliqueAsyncLock:
asyncLock: AsyncLock ##\
## Protects the signer fields
{.push raises: [Defect].}
@ -90,11 +111,12 @@ logScope:
proc newClique*(cfg: CliqueCfg): Clique =
## Initialiser for Clique proof-of-authority consensus engine with the
## initial signers set to the ones provided by the user.
Clique(cfg: cfg,
recents: cfg.initLruSnaps,
snapshot: cfg.newSnapshot(BlockHeader()),
proposals: initTable[EthAddress,bool](),
asyncLock: newAsyncLock())
result = Clique(cfg: cfg,
recents: cfg.initLruSnaps,
snapshot: cfg.newSnapshot(BlockHeader()),
proposals: initTable[EthAddress,bool]())
when enableCliqueAsyncLock:
result.asyncLock = newAsyncLock()
# ------------------------------------------------------------------------------
# Public /pretty print
@ -139,9 +161,18 @@ proc db*(c: Clique): auto {.inline.} =
## Getter
c.cfg.db
proc fakeDiff*(c: Clique): auto {.inline.} =
## Getter
c.fakeDiff
proc applySnapsMinBacklog*(c: Clique): auto {.inline.} =
## Getter.
##
## If this flag is set `true`, then the `cliqueSnapshot()` function will
## walk back to the `epoch` header with at least `cfg.roThreshold` blocks
## apart from the current header. This is how it is done in the reference
## implementation.
##
## Setting the flag `false` which is the default, the assumption is that all
## the checkponts before have been vetted already regardless of the current
## branch. So the nearest `epoch` header is used.
c.applySnapsMinBacklog
# ------------------------------------------------------------------------------
# Public setters
@ -153,10 +184,6 @@ proc `db=`*(c: Clique; db: BaseChainDB) {.inline.} =
c.proposals = initTable[EthAddress,bool]()
c.recents = c.cfg.initLruSnaps
proc `fakeDiff=`*(c: Clique; debug: bool) =
## Setter
c.fakeDiff = debug
proc `snapshot=`*(c: Clique; snaps: Snapshot) =
## Setter
c.snapshot = snaps
@ -165,23 +192,28 @@ proc `failed=`*(c: Clique; failure: CliqueFailed) =
## Setter
c.failed = failure
proc `applySnapsMinBacklog=`*(c: Clique; value: bool) {.inline.} =
## Setter
c.applySnapsMinBacklog = value
# ------------------------------------------------------------------------------
# Public lock/unlock
# ------------------------------------------------------------------------------
proc lock*(c: Clique) {.inline, raises: [Defect,CatchableError].} =
## Lock descriptor
waitFor c.asyncLock.acquire
when enableCliqueAsyncLock:
proc lock*(c: Clique) {.inline, raises: [Defect,CatchableError].} =
## Lock descriptor
waitFor c.asyncLock.acquire
proc unLock*(c: Clique) {.inline, raises: [Defect,AsyncLockError].} =
## Unlock descriptor
c.asyncLock.release
proc unLock*(c: Clique) {.inline, raises: [Defect,AsyncLockError].} =
## Unlock descriptor
c.asyncLock.release
template doExclusively*(c: Clique; action: untyped) =
## Handy helper
c.lock
action
c.unlock
template doExclusively*(c: Clique; action: untyped) =
## Handy helper
c.lock
action
c.unlock
# ------------------------------------------------------------------------------
# End

View File

@ -22,7 +22,6 @@ import
std/[sequtils, strformat, strutils],
../../constants,
../../db/db_chain,
../../utils,
./clique_cfg,
./clique_defs,
./clique_desc,
@ -57,6 +56,7 @@ type
subChn: LocalSubChain ## chain[] sub-range
parents: seq[BlockHeader] ## explicit parents
{.push raises: [Defect].}
logScope:
@ -66,13 +66,20 @@ logScope:
# Private debugging functions, pretty printing
# ------------------------------------------------------------------------------
proc say(d: LocalSnaps; v: varargs[string,`$`]) {.inline.} =
# d.c.cfg.say v
proc say(d: var LocalSnaps; v: varargs[string,`$`]) {.inline.} =
discard
# uncomment body to enable
#d.c.cfg.say v
proc pp(q: openArray[BlockHeader]; n: int): string {.inline.} =
"[" & toSeq(q[0 ..< n]).mapIt("#" & $it.blockNumber).join(", ") & "]"
result = "["
if 5 < n:
result &= toSeq(q[0 .. 2]).mapIt("#" & $it.blockNumber).join(", ")
result &= " .." & $n & ".. #" & $q[n-1].blockNumber
else:
result &= toSeq(q[0 ..< n]).mapIt("#" & $it.blockNumber).join(", ")
result &= "]"
proc pp(b: BlockNumber, q: openArray[BlockHeader]; n: int): string {.inline.} =
"#" & $b & " + " & q.pp(n)
@ -124,13 +131,16 @@ proc isEpoch(d: var LocalSnaps;
proc isSnapshotPosition(d: var LocalSnaps;
number: BlockNumber): bool {.inline.} =
# clique/clique.go(394): if number == 0 || (number%c.config.Epoch [..]
if number.isZero:
# At the genesis => snapshot the initial state.
return true
if d.isEpoch(number) and d.c.cfg.roThreshold < d.trail.chain.len:
# Wwe have piled up more headers than allowed to be re-orged (chain
# reinit from a freezer), regard checkpoint trusted and snapshot it.
return true
if d.isEpoch(number):
if number.isZero:
# At the genesis => snapshot the initial state.
return true
if not d.c.applySnapsMinBacklog:
return true
if d.c.cfg.roThreshold < d.trail.chain.len:
# We have piled up more headers than allowed to be re-orged (chain
# reinit from a freezer), regard checkpoint trusted and snapshot it.
return true
# ------------------------------------------------------------------------------
# Private functions
@ -151,7 +161,9 @@ proc findSnapshot(d: var LocalSnaps): bool
parentsLen.dec
while true:
d.say "findSnapshot ", header.pp(d.parents, parentsLen)
#d.say "findSnapshot ", header.pp(d.parents, parentsLen),
# " trail=", d.trail.chain.pp
let number = header.blockNumber
# Check whether the snapshot was recently visited and cahed
@ -202,7 +214,7 @@ proc findSnapshot(d: var LocalSnaps): bool
parentsLen.dec
header = d.parents[parentsLen]
# clique/clique.go(416): if header.Hash() != hash [..]
if header.hash != hash:
if header.blockHash != hash:
d.trail.error = (errUnknownAncestor,"")
return false
@ -227,7 +239,9 @@ proc applyTrail(d: var LocalSnaps): CliqueOkResult
let rc = d.trail.snaps.snapshotApplySeq(
d.trail.chain, d.subChn.top-1, d.subChn.first)
if rc.isErr:
d.say "applyTrail snaps=#",d.trail.snaps.blockNumber, " err=",$rc.error
return err(rc.error)
d.say "applyTrail snaps=#", d.trail.snaps.blockNumber
# If we've generated a new checkpoint snapshot, save to disk
if d.isCheckPoint(d.trail.snaps.blockNumber):
@ -249,7 +263,7 @@ proc updateSnapshot(d: var LocalSnaps): SnapshotResult
## This function was expects thet the LRU cache already has a slot allocated
## for the snapshot having run `getLruSnaps()`.
d.say "updateSnapshot ", d.start.header.blockNumber.pp(d.parents)
d.say "updateSnapshot begin ", d.start.header.blockNumber.pp(d.parents)
# Search for previous snapshots
if not d.findSnapshot:
@ -294,6 +308,10 @@ proc updateSnapshot(d: var LocalSnaps): SnapshotResult
# before checking the LRU cache first -- lol
return err((errSetLruSnaps, &"block #{d.trail.snaps.blockNumber}"))
if 1 < d.trail.chain.len:
d.say "updateSnapshot ok #", d.trail.snaps.blockNumber,
" trail.len=", d.trail.chain.len
ok(d.trail.snaps)
# ------------------------------------------------------------------------------
@ -313,7 +331,7 @@ proc cliqueSnapshotSeq*(c: Clique; header: Blockheader;
## If this function is successful, the compiled `Snapshot` will also be
## stored in the `Clique` descriptor which can be retrieved later
## via `c.snapshot`.
let rc1 = c.recents.getLruSnaps(header.hash)
let rc1 = c.recents.getLruSnaps(header.blockHash)
if rc1.isOk:
c.snapshot = rc1.value
return ok(rc1.value)
@ -326,7 +344,7 @@ proc cliqueSnapshotSeq*(c: Clique; header: Blockheader;
parents: parents,
start: LocalPivot(
header: header,
hash: header.hash))
hash: header.blockHash))
let rc2 = snaps.updateSnapshot
if rc2.isOk:

View File

@ -30,14 +30,16 @@ import
./clique_desc,
./clique_helpers,
./clique_snapshot,
./clique_signers,
./clique_verify,
./snapshot/[snapshot_desc, snapshot_misc],
./snapshot/[ballot, snapshot_desc],
chronicles,
chronos,
eth/[common, keys, rlp],
nimcrypto
when not enableCliqueAsyncLock:
{.fatal: "Async locks must be enabled in clique_desc, try: -d:clique_async_lock"}
{.push raises: [Defect].}
logScope:
@ -57,17 +59,56 @@ template syncExceptionWrap(action: untyped) =
except:
raise (ref CliqueSyncDefect)(msg: getCurrentException().msg)
# clique/clique.go(217): func (c *Clique) VerifyHeader(chain [..]
proc verifyHeader(c: Clique; header: BlockHeader): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
## See `clique.cliqueVerify()`
var blind: seq[BlockHeader]
c.cliqueVerifySeq(header, blind)
proc verifyHeader(c: Clique; header: BlockHeader;
parents: openArray[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
## See `clique.cliqueVerify()`
var list = toSeq(parents)
c.cliqueVerifySeq(header, list)
proc isValidVote(s: Snapshot; a: EthAddress; authorize: bool): bool {.inline.}=
s.ballot.isValidVote(a, authorize)
proc isSigner*(s: Snapshot; address: EthAddress): bool =
## See `clique_verify.isSigner()`
s.ballot.isAuthSigner(address)
# clique/snapshot.go(319): func (s *Snapshot) inturn(number [..]
proc inTurn*(s: Snapshot; number: BlockNumber, signer: EthAddress): bool =
## See `clique_verify.inTurn()`
let ascSignersList = s.ballot.authSigners
for offset in 0 ..< ascSignersList.len:
if ascSignersList[offset] == signer:
return (number mod ascSignersList.len.u256) == offset.u256
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
# clique/clique.go(681): func calcDifficulty(snap [..]
proc calcDifficulty(snap: Snapshot; signer: EthAddress): DifficultyInt =
if snap.inTurn(snap.blockNumber + 1, signer):
proc calcDifficulty(s: Snapshot; signer: EthAddress): DifficultyInt =
if s.inTurn(s.blockNumber + 1, signer):
DIFF_INTURN
else:
DIFF_NOTURN
proc recentBlockNumber*(s: Snapshot;
                        a: EthAddress): Result[BlockNumber,void] {.inline.} =
  ## Look up the block number at which address `a` most recently signed,
  ## according to the snapshot's recent-signers table. Returns `err()` when
  ## `a` is not among the recent signers.
  for (blkNum, signerAddr) in s.recents.pairs:
    if signerAddr == a:
      return ok(blkNum)
  err()
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
@ -84,17 +125,6 @@ proc author*(c: Clique; header: BlockHeader): Result[EthAddress,CliqueError]
c.cfg.ecRecover(header)
# clique/clique.go(217): func (c *Clique) VerifyHeader(chain [..]
proc verifyHeader*(c: Clique; header: BlockHeader): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
## For the Consensus Engine, `verifyHeader()` checks whether a header
## conforms to the consensus rules of a given engine. Verifying the seal
## may be done optionally here, or explicitly via the `verifySeal()` method.
##
## This implementation checks whether a header conforms to the consensus
## rules.
c.cliqueVerify(header)
# clique/clique.go(224): func (c *Clique) VerifyHeader(chain [..]
proc verifyHeaders*(c: Clique; headers: openArray[BlockHeader]):
Future[seq[CliqueOkResult]] {.async,gcsafe.} =
@ -114,7 +144,7 @@ proc verifyHeaders*(c: Clique; headers: openArray[BlockHeader]):
if isStopRequest:
result.add cliqueResultErr((errCliqueStopped,""))
break
result.add c.cliqueVerify(headers[n], headers[0 ..< n])
result.add c.verifyHeader(headers[n], headers[0 ..< n])
c.doExclusively:
c.stopVHeaderReq = false
@ -179,7 +209,7 @@ proc prepare*(c: Clique; header: var BlockHeader): CliqueOkResult
# Ensure the extra data has all its components
header.extraData.setLen(EXTRA_VANITY)
if (header.blockNumber mod c.cfg.epoch) == 0:
header.extraData.add c.cliqueSigners.mapIt(toSeq(it)).concat
header.extraData.add c.snapshot.ballot.authSigners.mapIt(toSeq(it)).concat
header.extraData.add 0.byte.repeat(EXTRA_SEAL)
# Mix digest is reserved for now, set to empty
@ -310,7 +340,7 @@ proc seal*(c: Clique; ethBlock: EthBlock):
return err((errUnauthorizedSigner,""))
# If we're amongst the recent signers, wait for the next block
let seen = c.snapshot.recent(signer)
let seen = c.snapshot.recentBlockNumber(signer)
if seen.isOk:
# Signer is among recents, only wait if the current block does not
# shift it out

View File

@ -21,7 +21,7 @@
##
import
std/[sequtils, strformat, tables, times],
std/[sequtils, strformat, strutils, tables, times],
../../chain_config,
../../constants,
../../db/db_chain,
@ -32,7 +32,7 @@ import
./clique_desc,
./clique_helpers,
./clique_snapshot,
./snapshot/[ballot, snapshot_desc, snapshot_misc],
./snapshot/[ballot, snapshot_desc],
chronicles,
eth/common,
stew/results
@ -42,6 +42,19 @@ import
logScope:
topics = "clique PoA verify header"
# ------------------------------------------------------------------------------
# Private helpers, pretty printing
# ------------------------------------------------------------------------------
proc say(c: Clique; v: varargs[string,`$`]) {.inline.} =
  ## Debug/tracing stub: currently a no-op.  To enable console tracing,
  ## uncomment the forwarding line below (it sends the stringified
  ## arguments to the Clique configuration's `say` sink).
  discard
  # uncomment body to enable
  #c.cfg.say v
proc pp(c: Clique; a: AddressHistory): string
    {.inline, raises: [Defect,CatchableError].} =
  ## Pretty print the address history as "(#blockNumber:address ...)",
  ## one entry per table pair, for debug logging.
  var entries: seq[string]
  for blockNum, addr in a.pairs:
    entries.add &"#{blockNum}:{c.pp(addr)}"
  "(" & entries.join(" ") & ")"
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
@ -59,7 +72,7 @@ proc verifyForkHashes(c: Clique; header: BlockHeader): CliqueOkResult
# If the homestead reprice hash is set, validate it
let
eip150 = c.db.config.eip150Hash
hash = header.hash
hash = header.blockHash
if eip150 == hash:
return ok()
@ -67,6 +80,34 @@ proc verifyForkHashes(c: Clique; header: BlockHeader): CliqueOkResult
err((errCliqueGasRepriceFork,
&"Homestead gas reprice fork: have {eip150}, want {hash}"))
proc signersThreshold*(s: Snapshot): int {.inline.} =
  ## Minimum number of authorised signers needed.
  ## Thin forwarder to the snapshot's ballot box (`authSignersThreshold`).
  s.ballot.authSignersThreshold
proc recentBlockNumber*(s: Snapshot;
                        a: EthAddress): Result[BlockNumber,void] {.inline.} =
  ## Linear scan of the recent-signers table: return the block number at
  ## which address `a` appears, or an error result when `a` is not listed.
  for blockNum, signerAddr in s.recents.pairs:
    if signerAddr == a:
      return ok(blockNum)
  err()
proc isSigner*(s: Snapshot; address: EthAddress): bool {.inline.} =
  ## Checks whether argument `address` is in the authorised signers list
  ## (forwards to the ballot box).
  s.ballot.isAuthSigner(address)
# clique/snapshot.go(319): func (s *Snapshot) inturn(number [..]
proc inTurn*(s: Snapshot; number: BlockNumber, signer: EthAddress): bool =
  ## Returns `true` if `signer` is the in-turn signer at block height
  ## `number`.  Returns `false` when out of turn, or (implicitly) when
  ## `signer` is not in the authorised list at all.
  let signersAsc = s.ballot.authSigners
  var idx = 0
  for candidate in signersAsc:
    if candidate == signer:
      # in-turn slot rotates round-robin through the sorted signer list
      return (number mod signersAsc.len.u256) == idx.u256
    idx.inc
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
@ -90,28 +131,32 @@ proc verifySeal(c: Clique; header: BlockHeader): CliqueOkResult
doAssert snapshot.blockHash == header.parentHash
# Resolve the authorization key and check against signers
let signer = c.cfg.ecRecover(header)
let signer = c.cfg.ecRecover(header)
if signer.isErr:
return err(signer.error)
if not snapshot.isSigner(signer.value):
return err((errUnauthorizedSigner,""))
let seen = snapshot.recent(signer.value)
c.say "verifySeal signer=", c.pp(signer.value), " ", c.pp(snapshot.recents)
let seen = snapshot.recentBlockNumber(signer.value)
if seen.isOk:
c.say "verifySeal signer=#", seen.value,
" header=#", header.blockNumber,
" threshold=#", snapshot.signersThreshold.u256
# Signer is among recents, only fail if the current block does not
# shift it out
# clique/clique.go(486): if limit := uint64(len(snap.Signers)/2 + 1); [..]
if header.blockNumber - snapshot.signersThreshold.u256 < seen.value:
return err((errRecentlySigned,""))
# Ensure that the difficulty corresponds to the turn-ness of the signer
if not c.fakeDiff:
if snapshot.inTurn(header.blockNumber, signer.value):
if header.difficulty != DIFF_INTURN:
return err((errWrongDifficulty,""))
else:
if header.difficulty != DIFF_NOTURN:
return err((errWrongDifficulty,""))
if snapshot.inTurn(header.blockNumber, signer.value):
if header.difficulty != DIFF_INTURN:
return err((errWrongDifficulty,"INTURN expected"))
else:
if header.difficulty != DIFF_NOTURN:
return err((errWrongDifficulty,"NOTURN expected"))
ok()
@ -137,7 +182,7 @@ proc verifyCascadingFields(c: Clique; header: BlockHeader;
return err((errUnknownAncestor,""))
if parent.blockNumber != header.blockNumber-1 or
parent.hash != header.parentHash:
parent.blockHash != header.parentHash:
return err((errUnknownAncestor,""))
# clique/clique.go(330): if parent.Time+c.config.Period > header.Time {
@ -169,7 +214,10 @@ proc verifyCascadingFields(c: Clique; header: BlockHeader;
# If the block is a checkpoint block, verify the signer list
if (header.blockNumber mod c.cfg.epoch.u256) == 0:
if c.snapshot.ballot.authSigners != header.extraData.extraDataAddresses:
var addrList = header.extraData.extraDataAddresses
# not using `authSigners()` here as it is too slow
if c.snapshot.ballot.authSignersLen != addrList.len or
not c.snapshot.ballot.isAuthSigner(addrList):
return err((errMismatchingCheckpointSigners,""))
# All basic checks passed, verify the seal and return
@ -253,20 +301,20 @@ proc cliqueVerifyImpl*(c: Clique; header: BlockHeader;
# Check header fields independent of parent blocks
let rc = c.verifyHeaderFields(header)
if rc.isErr:
c.failed = (header.hash, rc.error)
c.failed = (header.blockHash, rc.error)
return err(rc.error)
block:
# If all checks passed, validate any special fields for hard forks
let rc = c.verifyForkHashes(header)
if rc.isErr:
c.failed = (header.hash, rc.error)
c.failed = (header.blockHash, rc.error)
return err(rc.error)
# All basic checks passed, verify cascading fields
result = c.verifyCascadingFields(header, parents)
if result.isErr:
c.failed = (header.hash, result.error)
c.failed = (header.blockHash, result.error)
# ------------------------------------------------------------------------------
# Public function

View File

@ -99,6 +99,10 @@ proc authSigners*(t: var Ballot): seq[EthAddress] =
## Sorted ascending list of authorised signer addresses
toSeq(t.authSig.keys).sorted(EthAscending)
proc authSignersLen*(t: var Ballot): int =
  ## Returns the number of currently known authorised signers.
  ## Cheaper than `authSigners().len`, which materialises and sorts the
  ## full key sequence first.
  t.authSig.len
proc isAuthSignersListShrunk*(t: var Ballot): bool =
## Check whether the authorised signers list was shrunk recently after
## appying `addVote()`
@ -116,6 +120,16 @@ proc authSignersThreshold*(t: var Ballot): int =
# Public functions
# ------------------------------------------------------------------------------
proc isAuthSigner*(t: var Ballot; addresses: var seq[EthAddress]): bool =
  ## Check whether all `addresses` entries are authorised signers.
  ##
  ## Prefer this function over comparing against `authSigners()`: building
  ## the sorted list there costs `O(n log n)`, while this check performs
  ## one table lookup per entry, i.e. `O(n)` expected time.
  for a in addresses:
    if a notin t.authSig:
      return false
  true
proc isAuthSigner*(t: var Ballot; address: EthAddress): bool =
  ## Check whether `address` is an authorised signer (single table lookup).
  address in t.authSig

View File

@ -19,7 +19,7 @@
##
import
std/[tables, times],
std/[algorithm, sequtils, strutils, tables, times],
../clique_cfg,
../clique_defs,
./ballot,
@ -34,12 +34,28 @@ logScope:
topics = "clique PoA snapshot-apply"
# ------------------------------------------------------------------------------
# Private functions needed to support RLP conversion
# Private helpers, pretty printing
# ------------------------------------------------------------------------------
proc say(s: Snapshot; v: varargs[string,`$`]) {.inline.} =
# s.cfg.say v
discard
# uncomment body to enable
s.cfg.say v
proc pp(a: openArray[BlockHeader]; first, last: int): string {.inline.} =
  ## Debug helper: render the block numbers of a header sub-sequence as
  ## "[#n, #n, ...]".  Runs longer than five entries are abbreviated to
  ## the leading three entries, the run length, and the final entry.
  result = "["
  var
    # NOTE(review): `last - first` excludes the `last` element itself in
    # the forward case -- confirm this off-by-one is intended
    n = last - first
    q = toSeq(a)
  if last < first:
    # reversed direction requested: take the slice back-to-front
    q = a.reversed(last, first)
    n = q.len
  if 5 < n:
    result &= toSeq(q[0 .. 2]).mapIt("#" & $it.blockNumber).join(", ")
    result &= " .." & $n & ".. #" & $q[n-1].blockNumber
  else:
    # NOTE(review): forward case slices from index 0 rather than `first`;
    # presumably callers always pass first == 0 here -- verify
    result &= toSeq(q[0 ..< n]).mapIt("#" & $it.blockNumber).join(", ")
  result &= "]"
# ------------------------------------------------------------------------------
# Private functions
@ -80,7 +96,7 @@ proc snapshotApplySeq*(s: Snapshot; headers: var seq[BlockHeader],
## Initialises an authorization snapshot `snap` by applying the `headers`
## to the argument snapshot desciptor `s`.
#s.say "applySnapshot ", s.pp(headers).join("\n" & ' '.repeat(18))
s.say "applySnapshot begin #", s.blockNumber, " + ", headers.pp(first, last)
# Sanity check that the headers can be applied
if headers[first].blockNumber != s.blockNumber + 1:
@ -126,7 +142,7 @@ proc snapshotApplySeq*(s: Snapshot; headers: var seq[BlockHeader],
# Resolve the authorization key and check against signers
let signer = ? s.cfg.ecRecover(header)
s.say "applySnapshot signer=", s.pp(signer)
#s.say "applySnapshot signer=", s.pp(signer)
if not s.ballot.isAuthSigner(signer):
s.say "applySnapshot signer not authorised => fail ", s.pp(29)
@ -135,6 +151,7 @@ proc snapshotApplySeq*(s: Snapshot; headers: var seq[BlockHeader],
for recent in s.recents.values:
if recent == signer:
s.say "applySnapshot signer recently seen ", s.pp(signer)
echo "+++ applySnapshot #", header.blockNumber, " err=errRecentlySigned"
return err((errRecentlySigned,""))
s.recents[number] = signer
@ -153,7 +170,7 @@ proc snapshotApplySeq*(s: Snapshot; headers: var seq[BlockHeader],
signer: signer,
blockNumber: number,
authorize: authOk)
s.say "applySnapshot calling addVote ", s.pp(vote)
#s.say "applySnapshot calling addVote ", s.pp(vote)
# clique/snapshot.go(253): if snap.cast(header.Coinbase, authorize) {
s.ballot.addVote(vote)
@ -168,7 +185,7 @@ proc snapshotApplySeq*(s: Snapshot; headers: var seq[BlockHeader],
") from recents={", s.pp(s.recents), "}"
s.recents.del(item)
s.say "applySnapshot state=", s.pp(25)
#s.say "applySnapshot state=", s.pp(25)
# If we're taking too much time (ecrecover), notify the user once a while
if s.cfg.logInterval < getTime() - logged:
@ -185,9 +202,12 @@ proc snapshotApplySeq*(s: Snapshot; headers: var seq[BlockHeader],
elapsed = sinceStart
# clique/snapshot.go(303): snap.Number += uint64(len(headers))
s.blockNumber = s.blockNumber + headers.len.u256
doAssert headers[last].blockNumber == s.blockNumber+(1+(last-first).abs).u256
s.blockNumber = headers[last].blockNumber
s.blockHash = headers[last].blockHash
result = ok()
s.say "applySnapshot ok"
ok()
proc snapshotApply*(s: Snapshot; headers: var seq[BlockHeader]): CliqueOkResult

View File

@ -21,7 +21,6 @@
import
std/[algorithm, sequtils, strformat, strutils, tables],
../../../db/storage_types,
../../../utils,
../clique_cfg,
../clique_defs,
../clique_helpers,
@ -35,7 +34,7 @@ type
## Snapshot/error result type
Result[Snapshot,CliqueError]
AddressHistory = Table[BlockNumber,EthAddress]
AddressHistory* = Table[BlockNumber,EthAddress]
SnapshotData* = object
blockNumber: BlockNumber ## block number where snapshot was created on
@ -115,6 +114,7 @@ proc getPrettyPrinters*(s: Snapshot): var PrettyPrinters =
## Mixin for pretty printers
s.cfg.prettyPrint
proc pp*(s: Snapshot; h: var AddressHistory): string {.gcsafe.} =
ppExceptionWrap:
toSeq(h.keys)
@ -159,7 +159,7 @@ proc newSnapshot*(cfg: CliqueCfg; header: BlockHeader): Snapshot =
## `extra data` field of the header.
new result
let signers = header.extraData.extraDataAddresses
result.initSnapshot(cfg, header.blockNumber, header.hash, signers)
result.initSnapshot(cfg, header.blockNumber, header.blockHash, signers)
# ------------------------------------------------------------------------------
# Public getters

View File

@ -1,75 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Miscellaneous Snapshot Functions for Clique PoA Consensus Protocol
## ==================================================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
std/[tables],
./ballot,
./snapshot_desc,
chronicles,
eth/[common, rlp],
stew/results
{.push raises: [Defect].}
logScope:
topics = "clique PoA snapshot-misc"
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc signersThreshold*(s: Snapshot): int {.inline.} =
## Minimum number of authorised signers needed.
s.ballot.authSignersThreshold
#proc signers*(s: Snapshot): seq[EthAddress] {.inline.} =
# ## Retrieves the sorted list of authorized signers
# s.ballot.authSigners
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc isValidVote*(s: Snapshot; address: EthAddress; authorize: bool): bool =
## Returns `true` if voting makes sense, at all.
s.ballot.isValidVote(address, authorize)
proc recent*(s: Snapshot; address: EthAddress): Result[BlockNumber,void] =
## Return `BlockNumber` for `address` argument (if any)
for (number,recent) in s.recents.pairs:
if recent == address:
return ok(number)
return err()
proc isSigner*(s: Snapshot; address: EthAddress): bool =
## Checks whether argukment ``address` is in signers list
s.ballot.isAuthSigner(address)
# clique/snapshot.go(319): func (s *Snapshot) inturn(number [..]
proc inTurn*(s: Snapshot; number: BlockNumber, signer: EthAddress): bool =
## Returns `true` if a signer at a given block height is in-turn or not.
let ascSignersList = s.ballot.authSigners
for offset in 0 ..< ascSignersList.len:
if ascSignersList[offset] == signer:
return (number mod ascSignersList.len.u256) == offset.u256
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -20,7 +20,6 @@ import
./calculate_reward,
./executor_helpers,
./process_transaction,
./update_poastate,
chronicles,
eth/[common, trie/db],
nimcrypto
@ -118,10 +117,11 @@ proc procBlkEpilogue(vmState: BaseVMState; dbTx: DbTransaction;
# Public functions
# ------------------------------------------------------------------------------
proc processBlock*(vmState: BaseVMState;
header: BlockHeader, body: BlockBody): ValidationResult
proc processBlockNotPoA*(vmState: BaseVMState;
header: BlockHeader, body: BlockBody): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
## Processes `(header,body)` pair for a non-PoA network, only
## Processes `(header,body)` pair for a non-PoA network, only. This function
## will fail when applied to a PoA network like `Goerli`.
if vmState.chainDB.config.poaEngine:
# PoA consensus engine unsupported, see the other version of
# processBlock() below
@ -151,13 +151,20 @@ proc processBlock*(vmState: BaseVMState; poa: Clique;
header: BlockHeader, body: BlockBody): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
## Generalised function to processes `(header,body)` pair for any network,
## regardless of PoA or not
## regardless of PoA or not. Currently there is no mining support so this
## function is mostly the same as `processBlockNotPoA()`.
##
## Rather than calculating the PoA state change here, it is done with the
## verification in the `chain/persist_blocks.persistBlocks()` method. So
## the `poa` descriptor is currently unused and only provided for later
## implementations (but can be savely removed, as well.)
# Process PoA state transition first so there is no need to re-wind on error.
if vmState.chainDB.config.poaEngine and
not poa.updatePoaState(header, body):
debug "PoA update failed"
return ValidationResult.Error
# # Process PoA state transition first so there is no need to re-wind on
# # an error.
# if vmState.chainDB.config.poaEngine and
# not poa.updatePoaState(header, body):
# debug "PoA update failed"
# return ValidationResult.Error
var dbTx = vmState.chainDB.db.beginTransaction()
defer: dbTx.dispose()

View File

@ -1,19 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
../clique,
eth/[common]
proc updatePoaState*(poa: Clique; header: BlockHeader; body: BlockBody): bool =
true
# End

View File

@ -210,7 +210,7 @@ proc validateUncles(chainDB: BaseChainDB; header: BlockHeader;
# Check for duplicates
var uncleSet = initHashSet[Hash256]()
for uncle in uncles:
let uncleHash = uncle.hash
let uncleHash = uncle.blockHash
if uncleHash in uncleSet:
return err("Block contains duplicate uncles")
else:
@ -219,10 +219,10 @@ proc validateUncles(chainDB: BaseChainDB; header: BlockHeader;
let recentAncestorHashes = chainDB.getAncestorsHashes(
MAX_UNCLE_DEPTH + 1, header)
let recentUncleHashes = chainDB.getUncleHashes(recentAncestorHashes)
let blockHash = header.hash
let blockHash = header.blockHash
for uncle in uncles:
let uncleHash = uncle.hash
let uncleHash = uncle.blockHash
if uncleHash == blockHash:
return err("Uncle has same hash as block")

View File

@ -174,7 +174,7 @@ proc dumpBlockState*(db: BaseChainDB, header: BlockHeader, body: BlockBody, dump
for idx, uncle in body.uncles:
before.captureAccount(stateBefore, uncle.coinbase, uncleName & $idx)
discard vmState.processBlock(header, body)
discard vmState.processBlockNotPoA(header, body)
var stateAfter = vmState.accountDb

View File

@ -23,7 +23,7 @@ proc executeBlock(blockEnv: JsonNode, memoryDB: TrieDatabaseRef, blockNumber: Ui
defer: transaction.dispose()
let
vmState = newBaseVMState(parent.stateRoot, header, chainDB)
validationResult = vmState.processBlock(header, body)
validationResult = vmState.processBlockNotPoA(header, body)
if validationResult != ValidationResult.OK:
error "block validation error", validationResult

View File

@ -27,7 +27,7 @@ proc dumpDebug(chainDB: BaseChainDB, blockNumber: Uint256) =
vmState = newBaseVMState(parent.stateRoot, header, captureChainDB)
captureChainDB.setHead(parent, true)
discard vmState.processBlock(header, body)
discard vmState.processBlockNotPoA(header, body)
transaction.rollback()
dumpDebuggingMetaData(captureChainDB, header, body, vmState, false)

View File

@ -96,7 +96,7 @@ proc huntProblematicBlock(blockNumber: Uint256): ValidationResult =
defer: transaction.dispose()
let
vmState = newHunterVMState(parentBlock.header.stateRoot, thisBlock.header, chainDB)
validationResult = vmState.processBlock(thisBlock.header, thisBlock.body)
validationResult = vmState.processBlockNotPoA(thisBlock.header, thisBlock.body)
if validationResult != ValidationResult.OK:
transaction.rollback()

View File

@ -31,7 +31,7 @@ proc validateBlock(chainDB: BaseChainDB, blockNumber: BlockNumber): BlockNumber
let
vmState = newBaseVMState(parent.stateRoot, headers[i], chainDB)
validationResult = vmState.processBlock(headers[i], bodies[i])
validationResult = vmState.processBlockNotPoA(headers[i], bodies[i])
if validationResult != ValidationResult.OK:
error "block validation error", validationResult, blockNumber = blockNumber + i.u256

View File

@ -247,7 +247,7 @@ proc importBlock(tester: var Tester, chainDB: BaseChainDB,
transactions: result.txs,
uncles: result.uncles
)
let res = tester.vmState.processBlock(result.header, body)
let res = tester.vmState.processBlockNotPoA(result.header, body)
if res == ValidationResult.Error:
if not (tb.hasException or (not tb.goodBlock)):
raise newException(ValidationError, "process block validation")

View File

@ -9,7 +9,7 @@
# according to those terms.
import
std/[algorithm, os, sequtils, strformat, strutils],
std/[algorithm, os, sequtils, strformat, strutils, times],
../nimbus/db/db_chain,
../nimbus/p2p/[chain, clique, clique/clique_snapshot],
./test_clique/[pool, undump],
@ -17,8 +17,9 @@ import
stint,
unittest2
let
const
goerliCapture = "test_clique" / "goerli51840.txt.gz"
groupReplayTransactions = 7
# ------------------------------------------------------------------------------
# Helpers
proc getBlockHeader(ap: TesterPool; number: BlockNumber): BlockHeader =
  ## Convenience wrapper around `db/db_chain.getBlockHeader()`; aborts
  ## (doAssert) when the header for `number` is not in the database.
  let found = ap.db.getBlockHeader(number, result)
  doAssert found
proc ppSecs(elapsed: Duration): string =
  ## Pretty print `elapsed` as decimal seconds, e.g. "12.34s".  The
  ## fractional part is rounded to centiseconds and omitted when the
  ## duration is a whole number of seconds.
  var secs = elapsed.inSeconds
  let ns = elapsed.inNanoseconds mod 1_000_000_000
  var frac = ""
  if ns != 0:
    # round the sub-second part to the nearest centisecond
    let ds = (ns + 5_000_000i64) div 10_000_000i64
    if ds == 100:
      # rounding carried into a full second; the previous version printed
      # a bogus three-digit fraction like "1.100s" here -- carry instead
      secs.inc
    else:
      frac = &".{ds:02}"
  result = $secs & frac & "s"
proc ppRow(elapsed: Duration): string =
  ## Render a crude bar-chart row: one `x` per second of `elapsed`,
  ## rounded to the nearest second.
  let nSecs = (elapsed.inMilliseconds + 500) div 1000
  for _ in 0 ..< nSecs:
    result.add 'x'
# ------------------------------------------------------------------------------
# Test Runners
# ------------------------------------------------------------------------------
@ -63,7 +77,7 @@ proc runCliqueSnapshot(noisy = true; postProcessOk = false;
# Assemble a chain of headers from the cast votes
# see clique/snapshot_test.go(407): config := *params.TestChainConfig
pool
.resetVoterChain(tt.signers, tt.epoch)
.resetVoterChain(tt.signers, tt.epoch, tt.runBack)
# see clique/snapshot_test.go(425): for j, block := range blocks {
.appendVoter(tt.votes)
.commitVoterChain(postProcessOk)
@ -91,21 +105,24 @@ proc runCliqueSnapshot(noisy = true; postProcessOk = false; testId: int) =
noisy.runCliqueSnapshot(postProcessOk, testIds = {testId})
proc runGoerliReplay(noisy = true; dir = "tests"; stopAfterBlock = 0u64) =
proc runGoerliReplay(noisy = true; showElapsed = false,
dir = "tests"; captureFile = goerliCapture,
startAtBlock = 0u64; stopAfterBlock = 0u64) =
var
pool = newVoterPool()
cache: array[7,(seq[BlockHeader],seq[BlockBody])]
cache: array[groupReplayTransactions,(seq[BlockHeader],seq[BlockBody])]
cInx = 0
stoppedOk = false
pool.debug = noisy
pool.verifyFrom = startAtBlock
let stopThreshold = if stopAfterBlock == 0u64: uint64.high.u256
else: stopAfterBlock.u256
suite "Replay Goerli Chain":
for w in (dir / goerliCapture).undumpNextGroup:
for w in (dir / captureFile).undumpNextGroup:
if w[0][0].blockNumber == 0.u256:
# Verify Genesis
@ -127,29 +144,53 @@ proc runGoerliReplay(noisy = true; dir = "tests"; stopAfterBlock = 0u64) =
let
first = cache[0][0][0].blockNumber
last = cache[^1][0][^1].blockNumber
test &"Goerli Blocks #{first}..#{last} ({cache.len} transactions)":
blkRange = &"#{first}..#{last}"
info = if first <= startAtBlock.u256 and startAtBlock.u256 <= last:
&", verification #{startAtBlock}.."
else:
""
test &"Goerli Blocks {blkRange} ({cache.len} transactions{info})":
let start = getTime()
for (headers,bodies) in cache:
let addedPersistBlocks = pool.chain.persistBlocks(headers,bodies)
check addedPersistBlocks == ValidationResult.Ok
if addedPersistBlocks != ValidationResult.Ok: return
if showElapsed and startAtBlock.u256 <= last:
let
elpd = getTime() - start
info = &"{elpd.ppSecs:>7} {pool.cliqueSignersLen} {elpd.ppRow}"
echo &"\n elapsed {blkRange:<17} {info}"
# Rest from cache
if 0 < cInx:
let
first = cache[0][0][0].blockNumber
last = cache[cInx-1][0][^1].blockNumber
test &"Goerli Blocks #{first}..#{last} ({cInx} transactions)":
blkRange = &"#{first}..#{last}"
info = if first <= startAtBlock.u256 and startAtBlock.u256 <= last:
&", Verification #{startAtBlock}.."
else:
""
test &"Goerli Blocks {blkRange} ({cache.len} transactions{info})":
let start = getTime()
for (headers,bodies) in cache:
let addedPersistBlocks = pool.chain.persistBlocks(headers,bodies)
check addedPersistBlocks == ValidationResult.Ok
if addedPersistBlocks != ValidationResult.Ok: return
if showElapsed and startAtBlock.u256 <= last:
let
elpsd = getTime() - start
info = &"{elpsd.ppSecs:>7} {pool.cliqueSignersLen} {elpsd.ppRow}"
echo &"\n elapsed {blkRange:<17} {info}"
if stoppedOk:
test &"Runner stopped after reaching #{stopThreshold}":
discard
proc runGoerliBaybySteps(noisy = true; dir = "tests"; stopAfterBlock = 0u64) =
proc runGoerliBaybySteps(noisy = true;
dir = "tests"; captureFile = goerliCapture,
stopAfterBlock = 0u64) =
var
pool = newVoterPool()
stoppedOk = false
@ -161,7 +202,7 @@ proc runGoerliBaybySteps(noisy = true; dir = "tests"; stopAfterBlock = 0u64) =
suite "Replay Goerli Chain Transactions Single Blockwise":
for w in (dir / goerliCapture).undumpNextGroup:
for w in (dir / captureFile).undumpNextGroup:
if stoppedOk:
break
if w[0][0].blockNumber == 0.u256:
@ -192,21 +233,37 @@ proc runGoerliBaybySteps(noisy = true; dir = "tests"; stopAfterBlock = 0u64) =
# Main function(s)
# ------------------------------------------------------------------------------
let
skipIDs = {999}
proc cliqueMain*(noisy = defined(debug)) =
noisy.runCliqueSnapshot(true)
noisy.runCliqueSnapshot(false, skipIDs = skipIDs)
noisy.runCliqueSnapshot(false)
noisy.runGoerliBaybySteps
noisy.runGoerliReplay
noisy.runGoerliReplay(startAtBlock = 31100u64)
when isMainModule:
let
skipIDs = {999}
# A new capture file can be generated using
# `test_clique/indiump.dumpGroupNl()`
# placed at the end of
# `p2p/chain/persist_blocks.persistBlocks()`.
captureFile = "test_clique" / "goerli504192.txt.gz"
#captureFile = "test_clique" / "dump-stream.out.gz"
proc goerliReplay(noisy = true; showElapsed = true;
dir = "."; captureFile = captureFile;
startAtBlock = 0u64; stopAfterBlock = 0u64) =
runGoerliReplay(
noisy = noisy, showElapsed = showElapsed,
dir = dir, captureFile = captureFile,
startAtBlock = startAtBlock, stopAfterBlock = stopAfterBlock)
let noisy = defined(debug)
noisy.runCliqueSnapshot(true)
noisy.runCliqueSnapshot(false)
noisy.runGoerliBaybySteps(dir = ".")
noisy.runGoerliReplay(dir = ".")
noisy.runGoerliReplay(dir = ".", startAtBlock = 31100u64)
#noisy.goerliReplay(startAtBlock = 31100u64)
#noisy.goerliReplay(startAtBlock = 194881u64, stopAfterBlock = 198912u64)
# ------------------------------------------------------------------------------
# End

View File

@ -10,10 +10,11 @@
import
std/[random, sequtils, strformat, strutils, tables, times],
../../nimbus/[config, chain_config, constants, genesis, utils],
../../nimbus/[config, chain_config, constants, genesis],
../../nimbus/db/db_chain,
../../nimbus/p2p/[chain,
clique,
clique/clique_desc,
clique/clique_helpers,
clique/clique_snapshot,
clique/snapshot/snapshot_desc],
@ -172,13 +173,12 @@ proc ppBlockHeader(ap: TesterPool; v: BlockHeader; delim: string): string =
let sep = if 0 < delim.len: delim else: ";"
&"(blockNumber=#{v.blockNumber.truncate(uint64)}" &
&"{sep}parentHash={v.parentHash}" &
&"{sep}selfHash={v.hash}" &
&"{sep}selfHash={v.blockHash}" &
&"{sep}stateRoot={v.stateRoot}" &
&"{sep}coinbase={ap.ppAddress(v.coinbase)}" &
&"{sep}nonce={ap.ppNonce(v.nonce)}" &
&"{sep}extraData={ap.ppExtraData(v.extraData)})"
# ------------------------------------------------------------------------------
# Private: Constructor helpers
# ------------------------------------------------------------------------------
@ -193,7 +193,7 @@ proc resetChainDb(ap: TesterPool; extraData: Blob; debug = false) =
## Setup new block chain with bespoke genesis
ap.chain = BaseChainDB(
db: newMemoryDb(),
config: ap.boot.config).newChain(extraValidation = true)
config: ap.boot.config).newChain
ap.chain.clique.db.populateProgress
# new genesis block
var g = ap.boot.genesis
@ -265,10 +265,14 @@ proc debug*(ap: TesterPool): auto {.inline.} =
## Getter
ap.clique.cfg.debug
proc cliqueSigners*(ap: TesterPool; lastOk = false): auto {.inline.} =
proc cliqueSigners*(ap: TesterPool): auto {.inline.} =
  ## Getter, forwards the `cliqueSigners` list from the pool's `Clique`
  ## descriptor.
  ap.clique.cliqueSigners
proc cliqueSignersLen*(ap: TesterPool): auto {.inline.} =
  ## Getter, forwards `cliqueSignersLen` from the pool's `Clique`
  ## descriptor (presumably the number of authorised signers -- confirm
  ## against `clique_desc`).
  ap.clique.cliqueSignersLen
proc snapshot*(ap: TesterPool): auto {.inline.} =
  ## Getter, forwards the current snapshot held by the pool's `Clique`
  ## descriptor.
  ap.clique.snapshot
@ -285,6 +289,10 @@ proc `debug=`*(ap: TesterPool; debug: bool) {.inline,} =
## Set debugging mode on/off
ap.clique.cfg.debug = debug
proc `verifyFrom=`*(ap: TesterPool; verifyFrom: uint64) {.inline.} =
  ## Setter, block number where `Clique` should start header verification;
  ## stored on the pool's chain descriptor.
  ap.chain.verifyFrom = verifyFrom
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
@ -334,7 +342,7 @@ proc sign*(ap: TesterPool; header: var BlockHeader; signer: string) =
# ------------------------------------------------------------------------------
proc resetVoterChain*(ap: TesterPool; signers: openArray[string];
epoch = 0): TesterPool {.discardable.} =
epoch = 0; runBack = true): TesterPool {.discardable.} =
## Reset the batch list for voter headers and update genesis block
result = ap
@ -355,6 +363,7 @@ proc resetVoterChain*(ap: TesterPool; signers: openArray[string];
# store modified genesis block and epoch
ap.resetChainDb(extraData, ap.debug )
ap.clique.cfg.epoch = epoch
ap.clique.applySnapsMinBacklog = runBack
# clique/snapshot_test.go(415): blocks, _ := core.GenerateChain(&config, [..]
@ -370,7 +379,7 @@ proc appendVoter*(ap: TesterPool;
ap.batch[^1][^1]
var header = BlockHeader(
parentHash: parent.hash,
parentHash: parent.blockHash,
ommersHash: EMPTY_UNCLE_HASH,
stateRoot: parent.stateRoot,
timestamp: parent.timestamp + initDuration(seconds = 100),
@ -386,7 +395,7 @@ proc appendVoter*(ap: TesterPool;
nonce: if voter.auth: NONCE_AUTH else: NONCE_DROP,
#
# clique/snapshot_test.go(436): header.Difficulty = diffInTurn [..]
difficulty: DIFF_INTURN, # Ignored, we just need a valid number
difficulty: if voter.noTurn: DIFF_NOTURN else: DIFF_INTURN,
#
extraData: 0.byte.repeat(EXTRA_VANITY + EXTRA_SEAL))
@ -422,7 +431,6 @@ proc commitVoterChain*(ap: TesterPool; postProcessOk = false;
## Otherwise the offending bloch is removed, the rest of the batch is
## adjusted and applied again repeatedly.
result = ap
ap.chain.clique.fakeDiff = true
var reChainOk = false
for n in 0 ..< ap.batch.len:
@ -440,7 +448,7 @@ proc commitVoterChain*(ap: TesterPool; postProcessOk = false;
if reChainOk:
var parent = ap.chain.clique.db.getCanonicalHead
for i in 0 ..< headers.len:
headers[i].parentHash = parent.hash
headers[i].parentHash = parent.blockHash
headers[i].blockNumber = parent.blockNumber + 1
parent = headers[i]
@ -453,7 +461,7 @@ proc commitVoterChain*(ap: TesterPool; postProcessOk = false;
# If the offending block is the last one of the last transaction,
# then there is nothing to do.
let culprit = headers.filterIt(ap.failed[0] == it.hash)
let culprit = headers.filterIt(ap.failed[0] == it.blockHash)
doAssert culprit.len == 1
let number = culprit[0].blockNumber
if n + 1 == ap.batch.len and number == headers[^1].blockNumber:

View File

@ -11,7 +11,6 @@
import
std/[sequtils, strformat, strutils],
../../nimbus/db/db_chain,
../../nimbus/utils,
./gunzip,
eth/[common, rlp],
nimcrypto,
@ -48,12 +47,12 @@ proc dumpGroupEndNl*: string =
proc dumpGroupBlockNl*(header: BlockHeader; body: BlockBody): string =
dumpGroupBlock(header, body) & "\n"
proc dumpGroupBeginNl*(db: var BaseChainDB;
proc dumpGroupBeginNl*(db: BaseChainDB;
headers: openArray[BlockHeader]): string =
if headers[0].blockNumber == 1.u256:
let
h0 = db.getBlockHeader(0.u256)
b0 = db.getBlockBody(h0.hash)
b0 = db.getBlockBody(h0.blockHash)
result = "" &
dumpGroupBegin(@[h0]) & "\n" &
dumpGroupBlockNl(h0,b0) &
@ -62,7 +61,7 @@ proc dumpGroupBeginNl*(db: var BaseChainDB;
result &= dumpGroupBegin(headers) & "\n"
proc dumpGroupNl*(db: var BaseChainDB; headers: openArray[BlockHeader];
proc dumpGroupNl*(db: BaseChainDB; headers: openArray[BlockHeader];
bodies: openArray[BlockBody]): string =
db.dumpGroupBeginNl(headers) &
toSeq(countup(0, headers.len-1))

View File

@ -24,12 +24,17 @@ type
## deauthorize)
checkpoint*: seq[string] ## List of authorized signers if this is an epoch
## block
noTurn*: bool ## initialise `NOTURN` it `true`, otherwise
## `INTURN` (not part of Go ref implementation,
## used here to avoid `fakeDiff` kludge in the
## Go implementation)
newbatch*: bool
TestSpecs* = object ## Define the various voting scenarios to test
id*: int ## Test id
info*: string ## Test description
epoch*: int ## Number of blocks in an epoch (unset = 30000)
runBack*: bool ## Set `applySnapsMinBacklog` flag
signers*: seq[string] ## Initial list of authorized signers in the
## genesis
votes*: seq[TesterVote] ## Chain of signed blocks, potentially influencing
@ -67,11 +72,11 @@ const
signers: @["A", "B"],
votes: @[TesterVote(signer: "A", voted: "C", auth: true),
TesterVote(signer: "B", voted: "C", auth: true),
TesterVote(signer: "A", voted: "D", auth: true),
TesterVote(signer: "B", voted: "D", auth: true),
TesterVote(signer: "C"),
TesterVote(signer: "A", voted: "E", auth: true),
TesterVote(signer: "B", voted: "E", auth: true)],
TesterVote(signer: "A", voted: "D", auth: true, noTurn: true),
TesterVote(signer: "B", voted: "D", auth: true, noTurn: true),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A", voted: "E", auth: true, noTurn: true),
TesterVote(signer: "B", voted: "E", auth: true, noTurn: true)],
results: @["A", "B", "C", "D"]),
TestSpecs(
@ -102,16 +107,16 @@ const
id: 7,
info: "Three signers, two of them deciding to drop the third",
signers: @["A", "B", "C"],
votes: @[TesterVote(signer: "A", voted: "C"),
TesterVote(signer: "B", voted: "C")],
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", voted: "C", noTurn: true)],
results: @["A", "B"]),
TestSpecs(
id: 8,
info: "Four signers, consensus of two not being enough to drop anyone",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "C"),
TesterVote(signer: "B", voted: "C")],
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", voted: "C", noTurn: true)],
results: @["A", "B", "C", "D"]),
TestSpecs(
@ -119,9 +124,9 @@ const
info: "Four signers, consensus of three already being enough to " &
"drop someone",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "D"),
TesterVote(signer: "B", voted: "D"),
TesterVote(signer: "C", voted: "D")],
votes: @[TesterVote(signer: "A", voted: "D", noTurn: true),
TesterVote(signer: "B", voted: "D", noTurn: true),
TesterVote(signer: "C", voted: "D", noTurn: true)],
results: @["A", "B", "C"]),
TestSpecs(
@ -145,8 +150,8 @@ const
TesterVote(signer: "B"),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "D", auth: true),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "C", auth: true)],
TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", voted: "C", auth: true, noTurn: true)],
results: @["A", "B", "C", "D"]),
TestSpecs(
@ -164,17 +169,17 @@ const
id: 13,
info: "Deauthorizing multiple accounts concurrently is permitted",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "C"),
TesterVote(signer: "B"),
TesterVote(signer: "C"),
TesterVote(signer: "A", voted: "D"),
TesterVote(signer: "B"),
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A", voted: "D", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "C"),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "D"),
TesterVote(signer: "C", voted: "D"),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "C")],
TesterVote(signer: "C", voted: "D", noTurn: true),
TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", voted: "C", noTurn: true)],
results: @["A", "B"]),
TestSpecs(
@ -185,7 +190,7 @@ const
votes: @[TesterVote(signer: "C", voted: "B"),
TesterVote(signer: "A", voted: "C"),
TesterVote(signer: "B", voted: "C"),
TesterVote(signer: "A", voted: "B")],
TesterVote(signer: "A", voted: "B", noTurn: true)],
results: @["A", "B"]),
TestSpecs(
@ -196,7 +201,7 @@ const
votes: @[TesterVote(signer: "C", voted: "D", auth: true),
TesterVote(signer: "A", voted: "C"),
TesterVote(signer: "B", voted: "C"),
TesterVote(signer: "A", voted: "D", auth: true)],
TesterVote(signer: "A", voted: "D", auth: true, noTurn: true)],
results: @["A", "B"]),
TestSpecs(
@ -204,15 +209,15 @@ const
info: "Cascading changes are not allowed, only the account being " &
"voted on may change",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "C"),
TesterVote(signer: "B"),
TesterVote(signer: "C"),
TesterVote(signer: "A", voted: "D"),
TesterVote(signer: "B", voted: "C"),
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A", voted: "D", noTurn: true),
TesterVote(signer: "B", voted: "C", noTurn: true),
TesterVote(signer: "C"),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "D"),
TesterVote(signer: "C", voted: "D")],
TesterVote(signer: "C", voted: "D", noTurn: true)],
results: @["A", "B", "C"]),
TestSpecs(
@ -220,17 +225,17 @@ const
info: "Changes reaching consensus out of bounds (via a deauth) " &
"execute on touch",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "C"),
TesterVote(signer: "B"),
TesterVote(signer: "C"),
TesterVote(signer: "A", voted: "D"),
TesterVote(signer: "B", voted: "C"),
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A", voted: "D", noTurn: true),
TesterVote(signer: "B", voted: "C", noTurn: true),
TesterVote(signer: "C"),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "D"),
TesterVote(signer: "C", voted: "D"),
TesterVote(signer: "A"),
TesterVote(signer: "C", voted: "C", auth: true)],
TesterVote(signer: "C", voted: "D", noTurn: true),
TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "C", voted: "C", auth: true, noTurn: true)],
results: @["A", "B"]),
TestSpecs(
@ -238,17 +243,17 @@ const
info: "Changes reaching consensus out of bounds (via a deauth) " &
"may go out of consensus on first touch",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "C"),
TesterVote(signer: "B"),
TesterVote(signer: "C"),
TesterVote(signer: "A", voted: "D"),
TesterVote(signer: "B", voted: "C"),
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A", voted: "D", noTurn: true),
TesterVote(signer: "B", voted: "C", noTurn: true),
TesterVote(signer: "C"),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "D"),
TesterVote(signer: "C", voted: "D"),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "C", auth: true)],
TesterVote(signer: "C", voted: "D", noTurn: true),
TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", voted: "C", auth: true, noTurn: true)],
results: @["A", "B", "C"]),
TestSpecs(
@ -262,27 +267,27 @@ const
signers: @["A", "B", "C", "D", "E"],
votes: @[
# Authorize F, 3 votes needed
TesterVote(signer: "A", voted: "F", auth: true),
TesterVote(signer: "B", voted: "F", auth: true),
TesterVote(signer: "C", voted: "F", auth: true),
TesterVote(signer: "A", voted: "F", auth: true, noTurn: true),
TesterVote(signer: "B", voted: "F", auth: true, noTurn: true),
TesterVote(signer: "C", voted: "F", auth: true, noTurn: true),
# Deauthorize F, 4 votes needed (leave A's previous vote "unchanged")
TesterVote(signer: "D", voted: "F"),
TesterVote(signer: "E", voted: "F"),
TesterVote(signer: "D", voted: "F", noTurn: true),
TesterVote(signer: "E", voted: "F", noTurn: true),
TesterVote(signer: "B", voted: "F"),
TesterVote(signer: "C", voted: "F"),
TesterVote(signer: "C", voted: "F", noTurn: true),
# Almost authorize F, 2/3 votes needed
TesterVote(signer: "D", voted: "F", auth: true),
TesterVote(signer: "E", voted: "F", auth: true),
TesterVote(signer: "D", voted: "F", auth: true, noTurn: true),
TesterVote(signer: "E", voted: "F", auth: true, noTurn: true),
# Deauthorize A, 3 votes needed
TesterVote(signer: "B", voted: "A"),
TesterVote(signer: "C", voted: "A"),
TesterVote(signer: "D", voted: "A"),
TesterVote(signer: "C", voted: "A", noTurn: true),
TesterVote(signer: "D", voted: "A", noTurn: true),
# Finish authorizing F, 3/3 votes needed
TesterVote(signer: "B", voted: "F", auth: true)],
TesterVote(signer: "B", voted: "F", auth: true, noTurn: true)],
results: @["B", "C", "D", "E", "F"]),
TestSpecs(
@ -300,7 +305,7 @@ const
id: 21,
info: "An unauthorized signer should not be able to sign blocks",
signers: @["A"],
votes: @[TesterVote(signer: "B")],
votes: @[TesterVote(signer: "B", noTurn: true)],
failure: errUnauthorizedSigner),
TestSpecs(
@ -318,10 +323,21 @@ const
"imported in a batch ",
epoch: 3,
signers: @["A", "B", "C"],
votes: @[TesterVote(signer: "A"),
TesterVote(signer: "B"),
TesterVote(signer: "A", checkpoint: @["A", "B", "C"]),
TesterVote(signer: "A")],
votes: @[TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "A", checkpoint: @["A", "B", "C"],
noTurn: true),
TesterVote(signer: "A", noTurn: true)],
# Setting the `runBack` flag forces the snapshot handler to search for
# a checkpoint before entry 3. So the checkpoint will be ignored for
# re-setting the system so that address `A` of block #3 is found in the
# list of recent signers (see documentation of the flag
# `applySnapsMinBacklog` for the `Clique` descriptor.)
#
# As far as I understand, there was no awareness of the transaction batch
# in the Go implementation -- jordan.
runBack: true,
failure: errRecentlySigned),
# The last test does not differ from the previous one with the current
@ -335,11 +351,35 @@ const
"Rinkeby consensus split.",
epoch: 3,
signers: @["A", "B", "C"],
votes: @[TesterVote(signer: "A"),
TesterVote(signer: "B"),
TesterVote(signer: "A", checkpoint: @["A", "B", "C"]),
TesterVote(signer: "A", newbatch: true)],
failure: errRecentlySigned)]
votes: @[TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "A", checkpoint: @["A", "B", "C"],
noTurn: true),
TesterVote(signer: "A", newbatch: true, noTurn: true)],
# Setting the `runBack` flag forces the snapshot handler to search for
# a checkpoint before entry 3. So the checkpoint will be ignored for
# re-setting the system so that address `A` of block #3 is found in the
# list of recent signers (see documentation of the flag
# `applySnapsMinBacklog` for the `Clique` descriptor.)
#
# As far as I understand, there was no awareness of the transaction batch
# in the Go implementation -- jordan.
runBack: true,
failure: errRecentlySigned),
# Not found in Go reference implementation
TestSpecs(
id: 25,
info: "Test 23/24 with using the most recent <epoch> checkpoint",
epoch: 3,
signers: @["A", "B", "C"],
votes: @[TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "A", checkpoint: @["A", "B", "C"],
noTurn: true),
TesterVote(signer: "A", noTurn: true)],
results: @["A", "B", "C"])]
static:
# For convenience, make sure that IDs are increasing