Feature/implement poa processing (#748)

* re-shuffled Clique functions

why:
  Due to the port from the go-sources, the interface logic is not optimal
  for nimbus. The main visible function is currently snapshot() and most
  of the _procurement_ of this function result has been moved to a
  sub-directory.

* run eip-225 Clique test against p2p/chain.persistBlocks()

why:
  Previously, loading the test block chains was fudged with the purpose
  only to fill the database. As it is now clear how nimbus works on
  Goerli, the same can be achieved with a more realistic scenario.

details:
  Eventually these tests will be precursors to the replay tests for the
  Goerli chain, supporting a TDD approach with simpler cases.

* fix exception annotations for executor module

why:
  needed for exception tracking

details:
  main annoyance are vmState methods (in state.nim) which potentially
  throw a base level Exception (a proc would only throw CatchableError)

* split p2p/chain into sub-modules and fix exception annotations

why:
  make space for implementing PoA stuff

* provide over-loadable Clique PRNG

why:
  There is a PRNG provided for generating reproducible number sequences.
  The functions which employ the PRNG to generate time slots were ported
  from the go-implementation. They are currently unused.

* implement trusted signer assembly in p2p/chain.persistBlocks()

details:
  * PoA processing moved there at the end of a transaction. Currently,
   there is no action (eg. transaction rollback) if this fails.
  * The unit tests with staged blocks work ok. In particular, there should
    be tests with to-be-rejected blocks.
  * TODO: 1.Optimise throughput/cache handling; 2.Verify headers

* fix statement cast in pool.nim

* added table features to LRU cache

why:
  Clique uses the LRU cache using a mixture of volatile online items
  from the LRU cache and database checkpoints for hard synchronisation.
  For performance, Clique needs more table like features.

details:
  First, last, and query key added, as well as efficient random delete
  added. Also key-item pair iterator added for debugging.

* re-factored LRU snapshot caching

why:
  Caching was sub-optimal (aka. bonkers) in that it skipped over memory
  caches in many cases and so mostly rebuild the snapshot from the
  last on-disk checkpoint.

details:
  The LRU snapshot toValue() handler has been moved into the module
  clique_snapshot. This is for the fact that toValue() is not supposed
  to see the whole LRU cache database. So there must be a higher layer
  working with the whole LRU cache and the on-disk checkpoint
  database.

also:
  some clean up

todo:
  The code still assumes that the block headers are valid in itself. This
  is particularly important when an epoch header (aka re-sync header) is
  processed as it must contain the PoA result of all previous headers.

  So blocks need to be verified when they come in before used for PoA
  processing.

* fix some snapshot cache fringe cases

why:
  Must not index empty sequences in clique_snapshot module
This commit is contained in:
Jordan Hrycaj 2021-07-14 16:13:27 +01:00 committed by GitHub
parent 0fb0fc4680
commit cfe955c962
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
32 changed files with 2133 additions and 1296 deletions

View File

@ -1,225 +1,19 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
../chain_config,
../db/db_chain,
../genesis,
../utils,
../vm_state,
./clique,
./executor,
./validate,
./validate/epoch_hash_cache,
chronicles,
eth/[common, trie/db],
nimcrypto,
stew/endians2,
stint
./chain/[chain_desc, chain_misc, persist_blocks]
when not defined(release):
import ../tracer
export
chain_desc,
chain_misc,
persist_blocks
type
# Chain's forks not always equals to EVM's forks
ChainFork = enum
Frontier,
Homestead,
DAOFork,
Tangerine,
Spurious,
Byzantium,
Constantinople,
Petersburg,
Istanbul,
MuirGlacier,
Berlin,
London
Chain* = ref object of AbstractChainDB
db: BaseChainDB
forkIds: array[ChainFork, ForkID]
blockZeroHash: KeccakHash
extraValidation: bool ##\
## Trigger extra validation, currently with `persistBlocksin()` only.
cacheByEpoch: EpochHashCache ##\
## Objects cache to speed up lookup in validation functions.
poa: Clique ##\
## For non-PoA networks (when `db.config.poaEngine` is `false`),
## this descriptor is ignored.
func toChainFork(c: ChainConfig, number: BlockNumber): ChainFork =
  ## Map a block `number` to the newest hard fork activated at or before it.
  ## Forks are probed newest-first, so the first matching threshold wins.
  if number >= c.londonBlock: return London
  if number >= c.berlinBlock: return Berlin
  if number >= c.muirGlacierBlock: return MuirGlacier
  if number >= c.istanbulBlock: return Istanbul
  if number >= c.petersburgBlock: return Petersburg
  if number >= c.constantinopleBlock: return Constantinople
  if number >= c.byzantiumBlock: return Byzantium
  if number >= c.eip158Block: return Spurious
  if number >= c.eip150Block: return Tangerine
  if number >= c.daoForkBlock: return DAOFork
  if number >= c.homesteadBlock: return Homestead
  Frontier
func toNextFork(n: BlockNumber): uint64 =
  ## EIP-2124 helper: encode a fork activation block as a 64 bit number.
  ## `high(BlockNumber)` is the "not scheduled" marker and maps to 0.
  if n == high(BlockNumber): 0'u64
  else: n.truncate(uint64)
func getNextFork(c: ChainConfig, fork: ChainFork): uint64 =
  ## EIP-2124 helper: activation block of the next fork scheduled strictly
  ## after `fork`, or 0 when no later fork is scheduled.
  let forkBlocks: array[ChainFork, uint64] = [
    0'u64,                         # Frontier: chain start, no activation block
    toNextFork(c.homesteadBlock),
    toNextFork(c.daoForkBlock),
    toNextFork(c.eip150Block),
    toNextFork(c.eip158Block),
    toNextFork(c.byzantiumBlock),
    toNextFork(c.constantinopleBlock),
    toNextFork(c.petersburgBlock),
    toNextFork(c.istanbulBlock),
    toNextFork(c.muirGlacierBlock),
    toNextFork(c.berlinBlock),
    toNextFork(c.londonBlock)
  ]
  if fork == high(ChainFork):
    return 0                       # last known fork: nothing comes after
  result = forkBlocks[fork]
  # Scan forward for the first fork whose activation block differs from ours;
  # equal entries mean the forks activate at the same block.
  for later in fork .. high(ChainFork):
    if forkBlocks[later] != result:
      result = forkBlocks[later]
      break
func calculateForkId(c: ChainConfig, fork: ChainFork, prevCRC: uint32, prevFork: uint64): ForkID =
  ## Build one EIP-2124 fork identifier: the CRC chain is only advanced when
  ## the upcoming fork block differs from the previous one.
  result.nextFork = c.getNextFork(fork)
  result.crc =
    if result.nextFork == prevFork: prevCRC
    else: crc32(prevCRC, toBytesBE(prevFork))
func calculateForkIds(c: ChainConfig, genesisCRC: uint32): array[ChainFork, ForkID] =
  ## Precompute the EIP-2124 fork-ID table for every fork era, chaining the
  ## CRC forward from the genesis checksum.
  var lastCRC = genesisCRC
  var lastFork = c.getNextFork(Frontier)
  for fork in ChainFork:
    let entry = c.calculateForkId(fork, lastCRC, lastFork)
    result[fork] = entry
    lastFork = entry.nextFork
    lastCRC = entry.crc
proc newChain*(db: BaseChainDB, extraValidation = false): Chain =
## Allocate and initialise a `Chain` descriptor on top of `db`. When
## `extraValidation` is set, `persistBlocks()` additionally verifies header
## kinship and initialises the epoch hash cache used for that purpose.
result.new
result.db = db
# Without DAO fork support the DAO fork block is aliased to the Homestead
# block so the fork schedule used below stays consistent.
# NOTE(review): this mutates the shared `db.config` in place — confirm
# callers expect the config object to change.
if not db.config.daoForkSupport:
db.config.daoForkBlock = db.config.homesteadBlock
let g = defaultGenesisBlockForNetwork(db.networkId)
result.blockZeroHash = g.toBlock.blockHash
# The genesis block hash seeds the EIP-2124 fork-ID CRC chain.
let genesisCRC = crc32(0, result.blockZeroHash.data)
result.forkIds = calculateForkIds(db.config, genesisCRC)
result.extraValidation = extraValidation
# Initialise the PoA state regardless of whether it is needed on the current
# network. For non-PoA networks (when `db.config.poaEngine` is `false`),
# this descriptor is ignored.
result.poa = db.newCliqueCfg.newClique
if extraValidation:
result.cacheByEpoch.initEpochHashCache
method genesisHash*(c: Chain): KeccakHash {.gcsafe.} =
  ## `AbstractChainDB` overload: hash of the genesis (block zero) header.
  return c.blockZeroHash
method getBlockHeader*(c: Chain, b: HashOrNum, output: var BlockHeader): bool {.gcsafe.} =
  ## `AbstractChainDB` overload: look up a header by hash or by number,
  ## storing it in `output`; returns true on success.
  if b.isHash:
    c.db.getBlockHeader(b.hash, output)
  else:
    c.db.getBlockHeader(b.number, output)
method getBestBlockHeader*(c: Chain): BlockHeader {.gcsafe.} =
  ## `AbstractChainDB` overload: header of the canonical chain head.
  return c.db.getCanonicalHead()
method getSuccessorHeader*(c: Chain, h: BlockHeader, output: var BlockHeader, skip = 0'u): bool {.gcsafe.} =
  ## `AbstractChainDB` overload: fetch the header `skip + 1` blocks after `h`
  ## into `output`. Returns false when the target number would wrap around.
  let step = 1 + skip.toBlockNumber
  # `not 0.toBlockNumber` is the maximum block number; guard the addition.
  if h.blockNumber <= (not 0.toBlockNumber) - step:
    result = c.db.getBlockHeader(h.blockNumber + step, output)
method getAncestorHeader*(c: Chain, h: BlockHeader, output: var BlockHeader, skip = 0'u): bool {.gcsafe.} =
  ## `AbstractChainDB` overload: fetch the header `skip + 1` blocks before `h`
  ## into `output`. Returns false when that would underflow past genesis.
  let back = 1 + skip.toBlockNumber
  if back <= h.blockNumber:
    result = c.db.getBlockHeader(h.blockNumber - back, output)
method getBlockBody*(c: Chain, blockHash: KeccakHash): BlockBodyRef =
  ## `AbstractChainDB` overload: bodies are not served from this layer,
  ## so the result is always `nil`.
  return nil
method persistBlocks*(c: Chain; headers: openarray[BlockHeader];
                      bodies: openarray[BlockBody]): ValidationResult {.gcsafe.} =
  ## Run the VM over `headers`/`bodies` pairwise and persist each block
  ## (header, transactions, receipts) inside a single database transaction.
  ## Returns `ValidationResult.Error` on any processing or validation failure,
  ## in which case the transaction is disposed without committing.
  if headers.len != bodies.len:
    debug "Number of headers not matching number of bodies"
    return ValidationResult.Error
  # BUG FIX: an empty batch previously fell through to `headers[^1]` below,
  # raising an index defect. Nothing to persist, so succeed early.
  if headers.len == 0:
    debug "Nothing to do"
    return ValidationResult.OK
  c.db.highestBlock = headers[^1].blockNumber
  let transaction = c.db.db.beginTransaction()
  # Disposing an already-committed transaction is a no-op, so this only rolls
  # back on the early-return error paths.
  defer: transaction.dispose()
  trace "Persisting blocks",
    fromBlock = headers[0].blockNumber,
    toBlock = headers[^1].blockNumber
  for i in 0 ..< headers.len:
    let
      (header, body) = (headers[i], bodies[i])
      parentHeader = c.db.getBlockHeader(header.parentHash)
      vmState = newBaseVMState(parentHeader.stateRoot, header, c.db)
      # The following processing function call will update the PoA state which
      # is passed as second function argument. The PoA state is ignored for
      # non-PoA networks (in which case `vmState.processBlock(header,body)`
      # would also be correct but not vice versa.)
      validationResult = vmState.processBlock(c.poa, header, body)
    when not defined(release):
      if validationResult == ValidationResult.Error and
         body.transactions.calcTxRoot == header.txRoot:
        dumpDebuggingMetaData(c.db, header, body, vmState)
        warn "Validation error. Debugging metadata dumped."
    if validationResult != ValidationResult.OK:
      return validationResult
    if c.extraValidation:
      let res = c.db.validateHeaderAndKinship(
        header,
        body,
        checkSealOK = false, # TODO: how to checkseal from here
        c.cacheByEpoch
      )
      if res.isErr:
        debug "block validation error", msg = res.error
        return ValidationResult.Error
    discard c.db.persistHeaderToDb(header)
    discard c.db.persistTransactions(header.blockNumber, body.transactions)
    discard c.db.persistReceipts(vmState.receipts)
    # update currentBlock *after* we persist it
    # so the rpc return consistent result
    # between eth_blockNumber and eth_syncing
    c.db.currentBlock = header.blockNumber
  transaction.commit()
method getTrieDB*(c: Chain): TrieDatabaseRef {.gcsafe.} =
  ## `AbstractChainDB` overload: expose the underlying trie database.
  return c.db.db
method getForkId*(c: Chain, n: BlockNumber): ForkID {.gcsafe.} =
  ## EIP 2364/2124: fork identifier for block number `n`, served from the
  ## table precomputed in `newChain()`.
  return c.forkIds[c.db.config.toChainFork(n)]
# End

View File

@ -0,0 +1,192 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
../../chain_config,
../../db/db_chain,
../../genesis,
../../utils,
../clique,
../validate,
../validate/epoch_hash_cache,
chronicles,
eth/[common, trie/db],
stew/endians2,
stint
type
# Chain's forks not always equals to EVM's forks
ChainFork* = enum
Frontier,
Homestead,
DAOFork,
Tangerine,
Spurious,
Byzantium,
Constantinople,
Petersburg,
Istanbul,
MuirGlacier,
Berlin,
London
Chain* = ref object of AbstractChainDB
db: BaseChainDB
forkIds: array[ChainFork, ForkID]
blockZeroHash: KeccakHash
extraValidation: bool ##\
## Trigger extra validation, currently with `persistBlocksin()` only.
cacheByEpoch: EpochHashCache ##\
## Objects cache to speed up lookup in validation functions.
poa: Clique ##\
## For non-PoA networks (when `db.config.poaEngine` is `false`),
## this descriptor is ignored.
{.push raises: [Defect].}
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
func toNextFork(n: BlockNumber): uint64 =
  ## EIP-2124 helper: 64 bit encoding of a fork activation block.
  ## `result` stays at its zero default for the "not scheduled" marker.
  if n != high(BlockNumber):
    result = n.truncate(uint64)
func getNextFork(c: ChainConfig, fork: ChainFork): uint64 =
  ## EIP-2124 helper: activation block of the first fork scheduled after
  ## `fork`, or 0 when `fork` is already the last known one.
  if fork == high(ChainFork):
    return 0
  let scheduled: array[ChainFork, uint64] = [
    0'u64,                        # Frontier: no activation block of its own
    toNextFork(c.homesteadBlock),
    toNextFork(c.daoForkBlock),
    toNextFork(c.eip150Block),
    toNextFork(c.eip158Block),
    toNextFork(c.byzantiumBlock),
    toNextFork(c.constantinopleBlock),
    toNextFork(c.petersburgBlock),
    toNextFork(c.istanbulBlock),
    toNextFork(c.muirGlacierBlock),
    toNextFork(c.berlinBlock),
    toNextFork(c.londonBlock)
  ]
  result = scheduled[fork]
  # Forks sharing an activation block compare equal; return the first
  # entry that differs from our own.
  for f in fork .. high(ChainFork):
    if scheduled[f] != result:
      return scheduled[f]
func calculateForkId(c: ChainConfig, fork: ChainFork,
                     prevCRC: uint32, prevFork: uint64): ForkID =
  ## One EIP-2124 fork identifier: advance the CRC chain only when the next
  ## fork block changed relative to the previous era.
  let upcoming = c.getNextFork(fork)
  result.nextFork = upcoming
  if upcoming == prevFork:
    result.crc = prevCRC
  else:
    result.crc = crc32(prevCRC, toBytesBE(prevFork))
func calculateForkIds(c: ChainConfig,
                      genesisCRC: uint32): array[ChainFork, ForkID] =
  ## Precompute the per-era EIP-2124 fork-ID table, chaining the CRC forward
  ## from the genesis checksum.
  var runningCRC = genesisCRC
  var runningFork = c.getNextFork(Frontier)
  for fork in ChainFork:
    let entry = calculateForkId(c, fork, runningCRC, runningFork)
    result[fork] = entry
    runningCRC = entry.crc
    runningFork = entry.nextFork
# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------
proc newChain*(db: BaseChainDB; poa: Clique; extraValidation = false):
    Chain {.gcsafe, raises: [Defect,CatchableError].} =
  ## Constructor for the `Chain` descriptor object. `poa` is the Clique PoA
  ## descriptor to attach; for most applications it is transparent and the
  ## `newChain(db, extraValidation)` overload below, which initialises it on
  ## the fly, should be used instead.
  result.new
  result.db = db
  # Without DAO fork support the DAO fork block is aliased to the Homestead
  # block so the fork schedule used below stays consistent.
  if not db.config.daoForkSupport:
    db.config.daoForkBlock = db.config.homesteadBlock
  let g = defaultGenesisBlockForNetwork(db.networkId)
  result.blockZeroHash = g.toBlock.blockHash
  # The genesis block hash seeds the EIP-2124 fork-ID CRC chain.
  let genesisCRC = crc32(0, result.blockZeroHash.data)
  result.forkIds = calculateForkIds(db.config, genesisCRC)
  result.extraValidation = extraValidation
  # BUG FIX: attach the caller-supplied PoA state. Previously a fresh Clique
  # instance was constructed here, silently discarding the `poa` argument
  # and defeating the purpose of this overload. For non-PoA networks (when
  # `db.config.poaEngine` is `false`), this descriptor is ignored.
  result.poa = poa
  # Always initialise the epoch cache even though it might not be used
  # unless `extraValidation` is set `true`.
  result.cacheByEpoch.initEpochHashCache
proc newChain*(db: BaseChainDB, extraValidation = false):
    Chain {.gcsafe, raises: [Defect,CatchableError].} =
  ## Convenience constructor: builds a default PoA descriptor from `db` and
  ## forwards to the full constructor. On non-PoA networks the PoA state is
  ## initialised but otherwise ignored.
  let poaState = db.newCliqueCfg.newClique
  db.newChain(poaState, extraValidation)
# ------------------------------------------------------------------------------
# Public `AbstractChainDB` getter overload methods
# ------------------------------------------------------------------------------
method genesisHash*(c: Chain): KeccakHash {.gcsafe.} =
  ## `AbstractChainDB` overload: hash of the genesis (block zero) header.
  return c.blockZeroHash

method getBestBlockHeader*(c: Chain): BlockHeader
    {.gcsafe, raises: [Defect,CatchableError].} =
  ## `AbstractChainDB` overload: header of the canonical chain head.
  return c.db.getCanonicalHead()

method getTrieDB*(c: Chain): TrieDatabaseRef {.gcsafe.} =
  ## `AbstractChainDB` overload: expose the underlying trie database.
  return c.db.db
# ------------------------------------------------------------------------------
# Public `Chain` getters
# ------------------------------------------------------------------------------
proc clique*(c: Chain): var Clique {.inline.} =
  ## Getter: mutable access to the Clique PoA descriptor.
  return c.poa

proc cacheByEpoch*(c: Chain): var EpochHashCache {.inline.} =
  ## Getter: mutable access to the epoch hash cache used for validation.
  return c.cacheByEpoch

proc db*(c: Chain): auto {.inline.} =
  ## Getter: the underlying chain database.
  return c.db

proc extraValidation*(c: Chain): auto {.inline.} =
  ## Getter: whether `persistBlocks()` runs extra header validation.
  return c.extraValidation

proc forkIds*(c: Chain): auto {.inline.} =
  ## Getter: the precomputed EIP-2124 fork-ID table.
  return c.forkIds
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,28 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
type
P2PChainError* = object of CatchableError
## Catch and relay exception error
{.push raises: [Defect].}
template safeP2PChain*(info: string; code: untyped) =
## Exception guard for exception tracking: run `code`, re-raising
## `CatchableError` and `Defect` (as their base types) and wrapping anything
## else in a `P2PChainError` tagged with `info`, so the enclosing proc only
## needs to declare `raises: [Defect, CatchableError]`.
try:
code
except CatchableError as e:
# NOTE(review): re-raised as plain `CatchableError`, dropping the concrete
# subtype — confirm no caller matches on specific error types.
raise (ref CatchableError)(msg: e.msg)
except Defect as e:
raise (ref Defect)(msg: e.msg)
except:
# Anything outside Defect/CatchableError (bare Exception) is converted
# into a catchable P2PChainError carrying the original name and message.
let e = getCurrentException()
raise newException(P2PChainError, info & "(): " & $e.name & " -- " & e.msg)
# End

View File

@ -0,0 +1,79 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
../../chain_config,
../../db/db_chain,
./chain_desc,
chronicles,
eth/common,
stew/endians2,
stint
{.push raises: [Defect].}
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
func toChainFork(c: ChainConfig, number: BlockNumber): ChainFork =
  ## Newest hard fork activated at or before block `number`
  ## (thresholds checked newest-first).
  result =
    if number >= c.londonBlock: London
    elif number >= c.berlinBlock: Berlin
    elif number >= c.muirGlacierBlock: MuirGlacier
    elif number >= c.istanbulBlock: Istanbul
    elif number >= c.petersburgBlock: Petersburg
    elif number >= c.constantinopleBlock: Constantinople
    elif number >= c.byzantiumBlock: Byzantium
    elif number >= c.eip158Block: Spurious
    elif number >= c.eip150Block: Tangerine
    elif number >= c.daoForkBlock: DAOFork
    elif number >= c.homesteadBlock: Homestead
    else: Frontier
# ------------------------------------------------------------------------------
# Public `AbstractChainDB` overload methods
# ------------------------------------------------------------------------------
method getBlockHeader*(c: Chain, b: HashOrNum, output: var BlockHeader): bool
    {.gcsafe, raises: [Defect,RlpError].} =
  ## `AbstractChainDB` overload: look up a header by hash or by number,
  ## storing it in `output`; returns true on success.
  if b.isHash:
    return c.db.getBlockHeader(b.hash, output)
  c.db.getBlockHeader(b.number, output)
method getSuccessorHeader*(c: Chain, h: BlockHeader, output: var BlockHeader,
    skip = 0'u): bool {.gcsafe, raises: [Defect,RlpError].} =
  ## `AbstractChainDB` overload: header `skip + 1` blocks after `h`, false
  ## when the target number would wrap around.
  let distance = 1 + skip.toBlockNumber
  # `not 0.toBlockNumber` is the maximum block number; guard the addition.
  if h.blockNumber <= (not 0.toBlockNumber) - distance:
    result = c.db.getBlockHeader(h.blockNumber + distance, output)
method getAncestorHeader*(c: Chain, h: BlockHeader, output: var BlockHeader,
    skip = 0'u): bool {.gcsafe, raises: [Defect,RlpError].} =
  ## `AbstractChainDB` overload: header `skip + 1` blocks before `h`, false
  ## when that would underflow past genesis.
  let back = 1 + skip.toBlockNumber
  if back <= h.blockNumber:
    result = c.db.getBlockHeader(h.blockNumber - back, output)
method getBlockBody*(c: Chain, blockHash: KeccakHash): BlockBodyRef =
  ## Bodies are never served from this layer; the result is always `nil`.
  return nil
method getForkId*(c: Chain, n: BlockNumber): ForkID {.gcsafe.} =
  ## EIP 2364/2124 fork identifier for block `n`, served from the table
  ## precomputed at construction time.
  return c.forkIds[c.db.config.toChainFork(n)]
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,125 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
../../db/db_chain,
../../utils,
../../vm_state,
../clique,
../executor,
../validate,
./chain_desc,
./chain_helpers,
chronicles,
eth/[common, trie/db],
nimcrypto,
stew/endians2,
stint
# debugging clique
when defined(debug):
import
std/[algorithm, strformat, strutils],
../clique/clique_desc
when not defined(release):
import ../../tracer
{.push raises: [Defect].}
# ------------------------------------------------------------------------------
# Private
# ------------------------------------------------------------------------------
proc persistBlocksImpl(c: Chain; headers: openarray[BlockHeader];
bodies: openarray[BlockBody]): ValidationResult
# wildcard exception, wrapped below
{.inline, raises: [Exception].} =
## Worker for `persistBlocks()`: process and persist the block batch inside
## a single database transaction. The caller guarantees non-empty,
## equal-length `headers`/`bodies` (so `headers[^1]` below is safe).
c.db.highestBlock = headers[^1].blockNumber
let transaction = c.db.db.beginTransaction()
# Disposing a committed transaction is a no-op, so this only rolls back on
# the early-return error paths.
defer: transaction.dispose()
trace "Persisting blocks",
fromBlock = headers[0].blockNumber,
toBlock = headers[^1].blockNumber
for i in 0 ..< headers.len:
let
(header, body) = (headers[i], bodies[i])
parentHeader = c.db.getBlockHeader(header.parentHash)
vmState = newBaseVMState(parentHeader.stateRoot, header, c.db)
# The following processing function call will update the PoA state which
# is passed as second function argument. The PoA state is ignored for
# non-PoA networks (in which case `vmState.processBlock(header,body)`
# would also be correct but not vice versa.)
validationResult = vmState.processBlock(c.clique, header, body)
when not defined(release):
if validationResult == ValidationResult.Error and
body.transactions.calcTxRoot == header.txRoot:
dumpDebuggingMetaData(c.db, header, body, vmState)
warn "Validation error. Debugging metadata dumped."
if validationResult != ValidationResult.OK:
return validationResult
if c.extraValidation:
let res = c.db.validateHeaderAndKinship(
header,
body,
checkSealOK = false, # TODO: how to checkseal from here
c.cacheByEpoch
)
if res.isErr:
debug "block validation error", msg = res.error
return ValidationResult.Error
discard c.db.persistHeaderToDb(header)
discard c.db.persistTransactions(header.blockNumber, body.transactions)
discard c.db.persistReceipts(vmState.receipts)
# update currentBlock *after* we persist it
# so the rpc return consistent result
# between eth_blockNumber and eth_syncing
c.db.currentBlock = header.blockNumber
# PoA: update the trusted-signer snapshot up to the last block of the batch.
# NOTE(review): a snapshot failure is only logged; the transaction still
# commits below (no rollback) — flagged as a TODO in the commit message.
if c.db.config.poaEngine:
if c.clique.cliqueSnapshot(headers[^1]).isErr:
debug "PoA signer snapshot failed"
when defined(debug):
#let list = c.clique.pp(c.clique.cliqueSigners).sorted
#echo &"*** {list.len} trusted signer(s): ", list.join(" ")
discard
transaction.commit()
# ------------------------------------------------------------------------------
# Public `AbstractChainDB` overload method
# ------------------------------------------------------------------------------
method persistBlocks*(c: Chain; headers: openarray[BlockHeader];
    bodies: openarray[BlockBody]): ValidationResult
    {.gcsafe, raises: [Defect,CatchableError].} =
  ## `AbstractChainDB` overload: validate the batch shape, then delegate the
  ## VM run and persistence to `persistBlocksImpl()` behind the
  ## `safeP2PChain` exception guard.
  result = ValidationResult.OK
  if headers.len != bodies.len:
    debug "Number of headers not matching number of bodies"
    result = ValidationResult.Error
  elif headers.len == 0:
    debug "Nothing to do"
  else:
    safeP2PChain("persistBlocks"):
      result = c.persistBlocksImpl(headers,bodies)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -19,19 +19,24 @@
##
import
./clique/[clique_cfg, clique_defs, clique_desc, clique_miner],
chronicles
./clique/[clique_cfg,
clique_defs,
clique_desc,
clique_miner,
clique_signers,
clique_snapshot,
snapshot/snapshot_desc]
{.push raises: [Defect].}
logScope:
topics = "clique PoA"
export
clique_cfg,
clique_defs,
clique_desc,
clique_miner.snapshot
clique_miner,
clique_signers,
clique_snapshot,
snapshot_desc.Snapshot
# ------------------------------------------------------------------------------
# End

View File

@ -21,8 +21,8 @@
import
std/[random, sequtils, strutils, times],
../../db/db_chain,
./clique_cfg/ec_recover,
./clique_defs,
./ec_recover,
eth/common,
ethash,
stew/results,
@ -48,29 +48,55 @@ type
blockHeader*: proc(v: BlockHeader; delim: string):
string {.gcsafe,raises: [Defect,CatchableError].}
CliqueCfg* = ref object
db*: BaseChainDB
signatures*: EcRecover ## Recent block signatures to speed up mining
period*: Duration ## time between blocks to enforce
prng*: Rand ## PRNG state for internal random generator
bcEpoch: UInt256 ## The number of blocks after which to checkpoint
## and reset the pending votes.Suggested 30000 for
## the testnet to remain analogous to the mainnet
## ethash epoch.
prettyPrint*: PrettyPrinters ## debugging support
CliqueCfg* = ref object of RootRef
db*: BaseChainDB ##\
## All purpose (incl. blockchain) database.
period: Duration ##\
## Time between blocks to enforce.
ckpInterval: int ##\
## Number of blocks after which to save the vote snapshot to the
## database.
roThreshold: int ##\
## Number of blocks after which a chain segment is considered immutable
## (ie. soft finality). It is used by the downloader as a hard limit
## against deep ancestors, by the blockchain against deep reorgs, by the
## freezer as the cutoff threshold and by clique as the snapshot trust
## limit.
prng: Rand ##\
## PRNG state for internal random generator. This PRNG is
## cryptographically insecure but with reproducible data stream.
signatures: EcRecover ##\
## Recent block signatures cached to speed up mining.
epoch: int ##\
## The number of blocks after which to checkpoint and reset the pending
## votes.Suggested 30000 for the testnet to remain analogous to the
## mainnet ethash epoch.
debug*: bool ##\
## Debug mode flag
prettyPrint*: PrettyPrinters ##\
## debugging support
{.push raises: [Defect].}
# ------------------------------------------------------------------------------
# Public functions
# Public constructor
# ------------------------------------------------------------------------------
proc newCliqueCfg*(db: BaseChainDB; period = BLOCK_PERIOD;
epoch = 0.u256): CliqueCfg =
CliqueCfg(
proc newCliqueCfg*(db: BaseChainDB): CliqueCfg =
result = CliqueCfg(
db: db,
period: period,
bcEpoch: if epoch.isZero: EPOCH_LENGTH.u256 else: epoch,
epoch: EPOCH_LENGTH,
period: BLOCK_PERIOD,
ckpInterval: CHECKPOINT_INTERVAL,
roThreshold: FULL_IMMUTABILITY_THRESHOLD,
signatures: initEcRecover(),
prng: initRand(prngSeed),
prettyPrint: PrettyPrinters(
@ -79,19 +105,68 @@ proc newCliqueCfg*(db: BaseChainDB; period = BLOCK_PERIOD;
extraData: proc(v:Blob): string = $v,
blockHeader: proc(v:BlockHeader; delim:string): string = $v))
proc epoch*(cfg: CliqueCfg): BlockNumber {.inline.} =
# ------------------------------------------------------------------------------
# Public helper funcion
# ------------------------------------------------------------------------------
proc ecRecover*(cfg: CliqueCfg; header: BlockHeader): auto
{.gcsafe, raises: [Defect,CatchableError].}=
## Recover the signer of `header` via the shared `signatures` LRU cache,
## avoiding repeated signature recovery for recently seen headers.
cfg.signatures.getEcRecover(header)
# ------------------------------------------------------------------------------
# Public setters
# ------------------------------------------------------------------------------
proc `epoch=`*(cfg: CliqueCfg; epoch: SomeInteger) {.inline.} =
  ## Setter: checkpoint/vote-reset interval; a non-positive value restores
  ## the `EPOCH_LENGTH` default.
  cfg.epoch = if epoch <= 0: EPOCH_LENGTH else: epoch

proc `period=`*(cfg: CliqueCfg; period: Duration) {.inline.} =
  ## Setter: enforced block time; a zero duration restores the
  ## `BLOCK_PERIOD` default.
  cfg.period = if period == Duration(): BLOCK_PERIOD else: period

proc `ckpInterval=`*(cfg: CliqueCfg; numBlocks: SomeInteger) {.inline.} =
  ## Setter: vote-snapshot save distance; a non-positive value restores the
  ## `CHECKPOINT_INTERVAL` default.
  cfg.ckpInterval = if numBlocks <= 0: CHECKPOINT_INTERVAL else: numBlocks

proc `roThreshold=`*(cfg: CliqueCfg; numBlocks: SomeInteger) {.inline.} =
  ## Setter: soft-finality depth; a non-positive value restores the
  ## `FULL_IMMUTABILITY_THRESHOLD` default.
  cfg.roThreshold = if numBlocks <= 0: FULL_IMMUTABILITY_THRESHOLD
                    else: numBlocks
# ------------------------------------------------------------------------------
# Public PRNG, may be overloaded
# ------------------------------------------------------------------------------
method rand*(cfg: CliqueCfg; max: Natural): int {.gcsafe,base.} =
  ## Draw the next value in `[0, max]` from the engine's internal PRNG,
  ## which produces a reproducible stream. Declared `base` so tests can
  ## overload it. Not suitable for key generation or anything cryptographic.
  result = cfg.prng.rand(max)
# ------------------------------------------------------------------------------
# Public getter
# ------------------------------------------------------------------------------
proc epoch*(cfg: CliqueCfg): auto {.inline.} =
## Getter
cfg.bcEpoch
cfg.epoch.u256
proc `epoch=`*(cfg: CliqueCfg; epoch: BlockNumber) {.inline.} =
## Setter
cfg.bcEpoch = epoch
if cfg.bcEpoch.isZero:
cfg.bcEpoch = EPOCH_LENGTH.u256
proc period*(cfg: CliqueCfg): auto {.inline.} =
## Getter
cfg.period
proc `epoch=`*(cfg: CliqueCfg; epoch: SomeUnsignedInt) {.inline.} =
## Setter
cfg.epoch = epoch.u256
proc ckpInterval*(cfg: CliqueCfg): auto {.inline.} =
## Getter
cfg.ckpInterval.u256
proc roThreshold*(cfg: CliqueCfg): auto {.inline.} =
## Getter
cfg.roThreshold
# ------------------------------------------------------------------------------
# Debugging
@ -105,6 +180,11 @@ template ppExceptionWrap*(body: untyped) =
except:
raise (ref PrettyPrintDefect)(msg: getCurrentException().msg)
proc say*(cfg: CliqueCfg; v: varargs[string,`$`]) {.inline.} =
## Debugging output: when `cfg.debug` is set, join all arguments and write
## them to stderr prefixed with "*** ". Pretty-printer exceptions are
## converted by `ppExceptionWrap`.
ppExceptionWrap:
if cfg.debug: stderr.write "*** " & v.join & "\n"
proc pp*(v: CliqueError): string =
## Pretty print error
@ -112,7 +192,7 @@ proc pp*(v: CliqueError): string =
if v[1] != "":
result &= " => " & v[1]
proc pp*(v: CliqueResult): string =
proc pp*(v: CliqueOkResult): string =
## Pretty print result
if v.isOk:
"OK"

View File

@ -19,10 +19,10 @@
##
import
../../utils,
../../utils/lru_cache,
./clique_defs,
./clique_utils,
../../../utils,
../../../utils/lru_cache,
../clique_defs,
../clique_utils,
eth/[common, keys, rlp],
stint
@ -73,7 +73,7 @@ proc initEcRecover*(cache: var EcRecover) =
# Convert public key to address.
return ok(pubKey.value.toCanonicalAddress)
cache.initLruCache(toKey, toValue, INMEMORY_SIGNATURES)
cache.initCache(toKey, toValue, INMEMORY_SIGNATURES)
proc initEcRecover*: EcRecover {.gcsafe, raises: [Defect].} =
result.initEcRecover
@ -84,7 +84,7 @@ proc getEcRecover*(addrCache: var EcRecover; header: BlockHeader): auto {.
gcsafe, raises: [Defect,CatchableError].} =
## extract Ethereum account address from a signed header block, the relevant
## signature used is appended to the re-purposed extra data field
addrCache.getLruItem(header)
addrCache.getItem(header)
proc append*(rw: var RlpWriter; ecRec: EcRecover) {.

View File

@ -104,8 +104,8 @@ const
# clique/clique.go(76): var ( [..]
type
CliqueErrorType* = enum
noCliqueError = 0 ##\
## Default/reset value
resetCliqueError = 0 ##\
## Default/reset value (use `cliqueNoError` below rather than this valie)
errUnknownBlock = ##\
## is returned when the list of signers is requested for a block that is
@ -220,6 +220,13 @@ type
# additional/bespoke errors, manually added
# -----------------------------------------
errUnknownHash = "No header found for hash value"
errEmptyLruCache = "No snapshot available"
errSetLruSnaps = ##\
## Attempt to assign a value to a non-existing slot
"Missing LRU slot for snapshot"
errZeroBlockNumberRejected = "Block number must not be Zero"
errSkSigResult ## eth/keys subsytem error: signature
@ -242,12 +249,15 @@ type
nilCliqueSealSignedRecently = "Signed recently, must wait for others"
# ------------------------------------------------------------------------------
# More types
# More types and constants
# ------------------------------------------------------------------------------
type
CliqueError* = (CliqueErrorType,string)
CliqueResult* = Result[void,CliqueError]
CliqueOkResult* = Result[void,CliqueError]
const
cliqueNoError* = (resetCliqueError, "")
# ------------------------------------------------------------------------------
# End

View File

@ -24,14 +24,14 @@ import
../../constants,
./clique_cfg,
./clique_defs,
./recent_snaps,
./snapshot/[lru_snaps, snapshot_desc],
chronos,
eth/[common, keys, rlp]
type
# clique/clique.go(142): type SignerFn func(signer [..]
CliqueSignerFn* = ## Hashes and signs the data to be signed by
## a backing account
CliqueSignerFn* = ## Hashes and signs the data to be signed by
## a backing account
proc(signer: EthAddress;
message: openArray[byte]): Result[Hash256,cstring] {.gcsafe.}
@ -41,22 +41,34 @@ type
Clique* = ref object ## Clique is the proof-of-authority consensus engine
## proposed to support the Ethereum testnet following
## the Ropsten attacks.
cCfg: CliqueCfg ## Common engine parameters to fine tune behaviour
signer*: EthAddress ##\
## Ethereum address of the current signing key
cRecents: RecentSnaps ## Snapshots for recent block to speed up reorgs
# signatures => see CliqueCfg
cProposals: Proposals ## Cu1rrent list of proposals we are pushing
signer*: EthAddress ## Ethereum address of the signing key
signFn*: CliqueSignerFn ## Signer function to authorize hashes with
cLock: AsyncLock ## Protects the signer fields
stopSealReq*: bool ## Stop running `seal()` function
stopVHeaderReq*: bool ## Stop running `verifyHeader()` function
# signatures => see CliqueCfg
cFakeDiff: bool ## Testing only: skip difficulty verifications
cDebug: bool ## debug mode
cfg: CliqueCfg ##\
## Common engine parameters to fine tune behaviour
recents: LruSnaps ##\
## Snapshots cache for recent block search
snapshot: Snapshot ##\
## Stashing last snapshot operation here
error: CliqueError ##\
## Last error, typically stored by snaphot utility
proposals: Proposals ##\
## Cu1rrent list of proposals we are pushing
asyncLock: AsyncLock ##\
## Protects the signer fields
fakeDiff: bool ##\
## Testing/debugging only: skip difficulty verifications
{.push raises: [Defect].}
@ -68,48 +80,51 @@ type
proc newClique*(cfg: CliqueCfg): Clique =
## Initialiser for Clique proof-of-authority consensus engine with the
## initial signers set to the ones provided by the user.
Clique(cCfg: cfg,
cRecents: initRecentSnaps(cfg),
cProposals: initTable[EthAddress,bool](),
cLock: newAsyncLock())
Clique(cfg: cfg,
recents: initLruSnaps(cfg),
snapshot: cfg.initSnapshot(BlockHeader()), # dummy
proposals: initTable[EthAddress,bool](),
asyncLock: newAsyncLock())
# ------------------------------------------------------------------------------
# Public debug/pretty print
# ------------------------------------------------------------------------------
proc pp*(rc: var Result[Snapshot,CliqueError]; indent = 0): string =
if rc.isOk:
rc.value.pp(indent)
else:
"(error: " & rc.error.pp & ")"
proc getPrettyPrinters*(c: Clique): var PrettyPrinters =
## Mixin for pretty printers, see `clique/clique_cfg.pp()`
c.cfg.prettyPrint
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc cfg*(c: Clique): auto {.inline.} =
proc recents*(c: Clique): var LruSnaps {.inline.} =
## Getter
c.cCfg
proc db*(c: Clique): BaseChainDB {.inline.} =
## Getter
c.cCfg.db
proc recents*(c: Clique): var RecentSnaps {.inline.} =
## Getter
c.cRecents
c.recents
proc proposals*(c: Clique): var Proposals {.inline.} =
## Getter
c.cProposals
c.proposals
proc debug*(c: Clique): auto {.inline.} =
proc snapshot*(c: Clique): var Snapshot {.inline.} =
## Getter, last processed snapshot
c.snapshot
proc error*(c: Clique): auto {.inline.} =
## Getter, last error message
c.error
proc cfg*(c: Clique): auto {.inline.} =
## Getter
c.cDebug
c.cfg
proc db*(c: Clique): auto {.inline.} =
## Getter
c.cfg.db
proc fakeDiff*(c: Clique): auto {.inline.} =
## Getter
c.cFakeDiff
c.fakeDiff
# ------------------------------------------------------------------------------
# Public setters
@ -117,17 +132,21 @@ proc fakeDiff*(c: Clique): auto {.inline.} =
proc `db=`*(c: Clique; db: BaseChainDB) {.inline.} =
## Setter, re-set database
c.cCfg.db = db
c.cProposals = initTable[EthAddress,bool]()
c.cRecents = c.cCfg.initRecentSnaps
c.cRecents.debug = c.cDebug
# note that the signatures[] cache need not be flushed
c.cfg.db = db
c.proposals = initTable[EthAddress,bool]()
c.recents = c.cfg.initLruSnaps
proc `debug=`*(c: Clique; debug: bool) =
## Set debugging mode on/off and set the `fakeDiff` flag `true`
c.cFakeDiff = true
c.cDebug = debug
c.cRecents.debug = debug
proc `fakeDiff=`*(c: Clique; debug: bool) =
## Setter
c.fakeDiff = debug
proc `snapshot=`*(c: Clique; snap: Snapshot) =
## Setter
c.snapshot = snap
proc `error=`*(c: Clique; error: CliqueError) =
## Setter
c.error = error
# ------------------------------------------------------------------------------
# Public lock/unlock
@ -135,11 +154,11 @@ proc `debug=`*(c: Clique; debug: bool) =
proc lock*(c: Clique) {.inline, raises: [Defect,CatchableError].} =
## Lock descriptor
waitFor c.cLock.acquire
waitFor c.asyncLock.acquire
proc unLock*(c: Clique) {.inline, raises: [Defect,AsyncLockError].} =
## Unlock descriptor
c.cLock.release
c.asyncLock.release
template doExclusively*(c: Clique; action: untyped) =
## Handy helper

View File

@ -21,17 +21,18 @@
##
import
std/[random, sequtils, strformat, tables, times],
std/[sequtils, strformat, tables, times],
../../constants,
../../db/state_db,
../../db/[db_chain, state_db],
../../utils,
../gaslimit,
./clique_cfg,
./clique_defs,
./clique_desc,
./clique_utils,
./ec_recover,
./recent_snaps,
./clique_snapshot,
./clique_signers,
./snapshot/[snapshot_desc, snapshot_misc],
chronicles,
chronos,
eth/[common, keys, rlp],
@ -46,10 +47,6 @@ type
CliqueSyncDefect* = object of Defect
## Defect raised with lock/unlock problem
proc snapshot*(c: Clique; blockNumber: BlockNumber; hash: Hash256;
parents: openArray[Blockheader]): Result[Snapshot,CliqueError] {.
gcsafe, raises: [Defect,CatchableError].}
# ------------------------------------------------------------------------------
# Private Helpers
# ------------------------------------------------------------------------------
@ -68,12 +65,12 @@ template syncExceptionWrap(action: untyped) =
proc ecrecover(c: Clique; header: BlockHeader): Result[EthAddress,CliqueError]
{.gcsafe, raises: [Defect,CatchableError].} =
## ecrecover extracts the Ethereum account address from a signed header.
c.cfg.signatures.getEcRecover(header)
c.cfg.ecRecover(header)
# clique/clique.go(463): func (c *Clique) verifySeal(chain [..]
proc verifySeal(c: Clique; header: BlockHeader;
parents: openArray[BlockHeader]): CliqueResult
parents: openArray[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
## Check whether the signature contained in the header satisfies the
## consensus protocol requirements. The method accepts an optional list of
@ -85,28 +82,28 @@ proc verifySeal(c: Clique; header: BlockHeader;
return err((errUnknownBlock,""))
# Retrieve the snapshot needed to verify this header and cache it
var snap = c.snapshot(header.blockNumber-1, header.parentHash, parents)
if snap.isErr:
return err(snap.error)
let rc = c.cliqueSnapshot(header.parentHash, parents)
if rc.isErr:
return err(rc.error)
# Resolve the authorization key and check against signers
let signer = c.ecrecover(header)
if signer.isErr:
return err(signer.error)
if not snap.value.isSigner(signer.value):
if not c.snapshot.isSigner(signer.value):
return err((errUnauthorizedSigner,""))
let seen = snap.value.recent(signer.value)
let seen = c.snapshot.recent(signer.value)
if seen.isOk:
# Signer is among recents, only fail if the current block does not
# shift it out
if header.blockNumber - snap.value.signersThreshold.u256 < seen.value:
if header.blockNumber - c.snapshot.signersThreshold.u256 < seen.value:
return err((errRecentlySigned,""))
# Ensure that the difficulty corresponds to the turn-ness of the signer
if not c.fakeDiff:
if snap.value.inTurn(header.blockNumber, signer.value):
if c.snapshot.inTurn(header.blockNumber, signer.value):
if header.difficulty != DIFF_INTURN:
return err((errWrongDifficulty,""))
else:
@ -118,7 +115,7 @@ proc verifySeal(c: Clique; header: BlockHeader;
# clique/clique.go(314): func (c *Clique) verifyCascadingFields(chain [..]
proc verifyCascadingFields(c: Clique; header: BlockHeader;
parents: openArray[BlockHeader]): CliqueResult
parents: openArray[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
## Verify all the header fields that are not standalone, rather depend on a
## batch of previous headers. The caller may optionally pass in a batch of
@ -133,11 +130,8 @@ proc verifyCascadingFields(c: Clique; header: BlockHeader;
var parent: BlockHeader
if 0 < parents.len:
parent = parents[^1]
else:
let rc = c.db.getBlockHeaderResult(header.blockNumber-1)
if rc.isErr:
return err((errUnknownAncestor,""))
parent = rc.value
elif not c.db.getBlockHeader(header.blockNumber-1, parent):
return err((errUnknownAncestor,""))
if parent.blockNumber != header.blockNumber-1 or
parent.hash != header.parentHash:
@ -152,19 +146,21 @@ proc verifyCascadingFields(c: Clique; header: BlockHeader;
&"invalid gasUsed: have {header.gasUsed}, " &
&"gasLimit {header.gasLimit}"))
let rc = c.db.validateGasLimitOrBaseFee(header, parent)
if rc.isErr:
return err((errCliqueGasLimitOrBaseFee, rc.error))
block:
let rc = c.db.validateGasLimitOrBaseFee(header, parent)
if rc.isErr:
return err((errCliqueGasLimitOrBaseFee, rc.error))
# Retrieve the snapshot needed to verify this header and cache it
var snap = c.snapshot(header.blockNumber-1, header.parentHash, parents)
if snap.isErr:
return err(snap.error)
block:
let rc = c.cliqueSnapshot(header.parentHash, parents)
if rc.isErr:
return err(rc.error)
# If the block is a checkpoint block, verify the signer list
if (header.blockNumber mod c.cfg.epoch.u256) == 0:
let
signersList = snap.value.signers
signersList = c.cliqueSigners
extraList = header.extraData.extraDataAddresses
if signersList != extraList:
return err((errMismatchingCheckpointSigners,""))
@ -175,7 +171,7 @@ proc verifyCascadingFields(c: Clique; header: BlockHeader;
# clique/clique.go(246): func (c *Clique) verifyHeader(chain [..]
proc verifyHeader(c: Clique; header: BlockHeader;
parents: openArray[BlockHeader]): CliqueResult
parents: openArray[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
## Check whether a header conforms to the consensus rules.The caller may
## optionally pass in a batch of parents (ascending order) to avoid looking
@ -258,16 +254,6 @@ proc calcDifficulty(snap: var Snapshot; signer: EthAddress): DifficultyInt =
# Public functions
# ------------------------------------------------------------------------------
# clique/clique.go(369): func (c *Clique) snapshot(chain [..]
proc snapshot*(c: Clique; blockNumber: BlockNumber; hash: Hash256;
parents: openArray[Blockheader]): Result[Snapshot,CliqueError]
{.gcsafe, raises: [Defect,CatchableError].} =
## snapshot retrieves the authorization snapshot at a given point in time.
c.recents.getRecentSnaps:
RecentArgs(blockHash: hash,
blockNumber: blockNumber,
parents: toSeq(parents))
# clique/clique.go(212): func (c *Clique) Author(header [..]
proc author*(c: Clique; header: BlockHeader): Result[EthAddress,CliqueError]
{.gcsafe, raises: [Defect,CatchableError].} =
@ -281,7 +267,7 @@ proc author*(c: Clique; header: BlockHeader): Result[EthAddress,CliqueError]
# clique/clique.go(217): func (c *Clique) VerifyHeader(chain [..]
proc verifyHeader*(c: Clique; header: BlockHeader): CliqueResult
proc verifyHeader*(c: Clique; header: BlockHeader): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
## For the Consensus Engine, `verifyHeader()` checks whether a header
## conforms to the consensus rules of a given engine. Verifying the seal
@ -293,7 +279,7 @@ proc verifyHeader*(c: Clique; header: BlockHeader): CliqueResult
# clique/clique.go(224): func (c *Clique) VerifyHeader(chain [..]
proc verifyHeaders*(c: Clique; headers: openArray[BlockHeader]):
Future[seq[CliqueResult]] {.async,gcsafe.} =
Future[seq[CliqueOkResult]] {.async,gcsafe.} =
## For the Consensus Engine, `verifyHeader()` s similar to VerifyHeader, but
## verifies a batch of headers concurrently. This method is accompanied
## by a `stopVerifyHeader()` method that can abort the operations.
@ -325,7 +311,7 @@ proc stopVerifyHeader*(c: Clique): bool {.discardable.} =
# clique/clique.go(450): func (c *Clique) VerifyUncles(chain [..]
proc verifyUncles*(c: Clique; ethBlock: EthBlock): CliqueResult =
proc verifyUncles*(c: Clique; ethBlock: EthBlock): CliqueOkResult =
## For the Consensus Engine, `verifyUncles()` verifies that the given
## block's uncles conform to the consensus rules of a given engine.
##
@ -337,7 +323,7 @@ proc verifyUncles*(c: Clique; ethBlock: EthBlock): CliqueResult =
# clique/clique.go(506): func (c *Clique) Prepare(chain [..]
proc prepare*(c: Clique; header: var BlockHeader): CliqueResult
proc prepare*(c: Clique; header: var BlockHeader): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
## For the Consensus Engine, `prepare()` initializes the consensus fields
## of a block header according to the rules of a particular engine. The
@ -351,42 +337,42 @@ proc prepare*(c: Clique; header: var BlockHeader): CliqueResult
header.nonce.reset
# Assemble the voting snapshot to check which votes make sense
var snap = c.snapshot(header.blockNumber-1, header.parentHash, @[])
if snap.isErr:
return err(snap.error)
let rc = c.cliqueSnapshot(header.parentHash, @[])
if rc.isErr:
return err(rc.error)
if (header.blockNumber mod c.cfg.epoch) != 0:
c.doExclusively:
# Gather all the proposals that make sense voting on
var addresses: seq[EthAddress]
for (address,authorize) in c.proposals.pairs:
if snap.value.validVote(address, authorize):
if c.snapshot.isValidVote(address, authorize):
addresses.add address
# If there's pending proposals, cast a vote on them
if 0 < addresses.len:
header.coinbase = addresses[c.cfg.prng.rand(addresses.len-1)]
header.coinbase = addresses[c.cfg.rand(addresses.len-1)]
header.nonce = if header.coinbase in c.proposals: NONCE_AUTH
else: NONCE_DROP
# Set the correct difficulty
header.difficulty = snap.value.calcDifficulty(c.signer)
header.difficulty = c.snapshot.calcDifficulty(c.signer)
# Ensure the extra data has all its components
header.extraData.setLen(EXTRA_VANITY)
if (header.blockNumber mod c.cfg.epoch) == 0:
header.extraData.add snap.value.signers.mapIt(toSeq(it)).concat
header.extraData.add c.cliqueSigners.mapIt(toSeq(it)).concat
header.extraData.add 0.byte.repeat(EXTRA_SEAL)
# Mix digest is reserved for now, set to empty
header.mixDigest.reset
# Ensure the timestamp has the correct delay
let parent = c.db.getBlockHeaderResult(header.blockNumber-1)
if parent.isErr:
var parent: BlockHeader
if not c.db.getBlockHeader(header.blockNumber-1, parent):
return err((errUnknownAncestor,""))
header.timestamp = parent.value.timestamp + c.cfg.period
header.timestamp = parent.timestamp + c.cfg.period
if header.timestamp < getTime():
header.timestamp = getTime()
@ -499,18 +485,18 @@ proc seal*(c: Clique; ethBlock: EthBlock):
signFn = c.signFn
# Bail out if we're unauthorized to sign a block
var snap = c.snapshot(header.blockNumber-1, header.parentHash, @[])
if snap.isErr:
return err(snap.error)
if not snap.value.isSigner(signer):
let rc = c.cliqueSnapshot(header.parentHash)
if rc.isErr:
return err(rc.error)
if not c.snapshot.isSigner(signer):
return err((errUnauthorizedSigner,""))
# If we're amongst the recent signers, wait for the next block
let seen = snap.value.recent(signer)
let seen = c.snapshot.recent(signer)
if seen.isOk:
# Signer is among recents, only wait if the current block does not
# shift it out
if header.blockNumber < seen.value + snap.value.signersThreshold.u256:
if header.blockNumber < seen.value + c.snapshot.signersThreshold.u256:
info $nilCliqueSealSignedRecently
return err((nilCliqueSealSignedRecently,""))
@ -518,13 +504,13 @@ proc seal*(c: Clique; ethBlock: EthBlock):
var delay = header.timestamp - getTime()
if header.difficulty == DIFF_NOTURN:
# It's not our turn explicitly to sign, delay it a bit
let wiggle = snap.value.signersThreshold.int64 * WIGGLE_TIME
let wiggle = c.snapshot.signersThreshold.int64 * WIGGLE_TIME
# Kludge for limited rand() argument range
if wiggle.inSeconds < (int.high div 1000).int64:
let rndWiggleMs = c.cfg.prng.rand(wiggle.inMilliSeconds.int)
let rndWiggleMs = c.cfg.rand(wiggle.inMilliSeconds.int)
delay += initDuration(milliseconds = rndWiggleMs)
else:
let rndWiggleSec = c.cfg.prng.rand((wiggle.inSeconds and int.high).int)
let rndWiggleSec = c.cfg.rand((wiggle.inSeconds and int.high).int)
delay += initDuration(seconds = rndWiggleSec)
trace "Out-of-turn signing requested",
@ -578,10 +564,10 @@ proc calcDifficulty(c: Clique;
## This implementation returns the difficulty that a new block should have:
## * DIFF_NOTURN(2) if BLOCK_NUMBER % SIGNER_COUNT != SIGNER_INDEX
## * DIFF_INTURN(1) if BLOCK_NUMBER % SIGNER_COUNT == SIGNER_INDEX
var snap = c.snapshot(parent.blockNumber, parent.blockHash, @[])
if snap.isErr:
return err(snap.error)
return ok(snap.value.calcDifficulty(c.signer))
let rc = c.cliqueSnapshot(parent)
if rc.isErr:
return err(rc.error)
return ok(c.snapshot.calcDifficulty(c.signer))
# # clique/clique.go(710): func (c *Clique) SealHash(header [..]

View File

@ -0,0 +1,39 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Signers for Clique PoA Consensus Protocol
## =========================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
./clique_desc,
./snapshot/[ballot, snapshot_desc],
eth/common
{.push raises: [Defect].}
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc cliqueSigners*(c: Clique): seq[EthAddress] {.inline.} =
## Retrieves the sorted list of authorized signers for the last registered
## snapshot. If there was no snapshot, an empty list is returned.
c.snapshot.ballot.authSigners
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,328 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Snapshot for Clique PoA Consensus Protocol
## ==========================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
std/[sequtils, strformat],
../../db/db_chain,
../../utils,
./clique_cfg,
./clique_defs,
./clique_desc,
./snapshot/[lru_snaps, snapshot_apply, snapshot_desc],
chronicles,
eth/[common, keys],
nimcrypto,
stew/results,
stint
type
# Internal sub-descriptor for `LocalSnapsDesc`
LocalPivot = object
header: BlockHeader
hash: Hash256
# Internal sub-descriptor for `LocalSnapsDesc`
LocalPath = object
snaps: Snapshot ## snapshot for given hash
trail: seq[BlockHeader] ## header chain towards snapshot
error: CliqueError ## error message
LocalSnaps = object
c: Clique
start: LocalPivot ## start here searching for checkpoints
value: LocalPath ## snapshot location
parents: seq[BlockHeader] ## explicit parents
{.push raises: [Defect].}
logScope:
topics = "clique PoA snapshot"
# ------------------------------------------------------------------------------
# Private debugging functions
# ------------------------------------------------------------------------------
proc say(d: LocalSnaps; v: varargs[string,`$`]) {.inline.} =
d.c.cfg.say v
discard
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc maxCheckPointLe(d: var LocalSnaps;
number: BlockNumber): BlockNumber {.inline.} =
let epc = number mod d.c.cfg.ckpInterval
if epc < number:
number - epc
else:
# epc == number => number < ckpInterval
0.u256
proc isCheckPoint(d: var LocalSnaps;
number: BlockNumber): bool {.inline.} =
(number mod d.c.cfg.ckpInterval) == 0
proc isEpoch(d: var LocalSnaps;
number: BlockNumber): bool {.inline.} =
(number mod d.c.cfg.epoch) == 0
proc isSnapshotPosition(d: var LocalSnaps;
number: BlockNumber): bool {.inline.} =
# clique/clique.go(394): if number == 0 || (number%c.config.Epoch [..]
if number.isZero:
# At the genesis => snapshot the initial state.
return true
if d.isEpoch(number) and d.c.cfg.roThreshold < d.value.trail.len:
# Wwe have piled up more headers than allowed to be re-orged (chain
# reinit from a freezer), regard checkpoint trusted and snapshot it.
return true
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc findSnapshot(d: var LocalSnaps): bool
{.inline, gcsafe, raises: [Defect,CatchableError].} =
## Search for a snapshot starting at current header starting at the pivot
## value `ls.start`.
var (header, hash) = (d.start.header, d.start.hash)
while true:
let number = header.blockNumber
# Check whether the snapshot was recently visited and cahed
if d.c.recents.hasLruSnaps(hash):
let rc = d.c.recents.getLruSnaps(hash)
if rc.isOK:
# we made sure that this is not a blind entry (currently no reason
# why there should be any, though)
d.value.snaps = rc.value
# d.say "findSnapshot cached #",number," <", d.value.trail.len
debug "Found recently cached voting snapshot",
blockNumber = number,
blockHash = hash
return true
# If an on-disk checkpoint snapshot can be found, use that
if d.isCheckPoint(number) and
d.value.snaps.loadSnapshot(d.c.cfg, hash).isOK:
d.say "findSnapshot disked #",number," <",d.value.trail.len
trace "Loaded voting snapshot from disk",
blockNumber = number,
blockHash = hash
# clique/clique.go(386): snap = s
return true
# Note that epoch is a restart and sync point. Eip-225 requires that the
# epoch header contains the full list of currently authorised signers.
if d.isSnapshotPosition(number):
# clique/clique.go(395): checkpoint := chain.GetHeaderByNumber [..]
d.value.snaps.initSnapshot(d.c.cfg, header)
if d.value.snaps.storeSnapshot.isOK:
d.say "findSnapshot <epoch> #",number," <",d.value.trail.len
info "Stored voting snapshot to disk",
blockNumber = number,
blockHash = hash
return true
# No snapshot for this header, gather the header and move backward
var parent: BlockHeader
if 0 < d.parents.len:
# If we have explicit parents, pick from there (enforced)
parent = d.parents.pop
# clique/clique.go(416): if header.Hash() != hash [..]
if parent.hash != header.parentHash:
d.value.error = (errUnknownAncestor,"")
return false
# No explicit parents (or no more left), reach out to the database
elif not d.c.cfg.db.getBlockHeader(header.parentHash, parent):
d.value.error = (errUnknownAncestor,"")
return false
# Add to batch (note that list order needs to be reversed later)
d.value.trail.add header
hash = header.parentHash
header = parent
# => while loop
# notreached
raiseAssert "findSnapshot(): wrong exit from forever-loop"
proc applyTrail(d: var LocalSnaps; snaps: var Snapshot;
trail: seq[BlockHeader]): Result[Snapshot,CliqueError]
{.inline, gcsafe, raises: [Defect,CatchableError].} =
## Apply any `trail` headers on top of the snapshot `snap`
# Apply trail with reversed list order
var liart = trail
for i in 0 ..< liart.len div 2:
swap(liart[i], liart[^(1+i)])
block:
# clique/clique.go(434): snap, err := snap.apply(headers)
let rc = snaps.snapshotApply(liart)
if rc.isErr:
return err(rc.error)
# If we've generated a new checkpoint snapshot, save to disk
if d.isCheckPoint(snaps.blockNumber) and 0 < liart.len:
var rc = snaps.storeSnapshot
if rc.isErr:
return err(rc.error)
d.say "updateSnapshot <disk> chechkpoint #", snaps.blockNumber
trace "Stored voting snapshot to disk",
blockNumber = snaps.blockNumber,
blockHash = snaps.blockHash
ok(snaps)
proc updateSnapshot(c: Clique; header: Blockheader;
parents: openArray[Blockheader]): Result[Snapshot,CliqueError]
{.gcsafe, raises: [Defect,CatchableError].} =
# Initialise cache management
var d = LocalSnaps(
c: c,
parents: toSeq(parents),
start: LocalPivot(
header: header,
hash: header.hash))
# Search for previous snapshots
if not d.findSnapshot:
return err(d.value.error)
# Previous snapshot found, apply any pending trail headers on top of it
if 0 < d.value.trail.len:
let
first = d.value.trail[^1].blockNumber
last = d.value.trail[0].blockNumber
ckpt = d.maxCheckPointLe(last)
# If there is at least one checkpoint part of the trail sequence, make sure
# that we can store the latest one. This will be done by the `applyTrail()`
# handler for the largest block number in the sequence (note that the trail
# block numbers are in reverse order.)
if first <= ckpt and ckpt < last:
# Split the trail sequence so that the first one has the checkpoint
# entry with largest block number.
let
inx = (last - ckpt).truncate(int)
preTrail = d.value.trail[inx ..< d.value.trail.len]
# Second part (note reverse block numbers.)
d.value.trail.setLen(inx)
let rc = d.applyTrail(d.value.snaps, preTrail)
if rc.isErr:
return err(rc.error)
d.value.snaps = rc.value
var snaps = d.applyTrail(d.value.snaps, d.value.trail)
if snaps.isErr:
return err(snaps.error)
# clique/clique.go(438): c.recents.Add(snap.Hash, snap)
if c.recents.setLruSnaps(snaps.value):
return ok(snaps.value)
# someting went seriously wrong -- lol
err((errSetLruSnaps, &"block #{snaps.value.blockNumber}"))
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
# clique/clique.go(369): func (c *Clique) snapshot(chain [..]
proc cliqueSnapshot*(c: Clique; header: Blockheader;
parents: openArray[Blockheader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
## Create authorisation state snapshot of a given point in the block chain
## and store it in the `Clique` descriptor to be retrievable as `c.snapshot`
## if successful.
##
## The retun result error (or no error) is also stored in the `Clique`
## descriptor to be retrievable as `c.error`.
c.error = cliqueNoError
let rc = c.recents.getLruSnaps(header.hash)
if rc.isOk:
c.snapshot = rc.value
return ok()
let snaps = c.updateSnapshot(header, parents)
if snaps.isErr:
c.error = (snaps.error)
return err(c.error)
c.snapshot = snaps.value
ok()
proc cliqueSnapshot*(c: Clique; header: Blockheader): CliqueOkResult
{.inline,gcsafe,raises: [Defect,CatchableError].} =
## Short for `cliqueSnapshot(c,header,@[])`
c.cliqueSnapshot(header, @[])
proc cliqueSnapshot*(c: Clique; hash: Hash256;
parents: openArray[Blockheader]): CliqueOkResult
{.gcsafe,raises: [Defect,CatchableError].} =
## Create authorisation state snapshot of a given point in the block chain
## and store it in the `Clique` descriptor to be retrievable as `c.snapshot`
## if successful.
##
## The retun result error (or no error) is also stored in the `Clique`
## descriptor to be retrievable as `c.error`.
c.error = cliqueNoError
let rc = c.recents.getLruSnaps(hash)
if rc.isOk:
c.snapshot = rc.value
return ok()
var header: BlockHeader
if not c.cfg.db.getBlockHeader(hash, header):
c.error = (errUnknownHash,"")
return err(c.error)
let snaps = c.updateSnapshot(header, parents)
if snaps.isErr:
c.error = (snaps.error)
return err(c.error)
c.snapshot = snaps.value
ok()
proc cliqueSnapshot*(c: Clique; hash: Hash256): CliqueOkResult
{.gcsafe,raises: [Defect,CatchableError].} =
## Short for `cliqueSnapshot(c,hash,@[])`
c.cliqueSnapshot(hash, @[])
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -24,7 +24,6 @@ import
std/[algorithm, strformat, times],
../../chain_config,
../../constants,
../../db/db_chain,
../../utils,
./clique_defs,
eth/[common, rlp],
@ -63,7 +62,7 @@ proc sorted*(e: openArray[EthAddress]; order = EthAscending): seq[EthAddress] =
e.sorted(cmp = eCmp, order = order.SortOrder)
proc cliqueResultErr*(w: CliqueError): CliqueResult =
proc cliqueResultErr*(w: CliqueError): CliqueOkResult =
## Return error result (syntactic sugar)
err(w)
@ -82,28 +81,17 @@ proc extraDataAddresses*(extraData: Blob): seq[EthAddress] =
addrOffset += EthAddress.len
proc getBlockHeaderResult*(db: BaseChainDB;
number: BlockNumber): Result[BlockHeader,void] {.
gcsafe, raises: [Defect,RlpError].} =
## Slightly re-phrased dbChain.getBlockHeader(..) command
var header: BlockHeader
if db_chain.getBlockHeader(db, number, header):
return ok(header)
err()
# core/types/block.go(343): func (b *Block) WithSeal(header [..]
proc withHeader*(b: EthBlock; header: BlockHeader): EthBlock =
## New block with the data from `b` but the header replaced with the
## argument one.
EthBlock(
header: header,
txs: b.txs,
uncles: b.uncles)
EthBlock(header: header,
txs: b.txs,
uncles: b.uncles)
# consensus/misc/forks.go(30): func VerifyForkHashes(config [..]
proc verifyForkHashes*(c: var ChainConfig; header: BlockHeader): CliqueResult {.
gcsafe, raises: [Defect,ValueError].} =
proc verifyForkHashes*(c: var ChainConfig; header: BlockHeader):
CliqueOkResult {.gcsafe, raises: [Defect,ValueError].} =
## Verify that blocks conforming to network hard-forks do have the correct
## hashes, to avoid clients going off on different chains.

View File

@ -1,244 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Snapshot Cache for Clique PoA Consensus Protocol
## ================================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
## Caveat: Not supporting RLP serialisation encode()/decode()
##
import
std/[sequtils, strutils],
../../utils,
../../utils/lru_cache,
./clique_cfg,
./clique_defs,
./clique_utils,
./snapshot,
chronicles,
eth/[common, keys],
nimcrypto,
stint
export
snapshot
type
RecentArgs* = ref object
blockHash*: Hash256
blockNumber*: BlockNumber
parents*: seq[BlockHeader]
# Internal, temporary state variables
LocalArgs = ref object
headers: seq[BlockHeader]
# Internal type, simplify Hash256 for rlp serialisation
RecentKey = array[32, byte]
# Internal descriptor used by toValue()
RecentDesc = object
cfg: CliqueCfg
debug: bool
args: RecentArgs
local: LocalArgs
RecentSnaps* = object
cfg: CliqueCfg
debug: bool
cache: LruCache[RecentDesc,RecentKey,Snapshot,CliqueError]
{.push raises: [Defect].}
logScope:
topics = "clique PoA recent-snaps"
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc say(d: RecentDesc; v: varargs[string,`$`]) =
## Debugging output
ppExceptionWrap:
if d.debug:
stderr.write "*** " & v.join & "\n"
proc say(rs: var RecentSnaps; v: varargs[string,`$`]) =
## Debugging output
ppExceptionWrap:
if rs.debug:
stderr.write "*** " & v.join & "\n"
proc getPrettyPrinters(d: RecentDesc): var PrettyPrinters =
## Mixin for pretty printers, see `clique/clique_cfg.pp()`
d.cfg.prettyPrint
proc canDiskCheckPointOk(d: RecentDesc):
                           bool {.inline, raises: [Defect,RlpError].} =
  ## Decide whether the current block may be stored as a trusted checkpoint
  ## snapshot on disk (guard-clause formulation of the go-ethereum logic).
  # clique/clique.go(394): if number == 0 || (number%c.config.Epoch [..]
  if d.args.blockNumber.isZero:
    # At the genesis block, always snapshot the initial state.
    return true
  if (d.args.blockNumber mod d.cfg.epoch) != 0:
    # Not on an epoch boundary => no checkpoint.
    return false
  # At a checkpoint block: trust it when we have piled up more headers than
  # allowed to be re-orged (chain reinit from a freezer) ..
  if FULL_IMMUTABILITY_THRESHOLD < d.local.headers.len:
    return true
  # .. or when the parent is missing from the database (light client CHT).
  d.cfg.db.getBlockHeaderResult(d.args.blockNumber - 1).isErr
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc tryLoadDiskSnapshot(d: RecentDesc; snap: var Snapshot): bool {.inline.} =
  ## Try to initialise `snap` from the on-disk snapshot keyed by the current
  ## block hash. Returns `true` on success; `false` when the block number is
  ## not on a checkpoint interval or no stored snapshot was found.
  # clique/clique.go(383): if number%checkpointInterval == 0 [..]
  if (d.args.blockNumber mod CHECKPOINT_INTERVAL) == 0:
    if snap.loadSnapshot(d.cfg, d.args.blockHash).isOk:
      trace "Loaded voting snapshot from disk",
        blockNumber = d.args.blockNumber,
        blockHash = d.args.blockHash
      return true

proc tryStoreDiskCheckPoint(d: RecentDesc; snap: var Snapshot):
                              bool {.gcsafe, raises: [Defect,RlpError].} =
  ## Re-initialise `snap` from the checkpoint header of the current block
  ## and store it on disk. Returns `true` when a checkpoint snapshot was
  ## (re-)created, `false` when the block is not eligible or the checkpoint
  ## header is missing from the database.
  if d.canDiskCheckPointOk:
    # clique/clique.go(395): checkpoint := chain.GetHeaderByNumber [..]
    let checkPoint = d.cfg.db.getBlockHeaderResult(d.args.blockNumber)
    if checkPoint.isErr:
      return false
    let
      hash = checkPoint.value.hash
      # Trusted signer list is embedded in the checkpoint extra-data field
      accountList = checkPoint.value.extraData.extraDataAddresses
    snap.initSnapshot(d.cfg, d.args.blockNumber, hash, accountList)
    snap.setDebug(d.debug)
    # NOTE(review): a failing storeSnapshot() only suppresses the log line;
    # the proc still returns `true` -- confirm that is intended.
    if snap.storeSnapshot.isOk:
      info "Stored checkpoint snapshot to disk",
        blockNumber = d.args.blockNumber,
        blockHash = hash
    return true
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc initRecentSnaps*(rs: var RecentSnaps;
                      cfg: CliqueCfg) {.gcsafe,raises: [Defect].} =
  ## Initialise the snapshot LRU cache descriptor `rs`. The cache maps a
  ## block hash to a `Snapshot`; on a cache miss the `toValue()` handler
  ## below re-creates the snapshot from a disk checkpoint and/or by walking
  ## the chain backwards and re-applying the collected headers (port of the
  ## go-ethereum clique/clique.go snapshot() resolution loop).
  var toKey: LruKey[RecentDesc,RecentKey] =
    proc(d: RecentDesc): RecentKey =
      # Cache key: the raw 32 bytes of the block hash
      d.args.blockHash.data

  var toValue: LruValue[RecentDesc,Snapshot,CliqueError] =
    proc(d: RecentDesc): Result[Snapshot,CliqueError] =
      var snap: Snapshot

      # Walk backwards from the requested block until a snapshot base is
      # found (disk checkpoint or genesis), collecting headers on the way.
      # `d.args` is mutated in place to track the current walk position.
      while true:
        # If an on-disk checkpoint snapshot can be found, use that
        if d.tryLoadDiskSnapshot(snap):
          # clique/clique.go(386): snap = s
          break

        # Save checkpoint e.g. when at the genesis ..
        if d.tryStoreDiskCheckPoint(snap):
          # clique/clique.go(407): log.Info("Stored [..]
          break

        # No snapshot for this header, gather the header and move backward
        var header: BlockHeader
        if 0 < d.args.parents.len:
          # If we have explicit parents, pick from there (enforced)
          header = d.args.parents[^1]

          # clique/clique.go(416): if header.Hash() != hash [..]
          if header.hash != d.args.blockHash or
             header.blockNumber != d.args.blockNumber:
            return err((errUnknownAncestor,""))
          d.args.parents.setLen(d.args.parents.len-1)

        else:
          # No explicit parents (or no more left), reach out to the database
          let rc = d.cfg.db.getBlockHeaderResult(d.args.blockNumber)
          if rc.isErr:
            return err((errUnknownAncestor,""))
          header = rc.value

        # Add to batch (note that list order needs to be reversed later)
        d.local.headers.add header
        d.args.blockNumber -= 1.u256
        d.args.blockHash = header.parentHash
        # => while loop

      # Previous snapshot found, apply any pending headers on top of it.
      # Headers were collected newest-first; snapshots apply oldest-first.
      for i in 0 ..< d.local.headers.len div 2:
        # Reverse list order
        swap(d.local.headers[i], d.local.headers[^(1+i)])

      block:
        # clique/clique.go(434): snap, err := snap.apply(headers)
        d.say "recentSnaps => applySnapshot([",
          d.local.headers.mapIt("#" & $it.blockNumber.truncate(int))
                         .join(",").string, "])"
        let rc = snap.applySnapshot(d.local.headers)
        d.say "recentSnaps => applySnapshot() => ", rc.pp
        if rc.isErr:
          return err(rc.error)

      # If we've generated a new checkpoint snapshot, save to disk
      if (snap.blockNumber mod CHECKPOINT_INTERVAL) == 0 and
         0 < d.local.headers.len:
        var rc = snap.storeSnapshot
        if rc.isErr:
          return err(rc.error)
        trace "Stored voting snapshot to disk",
          blockNumber = snap.blockNumber,
          blockHash = snap.blockHash

      # clique/clique.go(438): c.recents.Add(snap.Hash, snap)
      return ok(snap)

  rs.cfg = cfg
  rs.cache.initLruCache(toKey, toValue, INMEMORY_SNAPSHOTS)
proc initRecentSnaps*(cfg: CliqueCfg): RecentSnaps {.gcsafe,raises: [Defect].} =
  ## Constructor variant of `initRecentSnaps()`, returning a new cache.
  result.initRecentSnaps(cfg)

proc getRecentSnaps*(rs: var RecentSnaps; args: RecentArgs): auto {.
                       gcsafe, raises: [Defect,CatchableError].} =
  ## Get snapshot from cache or disk. On a cache miss the snapshot is
  ## reconstructed (and cached) via the `toValue()` handler installed by
  ## `initRecentSnaps()`; the result type is that of the LRU cache item,
  ## i.e. `Result[Snapshot,CliqueError]`.
  rs.say "getRecentSnap #", args.blockNumber
  rs.cache.getLruItem:
    RecentDesc(cfg:   rs.cfg,
               debug: rs.debug,
               args:  args,
               local: LocalArgs())

proc `debug=`*(rs: var RecentSnaps; debug: bool) =
  ## Setter, debugging mode on/off
  rs.debug = debug
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -1,358 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Snapshot Structure for Clique PoA Consensus Protocol
## ====================================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/go-ethereum/tree/master/consensus/clique>`_
##
const
# debugging, enable with: nim c -r -d:noisy:3 ...
noisy {.intdefine.}: int = 0
isMainOk {.used.} = noisy > 2
import
std/[algorithm, sequtils, strformat, strutils, tables, times],
../../db/storage_types,
../../utils/lru_cache,
./clique_cfg,
./clique_defs,
./clique_poll,
./ec_recover,
chronicles,
eth/[common, rlp, trie/db]
type
  # Maps block number -> address of the signer of that block.
  AddressHistory = Table[BlockNumber,EthAddress]

  SnapshotData* = object
    blockNumber: BlockNumber ## block number where snapshot was created on
    blockHash: Hash256       ## block hash where snapshot was created on
    recents: AddressHistory  ## recent signers for spam protections

    # clique/snapshot.go(58): Recents map[uint64]common.Address [..]
    ballot: CliquePoll       ## Votes => authorised signers
    debug: bool              ## debug mode

  # clique/snapshot.go(50): type Snapshot struct [..]
  Snapshot* = object ## Snapshot is the state of the authorization voting at
                     ## a given point in time.
    cfg: CliqueCfg      ## parameters to fine tune behavior
    data*: SnapshotData ## real snapshot
{.push raises: [Defect].}
logScope:
topics = "clique PoA snapshot"
# ------------------------------------------------------------------------------
# Pretty printers for debugging
# ------------------------------------------------------------------------------
proc say(s: var Snapshot; v: varargs[string,`$`]) =
  ## Debugging output: write the stringified arguments `v` to `stderr` when
  ## the snapshot's `debug` flag is set; otherwise a no-op.
  ppExceptionWrap:
    if s.data.debug:
      stderr.write "*** " & v.join & "\n"

proc getPrettyPrinters(s: var Snapshot): var PrettyPrinters =
  ## Mixin for pretty printers
  s.cfg.prettyPrint

proc pp(s: var Snapshot; h: var AddressHistory): string =
  ## Pretty print the recent-signers table as "#<number>:<address>,..",
  ## sorted by block number.
  ppExceptionWrap:
    toSeq(h.keys)
      .sorted
      .mapIt("#" & $it & ":" & s.pp(h[it.u256]))
      .join(",")

proc pp(s: var Snapshot; v: Vote): string =
  ## Pretty print a single vote record.
  proc authorized(b: bool): string =
    if b: "authorise" else: "de-authorise"
  ppExceptionWrap:
    "(" & &"address={s.pp(v.address)}" &
          &",signer={s.pp(v.signer)}" &
          &",blockNumber={v.blockNumber}" &
          &",{authorized(v.authorize)}" & ")"

proc votesList(s: var Snapshot; sep: string): string =
  ## Pretty print all pending votes, sorted by (voted address, signer).
  proc s3Cmp(a, b: (string,string,Vote)): int =
    # Order by pretty-printed voted address first, then by signer
    result = cmp(a[0], b[0])
    if result == 0:
      result = cmp(a[1], b[1])
  s.data.ballot.votesInternal
    .mapIt((s.pp(it[0]),s.pp(it[1]),it[2]))
    .sorted(cmp = s3cmp)
    .mapIt(s.pp(it[2]))
    .join(sep)

proc signersList(s: var Snapshot): string =
  ## Pretty print the sorted list of authorised signers.
  s.pp(s.data.ballot.authSigners).sorted.join(",")
# ------------------------------------------------------------------------------
# Public pretty printers
# ------------------------------------------------------------------------------
proc pp*(s: var Snapshot; delim: string): string =
  ## Pretty print descriptor. `delim` separates the sections; when it starts
  ## with '\n', the votes list is additionally indented by 7 blanks.
  let
    sep1 = if 0 < delim.len: delim
           else: ";"
    sep2 = if 0 < delim.len and delim[0] == '\n': delim & ' '.repeat(7)
           else: ";"
  ppExceptionWrap:
    &"(blockNumber=#{s.data.blockNumber}" &
      &"{sep1}recents=" & "{" & s.pp(s.data.recents) & "}" &
      &"{sep1}signers=" & "{" & s.signersList & "}" &
      &"{sep1}votes=[" & s.votesList(sep2) & "])"

proc pp*(s: var Snapshot; indent = 0): string =
  ## Pretty print descriptor, one section per line indented by `indent`
  ## blanks (all on a single line when `indent` is 0).
  let delim = if 0 < indent: "\n" & ' '.repeat(indent) else: " "
  s.pp(delim)
# ------------------------------------------------------------------------------
# Private functions needed to support RLP conversion
# ------------------------------------------------------------------------------
proc append[K,V](rw: var RlpWriter; tab: Table[K,V]) {.inline.} =
  ## RLP mixin: encode `tab` as a list of (key,value) pairs.
  rw.startList(tab.len)
  for key,value in tab.pairs:
    rw.append((key,value))

proc read[K,V](rlp: var Rlp;
        Q: type Table[K,V]): Q {.inline, raises: [Defect,CatchableError].} =
  ## RLP mixin: decode a table previously encoded by `append()` above.
  for w in rlp.items:
    let (key,value) = w.read((K,V))
    result[key] = value
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc setDebug*(s: var Snapshot; debug: bool) =
  ## Set debugging mode on/off (also propagated to the embedded ballot).
  s.data.debug = debug
  s.data.ballot.setDebug(debug)

# clique/snapshot.go(72): func newSnapshot(config [..]
proc initSnapshot*(s: var Snapshot; cfg: CliqueCfg;
           number: BlockNumber; hash: Hash256; signers: openArray[EthAddress]) =
  ## This creates a new snapshot with the specified startup parameters. The
  ## method does not initialize the set of recent signers, so only ever use
  ## it for the genesis block.
  s.cfg = cfg
  s.data.blockNumber = number
  s.data.blockHash = hash
  s.data.recents = initTable[BlockNumber,EthAddress]()
  s.data.ballot.initCliquePoll(signers)

proc initSnapshot*(cfg: CliqueCfg; number: BlockNumber; hash: Hash256;
                   signers: openArray[EthAddress]; debug = true): Snapshot =
  ## Constructor variant of `initSnapshot()` above.
  ## NOTE(review): the `debug` argument is currently ignored -- confirm
  ## whether it should be forwarded via `setDebug()`.
  result.initSnapshot(cfg, number, hash, signers)

proc blockNumber*(s: var Snapshot): BlockNumber =
  ## Getter: block number the snapshot was created on.
  s.data.blockNumber

proc blockHash*(s: var Snapshot): Hash256 =
  ## Getter: block hash the snapshot was created on.
  s.data.blockHash
# clique/snapshot.go(88): func loadSnapshot(config [..]
proc loadSnapshot*(s: var Snapshot; cfg: CliqueCfg;
                   hash: Hash256): CliqueResult {.gcsafe, raises: [Defect].} =
  ## Load an existing snapshot from the database, keyed by block `hash`.
  ## Returns `err((errSnapshotLoad,..))` when the entry is missing or
  ## cannot be RLP-decoded.
  try:
    s.cfg = cfg
    s.data = s.cfg.db.db
       .get(hash.cliqueSnapshotKey.toOpenArray)
       .decode(SnapshotData)
  except CatchableError as e:
    return err((errSnapshotLoad,e.msg))
  result = ok()

# clique/snapshot.go(104): func (s *Snapshot) store(db [..]
proc storeSnapshot*(s: var Snapshot): CliqueResult {.gcsafe,raises: [Defect].} =
  ## Insert the snapshot into the database, RLP-encoded and keyed by the
  ## snapshot's own block hash. Returns `err((errSnapshotStore,..))` when
  ## the database write fails.
  try:
    s.cfg.db.db
      .put(s.data.blockHash.cliqueSnapshotKey.toOpenArray, rlp.encode(s.data))
  except CatchableError as e:
    return err((errSnapshotStore,e.msg))
  result = ok()
# clique/snapshot.go(185): func (s *Snapshot) apply(headers [..]
proc applySnapshot*(s: var Snapshot;
                    headers: openArray[BlockHeader]): CliqueResult {.
                      gcsafe, raises: [Defect,CatchableError].} =
  ## Initialises an authorization snapshot `snap` by applying the `headers`
  ## to the argument snapshot descriptor `s`. The headers must be
  ## contiguous, sorted by ascending block number, and start directly after
  ## `s.data.blockNumber`.
  ##
  ## Fix: the progress-log timers previously compared `logged - getTime()`
  ## and reported `elapsed = start - getTime()`. Both are negative
  ## durations, so the periodic "Reconstructing voting history" message
  ## could never fire and the elapsed time was wrong. Elapsed time is now
  ## measured as `getTime() - <mark>` (go-ethereum uses time.Since()).
  s.say "applySnapshot ", s.pp(headers).join("\n" & ' '.repeat(18))

  # Allow passing in no headers for cleaner code
  if headers.len == 0:
    return ok()

  # Sanity check that the headers can be applied
  if headers[0].blockNumber != s.data.blockNumber + 1:
    return err((errInvalidVotingChain,""))
  # clique/snapshot.go(191): for i := 0; i < len(headers)-1; i++ {
  for i in 0 ..< headers.len - 1:
    if headers[i+1].blockNumber != headers[i].blockNumber+1:
      return err((errInvalidVotingChain,""))

  # Iterate through the headers and create a new snapshot
  let
    start = getTime()
    logInterval = initDuration(seconds = 8)
  var
    logged = start

  s.say "applySnapshot state=", s.pp(25)

  # clique/snapshot.go(206): for i, header := range headers [..]
  for headersIndex in 0 ..< headers.len:
    let
      # headersIndex => also used for logging at the end of this loop
      header = headers[headersIndex]
      number = header.blockNumber
    s.say "applySnapshot processing #", number

    # Remove any votes on checkpoint blocks
    if (number mod s.cfg.epoch) == 0:
      # Note that the correctness of the authorised accounts list is
      # verified in clique/clique.verifyCascadingFields(),
      # see clique/clique.go(355): if number%c.config.Epoch == 0 {
      # This means, the account list passed with the epoch header is
      # verified to be the same as the one we already have.
      #
      # clique/snapshot.go(210): snap.Votes = nil
      s.data.ballot.flushVotes
      s.say "applySnapshot epoch => reset, state=", s.pp(41)

    # Delete the oldest signer from the recent list to allow it signing again
    block:
      let limit = s.data.ballot.authSignersThreshold.u256
      if limit <= number:
        s.data.recents.del(number - limit)

    # Resolve the authorization key and check against signers
    let signer = ? s.cfg.signatures.getEcRecover(header)
    s.say "applySnapshot signer=", s.pp(signer)

    if not s.data.ballot.isAuthSigner(signer):
      s.say "applySnapshot signer not authorised => fail ", s.pp(29)
      return err((errUnauthorizedSigner,""))

    # Spam protection: reject a signer that signed one of the recent blocks
    for recent in s.data.recents.values:
      if recent == signer:
        s.say "applySnapshot signer recently seen ", s.pp(signer)
        return err((errRecentlySigned,""))
    s.data.recents[number] = signer

    # Header authorized, discard any previous vote from the signer
    # clique/snapshot.go(233): for i, vote := range snap.Votes {
    s.data.ballot.delVote(signer = signer, address = header.coinbase)

    # Tally up the new vote from the signer
    # clique/snapshot.go(244): var authorize bool
    var authOk = false
    if header.nonce == NONCE_AUTH:
      authOk = true
    elif header.nonce != NONCE_DROP:
      return err((errInvalidVote,""))
    let vote = Vote(address: header.coinbase,
                    signer: signer,
                    blockNumber: number,
                    authorize: authOk)
    s.say "applySnapshot calling addVote ", s.pp(vote)
    # clique/snapshot.go(253): if snap.cast(header.Coinbase, authorize) {
    s.data.ballot.addVote(vote)

    # clique/snapshot.go(269): if limit := uint64(len(snap.Signers)/2 [..]
    if s.data.ballot.authSignersShrunk:
      # Signer list shrunk, delete any leftover recent caches
      let limit = s.data.ballot.authSignersThreshold.u256
      if limit <= number:
        # Pop off least block number from the list
        let item = number - limit
        s.say "will delete recent item #", item, " (", number, "-", limit,
          ") from recents={", s.pp(s.data.recents), "}"
        s.data.recents.del(item)
    s.say "applySnapshot state=", s.pp(25)

    # If we're taking too much time (ecrecover), notify the user once a while
    if logInterval < getTime() - logged:       # fixed: was logged - getTime()
      info "Reconstructing voting history",
        processed = headersIndex,
        total = headers.len,
        elapsed = getTime() - start            # fixed: was start - getTime()
      logged = getTime()

  let sinceStart = getTime() - start           # fixed: was start - getTime()
  if logInterval < sinceStart:
    info "Reconstructed voting history",
      processed = headers.len,
      elapsed = sinceStart

  # clique/snapshot.go(303): snap.Number += uint64(len(headers))
  s.data.blockNumber += headers.len.u256
  s.data.blockHash = headers[^1].blockHash
  result = ok()
proc validVote*(s: var Snapshot; address: EthAddress; authorize: bool): bool =
  ## Returns `true` if voting makes sense, at all (i.e. the vote would
  ## change the authorisation state of `address`).
  s.data.ballot.validVote(address, authorize)

proc recent*(s: var Snapshot; address: EthAddress): Result[BlockNumber,void] =
  ## Return `BlockNumber` for `address` argument (if any) from the
  ## recent-signers table; `err()` when `address` signed none of the
  ## tracked recent blocks.
  for (number,recent) in s.data.recents.pairs:
    if recent == address:
      return ok(number)
  return err()

proc signersThreshold*(s: var Snapshot): int =
  ## Forward to `CliquePoll`: Minimum number of authorised signers needed.
  s.data.ballot.authSignersThreshold

proc isSigner*(s: var Snapshot; address: EthAddress): bool =
  ## Checks whether argument `address` is in the authorised signers list.
  s.data.ballot.isAuthSigner(address)

proc signers*(s: var Snapshot): seq[EthAddress] =
  ## Retrieves the sorted list of authorized signers
  s.data.ballot.authSigners
# clique/snapshot.go(319): func (s *Snapshot) inturn(number [..]
proc inTurn*(s: var Snapshot; number: BlockNumber, signer: EthAddress): bool =
  ## Returns `true` if a signer at a given block height is in-turn or not.
  ## A `signer` that is not in the authorised list is never in-turn.
  let signersAsc = s.data.ballot.authSigners
  for idx, item in signersAsc.pairs:
    if item == signer:
      return (number mod signersAsc.len.u256) == idx.u256
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -19,9 +19,10 @@
##
import
std/[sequtils, strutils, tables],
./clique_cfg,
./clique_utils,
std/[sequtils, tables],
# std/[strutils],
../clique_cfg,
../clique_utils,
eth/common
type
@ -39,7 +40,7 @@ type
authorize: bool
signers: Table[EthAddress,Vote]
CliquePoll* = object
Ballot* = object
votes: Table[EthAddress,Tally] ## votes by account -> signer
authSig: Table[EthAddress,bool] ## currently authorised signers
authRemoved: bool ## last `addVote()` action was removing an
@ -52,45 +53,58 @@ type
# Private
# ------------------------------------------------------------------------------
proc say(t: var CliquePoll; v: varargs[string,`$`]) =
proc say(t: var Ballot; v: varargs[string,`$`]) {.inline.} =
## Debugging output
ppExceptionWrap:
if t.debug:
stderr.write "*** " & v.join & "\n"
# if t.debug: stderr.write "*** " & v.join & "\n"
discard
# ------------------------------------------------------------------------------
# Public
# Public debugging/pretty-printer support
# ------------------------------------------------------------------------------
proc setDebug*(t: var CliquePoll; debug: bool) =
## Set debugging mode on/off
t.debug = debug
proc votesInternal*(t: var Ballot): seq[(EthAddress,EthAddress,Vote)] =
for account,tally in t.votes.pairs:
for signer,vote in tally.signers.pairs:
result.add (account, signer, vote)
proc initCliquePoll*(t: var CliquePoll) =
## Ininialise an empty `CliquePoll` descriptor.
# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------
proc initBallot*(t: var Ballot) =
## Initialise an empty `Ballot` descriptor.
t.votes = initTable[EthAddress,Tally]()
t.authSig = initTable[EthAddress,bool]()
proc initCliquePoll*(t: var CliquePoll; signers: openArray[EthAddress]) =
## Ininialise `CliquePoll` with a given authorised signers list
t.initCliquePoll
proc initBallot*(t: var Ballot; signers: openArray[EthAddress]) =
## Initialise `Ballot` with a given authorised signers list
t.initBallot
for a in signers:
t.authSig[a] = true
proc authSigners*(t: var CliquePoll): seq[EthAddress] =
# ------------------------------------------------------------------------------
# Public setters
# ------------------------------------------------------------------------------
proc `debug=`*(t: var Ballot; debug: bool) =
## Set debugging mode on/off
t.debug = debug
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc authSigners*(t: var Ballot): seq[EthAddress] =
## Sorted ascending list of authorised signer addresses
toSeq(t.authSig.keys).sorted(EthAscending)
proc isAuthSigner*(t: var CliquePoll; address: EthAddress): bool =
## Check whether `address` is an authorised signer
address in t.authSig
proc authSignersShrunk*(t: var CliquePoll): bool =
proc isAuthSignersListShrunk*(t: var Ballot): bool =
## Check whether the authorised signers list was shrunk recently after
## applying `addVote()`
t.authRemoved
proc authSignersThreshold*(t: var CliquePoll): int =
proc authSignersThreshold*(t: var Ballot): int =
## Returns the minimum number of authorised signers needed for authorising
## an address for voting. This is currently
## ::
@ -98,8 +112,15 @@ proc authSignersThreshold*(t: var CliquePoll): int =
##
1 + (t.authSig.len div 2)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc delVote*(t: var CliquePoll; signer, address: EthAddress) {.
proc isAuthSigner*(t: var Ballot; address: EthAddress): bool =
## Check whether `address` is an authorised signer
address in t.authSig
proc delVote*(t: var Ballot; signer, address: EthAddress) {.
gcsafe, raises: [Defect,KeyError].} =
## Remove a particular previously added vote.
if address in t.votes:
@ -110,18 +131,18 @@ proc delVote*(t: var CliquePoll; signer, address: EthAddress) {.
t.votes[address].signers.del(signer)
proc flushVotes*(t: var CliquePoll) =
proc flushVotes*(t: var Ballot) =
## Reset/flush pending votes, authorised signers remain the same.
t.votes.clear
# clique/snapshot.go(141): func (s *Snapshot) validVote(address [..]
proc validVote*(t: var CliquePoll; address: EthAddress; authorize: bool): bool =
proc isValidVote*(t: var Ballot; address: EthAddress; authorize: bool): bool =
## Check whether voting would have an effect in `addVote()`
if address in t.authSig: not authorize else: authorize
proc addVote*(t: var CliquePoll; vote: Vote) {.
proc addVote*(t: var Ballot; vote: Vote) {.
gcsafe, raises: [Defect,KeyError].} =
## Add a new vote collecting the signers for the particular voting address.
##
@ -144,7 +165,7 @@ proc addVote*(t: var CliquePoll; vote: Vote) {.
authOk = vote.authorize
# clique/snapshot.go(147): if !s.validVote(address, [..]
if not t.validVote(vote.address, vote.authorize):
if not t.isValidVote(vote.address, vote.authorize):
# Corner case: touch votes for this account
if t.votes.hasKey(vote.address):
@ -203,15 +224,6 @@ proc addVote*(t: var CliquePoll; vote: Vote) {.
t.say "addVote done"
# ------------------------------------------------------------------------------
# Test interface
# ------------------------------------------------------------------------------
proc votesInternal*(t: var CliquePoll): seq[(EthAddress,EthAddress,Vote)] =
for account,tally in t.votes.pairs:
for signer,vote in tally.signers.pairs:
result.add (account, signer, vote)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,84 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Snapshot Cache for Clique PoA Consensus Protocol
## ================================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/go-ethereum/tree/master/consensus/clique>`_
##
## Caveat: Not supporting RLP serialisation encode()/decode()
##
import
../../../utils/lru_cache,
../clique_cfg,
../clique_defs,
./snapshot_desc,
chronicles,
eth/[common, keys],
stew/results,
stint
type
  # Internal type, simplify Hash256 for rlp serialisation
  SnapsKey =
    array[32, byte]

  # Cache entry: either a cached snapshot or an `err()` placeholder that is
  # meant to be overwritten later via `setLruSnaps()`.
  LruSnapsResult* =
    Result[Snapshot,void]

  # LRU cache of recent snapshots, keyed by block hash.
  LruSnaps* =
    LruCache[Hash256,SnapsKey,LruSnapsResult,CliqueError]
{.push raises: [Defect].}
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc initLruSnaps*(rs: var LruSnaps) {.gcsafe,raises: [Defect].} =
  ## Initialise the snapshot LRU cache, keyed by block hash. A cache miss
  ## produces a blind `err()` placeholder entry which is intended to be
  ## overwritten later via `setLruSnaps()`.
  var toKey: LruKey[Hash256,SnapsKey] =
    proc(h: Hash256): SnapsKey =
      # Cache key: the raw 32 bytes of the block hash
      h.data
  var toValue: LruValue[Hash256,LruSnapsResult,CliqueError] =
    proc(h: Hash256): Result[LruSnapsResult,CliqueError] =
      ## blind value, use `setLruSnaps()` to update
      ok(err(LruSnapsResult))
  rs.initCache(toKey, toValue, INMEMORY_SNAPSHOTS)

proc initLruSnaps*(cfg: CliqueCfg): LruSnaps {.gcsafe,raises: [Defect].} =
  ## Constructor variant of `initLruSnaps()`.
  ## NOTE(review): the `cfg` argument is currently unused -- confirm intent.
  result.initLruSnaps

proc hasLruSnaps*(rs: var LruSnaps; hash: Hash256): bool {.inline.} =
  ## Check whether a particular snapshot exists in the cache
  rs.hasKey(hash)

proc setLruSnaps*(rs: var LruSnaps; snaps: var Snapshot): bool
                    {.gcsafe, inline, raises: [Defect,CatchableError].} =
  ## Cache/overwrite a particular snapshot, keyed by its own block hash.
  ## Returns the `setItem()` status of the underlying LRU cache.
  rs.setItem(snaps.blockHash, ok(LruSnapsResult,snaps))

proc getLruSnaps*(rs: var LruSnaps; hash: Hash256): LruSnapsResult
                    {.gcsafe, raises: [Defect,CatchableError].} =
  ## Get snapshot from cache, store/return placeholder if there was no cached
  ## snapshot. Use `setLruSnaps()` for updating that entry.
  rs.getItem(hash).value
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,172 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Snapshot Processor for Clique PoA Consensus Protocol
## ====================================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/go-ethereum/tree/master/consensus/clique>`_
##
import
std/[tables, times],
../clique_cfg,
../clique_defs,
./ballot,
./snapshot_desc,
chronicles,
eth/[common, rlp],
stew/results
{.push raises: [Defect].}
logScope:
topics = "clique PoA snapshot-apply"
# ------------------------------------------------------------------------------
# Private functions needed to support RLP conversion
# ------------------------------------------------------------------------------
proc say(s: var Snapshot; v: varargs[string,`$`]) {.inline.} =
  ## Debugging output hook; currently disabled (forwarding commented out).
  # s.cfg.say v
  discard
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
# clique/snapshot.go(185): func (s *Snapshot) apply(headers [..]
proc snapshotApply*(s: var Snapshot;
                    headers: openArray[BlockHeader]): CliqueOkResult {.
                      gcsafe, raises: [Defect,CatchableError].} =
  ## Initialises an authorization snapshot `snap` by applying the `headers`
  ## to the argument snapshot descriptor `s`. The headers must be
  ## contiguous, sorted by ascending block number, and start directly after
  ## `s.blockNumber`.
  ##
  ## Fix: the progress-log timers previously compared `logged - getTime()`
  ## and reported `elapsed = start - getTime()`. Both are negative
  ## durations, so the periodic "Reconstructing voting history" message
  ## could never fire and the elapsed time was wrong. Elapsed time is now
  ## measured as `getTime() - <mark>` (go-ethereum uses time.Since()).
  #s.say "applySnapshot ", s.pp(headers).join("\n" & ' '.repeat(18))

  # Allow passing in no headers for cleaner code
  if headers.len == 0:
    return ok()

  # Sanity check that the headers can be applied
  if headers[0].blockNumber != s.blockNumber + 1:
    return err((errInvalidVotingChain,""))
  # clique/snapshot.go(191): for i := 0; i < len(headers)-1; i++ {
  for i in 0 ..< headers.len - 1:
    if headers[i+1].blockNumber != headers[i].blockNumber+1:
      return err((errInvalidVotingChain,""))

  # Iterate through the headers and create a new snapshot
  let
    start = getTime()
    logInterval = initDuration(seconds = 8)
  var
    logged = start

  s.say "applySnapshot state=", s.pp(25)

  # clique/snapshot.go(206): for i, header := range headers [..]
  for headersIndex in 0 ..< headers.len:
    let
      # headersIndex => also used for logging at the end of this loop
      header = headers[headersIndex]
      number = header.blockNumber
    s.say "applySnapshot processing #", number

    # Remove any votes on checkpoint blocks
    if (number mod s.cfg.epoch) == 0:
      # Note that the correctness of the authorised accounts list is
      # verified in clique/clique.verifyCascadingFields(),
      # see clique/clique.go(355): if number%c.config.Epoch == 0 {
      # This means, the account list passed with the epoch header is
      # verified to be the same as the one we already have.
      #
      # clique/snapshot.go(210): snap.Votes = nil
      s.ballot.flushVotes
      s.say "applySnapshot epoch => reset, state=", s.pp(41)

    # Delete the oldest signer from the recent list to allow it signing again
    block:
      let limit = s.ballot.authSignersThreshold.u256
      if limit <= number:
        s.recents.del(number - limit)

    # Resolve the authorization key and check against signers
    let signer = ? s.cfg.ecRecover(header)
    s.say "applySnapshot signer=", s.pp(signer)

    if not s.ballot.isAuthSigner(signer):
      s.say "applySnapshot signer not authorised => fail ", s.pp(29)
      return err((errUnauthorizedSigner,""))

    # Spam protection: reject a signer that signed one of the recent blocks
    for recent in s.recents.values:
      if recent == signer:
        s.say "applySnapshot signer recently seen ", s.pp(signer)
        return err((errRecentlySigned,""))
    s.recents[number] = signer

    # Header authorized, discard any previous vote from the signer
    # clique/snapshot.go(233): for i, vote := range snap.Votes {
    s.ballot.delVote(signer = signer, address = header.coinbase)

    # Tally up the new vote from the signer
    # clique/snapshot.go(244): var authorize bool
    var authOk = false
    if header.nonce == NONCE_AUTH:
      authOk = true
    elif header.nonce != NONCE_DROP:
      return err((errInvalidVote,""))
    let vote = Vote(address: header.coinbase,
                    signer: signer,
                    blockNumber: number,
                    authorize: authOk)
    s.say "applySnapshot calling addVote ", s.pp(vote)
    # clique/snapshot.go(253): if snap.cast(header.Coinbase, authorize) {
    s.ballot.addVote(vote)

    # clique/snapshot.go(269): if limit := uint64(len(snap.Signers)/2 [..]
    if s.ballot.isAuthSignersListShrunk:
      # Signer list shrunk, delete any leftover recent caches
      let limit = s.ballot.authSignersThreshold.u256
      if limit <= number:
        # Pop off least block number from the list
        let item = number - limit
        s.say "will delete recent item #", item, " (", number, "-", limit,
          ") from recents={", s.pp(s.recents), "}"
        s.recents.del(item)
    s.say "applySnapshot state=", s.pp(25)

    # If we're taking too much time (ecrecover), notify the user once a while
    if logInterval < getTime() - logged:       # fixed: was logged - getTime()
      info "Reconstructing voting history",
        processed = headersIndex,
        total = headers.len,
        elapsed = getTime() - start            # fixed: was start - getTime()
      logged = getTime()

  let sinceStart = getTime() - start           # fixed: was start - getTime()
  if logInterval < sinceStart:
    info "Reconstructed voting history",
      processed = headers.len,
      elapsed = sinceStart

  # clique/snapshot.go(303): snap.Number += uint64(len(headers))
  s.blockNumber = s.blockNumber + headers.len.u256
  s.blockHash = headers[^1].blockHash
  result = ok()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,227 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Snapshot Structure for Clique PoA Consensus Protocol
## ====================================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/go-ethereum/tree/master/consensus/clique>`_
##
import
std/[algorithm, sequtils, strformat, strutils, tables],
../../../db/storage_types,
../../../utils,
../clique_cfg,
../clique_defs,
../clique_utils,
./ballot,
chronicles,
eth/[common, rlp, trie/db],
stew/results
type
  # Maps block number -> address of the signer of that block.
  AddressHistory = Table[BlockNumber,EthAddress]

  SnapshotData* = object
    blockNumber: BlockNumber ## block number where snapshot was created on
    blockHash: Hash256       ## block hash where snapshot was created on
    recents: AddressHistory  ## recent signers for spam protections

    # clique/snapshot.go(58): Recents map[uint64]common.Address [..]
    ballot: Ballot           ## Votes => authorised signers

  # clique/snapshot.go(50): type Snapshot struct [..]
  Snapshot* = object ## Snapshot is the state of the authorization
                     ## voting at a given point in time.
    cfg: CliqueCfg      ## parameters to fine tune behavior
    data*: SnapshotData ## real snapshot
{.push raises: [Defect].}
logScope:
topics = "clique PoA snapshot"
# ------------------------------------------------------------------------------
# Pretty printers for debugging
# ------------------------------------------------------------------------------
proc getPrettyPrinters*(s: var Snapshot): var PrettyPrinters {.gcsafe.}
proc pp*(s: var Snapshot; v: Vote): string {.gcsafe.}
proc votesList(s: var Snapshot; sep: string): string =
  ## Pretty print all pending votes, sorted by (voted address, signer).
  proc s3Cmp(a, b: (string,string,Vote)): int =
    # Order by pretty-printed voted address first, then by signer
    result = cmp(a[0], b[0])
    if result == 0:
      result = cmp(a[1], b[1])
  s.data.ballot.votesInternal
    .mapIt((s.pp(it[0]),s.pp(it[1]),it[2]))
    .sorted(cmp = s3cmp)
    .mapIt(s.pp(it[2]))
    .join(sep)

proc signersList(s: var Snapshot): string =
  ## Pretty print the sorted list of authorised signers.
  s.pp(s.data.ballot.authSigners).sorted.join(",")
# ------------------------------------------------------------------------------
# Private functions needed to support RLP conversion
# ------------------------------------------------------------------------------
proc append[K,V](rw: var RlpWriter; tab: Table[K,V]) {.inline.} =
  ## RLP mixin: encode `tab` as a list of (key,value) pairs.
  rw.startList(tab.len)
  for key,value in tab.pairs:
    rw.append((key,value))

proc read[K,V](rlp: var Rlp;
        Q: type Table[K,V]): Q {.inline, raises: [Defect,CatchableError].} =
  ## RLP mixin: decode a table previously encoded by `append()` above.
  for w in rlp.items:
    let (key,value) = w.read((K,V))
    result[key] = value
# ------------------------------------------------------------------------------
# Public pretty printers
# ------------------------------------------------------------------------------
proc getPrettyPrinters*(s: var Snapshot): var PrettyPrinters =
  ## Mixin for pretty printers: expose the printer set stored in the
  ## snapshot's configuration descriptor.
  s.cfg.prettyPrint
proc pp*(s: var Snapshot; h: var AddressHistory): string {.gcsafe.} =
  ## Pretty print the recent signers history as comma separated
  ## `#<blockNumber>:<signer>` entries, sorted by block number.
  ppExceptionWrap:
    toSeq(h.keys)
      .sorted
      # NOTE(review): keys are already `BlockNumber`; the `.u256`
      # conversion on the lookup looks redundant — confirm.
      .mapIt("#" & $it & ":" & s.pp(h[it.u256]))
      .join(",")
proc pp*(s: var Snapshot; v: Vote): string =
  ## Pretty print a single vote record.
  proc voteKind(authorize: bool): string =
    if authorize: "authorise" else: "de-authorise"
  ppExceptionWrap:
    ["(address=" & s.pp(v.address),
     "signer=" & s.pp(v.signer),
     "blockNumber=" & $v.blockNumber,
     voteKind(v.authorize) & ")"].join(",")
proc pp*(s: var Snapshot; delim: string): string {.gcsafe.} =
  ## Pretty print descriptor. The `delim` string separates the top level
  ## fields; when `delim` starts with a newline, the votes list continues on
  ## extra-indented lines (seven more spaces).
  let
    sep1 = if 0 < delim.len: delim
           else: ";"
    sep2 = if 0 < delim.len and delim[0] == '\n': delim & ' '.repeat(7)
           else: ";"
  ppExceptionWrap:
    &"(blockNumber=#{s.data.blockNumber}" &
      &"{sep1}recents=" & "{" & s.pp(s.data.recents) & "}" &
      &"{sep1}signers=" & "{" & s.signersList & "}" &
      &"{sep1}votes=[" & s.votesList(sep2) & "])"
proc pp*(s: var Snapshot; indent = 0): string {.gcsafe.} =
  ## Pretty print descriptor. A positive `indent` switches to a multi-line
  ## layout with `indent` leading spaces per line; otherwise the fields are
  ## separated by single blanks.
  if 0 < indent:
    s.pp("\n" & ' '.repeat(indent))
  else:
    s.pp(" ")
# ------------------------------------------------------------------------------
# Public Constructor
# ------------------------------------------------------------------------------
# clique/snapshot.go(72): func newSnapshot(config [..]
proc initSnapshot*(s: var Snapshot; cfg: CliqueCfg;
                   number: BlockNumber; hash: Hash256;
                   signers: openArray[EthAddress]) =
  ## Create a new snapshot with the specified startup parameters. This
  ## constructor does not initialize the set of recent `signers`, so only ever
  ## use it for the genesis block.
  s.cfg = cfg
  s.data.blockNumber = number
  s.data.blockHash = hash
  # start with an empty spam-protection history
  s.data.recents = initTable[BlockNumber,EthAddress]()
  s.data.ballot.initBallot(signers)
  # propagate the debugging flag down to the ballot box
  s.data.ballot.debug = s.cfg.debug
proc initSnapshot*(s: var Snapshot; cfg: CliqueCfg; header: BlockHeader) =
  ## Create a new snapshot for the given header. The header need not be on
  ## the block chain, yet. The trusted signer list is derived from the
  ## `extra data` field of the header.
  s.initSnapshot(cfg, header.blockNumber, header.hash,
                 header.extraData.extraDataAddresses)
proc initSnapshot*(cfg: CliqueCfg; header: BlockHeader): Snapshot =
  ## Variant of `initSnapshot()` returning the new snapshot as a value.
  result.initSnapshot(cfg, header)
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc cfg*(s: var Snapshot): CliqueCfg {.inline.} =
  ## Getter: the Clique configuration descriptor this snapshot was set
  ## up with.
  s.cfg
proc blockNumber*(s: var Snapshot): BlockNumber {.inline.} =
  ## Getter: block number where the snapshot was created on.
  s.data.blockNumber
proc blockHash*(s: var Snapshot): Hash256 {.inline.} =
  ## Getter: block hash where the snapshot was created on.
  s.data.blockHash
proc recents*(s: var Snapshot): var AddressHistory {.inline.} =
  ## Getter: retrieves the (mutable) table of recently added addresses,
  ## indexed by block number.
  s.data.recents
proc ballot*(s: var Snapshot): var Ballot {.inline.} =
  ## Getter: retrieves the (mutable) ballot box descriptor holding the
  ## votes and authorised signers.
  s.data.ballot
# ------------------------------------------------------------------------------
# Public setters
# ------------------------------------------------------------------------------
proc `blockNumber=`*(s: var Snapshot; number: BlockNumber) {.inline.} =
  ## Setter
  s.data.blockNumber = number
proc `blockHash=`*(s: var Snapshot; hash: Hash256) {.inline.} =
  ## Setter
  s.data.blockHash = hash
# ------------------------------------------------------------------------------
# Public load/store support
# ------------------------------------------------------------------------------
# clique/snapshot.go(88): func loadSnapshot(config [..]
proc loadSnapshot*(s: var Snapshot; cfg: CliqueCfg;
                   hash: Hash256): CliqueOkResult {.gcsafe, raises: [Defect].} =
  ## Load an existing snapshot for the argument block `hash` from the
  ## database. Returns `err((errSnapshotLoad,msg))` if the key is missing
  ## or the stored blob cannot be RLP decoded.
  s.cfg = cfg
  try:
    let blob = s.cfg.db.db.get(hash.cliqueSnapshotKey.toOpenArray)
    s.data = blob.decode(SnapshotData)
    s.data.ballot.debug = s.cfg.debug
  except CatchableError as e:
    return err((errSnapshotLoad, e.msg))
  ok()
# clique/snapshot.go(104): func (s *Snapshot) store(db [..]
proc storeSnapshot*(s: var Snapshot):
                   CliqueOkResult {.gcsafe,raises: [Defect].} =
  ## Insert the RLP encoded snapshot into the database, keyed by its block
  ## hash. Returns `err((errSnapshotStore,msg))` on a database failure.
  try:
    let key = s.data.blockHash.cliqueSnapshotKey
    s.cfg.db.db.put(key.toOpenArray, rlp.encode(s.data))
    ok()
  except CatchableError as e:
    err((errSnapshotStore, e.msg))
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,75 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Miscellaneous Snapshot Functions for Clique PoA Consensus Protocol
## ==================================================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/go-ethereum/tree/master/consensus/clique>`_
##
import
std/[tables],
./ballot,
./snapshot_desc,
chronicles,
eth/[common, rlp],
stew/results
{.push raises: [Defect].}
logScope:
topics = "clique PoA snapshot-misc"
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc signersThreshold*(s: var Snapshot): int {.inline.} =
  ## Getter: minimum number of authorised signers needed, as maintained by
  ## the ballot box.
  s.ballot.authSignersThreshold
#proc signers*(s: var Snapshot): seq[EthAddress] {.inline.} =
# ## Retrieves the sorted list of authorized signers
# s.ballot.authSigners
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc isValidVote*(s: var Snapshot; address: EthAddress; authorize: bool): bool =
  ## Returns `true` if voting `address` with the `authorize` flag makes
  ## sense at all (delegated to the ballot box).
  s.ballot.isValidVote(address, authorize)
proc recent*(s: var Snapshot; address: EthAddress): Result[BlockNumber,void] =
  ## Return a `BlockNumber` the `address` argument was recently registered
  ## with, or an error if `address` is not in the recents table. Should
  ## `address` occur more than once, an unspecified matching entry is
  ## returned (table iteration order).
  result = err()
  for (height, who) in s.recents.pairs:
    if who == address:
      result = ok(height)
      break
proc isSigner*(s: var Snapshot; address: EthAddress): bool =
  ## Checks whether the argument `address` is in the authorised signers list.
  s.ballot.isAuthSigner(address)
# clique/snapshot.go(319): func (s *Snapshot) inturn(number [..]
proc inTurn*(s: var Snapshot; number: BlockNumber, signer: EthAddress): bool =
  ## Returns `true` if `signer` is the in-turn signer at block height
  ## `number`, i.e. its position in the sorted signers list equals
  ## `number mod <list length>`. Returns `false` when `signer` is not an
  ## authorised signer at all.
  let signersAscending = s.ballot.authSigners
  for position, who in signersAscending.pairs:
    if who == signer:
      return (number mod signersAscending.len.u256) == position.u256
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -11,13 +11,11 @@
import
./executor/[
calculate_reward,
executor_helpers,
process_block,
process_transaction]
export
calculate_reward.blockRewards,
executor_helpers.createBloom,
process_block,
process_transaction

View File

@ -24,7 +24,10 @@ const
eth5 = 5.eth
eth3 = 3.eth
eth2 = 2.eth
blockRewards*: array[Fork, Uint256] = [
# Note than the `blockRewards` were previously exported but nowhere
# used otherwise.
blockRewards: array[Fork, Uint256] = [
eth5, # FkFrontier
eth5, # FkHomestead
eth5, # FkTangerine
@ -37,9 +40,17 @@ const
eth2 # FkLondon
]
{.push raises: [Defect].}
proc calculateReward*(vmState: BaseVMState;
header: BlockHeader; body: BlockBody) =
let blockReward = blockRewards[vmState.getFork]
header: BlockHeader; body: BlockBody)
{.gcsafe, raises: [Defect,CatchableError].} =
var blockReward: Uint256
safeExecutor("getFork"):
blockReward = blockRewards[vmState.getForkUnsafe]
var mainReward = blockReward
for uncle in body.uncles:

View File

@ -9,6 +9,7 @@
# according to those terms.
import
std/[strformat],
../../config,
../../db/accounts_cache,
../../forks,
@ -17,11 +18,20 @@ import
eth/[common, bloom]
type
ExecutorError* = object of CatchableError
## Catch and relay exception error
# TODO: these types need to be removed
# once eth/bloom and eth/common sync'ed
Bloom = common.BloomFilter
LogsBloom = bloom.BloomFilter
{.push raises: [Defect].}
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
# TODO: move these three receipt procs below somewhere else more appropriate
func logsBloom(logs: openArray[Log]): LogsBloom =
for log in logs:
@ -29,17 +39,41 @@ func logsBloom(logs: openArray[Log]): LogsBloom =
for topic in log.topics:
result.incl topic
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
template safeExecutor*(info: string; code: untyped) =
try:
code
except CatchableError as e:
raise (ref CatchableError)(msg: e.msg)
except Defect as e:
raise (ref Defect)(msg: e.msg)
except:
let e = getCurrentException()
raise newException(ExecutorError, info & "(): " & $e.name & " -- " & e.msg)
func createBloom*(receipts: openArray[Receipt]): Bloom =
var bloom: LogsBloom
for rec in receipts:
bloom.value = bloom.value or logsBloom(rec.logs).value
result = bloom.value.toByteArrayBE
proc getFork*(vmState: BaseVMState): Fork {.inline.} =
## Shortcut for configured fork, deliberately not naming it toFork()
proc getForkUnsafe*(vmState: BaseVMState): Fork
{.inline, raises: [Exception].} =
## Shortcut for configured fork, deliberately not naming it toFork(). This
## function may throw an `Exception` and must be wrapped.
vmState.chainDB.config.toFork(vmState.blockNumber)
proc makeReceipt*(vmState: BaseVMState; txType: TxType): Receipt =
proc makeReceipt*(vmState: BaseVMState; txType: TxType): Receipt
{.inline, raises: [Defect,CatchableError].} =
proc getFork(vmState: BaseVMState): Fork
{.inline, raises: [Defect,CatchableError].} =
safeExecutor("getFork"):
result = vmState.getForkUnsafe
var rec: Receipt
if vmState.getFork < FkByzantium:
rec.isHash = true
@ -54,4 +88,6 @@ proc makeReceipt*(vmState: BaseVMState; txType: TxType): Receipt =
rec.bloom = logsBloom(rec.logs).value.toByteArrayBE
rec
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -9,7 +9,6 @@
# according to those terms.
import
../../config,
../../constants,
../../db/[db_chain, accounts_cache],
../../transaction,
@ -26,12 +25,15 @@ import
eth/[common, trie/db],
nimcrypto
{.push raises: [Defect].}
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc procBlkPreamble(vmState: BaseVMState; dbTx: DbTransaction;
header: BlockHeader, body: BlockBody): bool =
header: BlockHeader, body: BlockBody): bool
{.gcsafe, raises: [Defect,CatchableError].} =
if vmState.chainDB.config.daoForkSupport and
vmState.chainDB.config.daoForkBlock == header.blockNumber:
vmState.mutateStateDB:
@ -79,7 +81,8 @@ proc procBlkPreamble(vmState: BaseVMState; dbTx: DbTransaction;
proc procBlkEpilogue(vmState: BaseVMState; dbTx: DbTransaction;
header: BlockHeader, body: BlockBody): bool =
header: BlockHeader, body: BlockBody): bool
{.gcsafe, raises: [Defect,RlpError].} =
# Reward beneficiary
vmState.mutateStateDB:
if vmState.generateWitness:
@ -116,8 +119,9 @@ proc procBlkEpilogue(vmState: BaseVMState; dbTx: DbTransaction;
# ------------------------------------------------------------------------------
proc processBlock*(vmState: BaseVMState;
header: BlockHeader, body: BlockBody): ValidationResult =
## Processes `(header,body)` pair for a non-PoA network
header: BlockHeader, body: BlockBody): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
## Processes `(header,body)` pair for a non-PoA network, only
if vmState.chainDB.config.poaEngine:
# PoA consensus engine unsupported, see the other version of
# processBlock() below
@ -144,20 +148,25 @@ proc processBlock*(vmState: BaseVMState;
proc processBlock*(vmState: BaseVMState; poa: Clique;
header: BlockHeader, body: BlockBody): ValidationResult =
## Processes `(header,body)` pair for a any network regardless of PoA or not
header: BlockHeader, body: BlockBody): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
## Generalised function to processes `(header,body)` pair for any network,
## regardless of PoA or not
# Process PoA state transition first so there is no need to re-wind on error.
if vmState.chainDB.config.poaEngine and
not poa.updatePoaState(header, body):
debug "PoA update failed"
return ValidationResult.Error
var dbTx = vmState.chainDB.db.beginTransaction()
defer: dbTx.dispose()
if not vmState.procBlkPreamble(dbTx, header, body):
return ValidationResult.Error
# PoA consensus engine have no reward for miner
if not vmState.chainDB.config.poaEngine:
vmState.calculateReward(header, body)
elif not vmState.updatePoaState(header, body):
debug "PoA update failed"
return ValidationResult.Error
if not vmState.procBlkEpilogue(dbTx, header, body):
return ValidationResult.Error

View File

@ -20,6 +20,11 @@ import
chronicles,
eth/common
{.push raises: [Defect].}
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc eip1559TxNormalization(tx: Transaction): Transaction =
result = tx
@ -28,9 +33,10 @@ proc eip1559TxNormalization(tx: Transaction): Transaction =
result.maxFee = tx.gasPrice
proc processTransaction*(tx: Transaction, sender: EthAddress, vmState: BaseVMState, fork: Fork): GasInt =
## Process the transaction, write the results to db.
## Returns amount of ETH to be rewarded to miner
proc processTransactionImpl(tx: Transaction, sender: EthAddress,
vmState: BaseVMState, fork: Fork): GasInt
# wildcard exception, wrapped below
{.gcsafe, raises: [Exception].} =
trace "Sender", sender
trace "txHash", rlpHash = tx.rlpHash
@ -76,9 +82,26 @@ proc processTransaction*(tx: Transaction, sender: EthAddress, vmState: BaseVMSta
vmState.accountDb.collectWitnessData()
vmState.accountDb.persist(clearCache = false)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc processTransaction*(tx: Transaction,
sender: EthAddress, vmState: BaseVMState): GasInt =
tx.processTransaction(sender, vmState, vmState.getFork)
proc processTransaction*(tx: Transaction, sender: EthAddress,
vmState: BaseVMState, fork: Fork): GasInt
{.gcsafe, raises: [Defect,CatchableError].} =
## Process the transaction, write the results to db.
## Returns amount of ETH to be rewarded to miner
safeExecutor("processTransaction"):
result = tx.processTransactionImpl(sender, vmState, fork)
proc processTransaction*(tx: Transaction, sender: EthAddress,
vmState: BaseVMState): GasInt
{.gcsafe, raises: [Defect,CatchableError].} =
## Same as the other prototype variant with the `fork` argument derived
## from `vmState` in a canonical way
safeExecutor("processTransaction"):
result = tx.processTransaction(sender, vmState, vmState.getForkUnsafe)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -9,12 +9,11 @@
# according to those terms.
import
../../vm_types,
../clique,
eth/[common]
proc updatePoaState*(vmState: BaseVMState;
header: BlockHeader; body: BlockBody): bool =
proc updatePoaState*(poa: Clique; header: BlockHeader; body: BlockBody): bool =
true
# End

View File

@ -53,13 +53,13 @@ proc initEpochHashCache*(cache: var EpochHashCache; cacheMaxItems = 10) =
let top = blockNumber.bnToEpoch.uint64 * EPOCH_LENGTH
ok( mkcache( getCacheSize(top), getSeedhash(top)))
cache.initLruCache(toKey, toValue, cacheMaxItems)
cache.initCache(toKey, toValue, cacheMaxItems)
proc getEpochHash*(cache: var EpochHashCache;
blockNumber: uint64): auto {.inline.} =
## Return hash list, indexed by epoch of argument `blockNumber`
cache.getLruItem(blockNumber).value
cache.getItem(blockNumber).value
# ------------------------------------------------------------------------------
# End

View File

@ -73,38 +73,59 @@ proc `==`[K,V](a, b: var LruData[K,V]): bool =
a.tab == b.tab
# ------------------------------------------------------------------------------
# Public functions
# Public constructor and reset
# ------------------------------------------------------------------------------
proc clearLruCache*[T,K,V,E](cache: var LruCache[T,K,V,E])
{.gcsafe, raises: [Defect].} =
## Reset/clear an initialised LRU cache.
proc clearCache*[T,K,V,E](cache: var LruCache[T,K,V,E]; cacheInitSize = 0)
{.gcsafe, raises: [Defect].} =
## Reset/clear an initialised LRU cache. The cache will be re-allocated
## with `cacheInitSize` initial spaces if this is positive, or `cacheMaxItems`
## spaces (see `initCache()`) as a default.
var initSize = cacheInitSize
if initSize <= 0:
initSize = cache.data.maxItems
cache.data.first.reset
cache.data.last.reset
cache.data.tab = initTable[K,LruItem[K,V]](cache.data.maxItems.nextPowerOfTwo)
cache.data.tab = initTable[K,LruItem[K,V]](initSize.nextPowerOfTwo)
proc initLruCache*[T,K,V,E](cache: var LruCache[T,K,V,E];
toKey: LruKey[T,K], toValue: LruValue[T,V,E];
cacheMaxItems = 10) {.gcsafe, raises: [Defect].} =
## Initialise LRU cache. The handlers `toKey()` and `toValue()` are
## explained at the data type definition.
proc initCache*[T,K,V,E](cache: var LruCache[T,K,V,E];
toKey: LruKey[T,K], toValue: LruValue[T,V,E];
cacheMaxItems = 10; cacheInitSize = 0)
{.gcsafe, raises: [Defect].} =
## Initialise LRU cache. The handlers `toKey()` and `toValue()` are explained
## at the data type definition. The cache will be allocated with
## `cacheInitSize` initial spaces if this is positive, or `cacheMaxItems`
## spaces (see `initCache()`) as a default.
cache.data.maxItems = cacheMaxItems
cache.toKey = toKey
cache.toValue = toValue
cache.clearLruCache
cache.clearCache
# ------------------------------------------------------------------------------
# Public functions, basic mechanism
# ------------------------------------------------------------------------------
proc getLruItem*[T,K,V,E](lru: var LruCache[T,K,V,E]; arg: T): Result[V,E] {.
gcsafe, raises: [Defect,CatchableError].} =
## Returns `lru.toValue(arg)`, preferably from result cached earlier.
proc getItem*[T,K,V,E](lru: var LruCache[T,K,V,E];
arg: T; peekOK = false): Result[V,E]
{.gcsafe, raises: [Defect,CatchableError].} =
## If the key `lru.toKey(arg)` is a cached key, the associated value will
## be returned. If the `peekOK` argument equals `false`, the associated
## key-value pair will have been moved to the end of the LRU queue.
##
## If the key `lru.toKey(arg)` is not a cached key and the LRU queue has at
## least `cacheMaxItems` entries (see `initCache()`), the first key-value
## pair will be removed from the LRU queue. Then the value the pair
## (`lru.toKey(arg)`,`lru.toValue(arg)`) will be appended to the LRU queue
## and the value part returned.
##
let key = lru.toKey(arg)
# Relink item if already in the cache => move to last position
if lru.data.tab.hasKey(key):
let lruItem = lru.data.tab[key]
if key == lru.data.last:
if peekOk or key == lru.data.last:
# Nothing to do
return ok(lruItem.value)
@ -148,6 +169,96 @@ proc getLruItem*[T,K,V,E](lru: var LruCache[T,K,V,E]; arg: T): Result[V,E] {.
lru.data.tab[key] = tabItem
result = ok(rcValue)
# ------------------------------------------------------------------------------
# Public functions, cache info
# ------------------------------------------------------------------------------
proc hasKey*[T,K,V,E](lru: var LruCache[T,K,V,E]; arg: T): bool {.gcsafe.} =
## Check whether the `arg` argument is cached
let key = lru.toKey(arg)
lru.data.tab.hasKey(key)
proc firstKey*[T,K,V,E](lru: var LruCache[T,K,V,E]): K {.gcsafe.} =
  ## Returns the key of the first (i.e. oldest) item in the LRU queue, or
  ## the reset value if the cache is empty.
  if 0 < lru.data.tab.len:
    result = lru.data.first
proc lastKey*[T,K,V,E](lru: var LruCache[T,K,V,E]): K {.gcsafe.} =
  ## Returns the key of the last (i.e. most recently appended) item in the
  ## LRU queue, or the reset value if the cache is empty.
  if 0 < lru.data.tab.len:
    result = lru.data.last
proc maxLen*[T,K,V,E](lru: var LruCache[T,K,V,E]): int {.gcsafe.} =
## Maximal number of cache entries.
lru.data.maxItems
proc len*[T,K,V,E](lru: var LruCache[T,K,V,E]): int {.gcsafe.} =
## Return the number of elements in the cache.
lru.data.tab.len
# ------------------------------------------------------------------------------
# Public functions, advanced features
# ------------------------------------------------------------------------------
proc setItem*[T,K,V,E](lru: var LruCache[T,K,V,E]; arg: T; value: V): bool
    {.gcsafe, raises: [Defect,CatchableError].} =
  ## Update the entry with key `lru.toKey(arg)` to `value`. Returns `true`
  ## if the key exists in the cache, and `false` otherwise (no new entry is
  ## created).
  ##
  ## This function allows for simplifying the `toValue()` function (see
  ## `initCache()`) to provide a placeholder only and later fill that slot
  ## with this `setItem()` function.
  ##
  ## NOTE: updating a value does not refresh the entry's LRU queue position.
  let key = lru.toKey(arg)
  if lru.data.tab.hasKey(key):
    lru.data.tab[key].value = value
    return true
proc delItem*[T,K,V,E](lru: var LruCache[T,K,V,E]; arg: T): bool
    {.gcsafe, discardable, raises: [Defect,KeyError].} =
  ## Delete the entry for the `arg` argument from the cache (if any) and
  ## return `true` when an entry was removed. That way, the LRU cache can
  ## be re-purposed as a sequence with efficient random delete facility.
  let key = lru.toKey(arg)
  # Unlink the item from the first/last queue before removing it from the
  # table; only entries actually cached need any work.
  if lru.data.tab.hasKey(key):
    let lruItem = lru.data.tab[key]
    # Unlink key item, handling the sole-entry and end-of-queue cases first
    if lru.data.tab.len == 1:
      lru.data.first.reset
      lru.data.last.reset
    elif key == lru.data.last:
      lru.data.last = lruItem.prv
    elif key == lru.data.first:
      lru.data.first = lruItem.nxt
    else:
      # middle of the queue: bridge the neighbours over the removed item
      lru.data.tab[lruItem.prv].nxt = lruItem.nxt
      lru.data.tab[lruItem.nxt].prv = lruItem.prv
    lru.data.tab.del(key)
    return true
iterator keyItemPairs*[T,K,V,E](lru: var LruCache[T,K,V,E]): (K,LruItem[K,V])
    {.gcsafe, raises: [Defect,CatchableError].} =
  ## Cycle through all (key,lruItem) pairs in chronological order, i.e. from
  ## the first (oldest) to the last (newest) queue entry.
  if 0 < lru.data.tab.len:
    var key = lru.data.first
    # walk the `nxt` chain; the final item is yielded after the loop so the
    # chain length must match the table size exactly
    for _ in 0 ..< lru.data.tab.len - 1:
      var item = lru.data.tab[key]
      yield (key, item)
      key = item.nxt
    yield (key, lru.data.tab[key])
    # consistency check: the walk must have ended on the queue's last key
    if key != lru.data.last:
      raiseAssert "Garbled LRU cache next/prv references"
# ------------------------------------------------------------------------------
# Public functions, RLP support
# ------------------------------------------------------------------------------
proc `==`*[T,K,V,E](a, b: var LruCache[T,K,V,E]): bool =
## Returns `true` if both argument LRU caches contain the same data
@ -185,13 +296,6 @@ proc read*[K,V](rlp: var Rlp; Q: type LruData[K,V]): Q {.
let (key,value) = w.read((K,LruItem[K,V]))
result.tab[key] = value
proc specs*[T,K,V,E](cache: var LruCache[T,K,V,E]):
(int, K, K, Table[K,LruItem[K,V]]) =
## Returns cache data & specs `(maxItems,firstKey,lastKey,tableRef)` for
## debugging and testing.
(cache.data.maxItems, cache.data.first, cache.data.last, cache.data.tab)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -10,10 +10,8 @@
import
std/[algorithm, os, sequtils, strformat, strutils],
../nimbus/config,
../nimbus/db/db_chain,
../nimbus/p2p/[chain, clique, clique/snapshot],
../nimbus/utils,
../nimbus/p2p/[chain, clique],
./test_clique/[pool, undump],
eth/[common, keys],
stint,
@ -26,10 +24,6 @@ let
# Helpers
# ------------------------------------------------------------------------------
proc db(ap: TesterPool): auto =
## Getter
ap.clique.db
proc getBlockHeader(ap: TesterPool; number: BlockNumber): BlockHeader =
## Shortcut => db/db_chain.getBlockHeader()
doAssert ap.db.getBlockHeader(number, result)
@ -48,11 +42,13 @@ proc runCliqueSnapshot(noisy = true) =
##
suite "Clique PoA Snapshot":
var
pool = newVoterPool().setDebug(noisy)
pool = newVoterPool()
const
skipSet = {999}
testSet = {0 .. 999}
pool.debug = noisy
# clique/snapshot_test.go(379): for i, tt := range tests {
for tt in voterSamples.filterIt(it.id in testSet):
@ -71,23 +67,19 @@ proc runCliqueSnapshot(noisy = true) =
.appendVoter(tt.votes)
.commitVoterChain
# see clique/snapshot_test.go(476): snap, err := engine.snapshot( [..]
let topHeader = pool.topVoterHeader
var snap = pool.snapshot(topHeader.blockNumber, topHeader.hash, @[])
# see clique/snapshot_test.go(477): if err != nil {
if snap.isErr:
if pool.error != cliqueNoError:
# Note that clique/snapshot_test.go does not verify _here_ against
# the scheduled test error -- rather this voting error is supposed
# to happen earlier (processed at clique/snapshot_test.go(467)) when
# assembling the block chain (sounds counter intuitive to the author
# of this source file as the scheduled errors are _clique_ related).
check snap.error[0] == tt.failure
check pool.error[0] == tt.failure
else:
let
expected = tt.results.mapIt("@" & it).sorted
snapResult = pool.pp(snap.value.signers).sorted
pool.say "*** snap state=", snap.pp(16)
snapResult = pool.pp(pool.cliqueSigners).sorted
pool.say "*** snap state=", pool.snapshot.pp(16)
pool.say " result=[", snapResult.join(",") & "]"
pool.say " expected=[", expected.join(",") & "]"
@ -95,15 +87,18 @@ proc runCliqueSnapshot(noisy = true) =
check snapResult == expected
proc runGoerliReplay(noisy = true;
dir = "tests"; stopAfterBlock = uint64.high) =
proc runGoerliReplay(noisy = true; dir = "tests"; stopAfterBlock = 0u64) =
var
pool = GoerliNet.newVoterPool
xChain = pool.db.newChain
pool = newVoterPool()
cache: array[7,(seq[BlockHeader],seq[BlockBody])]
cInx = 0
stoppedOk = false
pool.debug = noisy
let stopThreshold = if stopAfterBlock == 0u64: uint64.high.u256
else: stopAfterBlock.u256
suite "Replay Goerli Chain":
for w in (dir / goerliCapture).undumpNextGroup:
@ -118,7 +113,7 @@ proc runGoerliReplay(noisy = true;
cInx.inc
# Handy for partial tests
if stopAfterBlock <= cache[cInx-1][0][0].blockNumber.truncate(uint64):
if stopThreshold < cache[cInx-1][0][0].blockNumber:
stoppedOk = true
break
@ -130,7 +125,7 @@ proc runGoerliReplay(noisy = true;
last = cache[^1][0][^1].blockNumber
test &"Goerli Blocks #{first}..#{last} ({cache.len} transactions)":
for (headers,bodies) in cache:
let addedPersistBlocks = xChain.persistBlocks(headers, bodies)
let addedPersistBlocks = pool.chain.persistBlocks(headers,bodies)
check addedPersistBlocks == ValidationResult.Ok
if addedPersistBlocks != ValidationResult.Ok: return
@ -141,12 +136,49 @@ proc runGoerliReplay(noisy = true;
last = cache[cInx-1][0][^1].blockNumber
test &"Goerli Blocks #{first}..#{last} ({cInx} transactions)":
for (headers,bodies) in cache:
let addedPersistBlocks = xChain.persistBlocks(headers, bodies)
let addedPersistBlocks = pool.chain.persistBlocks(headers,bodies)
check addedPersistBlocks == ValidationResult.Ok
if addedPersistBlocks != ValidationResult.Ok: return
if stoppedOk:
test &"Runner stooped after reaching #{stopAfterBlock}":
test &"Runner stopped after reaching #{stopThreshold}":
discard
proc runGoerliBaybySteps(noisy = true; dir = "tests"; stopAfterBlock = 20u64) =
var
pool = newVoterPool()
stoppedOk = false
pool.debug = noisy
let stopThreshold = if stopAfterBlock == 0u64: uint64.high.u256
else: stopAfterBlock.u256
suite "Replay Goerli Chain Transactions Single Blockwise":
for w in (dir / goerliCapture).undumpNextGroup:
if w[0][0].blockNumber == 0.u256:
# Verify Genesis
doAssert w[0][0] == pool.getBlockHeader(0.u256)
else:
for n in 0 ..< w[0].len:
let
header = w[0][n]
body = w[1][n]
parents = w[0][0 ..< n]
# Handy for partial tests
if stopThreshold < header.blockNumber:
stoppedOk = true
break
test &"Goerli Block #{header.blockNumber} + {parents.len} parents":
check pool.chain.clique.cliqueSnapshot(header,parents).isOk
let addedPersistBlocks = pool.chain.persistBlocks(@[header],@[body])
check addedPersistBlocks == ValidationResult.Ok
if addedPersistBlocks != ValidationResult.Ok: return
if stoppedOk:
test &"Runner stopped after reaching #{stopThreshold}":
discard
# ------------------------------------------------------------------------------
@ -155,12 +187,14 @@ proc runGoerliReplay(noisy = true;
proc cliqueMain*(noisy = defined(debug)) =
noisy.runCliqueSnapshot
noisy.runGoerliBaybySteps
noisy.runGoerliReplay
when isMainModule:
let noisy = defined(debug)
noisy.runCliqueSnapshot
noisy.runGoerliReplay(dir = ".", stopAfterBlock = 1000)
#noisy.runCliqueSnapshot
noisy.runGoerliBaybySteps(dir = ".")
#noisy.runGoerliReplay(dir = ".", stopAfterBlock = 0)
# ------------------------------------------------------------------------------
# End

View File

@ -12,8 +12,10 @@ import
std/[random, sequtils, strformat, strutils, tables, times],
../../nimbus/[config, chain_config, constants, genesis, utils],
../../nimbus/db/db_chain,
../../nimbus/p2p/clique,
../../nimbus/p2p/clique/clique_utils,
../../nimbus/p2p/[chain,
clique,
clique/clique_utils,
clique/snapshot/snapshot_desc],
./voter_samples as vs,
eth/[common, keys, p2p, rlp, trie/db],
ethash,
@ -21,11 +23,10 @@ import
stew/objects
export
vs
vs, snapshot_desc
const
prngSeed = 42
# genesisTemplate = "../customgenesis/berlin2000.json"
type
XSealKey = array[EXTRA_SEAL,byte]
@ -41,27 +42,22 @@ type
accounts: Table[string,PrivateKey] ## accounts table
boot: CustomGenesis ## imported Genesis configuration
batch: seq[seq[BlockHeader]] ## collect header chains
engine: Clique
chain: Chain
names: Table[EthAddress,string] ## reverse lookup for debugging
xSeals: Table[XSealKey,XSealValue] ## collect signatures for debugging
debug: bool ## debuggin mode for sub-systems
# ------------------------------------------------------------------------------
# Private Helpers
# ------------------------------------------------------------------------------
proc chain(ap: TesterPool): auto =
## Getter
ap.engine.db
proc getBlockHeader(ap: TesterPool; number: BlockNumber): BlockHeader =
## Shortcut => db/db_chain.getBlockHeader()
doAssert ap.chain.getBlockHeader(number, result)
doAssert ap.chain.clique.db.getBlockHeader(number, result)
proc getBlockHeader(ap: TesterPool; hash: Hash256): BlockHeader =
## Shortcut => db/db_chain.getBlockHeader()
doAssert ap.chain.getBlockHeader(hash, result)
doAssert ap.chain.clique.db.getBlockHeader(hash, result)
proc isZero(a: openArray[byte]): bool =
result = true
@ -96,24 +92,15 @@ proc privateKey(ap: TesterPool; account: string): PrivateKey =
let address = result.toPublicKey.toCanonicalAddress
ap.names[address] = account
proc resetChainDb(ap: TesterPool; extraData: Blob) =
## Setup new block chain with bespoke genesis
ap.engine.db = BaseChainDB(db: newMemoryDb(), config: ap.boot.config)
ap.engine.db.populateProgress
# new genesis block
var g = ap.boot.genesis
if 0 < extraData.len:
g.extraData = extraData
g.commit(ap.engine.db)
# ------------------------------------------------------------------------------
# Private pretty printer call backs
# ------------------------------------------------------------------------------
proc findName(ap: TesterPool; address: EthAddress): string =
## Find name for a particular address
if address in ap.names:
return ap.names[address]
if address notin ap.names:
ap.names[address] = &"X{ap.names.len+1}"
ap.names[address]
proc findSignature(ap: TesterPool; sig: openArray[byte]): XSealValue =
## Find a previusly registered signature
@ -190,6 +177,7 @@ proc ppBlockHeader(ap: TesterPool; v: BlockHeader; delim: string): string =
&"{sep}nonce={ap.ppNonce(v.nonce)}" &
&"{sep}extraData={ap.ppExtraData(v.extraData)})"
# ------------------------------------------------------------------------------
# Private: Constructor helpers
# ------------------------------------------------------------------------------
@ -200,6 +188,18 @@ proc initPrettyPrinters(pp: var PrettyPrinters; ap: TesterPool) =
pp.extraData = proc(v:Blob): string = ap.ppExtraData(v)
pp.blockHeader = proc(v:BlockHeader; d:string): string = ap.ppBlockHeader(v,d)
proc resetChainDb(ap: TesterPool; extraData: Blob) =
## Setup new block chain with bespoke genesis
ap.chain = BaseChainDB(db: newMemoryDb(), config: ap.boot.config).newChain
ap.chain.clique.db.populateProgress
# new genesis block
var g = ap.boot.genesis
if 0 < extraData.len:
g.extraData = extraData
g.commit(ap.chain.clique.db)
# fine tune Clique descriptor
ap.chain.clique.cfg.prettyPrint.initPrettyPrinters(ap)
proc initTesterPool(ap: TesterPool): TesterPool {.discardable.} =
result = ap
result.prng = initRand(prngSeed)
@ -207,32 +207,20 @@ proc initTesterPool(ap: TesterPool): TesterPool {.discardable.} =
result.accounts = initTable[string,PrivateKey]()
result.xSeals = initTable[XSealKey,XSealValue]()
result.names = initTable[EthAddress,string]()
result.engine = BaseChainDB(
db: newMemoryDb(),
config: ap.boot.config).newCliqueCfg.newClique
result.engine.debug = false
result.engine.cfg.prettyPrint.initPrettyPrinters(result)
result.resetChainDb(@[])
# ------------------------------------------------------------------------------
# Public functions
# Public: pretty printer support
# ------------------------------------------------------------------------------
proc getPrettyPrinters*(t: TesterPool): var PrettyPrinters =
## Mixin for pretty printers, see `clique/clique_cfg.pp()`
t.engine.cfg.prettyPrint
proc setDebug*(ap: TesterPool; debug=true): TesterPool {.inline,discardable,} =
## Set debugging mode on/off
result = ap
ap.debug = debug
ap.engine.debug = debug
t.chain.clique.cfg.prettyPrint
proc say*(t: TesterPool; v: varargs[string,`$`]) =
if t.debug:
if t.chain.clique.cfg.debug:
stderr.write v.join & "\n"
proc sayHeaderChain*(ap: TesterPool; indent = 0): TesterPool {.discardable.} =
result = ap
let pfx = ' '.repeat(indent)
@ -243,6 +231,59 @@ proc sayHeaderChain*(ap: TesterPool; indent = 0): TesterPool {.discardable.} =
top = ap.getBlockHeader(top.parentHash)
ap.say pfx, "parent header: " & ap.pp(top, 16+indent)
# ------------------------------------------------------------------------------
# Public: Constructor
# ------------------------------------------------------------------------------
proc newVoterPool*(networkId = GoerliNet): TesterPool =
TesterPool(
boot: CustomGenesis(
genesis: defaultGenesisBlockForNetwork(networkId),
config: chainConfig(networkId))).initTesterPool
# ------------------------------------------------------------------------------
# Public: getter
# ------------------------------------------------------------------------------
proc chain*(ap: TesterPool): auto {.inline.} =
## Getter
ap.chain
proc clique*(ap: TesterPool): auto {.inline.} =
## Getter
ap.chain.clique
proc db*(ap: TesterPool): auto {.inline.} =
## Getter
ap.clique.db
proc debug*(ap: TesterPool): auto {.inline.} =
## Getter
ap.clique.cfg.debug
proc cliqueSigners*(ap: TesterPool): auto {.inline.} =
## Getter
ap.clique.cliqueSigners
proc error*(ap: TesterPool): auto {.inline.} =
## Getter
ap.clique.error
proc snapshot*(ap: TesterPool): var Snapshot {.inline.} =
## Getter
ap.clique.snapshot
# ------------------------------------------------------------------------------
# Public: setter
# ------------------------------------------------------------------------------
proc `debug=`*(ap: TesterPool; debug: bool) {.inline,} =
## Set debugging mode on/off
ap.clique.cfg.debug = debug
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
# clique/snapshot_test.go(62): func (ap *testerAccountPool) address(account [..]
proc address*(ap: TesterPool; account: string): EthAddress =
@ -284,58 +325,23 @@ proc sign*(ap: TesterPool; header: var BlockHeader; signer: string) =
blockNumber: header.blockNumber.truncate(uint64),
account: signer)
proc snapshot*(ap: TesterPool; number: BlockNumber; hash: Hash256;
parent: openArray[BlockHeader]): auto =
## Call p2p/clique.snapshotInternal()
if ap.debug:
var header = ap.getBlockHeader(number)
ap.say "*** snapshot argument: #", number
ap.sayHeaderChain(8)
when false: # all addresses are typically pp-mappable
ap.say " address map: ", toSeq(ap.names.pairs)
.mapIt(&"@{it[1]}:{it[0]}")
.sorted
.join("\n" & ' '.repeat(23))
ap.engine.snapshot(number, hash, parent)
proc clique*(ap: TesterPool): Clique =
## Getter
ap.engine
# ------------------------------------------------------------------------------
# Public: Constructor
# ------------------------------------------------------------------------------
proc newVoterPool*(customGenesis: CustomGenesis): TesterPool =
TesterPool(boot: customGenesis).initTesterPool
proc newVoterPool*(id: NetworkId): TesterPool =
CustomGenesis(
config: chainConfig(id),
genesis: defaultGenesisBlockForNetwork(id)).newVoterPool
proc newVoterPool*(genesisTemplate = ""): TesterPool =
if genesisTemplate == "":
return getConfiguration().net.networkId.newVoterPool
# Find genesis block from template
new result
doAssert genesisTemplate.loadCustomGenesis(result.boot)
result.initTesterPool
# ------------------------------------------------------------------------------
# Public: set up & manage voter database
# ------------------------------------------------------------------------------
proc setVoterAccount*(ap: TesterPool; account: string;
prvKey: PrivateKey): TesterPool {.discardable.} =
## Manually define/import account
result = ap
ap.accounts[account] = prvKey
let address = prvKey.toPublicKey.toCanonicalAddress
ap.names[address] = account
#proc setVoterAccount*(ap: TesterPool; account: string;
# prvKey: PrivateKey): TesterPool {.discardable.} =
# ## Manually define/import account
# result = ap
# ap.accounts[account] = prvKey
# let address = prvKey.toPublicKey.toCanonicalAddress
# ap.names[address] = account
#
#proc topVoterHeader*(ap: TesterPool): BlockHeader =
# ## Get top header from voter batch list
# doAssert 0 < ap.batch.len # see initTesterPool() and resetVoterChain()
# if 0 < ap.batch[^1].len:
# result = ap.batch[^1][^1]
proc resetVoterChain*(ap: TesterPool; signers: openArray[string];
@ -359,7 +365,7 @@ proc resetVoterChain*(ap: TesterPool; signers: openArray[string];
# store modified genesis block and epoch
ap.resetChainDb(extraData)
ap.engine.cfg.epoch = epoch.uint
ap.clique.cfg.epoch = epoch
# clique/snapshot_test.go(415): blocks, _ := core.GenerateChain(&config, [..]
@ -397,7 +403,7 @@ proc appendVoter*(ap: TesterPool;
# clique/snapshot_test.go(432): if auths := tt.votes[j].checkpoint; [..]
if 0 < voter.checkpoint.len:
doAssert (header.blockNumber mod ap.engine.cfg.epoch) == 0
doAssert (header.blockNumber mod ap.clique.cfg.epoch) == 0
ap.checkpoint(header,voter.checkpoint)
# Generate the signature, embed it into the header and the block
@ -420,31 +426,21 @@ proc commitVoterChain*(ap: TesterPool): TesterPool {.discardable.} =
## Write the headers from the voter header batch list to the block chain DB
result = ap
# Create a pristine blockchain with the genesis injected
for headers in ap.batch:
if 0 < headers.len:
doAssert ap.chain.getCanonicalHead.blockNumber < headers[0].blockNumber
let bodies = BlockBody().repeat(headers.len)
doAssert ap.chain.persistBlocks(headers,bodies) == ValidationResult.OK
# see p2p/chain.persistBlocks()
ap.chain.highestBlock = headers[^1].blockNumber
let transaction = ap.chain.db.beginTransaction()
for i in 0 ..< headers.len:
let header = headers[i]
discard ap.chain.persistHeaderToDb(header)
doAssert ap.chain.getCanonicalHead().blockHash == header.blockHash
discard ap.chain.persistTransactions(header.blockNumber, @[])
discard ap.chain.persistReceipts(@[])
ap.chain.currentBlock = header.blockNumber
transaction.commit()
proc topVoterHeader*(ap: TesterPool): BlockHeader =
## Get top header from voter batch list
doAssert 0 < ap.batch.len # see initTesterPool() and resetVoterChain()
if 0 < ap.batch[^1].len:
result = ap.batch[^1][^1]
if ap.debug:
let
number = headers[^1].blockNumber
header = ap.getBlockHeader(number)
ap.say "*** snapshot argument: #", number
ap.sayHeaderChain(8)
when false: # all addresses are typically pp-mappable
ap.say " address map: ", toSeq(ap.names.pairs)
.mapIt(&"@{it[1]}:{it[0]}")
.sorted
.join("\n" & ' '.repeat(23))
# ------------------------------------------------------------------------------
# End

View File

@ -11,6 +11,7 @@
import
../nimbus/utils/lru_cache,
eth/rlp,
sequtils,
strformat,
tables,
unittest2
@ -36,61 +37,23 @@ proc say(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
echo outText
# Privy access to LRU internals
proc maxItems[T,K,V,E](cache: var LruCache[T,K,V,E]): int =
cache.specs[0]
proc first[T,K,V,E](cache: var LruCache[T,K,V,E]): K =
cache.specs[1]
proc last[T,K,V,E](cache: var LruCache[T,K,V,E]): K =
cache.specs[2]
proc tab[T,K,V,E](cache: var LruCache[T,K,V,E]): Table[K,LruItem[K,V]] =
cache.specs[3]
proc verifyLinks[T,K,V,E](lru: var LruCache[T,K,V,E]) =
var key = lru.first
if lru.tab.len == 1:
doAssert lru.tab.hasKey(key)
doAssert key == lru.last
elif 1 < lru.tab.len:
# forward links
for n in 1 ..< lru.tab.len:
var curKey = key
key = lru.tab[curKey].nxt
if lru.tab[key].prv != curKey:
echo &"({n}): lru.tab[{key}].prv == {lru.tab[key].prv} exp {curKey}"
doAssert lru.tab[key].prv == curKey
doAssert key == lru.last
# backward links
for n in 1 ..< lru.tab.len:
var curKey = key
key = lru.tab[curKey].prv
if lru.tab[key].nxt != curKey:
echo &"({n}): lru.tab[{key}].nxt == {lru.tab[key].nxt} exp {curKey}"
doAssert lru.tab[key].nxt == curKey
doAssert key == lru.first
proc verifyBackLinks[T,K,V,E](lru: var LruCache[T,K,V,E]) =
var
index = 0
prvKey: K
for key,item in lru.keyItemPairs:
if 0 < index:
doAssert prvKey == item.prv
index.inc
prvKey = key
proc toKeyList[T,K,V,E](lru: var LruCache[T,K,V,E]): seq[K] =
lru.verifyLinks
if 0 < lru.tab.len:
var key = lru.first
while key != lru.last:
result.add key
key = lru.tab[key].nxt
result.add lru.last
lru.verifyBackLinks
toSeq(lru.keyItemPairs).mapIt(it[0])
proc toValueList[T,K,V,E](lru: var LruCache[T,K,V,E]): seq[V] =
lru.verifyLinks
if 0 < lru.tab.len:
var key = lru.first
while key != lru.last:
result.add lru.tab[key].value
key = lru.tab[key].nxt
result.add lru.tab[lru.last].value
lru.verifyBackLinks
toSeq(lru.keyItemPairs).mapIt(it[1].value)
proc createTestCache: LruCache[int,int,string,int] =
var
@ -103,7 +66,7 @@ proc createTestCache: LruCache[int,int,string,int] =
cache: LruCache[int,int,string,int]
# Create LRU cache
cache.initLruCache(getKey, getValue, cacheLimit)
cache.initCache(getKey, getValue, cacheLimit)
result = cache
@ -116,17 +79,16 @@ proc filledTestCache(noisy: bool): LruCache[int,int,string,int] =
for w in keyList:
var
key = w mod 13
reSched = cache.tab.hasKey(key)
value = cache.getLruItem(key)
reSched = cache.hasKey(key)
value = cache.getItem(key)
queue = cache.toKeyList
values = cache.toValueList
# verfy key/value pairs
for n in 0 ..< queue.len:
doAssert $queue[n] == $values[n]
if reSched:
noisy.say ">>>", &"rotate {value} => {queue}"
else:
noisy.say "+++", &"append {value} => {queue}"
doAssert queue.mapIt($it) == values
doAssert key == cache.lastKey
result = cache
@ -146,13 +108,13 @@ proc doDeepCopyTest(noisy: bool) =
c2 = c1
doAssert c1 == c2
discard c1.getLruItem(77)
discard c1.getItem(77)
say &"c1Specs: {c1.maxItems} {c1.first} {c1.last} ..."
say &"c2Specs: {c2.maxItems} {c2.first} {c2.last} ..."
say &"c1Specs: {c1.maxLen} {c1.firstKey} {c1.lastKey} ..."
say &"c2Specs: {c2.maxLen} {c2.firstKey} {c2.lastKey} ..."
doAssert c1 != c2
doAssert c1.tab != c2.tab
doAssert toSeq(c1.keyItemPairs) != toSeq(c2.keyItemPairs)
proc doSerialiserTest(noisy: bool) =
@ -167,16 +129,17 @@ proc doSerialiserTest(noisy: bool) =
say &"serialised[{s1.len}]: {s1}"
c2.clearLruCache
c2.clearCache
doAssert c1 != c2
c2.data = s1.decode(type c2.data)
doAssert c1 == c2
say &"c2Specs: {c2.maxItems} {c2.first} {c2.last} ..."
say &"c2Specs: {c2.maxLen} {c2.firstKey} {c2.lastKey} ..."
doAssert s1 == rlp.encode(c2.data)
proc doSerialiseSingleEntry(noisy: bool) =
proc say(a: varargs[string]) =
@ -184,7 +147,7 @@ proc doSerialiseSingleEntry(noisy: bool) =
var
c1 = createTestCache()
value = c1.getLruItem(77)
value = c1.getItem(77)
queue = c1.toKeyList
values = c1.toValueList
@ -196,17 +159,37 @@ proc doSerialiseSingleEntry(noisy: bool) =
say &"serialised[{s1.len}]: {s1}"
c2.clearLruCache
c2.clearCache
doAssert c1 != c2
c2.data = s1.decode(type c2.data)
doAssert c1 == c2
say &"c2Specs: {c2.maxItems} {c2.first} {c2.last} ..."
say &"c2Specs: {c2.maxLen} {c2.firstKey} {c2.lastKey} ..."
doAssert s1 == rlp.encode(c2.data)
proc doRandomDeleteTest(noisy: bool) =
proc say(a: varargs[string]) =
say(noisy = noisy, args = a)
var
c1 = filledTestCache(false)
sq = toSeq(c1.keyItemPairs).mapIt(it[0])
s0 = sq
inx = 5
key = sq[5]
sq.delete(5,5)
say &"sq: {s0} <off sq[5]({key})> {sq}"
doAssert c1.delItem(key)
doAssert sq == toSeq(c1.keyItemPairs).mapIt(it[0])
c1.verifyBackLinks
proc lruCacheMain*(noisy = defined(debug)) =
suite "LRU Cache":
@ -222,6 +205,9 @@ proc lruCacheMain*(noisy = defined(debug)) =
test "Rlp Single Entry Test":
doSerialiseSingleEntry(noisy)
test "Random Delete":
doRandomDeleteTest(noisy)
when isMainModule:
lruCacheMain()