rm Clique consensus method support and Goerli network (#2219)

* rm Clique consensus method support and Goerli network

* rm a few more SealingEngineRef and GoerliNets
tersec 2024-05-25 14:12:14 +00:00 committed by GitHub
parent 72912626a2
commit e895c0baeb
37 changed files with 28 additions and 4470 deletions

View File

@@ -18,7 +18,6 @@ import
 ../../../nimbus/[
 config,
 constants,
-core/sealer,
 core/chain,
 core/tx_pool,
 core/tx_pool/tx_item,
@@ -45,7 +44,6 @@ type
 com : CommonRef
 node : EthereumNode
 server : RpcHttpServer
-sealer : SealingEngineRef
 ttd : DifficultyInt
 client : RpcHttpClient
 sync : BeaconSyncRef
@@ -93,7 +91,7 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E
 chain = newChain(com)
 com.initializeEmptyDb()
-let txPool = TxPoolRef.new(com, conf.engineSigner)
+let txPool = TxPoolRef.new(com, ZERO_ADDRESS)
 node.addEthHandlerCapability(
 node.peerPool,
@@ -117,9 +115,6 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E
 echo "Failed to create rpc server: ", error
 quit(QuitFailure)
-sealer = SealingEngineRef.new(
-chain, ctx, conf.engineSigner,
-txPool, EngineStopped)
 sync = if com.ttd().isSome:
 BeaconSyncRef.init(node, chain, ctx.rng, conf.maxPeers, id=conf.tcpPort.int)
 else:
@@ -135,8 +130,6 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E
 if chainFile.len > 0:
 if not importRlpBlock(chainFolder / chainFile, com):
 quit(QuitFailure)
-elif not enableAuth:
-sealer.start()
 server.start()
@@ -153,7 +146,6 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E
 com : com,
 node : node,
 server : server,
-sealer : sealer,
 client : client,
 sync : sync,
 txPool : txPool,
@@ -165,7 +157,6 @@ proc close*(env: EngineEnv) =
 if not env.sync.isNil:
 env.sync.stop()
 waitFor env.client.close()
-waitFor env.sealer.stop()
 waitFor env.server.closeWait()
 proc setRealTTD*(env: EngineEnv) =

View File

@@ -17,7 +17,6 @@ import
 transaction,
 vm_state,
 vm_types,
-core/clique,
 core/dao,
 core/validate,
 core/chain/chain_desc,
@@ -107,9 +106,6 @@ proc setBlock*(c: ChainRef; header: BlockHeader;
 let dbTx = c.db.beginTransaction()
 defer: dbTx.dispose()
-var cliqueState = c.clique.cliqueSave
-defer: c.clique.cliqueRestore(cliqueState)
 c.com.hardForkTransition(header)
 # Needed for figuring out whether KVT cleanup is due (see at the end)

View File

@@ -84,7 +84,7 @@ proc main() =
 )
 com.initializeEmptyDb()
-let txPool = TxPoolRef.new(com, conf.engineSigner)
+let txPool = TxPoolRef.new(com, ZERO_ADDRESS)
 discard importRlpBlock(blocksFile, com)
 let ctx = setupGraphqlContext(com, ethNode, txPool)

View File

@@ -17,7 +17,7 @@ import
 ../../../nimbus/config,
 ../../../nimbus/rpc,
 ../../../nimbus/utils/utils,
-../../../nimbus/core/[chain, tx_pool, sealer],
+../../../nimbus/core/[chain, tx_pool],
 ../../../tests/test_helpers,
 ./vault
@@ -28,7 +28,6 @@ type
 vault*: Vault
 rpcClient*: RpcClient
 rpcServer: RpcServer
-sealingEngine: SealingEngineRef
 stopServer: StopServerProc
 const
@@ -85,28 +84,20 @@ proc setupEnv*(): TestEnv =
 com.initializeEmptyDb()
 let chainRef = newChain(com)
-let txPool = TxPoolRef.new(com, conf.engineSigner)
+let txPool = TxPoolRef.new(com, ZERO_ADDRESS)
 # txPool must be informed of active head
 # so it can know the latest account state
 let head = com.db.getCanonicalHead()
 doAssert txPool.smartHead(head)
-let sealingEngine = SealingEngineRef.new(
-chainRef, ethCtx, conf.engineSigner,
-txPool, EngineStopped
-)
 let rpcServer = setupRpcServer(ethCtx, com, ethNode, txPool, conf)
 let rpcClient = newRpcHttpClient()
 waitFor rpcClient.connect("127.0.0.1", Port(8545), false)
 let stopServer = stopRpcHttpServer
-sealingEngine.start()
 let t = TestEnv(
 rpcClient: rpcClient,
-sealingEngine: sealingEngine,
 rpcServer: rpcServer,
 vault : newVault(chainID, gasPrice, rpcClient),
 stopServer: stopServer
@@ -116,5 +107,4 @@ proc setupEnv*(): TestEnv =
 proc stopEnv*(t: TestEnv) =
 waitFor t.rpcClient.close()
-waitFor t.sealingEngine.stop()
 t.stopServer(t.rpcServer)

View File

@@ -54,7 +54,6 @@ const
 CustomNet* = 0.NetworkId
 # these are public network id
 MainNet* = 1.NetworkId
-GoerliNet* = 5.NetworkId
 SepoliaNet* = 11155111.NetworkId
 HoleskyNet* = 17000.NetworkId
@@ -356,10 +355,6 @@ proc validateChainConfig*(conf: ChainConfig): bool =
 if cur.time.isSome:
 lastTimeBasedFork = cur
-if conf.clique.period.isSome or
-conf.clique.epoch.isSome:
-conf.consensusType = ConsensusType.POA
 proc parseGenesis*(data: string): Genesis
 {.gcsafe.} =
 try:
@@ -473,29 +468,6 @@ proc chainConfigForNetwork*(id: NetworkId): ChainConfig =
 terminalTotalDifficulty: some(mainNetTTD),
 shanghaiTime: some(1_681_338_455.EthTime)
 )
-of GoerliNet:
-ChainConfig(
-clique: CliqueOptions(period: some(15), epoch: some(30000)),
-consensusType: ConsensusType.POA,
-chainId: GoerliNet.ChainId,
-# Genesis: # 2015-07-30 15:26:13 UTC
-homesteadBlock: some(0.toBlockNumber), # Included in genesis
-daoForkSupport: false,
-eip150Block: some(0.toBlockNumber), # Included in genesis
-eip150Hash: toDigest("0000000000000000000000000000000000000000000000000000000000000000"),
-eip155Block: some(0.toBlockNumber), # Included in genesis
-eip158Block: some(0.toBlockNumber), # Included in genesis
-byzantiumBlock: some(0.toBlockNumber), # Included in genesis
-constantinopleBlock: some(0.toBlockNumber), # Included in genesis
-petersburgBlock: some(0.toBlockNumber), # Included in genesis
-istanbulBlock: some(1_561_651.toBlockNumber), # 2019-10-30 13:53:05 UTC
-muirGlacierBlock: some(4_460_644.toBlockNumber), # Skipped in Goerli
-berlinBlock: some(4_460_644.toBlockNumber), # 2021-03-18 05:29:51 UTC
-londonBlock: some(5_062_605.toBlockNumber), # 2021-07-01 03:19:39 UTC
-terminalTotalDifficulty: some(10790000.u256),
-shanghaiTime: some(1_678_832_736.EthTime),
-cancunTime: some(1_705_473_120.EthTime), # 2024-01-17 06:32:00
-)
 of SepoliaNet:
 const sepoliaTTD = parse("17000000000000000",UInt256)
 ChainConfig(
@@ -553,15 +525,6 @@ proc genesisBlockForNetwork*(id: NetworkId): Genesis
 difficulty: 17179869184.u256,
 alloc: decodePrealloc(mainnetAllocData)
 )
-of GoerliNet:
-Genesis(
-nonce: 0.toBlockNonce,
-timestamp: EthTime(0x5c51a607),
-extraData: hexToSeqByte("0x22466c6578692069732061207468696e6722202d204166726900000000000000e0a2bd4258d2768837baa26a28fe71dc079f84c70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
-gasLimit: 0xa00000,
-difficulty: 1.u256,
-alloc: decodePrealloc(goerliAllocData)
-)
 of SepoliaNet:
 Genesis(
 nonce: 0.toBlockNonce,

View File

@@ -13,7 +13,7 @@ import
 std/[options],
 chronicles,
 eth/trie/trie_defs,
-../core/[pow, clique, casper],
+../core/[pow, casper],
 ../db/[core_db, ledger, storage_types],
 ../utils/[utils, ec_recover],
 ".."/[constants, errors],
@@ -91,9 +91,6 @@ type
 pow: PowRef
 ## Wrapper around `hashimotoLight()` and lookup cache
-poa: Clique
-## For non-PoA networks this descriptor is ignored.
 pos: CasperRef
 ## Proof Of Stake descriptor
@@ -161,10 +158,6 @@ proc init(com : CommonRef,
 com.ldgType = (if ldgType == LedgerType(0): LedgerCache else: ldgType)
 com.pruneHistory= pruneHistory
-# Initalise the PoA state regardless of whether it is needed on the current
-# network. For non-PoA networks this descriptor is ignored.
-com.poa = newClique(com.db, com.cliquePeriod, com.cliqueEpoch)
 # Always initialise the PoW epoch cache even though it migh no be used
 com.pow = PowRef.new
 com.pos = CasperRef.new
@@ -281,7 +274,6 @@ proc clone*(com: CommonRef, db: CoreDbRef): CommonRef =
 currentFork : com.currentFork,
 consensusType: com.consensusType,
 pow : com.pow,
-poa : com.poa,
 pos : com.pos,
 ldgType : com.ldgType,
 pruneHistory : com.pruneHistory)
@@ -353,19 +345,10 @@ func forkGTE*(com: CommonRef, fork: HardFork): bool =
 com.currentFork >= fork
 # TODO: move this consensus code to where it belongs
-proc minerAddress*(com: CommonRef; header: BlockHeader): EthAddress
+func minerAddress*(com: CommonRef; header: BlockHeader): EthAddress
 {.gcsafe, raises: [CatchableError].} =
-if com.consensusType != ConsensusType.POA:
-# POW and POS return header.coinbase
-return header.coinbase
-# POA return ecRecover
-let account = header.ecRecover
-if account.isErr:
-let msg = "Could not recover account address: " & $account.error
-raise newException(ValidationError, msg)
-account.value
+# POW and POS return header.coinbase
+return header.coinbase
 func forkId*(com: CommonRef, head, time: uint64): ForkID {.gcsafe.} =
 ## EIP 2364/2124
@@ -436,10 +419,6 @@ func startOfHistory*(com: CommonRef): Hash256 =
 ## Getter
 com.startOfHistory
-func poa*(com: CommonRef): Clique =
-## Getter
-com.poa
 func pow*(com: CommonRef): PowRef =
 ## Getter
 com.pow

View File

@@ -23,10 +23,6 @@ type
 # algorithm: Ethash
 POW
-# Proof of Authority
-# algorithm: Clique
-POA
 # Proof of Stake
 # algorithm: Casper
 POS

View File

@@ -189,14 +189,6 @@ type
 abbr: "e"
 name: "import-key" }: InputFile
-engineSigner* {.
-desc: "Set the signer address(as 20 bytes hex) and enable sealing engine to run and " &
-"producing blocks at specified interval (only PoA/Clique supported)"
-defaultValue: ZERO_ADDRESS
-defaultValueDesc: ""
-abbr: "s"
-name: "engine-signer" }: EthAddress
 verifyFrom* {.
 desc: "Enable extra verification when current block number greater than verify-from"
 defaultValueDesc: ""
@@ -616,7 +608,6 @@ proc getNetworkId(conf: NimbusConf): Option[NetworkId] =
 let network = toLowerAscii(conf.network)
 case network
 of "mainnet": return some MainNet
-of "goerli" : return some GoerliNet
 of "sepolia": return some SepoliaNet
 of "holesky": return some HoleskyNet
 else:
@@ -693,8 +684,6 @@ proc getBootNodes*(conf: NimbusConf): seq[ENode] =
 case conf.networkId
 of MainNet:
 bootstrapNodes.setBootnodes(MainnetBootnodes)
-of GoerliNet:
-bootstrapNodes.setBootnodes(GoerliBootnodes)
 of SepoliaNet:
 bootstrapNodes.setBootnodes(SepoliaBootnodes)
 of HoleskyNet:

View File

@@ -14,8 +14,7 @@ import
 ../../common/common,
 ../../utils/utils,
 ../../vm_types,
-../pow,
-../clique
+../pow
 export
 common
@@ -62,12 +61,12 @@ proc newChain*(com: CommonRef,
 vmState: vmState,
 )
-proc newChain*(com: CommonRef): ChainRef =
+func newChain*(com: CommonRef): ChainRef =
 ## Constructor for the `Chain` descriptor object. All sub-object descriptors
 ## are initialised with defaults. So is extra block chain validation
 ## * `enabled` for PoA networks (such as Goerli)
 ## * `disabled` for non-PaA networks
-let extraValidation = com.consensus in {ConsensusType.POA, ConsensusType.POS}
+let extraValidation = com.consensus == ConsensusType.POS
 ChainRef(
 com: com,
 validateBlock: true,
@@ -81,10 +80,6 @@ proc vmState*(c: ChainRef): BaseVMState =
 ## Getter
 c.vmState
-proc clique*(c: ChainRef): Clique =
-## Getter
-c.com.poa
 proc pow*(c: ChainRef): PowRef =
 ## Getter
 c.com.pow

View File

@@ -14,8 +14,6 @@ import
 ../../db/ledger,
 ../../vm_state,
 ../../vm_types,
-../clique/clique_verify,
-../clique,
 ../executor,
 ../validate,
 ./chain_desc,
@@ -79,9 +77,6 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
 let dbTx = c.db.beginTransaction()
 defer: dbTx.dispose()
-var cliqueState = c.clique.cliqueSave
-defer: c.clique.cliqueRestore(cliqueState)
 c.com.hardForkTransition(headers[0])
 # Note that `0 < headers.len`, assured when called from `persistBlocks()`
@@ -110,15 +105,14 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
 if c.validateBlock and c.extraValidation and
 c.verifyFrom <= header.blockNumber:
-if c.com.consensus != ConsensusType.POA:
-let res = c.com.validateHeaderAndKinship(
-header,
-body,
-checkSealOK = false) # TODO: how to checkseal from here
-if res.isErr:
-debug "block validation error",
-msg = res.error
-return ValidationResult.Error
+let res = c.com.validateHeaderAndKinship(
+header,
+body,
+checkSealOK = false) # TODO: how to checkseal from here
+if res.isErr:
+debug "block validation error",
+msg = res.error
+return ValidationResult.Error
 if c.generateWitness:
 vmState.generateWitness = true
@@ -137,21 +131,6 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
 if validationResult != ValidationResult.OK:
 return validationResult
-if c.validateBlock and c.extraValidation and
-c.verifyFrom <= header.blockNumber:
-if c.com.consensus == ConsensusType.POA:
-var parent = if 0 < i: @[headers[i-1]] else: @[]
-let rc = c.clique.cliqueVerify(c.com, header, parent)
-if rc.isOk:
-# mark it off so it would not auto-restore previous state
-c.clique.cliqueDispose(cliqueState)
-else:
-debug "PoA header verification failed",
-blockNumber = header.blockNumber,
-msg = $rc.error
-return ValidationResult.Error
 if c.generateWitness:
 let dbTx = c.db.beginTransaction()
 defer: dbTx.dispose()

View File

@@ -1,99 +0,0 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## EIP-225 Clique PoA Consensus Protocol
## =====================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
./clique/[clique_cfg, clique_defs, clique_desc],
./clique/snapshot/[ballot, snapshot_desc],
stew/results
{.push raises: [].}
# Note that mining is unsupported. Unused code ported from the Go
# implementation is stashed into the `clique_unused` module.
export
clique_cfg,
clique_defs,
clique_desc.Clique
type
CliqueState* = ##\
## Descriptor state snapshot which can be used for implementing
## transaction trasnaction handling. Nore the `Snapshot` type
## inside the `Result[]` is most probably opaque.
Result[Snapshot,void]
# ------------------------------------------------------------------------------
# Public
# ------------------------------------------------------------------------------
proc newClique*(db: CoreDbRef, cliquePeriod: EthTime, cliqueEpoch: int): Clique =
## Constructor for a new Clique proof-of-authority consensus engine. The
## initial state of the engine is `empty`, there are no authorised signers.
##
## If chain_config provides `Period` or `Epoch`, then `Period` or `Epoch`
## will be taken from chain_config. Otherwise, default value in `newCliqueCfg`
## will be used
let cfg = db.newCliqueCfg
if cliquePeriod > 0:
cfg.period = cliquePeriod
if cliqueEpoch > 0:
cfg.epoch = cliqueEpoch
cfg.newClique
proc cliqueSave*(c: Clique): CliqueState =
## Save current `Clique` state. This state snapshot saves the internal
## data that make up the list of authorised signers (see `cliqueSigners()`
## below.)
ok(c.snapshot)
proc cliqueRestore*(c: Clique; state: var CliqueState) =
## Restore current `Clique` state from a saved snapshot.
##
## For the particular `state` argument this fuction is disabled with
## `cliqueDispose()`. So it can be savely wrapped in a `defer:` statement.
## In transaction lingo, this would then be the rollback function.
if state.isOk:
c.snapshot = state.value
proc cliqueDispose*(c: Clique; state: var CliqueState) =
## Disable the function `cliqueDispose()` for the particular `state`
## argument.
##
## In transaction lingo, this would be the commit function if
## `cliqueRestore()` was wrapped in a `defer:` statement.
state = err(CliqueState)
proc cliqueSigners*(c: Clique): seq[EthAddress] =
## Retrieve the sorted list of authorized signers for the current state
## of the `Clique` descriptor.
##
## Note that the return argument list is sorted on-the-fly each time this
## function is invoked.
c.snapshot.ballot.authSigners
proc cliqueSignersLen*(c: Clique): int =
## Get the number of authorized signers for the current state of the
## `Clique` descriptor. The result is equivalent to `c.cliqueSigners.len`.
c.snapshot.ballot.authSignersLen
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
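For reference, the snapshot API above (`cliqueSave`, `cliqueRestore`, `cliqueDispose`) was used in a transaction-like pattern at the call sites removed earlier in this commit (`setBlock` and `persistBlocksImpl`); a minimal sketch, assuming a `ChainRef` value `c` whose `clique` getter returns this descriptor:

# save the signer state and schedule a rollback
var cliqueState = c.clique.cliqueSave
defer: c.clique.cliqueRestore(cliqueState)
# ... header verification / block processing ...
# on success, disable the deferred restore (i.e. commit)
c.clique.cliqueDispose(cliqueState)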

View File

@@ -1,168 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Clique PoA Conmmon Config
## =========================
##
## Constants used by Clique proof-of-authority consensus protocol, see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
{.push raises: [].}
import
std/[random, times],
ethash,
../../db/core_db,
../../utils/ec_recover,
./clique_defs
export
core_db
const
prngSeed = 42
type
CliqueCfg* = ref object of RootRef
db*: CoreDbRef
## All purpose (incl. blockchain) database.
nSnaps*: uint64
## Number of snapshots stored on disk (for logging troublesshoting)
snapsData*: uint64
## Raw payload stored on disk (for logging troublesshoting)
period: EthTime
## Time between blocks to enforce.
ckpInterval: int
## Number of blocks after which to save the vote snapshot to the
## disk database.
roThreshold: int
## Number of blocks after which a chain segment is considered immutable
## (ie. soft finality). It is used by the downloader as a hard limit
## against deep ancestors, by the blockchain against deep reorgs, by the
## freezer as the cutoff threshold and by clique as the snapshot trust
## limit.
prng: Rand
## PRNG state for internal random generator. This PRNG is
## cryptographically insecure but with reproducible data stream.
signatures: EcRecover
## Recent block signatures cached to speed up mining.
epoch: int
## The number of blocks after which to checkpoint and reset the pending
## votes.Suggested 30000 for the testnet to remain analogous to the
## mainnet ethash epoch.
logInterval: Duration
## Time interval after which the `snapshotApply()` function main loop
## produces logging entries.
# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------
proc newCliqueCfg*(db: CoreDbRef): CliqueCfg =
result = CliqueCfg(
db: db,
epoch: EPOCH_LENGTH,
period: BLOCK_PERIOD,
ckpInterval: CHECKPOINT_INTERVAL,
roThreshold: FULL_IMMUTABILITY_THRESHOLD,
logInterval: SNAPS_LOG_INTERVAL_MICSECS,
signatures: EcRecover.init(),
prng: initRand(prngSeed))
# ------------------------------------------------------------------------------
# Public helper funcion
# ------------------------------------------------------------------------------
# clique/clique.go(145): func ecrecover(header [..]
proc ecRecover*(
cfg: CliqueCfg;
header: BlockHeader;
): auto =
cfg.signatures.ecRecover(header)
# ------------------------------------------------------------------------------
# Public setters
# ------------------------------------------------------------------------------
proc `epoch=`*(cfg: CliqueCfg; epoch: SomeInteger) =
## Setter
cfg.epoch = if 0 < epoch: epoch
else: EPOCH_LENGTH
proc `period=`*(cfg: CliqueCfg; period: EthTime) =
## Setter
cfg.period = if period != EthTime(0): period
else: BLOCK_PERIOD
proc `ckpInterval=`*(cfg: CliqueCfg; numBlocks: SomeInteger) =
## Setter
cfg.ckpInterval = if 0 < numBlocks: numBlocks
else: CHECKPOINT_INTERVAL
proc `roThreshold=`*(cfg: CliqueCfg; numBlocks: SomeInteger) =
## Setter
cfg.roThreshold = if 0 < numBlocks: numBlocks
else: FULL_IMMUTABILITY_THRESHOLD
proc `logInterval=`*(cfg: CliqueCfg; duration: Duration) =
## Setter
cfg.logInterval = if duration != Duration(): duration
else: SNAPS_LOG_INTERVAL_MICSECS
# ------------------------------------------------------------------------------
# Public PRNG, may be overloaded
# ------------------------------------------------------------------------------
method rand*(cfg: CliqueCfg; max: Natural): int {.gcsafe, base, raises: [].} =
## The method returns a random number base on an internal PRNG providing a
## reproducible stream of random data. This function is supposed to be used
## exactly when repeatability comes in handy. Never to be used for crypto key
## generation or like (except testing.)
cfg.prng.rand(max)
# ------------------------------------------------------------------------------
# Public getter
# ------------------------------------------------------------------------------
proc epoch*(cfg: CliqueCfg): BlockNumber =
## Getter
cfg.epoch.u256
proc period*(cfg: CliqueCfg): EthTime =
## Getter
cfg.period
proc ckpInterval*(cfg: CliqueCfg): BlockNumber =
## Getter
cfg.ckpInterval.u256
proc roThreshold*(cfg: CliqueCfg): int =
## Getter
cfg.roThreshold
proc logInterval*(cfg: CliqueCfg): Duration =
## Getter
cfg.logInterval
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
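As the setters above suggest, zero or empty values fall back to the protocol defaults; a minimal sketch of configuring the engine, mirroring the removed `newClique(db, cliquePeriod, cliqueEpoch)` constructor in clique.nim above (the concrete values 3 and 0 are only illustrative):

let cfg = db.newCliqueCfg      # period = BLOCK_PERIOD, epoch = EPOCH_LENGTH
cfg.period = EthTime(3)        # override the block period
cfg.epoch = 0                  # zero falls back to EPOCH_LENGTH
let engine = cfg.newClique     # constructor from clique_desc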

View File

@@ -1,261 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Clique PoA Constants & Types
## ============================
##
## Constants used by Clique proof-of-authority consensus protocol, see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
std/[times],
eth/common,
stew/results,
stint
{.push raises: [].}
# ------------------------------------------------------------------------------
# Constants copied from eip-225 specs & implementation
# ------------------------------------------------------------------------------
const
# clique/clique.go(48): const ( [..]
CHECKPOINT_INTERVAL* = ##\
## Number of blocks after which to save the vote snapshot to the database
4 * 1024
INMEMORY_SNAPSHOTS* = ##\
## Number of recent vote snapshots to keep in memory.
128
WIGGLE_TIME* = ##\
## PoA mining only (currently unsupported).
##
## Random delay (per signer) to allow concurrent signers
initDuration(seconds = 0, milliseconds = 500)
# clique/clique.go(57): var ( [..]
BLOCK_PERIOD* = ##\
## Minimum difference in seconds between two consecutive block timestamps.
## Suggested time is 15s for the `testnet` to remain analogous to the
## `mainnet` ethash target.
EthTime 15
EXTRA_VANITY* = ##\
## Fixed number of extra-data prefix bytes reserved for signer vanity.
## Suggested 32 bytes to retain the current extra-data allowance and/or use.
32
NONCE_AUTH* = ##\
## Magic nonce number 0xffffffffffffffff to vote on adding a new signer.
0xffffffffffffffffu64.toBlockNonce
NONCE_DROP* = ##\
## Magic nonce number 0x0000000000000000 to vote on removing a signer.
0x0000000000000000u64.toBlockNonce
DIFF_NOTURN* = ##\
## Block score (difficulty) for blocks containing out-of-turn signatures.
## Suggested 1 since it just needs to be an arbitrary baseline constant.
1.u256
DIFF_INTURN* = ##\
## Block score (difficulty) for blocks containing in-turn signatures.
## Suggested 2 to show a slight preference over out-of-turn signatures.
2.u256
# params/network_params.go(60): FullImmutabilityThreshold = 90000
FULL_IMMUTABILITY_THRESHOLD* = ##\
## Number of blocks after which a chain segment is considered immutable (ie.
## soft finality). It is used by the downloader as a hard limit against
## deep ancestors, by the blockchain against deep reorgs, by the freezer as
## the cutoff threshold and by clique as the snapshot trust limit.
90000
# Other
SNAPS_LOG_INTERVAL_MICSECS* = ##\
## Time interval after which the `snapshotApply()` function main loop
## produces logging entries. The original value from the Go reference
## implementation has 8 seconds (which seems a bit long.) For the first
## 300k blocks in the Goerli chain, typical execution time in tests was
## mostly below 300 micro secs.
initDuration(microSeconds = 200)
# ------------------------------------------------------------------------------
# Error tokens
# ------------------------------------------------------------------------------
type
# clique/clique.go(76): var ( [..]
CliqueErrorType* = enum
resetCliqueError = ##\
## Default/reset value (use `cliqueNoError` below rather than this valie)
(0, "no error")
errUnknownBlock = ##\
## is returned when the list of signers is requested for a block that is
## not part of the local blockchain.
"unknown block"
errInvalidCheckpointBeneficiary = ##\
## is returned if a checkpoint/epoch transition block has a beneficiary
## set to non-zeroes.
"beneficiary in checkpoint block non-zero"
errInvalidVote = ##\
## is returned if a nonce value is something else that the two allowed
## constants of 0x00..0 or 0xff..f.
"vote nonce not 0x00..0 or 0xff..f"
errInvalidCheckpointVote = ##\
## is returned if a checkpoint/epoch transition block has a vote nonce
## set to non-zeroes.
"vote nonce in checkpoint block non-zero"
errMissingVanity = ##\
## is returned if a block's extra-data section is shorter than 32 bytes,
## which is required to store the signer vanity.
"extra-data 32 byte vanity prefix missing"
errMissingSignature = ##\
## is returned if a block's extra-data section doesn't seem to contain a
## 65 byte secp256k1 signature.
"extra-data 65 byte signature suffix missing"
errExtraSigners = ##\
## is returned if non-checkpoint block contain signer data in their
## extra-data fields.
"non-checkpoint block contains extra signer list"
errInvalidCheckpointSigners = ##\
## is returned if a checkpoint block contains an invalid list of signers
## (i.e. non divisible by 20 bytes).
"invalid signer list on checkpoint block"
errMismatchingCheckpointSigners = ##\
## is returned if a checkpoint block contains a list of signers different
## than the one the local node calculated.
"mismatching signer list on checkpoint block"
errInvalidMixDigest = ##\
## is returned if a block's mix digest is non-zero.
"non-zero mix digest"
errInvalidUncleHash = ##\
## is returned if a block contains an non-empty uncle list.
"non empty uncle hash"
errInvalidDifficulty = ##\
## is returned if the difficulty of a block neither 1 or 2.
"invalid difficulty"
errWrongDifficulty = ##\
## is returned if the difficulty of a block doesn't match the turn of
## the signer.
"wrong difficulty"
errInvalidTimestamp = ##\
## is returned if the timestamp of a block is lower than the previous
## block's timestamp + the minimum block period.
"invalid timestamp"
errInvalidVotingChain = ##\
## is returned if an authorization list is attempted to be modified via
## out-of-range or non-contiguous headers.
"invalid voting chain"
errUnauthorizedSigner = ##\
## is returned if a header is signed by a non-authorized entity.
"unauthorized signer"
errRecentlySigned = ##\
## is returned if a header is signed by an authorized entity that
## already signed a header recently, thus is temporarily not allowed to.
"recently signed"
# additional errors sources elsewhere
# -----------------------------------
errPublicKeyToShort = ##\
## Cannot retrieve public key
"cannot retrieve public key: too short"
# imported from consensus/errors.go
errUnknownAncestor = ##\
## is returned when validating a block requires an ancestor that is
## unknown.
"unknown ancestor"
errFutureBlock = ##\
## is returned when a block's timestamp is in the future according to
## the current node.
"block in the future"
# additional/bespoke errors, manually added
# -----------------------------------------
errUnknownHash = "No header found for hash value"
errEmptyLruCache = "No snapshot available"
errNotInitialised = ##\
## Initalisation value for `Result` entries
"Not initialised"
errSetLruSnaps = ##\
## Attempt to assign a value to a non-existing slot
"Missing LRU slot for snapshot"
errEcRecover = ##\
## Subsytem error"
"ecRecover failed"
errSnapshotLoad ## DB subsytem error
errSnapshotStore ## ..
errSnapshotClone
errCliqueGasLimitOrBaseFee
errCliqueExceedsGasLimit
errCliqueGasRepriceFork
errCliqueSealSigFn
errCliqueStopped = "process was interrupted"
errCliqueUnclesNotAllowed = "uncles not allowed"
# not really an error
nilCliqueSealNoBlockYet = "Sealing paused, waiting for transactions"
nilCliqueSealSignedRecently = "Signed recently, must wait for others"
# ------------------------------------------------------------------------------
# More types and constants
# ------------------------------------------------------------------------------
type
CliqueError* = ##\
## Error message, tinned component + explanatory text (if any)
(CliqueErrorType,string)
CliqueOkResult* = ##\
## Standard ok/error result type for `Clique` functions
Result[void,CliqueError]
const
cliqueNoError* = ##\
## No-error constant
(resetCliqueError, "")
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@@ -1,186 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Descriptor Objects for Clique PoA Consensus Protocol
## ====================================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
{.push raises: [].}
import
std/tables,
./clique_cfg,
./clique_defs,
./snapshot/snapshot_desc,
chronicles,
eth/keys,
stew/[keyed_queue, results]
type
RawSignature* = array[RawSignatureSize, byte]
# clique/clique.go(142): type SignerFn func(signer [..]
CliqueSignerFn* = ## Hashes and signs the data to be signed by
## a backing account
proc(signer: EthAddress;
message: openArray[byte]): Result[RawSignature, cstring]
{.gcsafe, raises: [CatchableError].}
Proposals = Table[EthAddress,bool]
CliqueSnapKey* = ##\
## Internal key used for the LRU cache (derived from Hash256).
array[32,byte]
CliqueSnapLru = ##\
## Snapshots cache
KeyedQueue[CliqueSnapKey,Snapshot]
CliqueFailed* = ##\
## Last failed state: block hash and error result
(Hash256, CliqueError)
# clique/clique.go(172): type Clique struct { [..]
Clique* = ref object ##\
## Clique is the proof-of-authority consensus engine proposed to support
## the Ethereum testnet following the Ropsten attacks.
signer*: EthAddress ##\
## Ethereum address of the current signing key
signFn*: CliqueSignerFn ## Signer function to authorize hashes with
cfg: CliqueCfg ##\
## Common engine parameters to fine tune behaviour
recents: CliqueSnapLru ##\
## Snapshots cache for recent block search
snapshot: Snapshot ##\
## Last successful snapshot
failed: CliqueFailed ##\
## Last verification error (if any)
proposals: Proposals ##\
## Cu1rrent list of proposals we are pushing
applySnapsMinBacklog: bool ##\
## Epoch is a restart and sync point. Eip-225 requires that the epoch
## header contains the full list of currently authorised signers.
##
## If this flag is set `true`, then the `cliqueSnapshot()` function will
## walk back to the1 `epoch` header with at least `cfg.roThreshold` blocks
## apart from the current header. This is how it is done in the reference
## implementation.
##
## Leving the flag `false`, the assumption is that all the checkponts
## before have been vetted already regardless of the current branch. So
## the nearest `epoch` header is used.
logScope:
topics = "clique PoA constructor"
# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------
# clique/clique.go(191): func New(config [..]
proc newClique*(cfg: CliqueCfg): Clique =
## Initialiser for Clique proof-of-authority consensus engine with the
## initial signers set to the ones provided by the user.
result = Clique(cfg: cfg,
snapshot: cfg.newSnapshot(BlockHeader()),
proposals: initTable[EthAddress,bool]())
# ------------------------------------------------------------------------------
# Public debug/pretty print
# ------------------------------------------------------------------------------
proc `$`*(e: CliqueError): string =
## Join text fragments
result = $e[0]
if e[1] != "":
result &= ": " & e[1]
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc recents*(
c: Clique;
): var KeyedQueue[CliqueSnapKey,Snapshot]
=
## Getter
c.recents
proc proposals*(c: Clique): var Proposals =
## Getter
c.proposals
proc snapshot*(c: Clique): Snapshot =
## Getter, last successfully processed snapshot.
c.snapshot
proc failed*(c: Clique): CliqueFailed =
## Getter, last snapshot error.
c.failed
proc cfg*(c: Clique): CliqueCfg =
## Getter
c.cfg
proc db*(c: Clique): CoreDbRef =
## Getter
c.cfg.db
proc applySnapsMinBacklog*(c: Clique): bool =
## Getter.
##
## If this flag is set `true`, then the `cliqueSnapshot()` function will
## walk back to the `epoch` header with at least `cfg.roThreshold` blocks
## apart from the current header. This is how it is done in the reference
## implementation.
##
## Setting the flag `false` which is the default, the assumption is that all
## the checkponts before have been vetted already regardless of the current
## branch. So the nearest `epoch` header is used.
c.applySnapsMinBacklog
# ------------------------------------------------------------------------------
# Public setters
# ------------------------------------------------------------------------------
proc `db=`*(c: Clique; db: CoreDbRef) =
## Setter, re-set database
c.cfg.db = db
c.proposals = initTable[EthAddress,bool]()
proc `snapshot=`*(c: Clique; snaps: Snapshot) =
## Setter
c.snapshot = snaps
proc `failed=`*(c: Clique; failure: CliqueFailed) =
## Setter
c.failed = failure
proc `applySnapsMinBacklog=`*(c: Clique; value: bool) =
## Setter
c.applySnapsMinBacklog = value
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@@ -1,208 +0,0 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Generate PoA Voting Header
## ==========================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
std/[sequtils],
eth/[common, keys],
../../constants,
./clique_cfg,
./clique_defs,
./clique_desc,
./clique_helpers
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
# clique/snapshot_test.go(49): func (ap *testerAccountPool) [..]
proc extraCheckPoint(header: var BlockHeader; signers: openArray[EthAddress]) =
## creates a Clique checkpoint signer section from the provided list
## of authorized signers and embeds it into the provided header.
header.extraData.setLen(EXTRA_VANITY)
header.extraData.add signers.mapIt(toSeq(it)).concat
header.extraData.add 0.byte.repeat(EXTRA_SEAL)
# clique/snapshot_test.go(77): func (ap *testerAccountPool) sign(header n[..]
proc sign(header: var BlockHeader; signer: PrivateKey) =
## sign calculates a Clique digital signature for the given block and embeds
## it back into the header.
#
# Sign the header and embed the signature in extra data
let
hashData = header.hashSealHeader.data
signature = signer.sign(SkMessage(hashData)).toRaw
extraLen = header.extraData.len
header.extraData.setLen(extraLen - EXTRA_SEAL)
header.extraData.add signature
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
# clique/snapshot_test.go(415): blocks, _ := core.GenerateChain(&config, [..]
proc cliqueGenvote*(
c: Clique;
voter: EthAddress; # new voter account/identity
seal: PrivateKey; # signature key
parent: BlockHeader;
elapsed = EthTime(0);
voteInOk = false; # vote in the new voter if `true`
outOfTurn = false;
checkPoint: seq[EthAddress] = @[]): BlockHeader =
## Generate PoA voting header (as opposed to `epoch` synchronisation header.)
## The function arguments are as follows:
##
## :c:
## Clique descriptor. see the `newClique()` object constructor.
##
## :voter:
## New voter account address to vote in or out (see `voteInOk`). A trivial
## example for the first block #1 header would be choosing one of the
## accounts listed in the `extraData` field fo the genesis header (note
## that Goerli has exactly one of those accounts.) This trivial example
## has no effect on the authorised voters' list.
##
## :seal:
## Private key related to an authorised voter account. Again, a trivial
## example for the block #1 header would be to (know and) use the
## associated key for one of the accounts listed in the `extraData` field
## fo the genesis header.
##
## :parent:
## parent header to chain with (not necessarily on block chain yet). For
## a block #1 header as a trivial example, this would be the genesis
## header.
##
## :elapsed:
## Optional timestamp distance from parent. This value defaults to valid
## minimum time interval `c.cfg.period`
##
## :voteInOk:
## Role of voting account. If `true`, the `voter` account address is voted
## in to be accepted as authorised account. If `false`, the `voter` account
## is voted to be removed (if it exists as authorised account, at all.)
##
## :outOfTurn:
## Must be `false` if the `voter` is `in-turn` which is defined as the
## property of a header block number retrieving the `seal` account address
## when used as list index (modulo list-length) into the (internally
## calculated and sorted) list of authorised signers. Absence of this
## property is called `out-of-turn`.
##
## The classification `in-turn` and `out-of-turn` is used only with a
## multi mining strategy where an `in-turn` block is slightly preferred.
## Nevertheless, this property is to be locked into the block chain. In a
## trivial example of an authorised signers list with exactly one entry,
## all block numbers are zero modulo one, so are `in-turn`, and
## `outOfTurn` would be left `false`.
##
## :checkPoint:
## List of currently authorised signers. According to the Clique protocol
## EIP-225, this list must be the same as the internally computed list of
## authorised signers from the block chain.
##
## This list must appear on an `epoch` block and nowhere else. An `epoch`
## block is a block where the block number is a multiple of `c.cfg.epoch`.
## Typically, `c.cfg.epoch` is initialised as `30'000`.
##
let timeElapsed = if elapsed == EthTime(0): c.cfg.period else: elapsed
result = BlockHeader(
parentHash: parent.blockHash,
ommersHash: EMPTY_UNCLE_HASH,
stateRoot: parent.stateRoot,
timestamp: parent.timestamp + timeElapsed,
txRoot: EMPTY_ROOT_HASH,
receiptRoot: EMPTY_ROOT_HASH,
blockNumber: parent.blockNumber + 1,
gasLimit: parent.gasLimit,
#
# clique/snapshot_test.go(417): gen.SetCoinbase(accounts.address( [..]
coinbase: voter,
#
# clique/snapshot_test.go(418): if tt.votes[j].auth {
nonce: if voteInOk: NONCE_AUTH else: NONCE_DROP,
#
# clique/snapshot_test.go(436): header.Difficulty = diffInTurn [..]
difficulty: if outOfTurn: DIFF_NOTURN else: DIFF_INTURN,
#
extraData: 0.byte.repeat(EXTRA_VANITY + EXTRA_SEAL))
# clique/snapshot_test.go(432): if auths := tt.votes[j].checkpoint; [..]
if 0 < checkPoint.len:
result.extraCheckPoint(checkPoint)
# Generate the signature and embed it into the header
result.sign(seal)
proc cliqueGenvote*(
c: Clique; voter: EthAddress; seal: PrivateKey;
elapsed = EthTime(0);
voteInOk = false;
outOfTurn = false;
checkPoint: seq[EthAddress] = @[]): BlockHeader
{.gcsafe, raises: [CatchableError].} =
## Variant of `clique_genvote()` where the `parent` is the canonical head
## on the block chain database.
##
## Trivial example (aka smoke test):
##
## :signature: `S`
## :account address: `a(S)`
## :genesis: extraData contains exactly one signer `a(S)`
##
## [..]
##
## | import pkg/[times], ..
## | import p2p/[chain,clique], p2p/clique/clique_genvote, ..
##
## [..]
##
## | var db: CoreDbRef = ...
## | var c = db.newChain
##
##
## | \# overwrite, typically initialised at 15s
## | const threeSecs = initDuration(seconds = 3)
## | c.clique.cfg.period = threeSecs
##
##
## | \# create first block (assuming empty block chain), mind `a(S)`, `S`
## | let header = c.clique.clique_genvote(`a(S)`, `S`, elapsed = threeSecs)
##
## [..]
##
## let ok = c.persistBlocks(@[header],@[BlockBody()])
##
## [..]
##
c.cliqueGenvote(voter, seal,
parent = c.cfg.db.getCanonicalHead,
elapsed = elapsed,
voteInOk = voteInOk,
outOfTurn = outOfTurn,
checkPoint = checkPoint)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@@ -1,111 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Tools & Utils for Clique PoA Consensus Protocol
## ===============================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
std/[algorithm, times],
../../constants,
../../utils/utils,
./clique_defs,
eth/[common, rlp],
stew/[objects, results],
stint
type
EthSortOrder* = enum
EthDescending = SortOrder.Descending.ord
EthAscending = SortOrder.Ascending.ord
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
func zeroItem[T](t: typedesc[T]): T =
discard
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc isZero*[T: EthAddress|Hash256|Duration](a: T): bool =
## `true` if `a` is all zero
a == zeroItem(T)
proc sorted*(e: openArray[EthAddress]; order = EthAscending): seq[EthAddress] =
proc eCmp(x, y: EthAddress): int =
for n in 0 ..< x.len:
if x[n] < y[n]:
return -1
elif y[n] < x[n]:
return 1
e.sorted(cmp = eCmp, order = order.ord.SortOrder)
proc cliqueResultErr*(w: CliqueError): CliqueOkResult =
## Return error result (syntactic sugar)
err(w)
proc extraDataAddresses*(extraData: Blob): seq[EthAddress] =
## Extract signer addresses from extraData header field
proc toEthAddress(a: openArray[byte]; start: int): EthAddress =
toArray(EthAddress.len, a[start ..< start + EthAddress.len])
if EXTRA_VANITY + EXTRA_SEAL < extraData.len and
((extraData.len - (EXTRA_VANITY + EXTRA_SEAL)) mod EthAddress.len) == 0:
var addrOffset = EXTRA_VANITY
while addrOffset + EthAddress.len <= extraData.len - EXTRA_SEAL:
result.add extraData.toEthAddress(addrOffset)
addrOffset += EthAddress.len
# core/types/block.go(343): func (b *Block) WithSeal(header [..]
proc withHeader*(b: EthBlock; header: BlockHeader): EthBlock =
## New block with the data from `b` but the header replaced with the
## argument one.
EthBlock(header: header,
txs: b.txs,
uncles: b.uncles)
# ------------------------------------------------------------------------------
# Seal hash support
# ------------------------------------------------------------------------------
# clique/clique.go(730): func encodeSigHeader(w [..]
proc encodeSealHeader*(header: BlockHeader): seq[byte] =
## Cut sigature off `extraData` header field and consider new `baseFee`
## field for Eip1559.
doAssert EXTRA_SEAL < header.extraData.len
var rlpHeader = header
rlpHeader.extraData.setLen(header.extraData.len - EXTRA_SEAL)
rlp.encode(rlpHeader)
# clique/clique.go(688): func SealHash(header *types.Header) common.Hash {
proc hashSealHeader*(header: BlockHeader): Hash256 =
## Returns the hash of a block prior to it being sealed.
header.encodeSealHeader.keccakHash
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
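A worked example of the extra-data layout that `extraDataAddresses` and `encodeSealHeader` above rely on (EXTRA_VANITY = 32 vanity prefix bytes, 20 bytes per signer address, and a 65 byte seal signature suffix), using a hypothetical three-signer set:

# checkpoint extra-data size for a hypothetical 3-signer set
let nSigners = 3
let extraLen = 32 + nSigners * 20 + 65   # = 157 bytes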

View File

@@ -1,261 +0,0 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Mining Support for Clique PoA Consensus Protocol
## ===================!=============================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
{.push raises: [].}
import
std/[sequtils],
chronicles,
chronos,
eth/keys,
"../.."/[constants, utils/ec_recover],
../../common/common,
./clique_cfg,
./clique_defs,
./clique_desc,
./clique_helpers,
./clique_snapshot,
./clique_verify,
./snapshot/[ballot, snapshot_desc]
logScope:
topics = "clique PoA Mining"
# ------------------------------------------------------------------------------
# Private Helpers
# ------------------------------------------------------------------------------
proc isValidVote(s: Snapshot; a: EthAddress; authorize: bool): bool {.gcsafe, raises: [].} =
s.ballot.isValidVote(a, authorize)
proc isSigner*(s: Snapshot; address: EthAddress): bool {.gcsafe, raises: [].} =
## See `clique_verify.isSigner()`
s.ballot.isAuthSigner(address)
# clique/snapshot.go(319): func (s *Snapshot) inturn(number [..]
proc inTurn*(s: Snapshot; number: BlockNumber, signer: EthAddress): bool {.gcsafe, raises: [].} =
## See `clique_verify.inTurn()`
let ascSignersList = s.ballot.authSigners
for offset in 0 ..< ascSignersList.len:
if ascSignersList[offset] == signer:
return (number mod ascSignersList.len.u256) == offset.u256
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
# clique/clique.go(681): func calcDifficulty(snap [..]
proc calcDifficulty(s: Snapshot; signer: EthAddress): DifficultyInt {.gcsafe, raises: [].} =
if s.inTurn(s.blockNumber + 1, signer):
DIFF_INTURN
else:
DIFF_NOTURN
proc recentBlockNumber*(s: Snapshot;
a: EthAddress): Result[BlockNumber,void] {.gcsafe, raises: [].} =
## Return `BlockNumber` for `address` argument (if any)
for (number,recent) in s.recents.pairs:
if recent == a:
return ok(number)
return err()
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
# clique/clique.go(506): func (c *Clique) Prepare(chain [..]
proc prepare*(c: Clique; parent: BlockHeader, header: var BlockHeader): CliqueOkResult
{.gcsafe, raises: [CatchableError].} =
## For the Consensus Engine, `prepare()` initializes the consensus fields
## of a block header according to the rules of a particular engine.
##
## This implementation prepares all the consensus fields of the header for
## running the transactions on top.
# Assemble the voting snapshot to check which votes make sense
let rc = c.cliqueSnapshot(parent.blockHash, @[])
if rc.isErr:
return err(rc.error)
# if we are not voting, coinbase should be filled with zero
# because other subsystem e.g txpool can produce block header
# with non zero coinbase. if that coinbase is one of the signer
# and the nonce is zero, that signer will be vote out from
# signer list
header.coinbase.reset
let modEpoch = (parent.blockNumber+1) mod c.cfg.epoch
if modEpoch.isZero.not:
# Gather all the proposals that make sense voting on
var addresses: seq[EthAddress]
for (address,authorize) in c.proposals.pairs:
if c.snapshot.isValidVote(address, authorize):
addresses.add address
# If there's pending proposals, cast a vote on them
if 0 < addresses.len:
header.coinbase = addresses[c.cfg.rand(addresses.len-1)]
header.nonce = if header.coinbase in c.proposals: NONCE_AUTH
else: NONCE_DROP
# Set the correct difficulty
header.difficulty = c.snapshot.calcDifficulty(c.signer)
# Ensure the extra data has all its components
header.extraData.setLen(EXTRA_VANITY)
if modEpoch.isZero:
header.extraData.add c.snapshot.ballot.authSigners.mapIt(toSeq(it)).concat
header.extraData.add 0.byte.repeat(EXTRA_SEAL)
# Mix digest is reserved for now, set to empty
header.mixDigest.reset
# Ensure the timestamp has the correct delay
header.timestamp = parent.timestamp + c.cfg.period
if header.timestamp < EthTime.now():
header.timestamp = EthTime.now()
ok()
proc prepareForSeal*(c: Clique; prepHeader: BlockHeader; header: var BlockHeader) {.gcsafe, raises: [].} =
# TODO: use system.move?
header.nonce = prepHeader.nonce
header.extraData = prepHeader.extraData
header.mixDigest = prepHeader.mixDigest
# clique/clique.go(589): func (c *Clique) Authorize(signer [..]
proc authorize*(c: Clique; signer: EthAddress; signFn: CliqueSignerFn) {.gcsafe, raises: [].} =
## Injects private key into the consensus engine to mint new blocks with.
c.signer = signer
c.signFn = signFn
# clique/clique.go(724): func CliqueRLP(header [..]
proc cliqueRlp*(header: BlockHeader): seq[byte] {.gcsafe, raises: [].} =
## Returns the rlp bytes which needs to be signed for the proof-of-authority
## sealing. The RLP to sign consists of the entire header apart from the 65
## byte signature contained at the end of the extra data.
##
## Note, the method requires the extra data to be at least 65 bytes,
## otherwise it panics. This is done to avoid accidentally using both forms
## (signature present or not), which could be abused to produce different
##hashes for the same header.
header.encodeSealHeader
# clique/clique.go(688): func SealHash(header *types.Header) common.Hash {
proc sealHash*(header: BlockHeader): Hash256 {.gcsafe, raises: [].} =
## For the Consensus Engine, `sealHash()` returns the hash of a block prior
## to it being sealed.
##
## This implementation returns the hash of a block prior to it being sealed.
header.hashSealHeader
# clique/clique.go(599): func (c *Clique) Seal(chain [..]
proc seal*(c: Clique; ethBlock: var EthBlock):
Result[void,CliqueError] {.gcsafe,
raises: [CatchableError].} =
## This implementation attempts to create a sealed block using the local
## signing credentials.
var header = ethBlock.header
# Sealing the genesis block is not supported
if header.blockNumber.isZero:
return err((errUnknownBlock, ""))
# For 0-period chains, refuse to seal empty blocks (no reward but would spin
# sealing)
if c.cfg.period == 0 and ethBlock.txs.len == 0:
info $nilCliqueSealNoBlockYet
return err((nilCliqueSealNoBlockYet, ""))
# Don't hold the signer fields for the entire sealing procedure
let
signer = c.signer
signFn = c.signFn
# Bail out if we're unauthorized to sign a block
let rc = c.cliqueSnapshot(header.parentHash)
if rc.isErr:
return err(rc.error)
if not c.snapshot.isSigner(signer):
return err((errUnauthorizedSigner, ""))
# If we're amongst the recent signers, wait for the next block
let seen = c.snapshot.recentBlockNumber(signer)
if seen.isOk:
# Signer is among recents, only wait if the current block does not
# shift it out
if header.blockNumber < seen.value + c.snapshot.signersThreshold.u256:
info $nilCliqueSealSignedRecently
return err((nilCliqueSealSignedRecently, ""))
when false:
# Sweet, the protocol permits us to sign the block, wait for our time
var delay = header.timestamp - EthTime.now()
if header.difficulty == DIFF_NOTURN:
# It's not our turn explicitly to sign, delay it a bit
let wiggle = c.snapshot.signersThreshold.int64 * WIGGLE_TIME
# Kludge for limited rand() argument range
if wiggle.inSeconds < (int.high div 1000).int64:
let rndWiggleMs = c.cfg.rand(wiggle.inMilliseconds.int)
delay += initDuration(milliseconds = rndWiggleMs)
else:
let rndWiggleSec = c.cfg.rand((wiggle.inSeconds and int.high).int)
delay += initDuration(seconds = rndWiggleSec)
trace "Out-of-turn signing requested",
wiggle = $wiggle
# Sign all the things!
try:
let signature = signFn(signer,header.cliqueRlp)
if signature.isErr:
return err((errCliqueSealSigFn,$signature.error))
let extraLen = header.extraData.len
if EXTRA_SEAL < extraLen:
header.extraData.setLen(extraLen - EXTRA_SEAL)
header.extraData.add signature.value
except CatchableError as exc:
return err((errCliqueSealSigFn,
"Error when signing block header: " & exc.msg))
ethBlock = ethBlock.withHeader(header)
ok()
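# Minimal sketch (illustrative only, hypothetical helper name): splicing a
# freshly produced 65 byte signature back into `extraData`, mirroring the
# `setLen`/`add` steps at the end of `seal()` above.
proc sketchSpliceSeal(extraData: seq[byte]; signature: array[65, byte]): seq[byte] =
  doAssert signature.len <= extraData.len
  result = extraData[0 ..< extraData.len - signature.len]
  result.add signature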
# clique/clique.go(673): func (c *Clique) CalcDifficulty(chain [..]
proc calcDifficulty*(c: Clique;
parent: BlockHeader): Result[DifficultyInt,CliqueError]
{.gcsafe, raises: [CatchableError].} =
## For the Consensus Engine, `calcDifficulty()` is the difficulty adjustment
## algorithm. It returns the difficulty that a new block should have.
##
## This implementation returns the difficulty that a new block should have:
## * DIFF_NOTURN(1) if BLOCK_NUMBER % SIGNER_COUNT != SIGNER_INDEX
## * DIFF_INTURN(2) if BLOCK_NUMBER % SIGNER_COUNT == SIGNER_INDEX
let rc = c.cliqueSnapshot(parent)
if rc.isErr:
return err(rc.error)
return ok(c.snapshot.calcDifficulty(c.signer))
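# Minimal sketch (illustrative only, simplified types): the in-turn rule the
# difficulty is derived from, assuming `signers` is the ascending signer list.
proc sketchInTurn(blockNumber: int; signers: seq[string]; me: string): bool =
  signers[blockNumber mod signers.len] == me

doAssert sketchInTurn(7, @["a", "b", "c"], "b")      # 7 mod 3 == 1 -> "b" signs in turn
doAssert not sketchInTurn(8, @["a", "b", "c"], "b")  # 8 mod 3 == 2 -> "c" is in turn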
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,442 +0,0 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Snapshot for Clique PoA Consensus Protocol
## ==========================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
std/[sequtils, strutils],
chronicles,
eth/[keys],
stew/[keyed_queue, results],
../../utils/prettify,
"."/[clique_cfg, clique_defs, clique_desc],
./snapshot/[snapshot_apply, snapshot_desc]
type
# Internal sub-descriptor for `LocalSnapsDesc`
LocalPivot = object
header: BlockHeader
hash: Hash256
# Internal sub-descriptor for `LocalSnapsDesc`
LocalPath = object
snaps: Snapshot ## snapshot for given hash
chain: seq[BlockHeader] ## header chain towards snapshot
error: CliqueError ## error message
# Internal sub-descriptor for `LocalSnapsDesc`
LocalSubChain = object
first: int ## first chain[] element to be used
top: int ## length of chain starting at position 0
LocalSnaps = object
c: Clique
start: LocalPivot ## start here searching for checkpoints
trail: LocalPath ## snapshot location
subChn: LocalSubChain ## chain[] sub-range
parents: HeadersHolderRef ## explicit parents
HeadersHolderRef* = ref object
headers*: seq[BlockHeader]
{.push raises: [].}
logScope:
topics = "clique PoA snapshot"
static:
const stopCompilerGossip {.used.} = 42.toSI
# ------------------------------------------------------------------------------
# Private debugging functions, pretty printing
# ------------------------------------------------------------------------------
template say(d: var LocalSnaps; v: varargs[untyped]): untyped =
discard
# uncomment body to enable, note that say() prints on <stderr>
# d.c.cfg.say v
#proc pp(a: Hash256): string =
# if a == EMPTY_ROOT_HASH:
# "*blank-root*"
# elif a == EMPTY_SHA3:
# "*empty-sha3*"
# else:
# a.data.mapIt(it.toHex(2)).join[56 .. 63].toLowerAscii
#proc pp(q: openArray[BlockHeader]; n: int): string =
# result = "["
# if 5 < n:
# result &= toSeq(q[0 .. 2]).mapIt("#" & $it.blockNumber).join(", ")
# result &= " .." & $n & ".. #" & $q[n-1].blockNumber
# else:
# result &= toSeq(q[0 ..< n]).mapIt("#" & $it.blockNumber).join(", ")
# result &= "]"
#proc pp(b: BlockNumber, q: openArray[BlockHeader]; n: int): string =
# "#" & $b & " + " & q.pp(n)
#proc pp(q: openArray[BlockHeader]): string =
# q.pp(q.len)
#proc pp(b: BlockNumber, q: openArray[BlockHeader]): string =
# b.pp(q, q.len)
#proc pp(h: BlockHeader, q: openArray[BlockHeader]; n: int): string =
# "headers=(" & h.blockNumber.pp(q,n) & ")"
#proc pp(h: BlockHeader, q: openArray[BlockHeader]): string =
# h.pp(q,q.len)
#proc pp(t: var LocalPath; w: var LocalSubChain): string =
# var (a, b) = (w.first, w.top)
# if a == 0 and b == 0: b = t.chain.len
# "trail=(#" & $t.snaps.blockNumber & " + " & t.chain[a ..< b].pp & ")"
#proc pp(t: var LocalPath): string =
# var w = LocalSubChain()
# t.pp(w)
#proc pp(err: CliqueError): string =
# "(" & $err[0] & "," & err[1] & ")"
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc maxCheckPointLe(d: var LocalSnaps; number: BlockNumber): BlockNumber =
let epc = number mod d.c.cfg.ckpInterval
if epc < number:
number - epc
else:
# epc == number => number < ckpInterval
0.u256
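# Worked example (illustrative only), assuming the go-ethereum default
# checkpoint interval of 1024 blocks: `maxCheckPointLe` rounds a block number
# down to the closest checkpoint at or below it.
doAssert 2500 - (2500 mod 1024) == 2048   # greatest checkpoint <= 2500
doAssert 700 mod 1024 == 700              # below the first checkpoint -> 0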
proc isCheckPoint(d: var LocalSnaps; number: BlockNumber): bool =
(number mod d.c.cfg.ckpInterval).isZero
proc isEpoch(d: var LocalSnaps; number: BlockNumber): bool =
(number mod d.c.cfg.epoch).isZero
proc isSnapshotPosition(d: var LocalSnaps; number: BlockNumber): bool =
# clique/clique.go(394): if number == 0 || (number%c.config.Epoch [..]
if d.isEpoch(number):
if number.isZero:
# At the genesis => snapshot the initial state.
return true
if not d.c.applySnapsMinBacklog:
return true
if d.c.cfg.roThreshold < d.trail.chain.len:
# We have piled up more headers than allowed to be re-orged (chain
# reinit from a freezer), regard checkpoint trusted and snapshot it.
return true
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
func len*(list: HeadersHolderRef): int =
list.headers.len
func `[]`*(list: HeadersHolderRef, idx: int): BlockHeader =
list.headers[idx]
func `[]`*(list: HeadersHolderRef, idx: BackwardsIndex): BlockHeader =
list.headers[list.headers.len - int(idx)]
proc findSnapshot(d: var LocalSnaps): bool =
## Search for a snapshot starting at the current header, i.e. at the pivot
## value `d.start`. The snapshot returned in `trail` is a clone of the
## cached snapshot and can be modified later.
var
(header, hash) = (d.start.header, d.start.hash)
parentsLen = d.parents.len
# For convenience, ignore the current header as top parents list entry
if 0 < parentsLen and d.parents[^1] == header:
parentsLen.dec
while true:
#d.say "findSnapshot ", header.pp(d.parents, parentsLen),
# " trail=", d.trail.chain.pp
let number = header.blockNumber
# Check whether the snapshot was recently visited and cached
block:
let rc = d.c.recents.lruFetch(hash.data)
if rc.isOk:
d.trail.snaps = rc.value.cloneSnapshot
# d.say "findSnapshot cached ", d.trail.pp
trace "Found recently cached voting snapshot",
blockNumber = number,
blockHash = hash
return true
# If an on-disk checkpoint snapshot can be found, use that
if d.isCheckPoint(number):
let rc = d.c.cfg.loadSnapshot(hash)
if rc.isOk:
d.trail.snaps = rc.value.cloneSnapshot
d.say "findSnapshot on disk ", d.trail.pp
trace "Loaded voting snapshot from disk",
blockNumber = number,
blockHash = hash
# clique/clique.go(386): snap = s
return true
# Note that epoch is a restart and sync point. Eip-225 requires that the
# epoch header contains the full list of currently authorised signers.
if d.isSnapshotPosition(number):
# clique/clique.go(395): checkpoint := chain.GetHeaderByNumber [..]
d.trail.snaps = d.c.cfg.newSnapshot(header)
let rc = d.c.cfg.storeSnapshot(d.trail.snaps)
if rc.isOk:
d.say "findSnapshot <epoch> ", d.trail.pp
trace "Stored voting snapshot to disk",
blockNumber = number,
blockHash = hash,
nSnaps = d.c.cfg.nSnaps,
snapsTotal = d.c.cfg.snapsData.toSI
return true
# No snapshot for this header, get the parent header and move backward
hash = header.parentHash
# Add to batch (reversed list order, biggest block number comes first)
d.trail.chain.add header
# Assign parent header
if 0 < parentsLen:
# If we have explicit parents, pop it from the parents list
parentsLen.dec
header = d.parents[parentsLen]
# clique/clique.go(416): if header.Hash() != hash [..]
if header.blockHash != hash:
d.trail.error = (errUnknownAncestor,"")
return false
# No explicit parents (or no more parents left), reach out to the database
elif not d.c.cfg.db.getBlockHeader(hash, header):
d.trail.error = (errUnknownAncestor,"")
return false
# => while loop
# notreached
raiseAssert "findSnapshot(): wrong exit from forever-loop"
proc applyTrail(d: var LocalSnaps): CliqueOkResult
{.gcsafe, raises: [CatchableError].} =
## Apply any `trail` headers on top of the snapshot `snap`
if d.subChn.first < d.subChn.top:
block:
# clique/clique.go(434): snap, err := snap.apply(headers)
d.say "applyTrail ", d.trail.pp(d.subChn)
let rc = d.trail.snaps.snapshotApplySeq(
d.trail.chain, d.subChn.top-1, d.subChn.first)
if rc.isErr:
d.say "applyTrail snaps=#", d.trail.snaps.blockNumber,
" err=", rc.error.pp
return err(rc.error)
d.say "applyTrail snaps=#", d.trail.snaps.blockNumber
# If we've generated a new checkpoint snapshot, save to disk
if d.isCheckPoint(d.trail.snaps.blockNumber):
var rc = d.c.cfg.storeSnapshot(d.trail.snaps)
if rc.isErr:
return err(rc.error)
d.say "applyTrail <disk> chechkpoint #", d.trail.snaps.blockNumber
trace "Stored voting snapshot to disk",
blockNumber = d.trail.snaps.blockNumber,
blockHash = d.trail.snaps.blockHash,
nSnaps = d.c.cfg.nSnaps,
snapsTotal = d.c.cfg.snapsData.toSI
ok()
proc updateSnapshot(d: var LocalSnaps): SnapshotResult
{.gcsafe, raises: [CatchableError].} =
## Find snapshot for header `d.start.header` and assign it to the LRU cache.
## This function expects that the LRU cache already has a slot allocated
## for the snapshot, e.g. after having run `getLruSnaps()`.
d.say "updateSnapshot begin ", d.start.header.blockNumber.pp(d.parents)
# Search for previous snapshots
if not d.findSnapshot:
return err(d.trail.error)
# Initialise range for header chain[] to be applied to `d.trail.snaps`
d.subChn.top = d.trail.chain.len
# Previous snapshot found, apply any pending trail headers on top of it
if 0 < d.subChn.top:
let
first = d.trail.chain[^1].blockNumber
last = d.trail.chain[0].blockNumber
ckpt = d.maxCheckPointLe(last)
# If there is at least one checkpoint part of the trail sequence, make sure
# that we can store the latest one. This will be done by the `applyTrail()`
# handler for the largest block number in the sequence (note that the trail
# block numbers are in reverse order.)
if first <= ckpt and ckpt < last:
# Split the trail sequence so that the first one has the checkpoint
# entry with largest block number.
let inx = (last - ckpt).truncate(int)
# First part (note reverse block numbers.)
d.subChn.first = inx
let rc = d.applyTrail
if rc.isErr:
return err(rc.error)
# Second part (note reverse block numbers.)
d.subChn.first = 0
d.subChn.top = inx
var rc = d.applyTrail
if rc.isErr:
return err(rc.error)
# clique/clique.go(438): c.recents.Add(snap.Hash, snap)
discard d.c.recents.lruAppend(
d.trail.snaps.blockHash.data, d.trail.snaps, INMEMORY_SNAPSHOTS)
if 1 < d.trail.chain.len:
d.say "updateSnapshot ok #", d.trail.snaps.blockNumber,
" trail.len=", d.trail.chain.len
ok(d.trail.snaps)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc cliqueSnapshotSeq*(c: Clique; header: BlockHeader;
parents: HeadersHolderRef): SnapshotResult
{.gcsafe, raises: [CatchableError].} =
## Create authorisation state snapshot of a given point in the block chain
## and store it in the `Clique` descriptor to be retrievable as `c.snapshot`
## if successful.
##
## If the `parents[]` argument list top element (if any) is the same as the
## `header` argument, this top element is silently ignored.
##
## If this function is successful, the compiled `Snapshot` will also be
## stored in the `Clique` descriptor which can be retrieved later
## via `c.snapshot`.
block:
let rc = c.recents.lruFetch(header.blockHash.data)
if rc.isOk:
c.snapshot = rc.value
return ok(rc.value)
# Avoid deep copy, sequence will not be changed by `updateSnapshot()`
var snaps = LocalSnaps(
c: c,
parents: parents,
start: LocalPivot(
header: header,
hash: header.blockHash))
let rc = snaps.updateSnapshot
if rc.isOk:
c.snapshot = rc.value
rc
proc cliqueSnapshotSeq*(c: Clique; hash: Hash256;
parents: HeadersHolderRef): SnapshotResult
{.gcsafe,raises: [CatchableError].} =
## Create authorisation state snapshot of a given point in the block chain
## and store it in the `Clique` descriptor to be retrievable as `c.snapshot`
## if successful.
##
## If the `parents[]` argument list top element (if any) is the same as the
## `header` argument, this top element is silently ignored.
##
## If this function is successful, the compiled `Snapshot` will also be
## stored in the `Clique` descriptor which can be retrieved later
## via `c.snapshot`.
block:
let rc = c.recents.lruFetch(hash.data)
if rc.isOk:
c.snapshot = rc.value
return ok(rc.value)
var header: BlockHeader
if not c.cfg.db.getBlockHeader(hash, header):
return err((errUnknownHash,""))
# Avoid deep copy, sequence will not be changed by `updateSnapshot()`
var snaps = LocalSnaps(
c: c,
parents: parents,
start: LocalPivot(
header: header,
hash: hash))
let rc = snaps.updateSnapshot
if rc.isOk:
c.snapshot = rc.value
rc
# clique/clique.go(369): func (c *Clique) snapshot(chain [..]
proc cliqueSnapshot*(c: Clique; header: BlockHeader;
parents: var seq[BlockHeader]): SnapshotResult
{.gcsafe, raises: [CatchableError].} =
let list = HeadersHolderRef(
headers: toSeq(parents)
)
c.cliqueSnapshotSeq(header,list)
proc cliqueSnapshot*(c: Clique;hash: Hash256;
parents: openArray[BlockHeader]): SnapshotResult
{.gcsafe, raises: [CatchableError].} =
let list = HeadersHolderRef(
headers: toSeq(parents)
)
c.cliqueSnapshotSeq(hash,list)
proc cliqueSnapshot*(c: Clique; header: BlockHeader): SnapshotResult
{.gcsafe,raises: [CatchableError].} =
## Short for `cliqueSnapshot(c,header,@[])`
let blind = HeadersHolderRef()
c.cliqueSnapshotSeq(header, blind)
proc cliqueSnapshot*(c: Clique; hash: Hash256): SnapshotResult
{.gcsafe,raises: [CatchableError].} =
## Short for `cliqueSnapshot(c,hash,@[])`
let blind = HeadersHolderRef()
c.cliqueSnapshotSeq(hash, blind)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,426 +0,0 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Verify Headers for Clique PoA Consensus Protocol
## ================================================
##
## Note that mining is currently unsupported by `NIMBUS`
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
std/[strformat, sequtils],
../../utils/utils,
../../common/common,
../gaslimit,
./clique_cfg,
./clique_defs,
./clique_desc,
./clique_helpers,
./clique_snapshot,
./snapshot/[ballot, snapshot_desc],
chronicles,
stew/results
{.push raises: [].}
logScope:
topics = "clique PoA verify header"
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
# consensus/misc/forks.go(30): func VerifyForkHashes(config [..]
proc verifyForkHashes(com: CommonRef; header: BlockHeader): CliqueOkResult
{.gcsafe, raises: [ValueError].} =
## Verify that blocks conforming to network hard-forks do have the correct
## hashes, to avoid clients going off on different chains.
if com.eip150Block.isSome and
com.eip150Block.get == header.blockNumber:
# If the homestead reprice hash is set, validate it
let
eip150 = com.eip150Hash
hash = header.blockHash
if eip150 != hash:
return err((errCliqueGasRepriceFork,
&"Homestead gas reprice fork: have {eip150}, want {hash}"))
return ok()
proc signersThreshold*(s: Snapshot): int =
## Minimum number of authorised signers needed.
s.ballot.authSignersThreshold
proc recentBlockNumber*(s: Snapshot; a: EthAddress): Result[BlockNumber,void] =
## Return `BlockNumber` for `address` argument (if any)
for (number,recent) in s.recents.pairs:
if recent == a:
return ok(number)
return err()
proc isSigner*(s: Snapshot; address: EthAddress): bool =
## Checks whether argument `address` is in the signers list
s.ballot.isAuthSigner(address)
# clique/snapshot.go(319): func (s *Snapshot) inturn(number [..]
proc inTurn*(s: Snapshot; number: BlockNumber, signer: EthAddress): bool =
## Returns `true` if a signer at a given block height is in-turn.
let ascSignersList = s.ballot.authSigners
if 0 < ascSignersList.len:
let offset = (number mod ascSignersList.len.u256).truncate(int64)
return ascSignersList[offset] == signer
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
# clique/clique.go(463): func (c *Clique) verifySeal(chain [..]
proc verifySeal(c: Clique; header: BlockHeader): CliqueOkResult =
## Check whether the signature contained in the header satisfies the
## consensus protocol requirements. The method accepts an optional list of
## parent headers that aren't yet part of the local blockchain to generate
## the snapshots from.
# Verifying the genesis block is not supported
if header.blockNumber.isZero:
return err((errUnknownBlock,""))
# Get current snapshot
let snapshot = c.snapshot
# Verify availability of the cached snapshot
doAssert snapshot.blockHash == header.parentHash
# Resolve the authorization key and check against signers
let signer = c.cfg.ecRecover(header)
if signer.isErr:
return err((errEcRecover,$signer.error))
if not snapshot.isSigner(signer.value):
return err((errUnauthorizedSigner,""))
let seen = snapshot.recentBlockNumber(signer.value)
if seen.isOk:
# Signer is among recents, only fail if the current block does not
# shift it out
# clique/clique.go(486): if limit := uint64(len(snap.Signers)/2 + 1); [..]
if header.blockNumber - snapshot.signersThreshold.u256 < seen.value:
return err((errRecentlySigned,""))
# Ensure that the difficulty corresponds to the turn-ness of the signer
if snapshot.inTurn(header.blockNumber, signer.value):
if header.difficulty != DIFF_INTURN:
return err((errWrongDifficulty,"INTURN expected"))
else:
if header.difficulty != DIFF_NOTURN:
return err((errWrongDifficulty,"NOTURN expected"))
ok()
# clique/clique.go(314): func (c *Clique) verifyCascadingFields(chain [..]
proc verifyCascadingFields(c: Clique; com: CommonRef; header: BlockHeader;
parents: HeadersHolderRef): CliqueOkResult
{.gcsafe, raises: [CatchableError].} =
## Verify all the header fields that are not standalone, rather depend on a
## batch of previous headers. The caller may optionally pass in a batch of
## parents (ascending order) to avoid looking those up from the database.
## This is useful for concurrently verifying a batch of new headers.
# The genesis block is the always valid dead-end
if header.blockNumber.isZero:
return ok()
# Ensure that the block's timestamp isn't too close to its parent
var parent: BlockHeader
if 0 < parents.len:
parent = parents[^1]
elif not c.db.getBlockHeader(header.blockNumber-1, parent):
return err((errUnknownAncestor,""))
if parent.blockNumber != header.blockNumber-1 or
parent.blockHash != header.parentHash:
return err((errUnknownAncestor,""))
# clique/clique.go(330): if parent.Time+c.config.Period > header.Time {
if header.timestamp < parent.timestamp + c.cfg.period:
return err((errInvalidTimestamp,""))
# Verify that the gasUsed is <= gasLimit
block:
# clique/clique.go(333): if header.GasUsed > header.GasLimit {
let (used, limit) = (header.gasUsed, header.gasLimit)
if limit < used:
return err((errCliqueExceedsGasLimit,
&"invalid gasUsed: have {used}, gasLimit {limit}"))
# Verify `GasLimit` or `BaseFee` depending on whether before or after
# EIP-1559/London fork.
block:
# clique/clique.go(337): if !chain.Config().IsLondon(header.Number) {
let rc = com.validateGasLimitOrBaseFee(header, parent)
if rc.isErr:
return err((errCliqueGasLimitOrBaseFee, rc.error))
# Retrieve the snapshot needed to verify this header and cache it
block:
# clique/clique.go(350): snap, err := c.snapshot(chain, number-1, ..
let rc = c.cliqueSnapshotSeq(header.parentHash, parents)
if rc.isErr:
return err(rc.error)
# If the block is a checkpoint block, verify the signer list
if (header.blockNumber mod c.cfg.epoch.u256) == 0:
var addrList = header.extraData.extraDataAddresses
# not using `authSigners()` here as it is too slow
if c.snapshot.ballot.authSignersLen != addrList.len or
not c.snapshot.ballot.isAuthSigner(addrList):
return err((errMismatchingCheckpointSigners,""))
# All basic checks passed, verify the seal and return
return c.verifySeal(header)
proc verifyHeaderFields(c: Clique; header: BlockHeader): CliqueOkResult =
## Check header fields, the ones that do not depend on a parent block.
# clique/clique.go(250): number := header.Number.Uint64()
# Don't waste time checking blocks from the future
if EthTime.now() < header.timestamp:
return err((errFutureBlock,""))
# Checkpoint blocks need to enforce zero beneficiary
let isCheckPoint = (header.blockNumber mod c.cfg.epoch.u256).isZero
if isCheckPoint and not header.coinbase.isZero:
return err((errInvalidCheckpointBeneficiary,""))
# Nonces must be 0x00..0 or 0xff..f, zeroes enforced on checkpoints
if header.nonce != NONCE_AUTH and header.nonce != NONCE_DROP:
return err((errInvalidVote,""))
if isCheckPoint and header.nonce != NONCE_DROP:
return err((errInvalidCheckpointVote,""))
# Check that the extra-data contains both the vanity and signature
if header.extraData.len < EXTRA_VANITY:
return err((errMissingVanity,""))
if header.extraData.len < EXTRA_VANITY + EXTRA_SEAL:
return err((errMissingSignature,""))
# Ensure that the extra-data contains a signer list on a checkpoint,
# but none otherwise
let signersBytes = header.extraData.len - EXTRA_VANITY - EXTRA_SEAL
if not isCheckPoint:
if signersBytes != 0:
return err((errExtraSigners,""))
elif (signersBytes mod EthAddress.len) != 0:
return err((errInvalidCheckpointSigners,""))
# Ensure that the mix digest is zero as we do not have fork protection
# currently
if not header.mixDigest.isZero:
return err((errInvalidMixDigest,""))
# Ensure that the block does not contain any uncles which are meaningless
# in PoA
if header.ommersHash != EMPTY_UNCLE_HASH:
return err((errInvalidUncleHash,""))
# Ensure that the block's difficulty is meaningful (may not be correct at
# this point)
if not header.blockNumber.isZero:
# Note that neither INTURN nor NOTURN should be zero (but this might be
# subject to change as it is explicitly checked for in `clique.go`)
let diffy = header.difficulty
# clique/clique.go(246): if header.Difficulty == nil || (header.Difficulty..
if diffy.isZero or (diffy != DIFF_INTURN and diffy != DIFF_NOTURN):
return err((errInvalidDifficulty,""))
# verify that the gas limit is <= 2^63-1
when header.gasLimit.typeof isnot int64:
if int64.high < header.gasLimit:
return err((errCliqueExceedsGasLimit,
&"invalid gasLimit: have {header.gasLimit}, must be int64"))
ok()
# clique/clique.go(246): func (c *Clique) verifyHeader(chain [..]
proc cliqueVerifyImpl(c: Clique; com: CommonRef; header: BlockHeader;
parents: HeadersHolderRef): CliqueOkResult
{.gcsafe, raises: [CatchableError].} =
## Check whether a header conforms to the consensus rules. The caller may
## optionally pass in a batch of parents (ascending order) to avoid looking
## those up from the database. This is useful for concurrently verifying
## a batch of new headers.
c.failed = (ZERO_HASH256,cliqueNoError)
block:
# Check header fields independent of parent blocks
let rc = c.verifyHeaderFields(header)
if rc.isErr:
c.failed = (header.blockHash, rc.error)
return err(rc.error)
block:
# If all checks passed, validate any special fields for hard forks
let rc = com.verifyForkHashes(header)
if rc.isErr:
c.failed = (header.blockHash, rc.error)
return err(rc.error)
# All basic checks passed, verify cascading fields
result = c.verifyCascadingFields(com, header, parents)
if result.isErr:
c.failed = (header.blockHash, result.error)
proc cliqueVerifySeq*(c: Clique; com: CommonRef; header: BlockHeader;
parents: HeadersHolderRef): CliqueOkResult
{.gcsafe, raises: [CatchableError].} =
## Check whether a header conforms to the consensus rules. The caller may
## optionally pass in a batch of parents (ascending order) to avoid looking
## those up from the database. This is useful for concurrently verifying
## a batch of new headers.
##
## On success, the latest authorised signers list is available via the
## function `c.cliqueSigners()`. Otherwise, the latest error is also stored
## in the `Clique` descriptor.
##
## If there is an error, this error is also stored within the `Clique`
## descriptor and can be retrieved via `c.failed` along with the hash/ID of
## the failed block header.
block:
let rc = c.cliqueVerifyImpl(com, header, parents)
if rc.isErr:
return rc
# Adjust the current snapshot (the function `cliqueVerifyImpl()` typically
# works with the parent snapshot).
block:
let rc = c.cliqueSnapshotSeq(header, parents)
if rc.isErr:
return err(rc.error)
ok()
proc cliqueVerifySeq(c: Clique; com: CommonRef;
headers: HeadersHolderRef): CliqueOkResult
{.gcsafe, raises: [CatchableError].} =
## This function verifies a batch of headers checking each header for
## consensus rules conformance. The `headers` list is supposed to
## contain a chain of headers, i.e. `headers[i]` is parent to `headers[i+1]`.
##
## On success, the latest authorised signers list is available via the
## function `c.cliqueSigners()`. Otherwise, the latest error is also stored
## in the `Clique` descriptor.
##
## If there is an error, this error is also stored within the `Clique`
## descriptor and can be retrieved via `c.failed` along with the hash/ID of
## the failed block header.
##
## Note that the sequence argument must be write-accessible, even though it
## will be left untouched by this function.
if 0 < headers.len:
block:
let blind = HeadersHolderRef()
let rc = c.cliqueVerifyImpl(com, headers[0],blind)
if rc.isErr:
return rc
for n in 1 ..< headers.len:
let parent = HeadersHolderRef(
headers: headers.headers[n-1 .. n-1] # is actually a single-item sequence
)
let rc = c.cliqueVerifyImpl(com, headers[n],parent)
if rc.isErr:
return rc
# Adjust the current snapshot (the function `cliqueVerifyImpl()` typically
# works with the parent snapshot).
block:
let rc = c.cliqueSnapshot(headers[^1])
if rc.isErr:
return err(rc.error)
ok()
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc cliqueVerify*(c: Clique; com: CommonRef; header: BlockHeader;
parents: openArray[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [CatchableError].} =
## Check whether a header conforms to the consensus rules. The caller may
## optionally pass on a batch of parents (ascending order) to avoid looking
## those up from the database. This function updates the list of authorised
## signers (see `cliqueSigners()` below.)
##
## On success, the latest authorised signers list is available via the
## function `c.cliqueSigners()`. Otherwise, the latest error is also stored
## in the `Clique` descriptor and is accessible as `c.failed`.
##
## This function is not transaction-safe, that is, the internal authorised
## signers list retains the state of the last successful header
## verification. The hash of the failing header together
## with the error message is then accessible as `c.failed`.
##
## Use the directives `cliqueSave()`, `cliqueDispose()`, and/or
## `cliqueRestore()` for transaction support.
let list = HeadersHolderRef(
headers: toSeq(parents)
)
c.cliqueVerifySeq(com, header, list)
# clique/clique.go(217): func (c *Clique) VerifyHeader(chain [..]
proc cliqueVerify*(c: Clique; com: CommonRef; header: BlockHeader): CliqueOkResult
{.gcsafe, raises: [CatchableError].} =
## Consensus rules verifier without optional parents list.
let blind = HeadersHolderRef()
c.cliqueVerifySeq(com, header, blind)
proc cliqueVerify*(c: Clique; com: CommonRef;
headers: openArray[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [CatchableError].} =
## This function verifies a batch of headers checking each header for
## consensus rules conformance (see also the other `cliqueVerify()` function
## instance.) The `headers` list is supposed to contain a chain of headers,
## i.e. `headers[i]` is parent to `headers[i+1]`.
##
## On success, the latest authorised signers list is available via the
## function `c.cliqueSigners()`. Otherwise, the latest error is also stored
## in the `Clique` descriptor and is accessible as `c.failed`.
##
## This function is not transaction-safe, that is, the internal authorised
## signers list retains the state of the last successful header
## verification. The hash of the failing header together
## with the error message is then accessible as `c.failed`.
##
## Use the directives `cliqueSave()`, `cliqueDispose()`, and/or
## `cliqueRestore()` for transaction support.
let list = HeadersHolderRef(
headers: toSeq(headers)
)
c.cliqueVerifySeq(com, list)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,213 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Votes Management for Clique PoA Consensus Protocol
## =================================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
std/[sequtils, tables],
../clique_helpers,
eth/common
type
Vote* = object
## Vote represent single votes that an authorized signer made to modify
## the list of authorizations.
signer*: EthAddress ## authorized signer that cast this vote
address*: EthAddress ## account being voted on to change its
## authorization type (`true` or `false`)
blockNumber*: BlockNumber ## block number the vote was cast in
## (expire old votes)
authorize*: bool ## authorization type, whether to authorize or
## deauthorize the voted account
Tally = object
authorize: bool
signers: Table[EthAddress,Vote]
Ballot* = object
votes: Table[EthAddress,Tally] ## votes by account -> signer
authSig: Table[EthAddress,bool] ## currently authorised signers
authRemoved: bool ## last `addVote()` action was removing an
## authorised signer from the `authSig` list
{.push raises: [].}
# ------------------------------------------------------------------------------
# Public debugging/pretty-printer support
# ------------------------------------------------------------------------------
proc votesInternal*(t: var Ballot): seq[(EthAddress,EthAddress,Vote)] =
for account,tally in t.votes.pairs:
for signer,vote in tally.signers.pairs:
result.add (account, signer, vote)
# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------
proc initBallot*(t: var Ballot) =
## Initialise an empty `Ballot` descriptor.
t.votes = initTable[EthAddress,Tally]()
t.authSig = initTable[EthAddress,bool]()
proc initBallot*(t: var Ballot; signers: openArray[EthAddress]) =
## Initialise `Ballot` with a given authorised signers list
t.initBallot
for a in signers:
t.authSig[a] = true
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc authSigners*(t: var Ballot): seq[EthAddress] =
## Sorted ascending list of authorised signer addresses
toSeq(t.authSig.keys).sorted(EthAscending)
proc authSignersLen*(t: var Ballot): int =
## Returns the number of currently known authorised signers.
t.authSig.len
proc isAuthSignersListShrunk*(t: var Ballot): bool =
## Check whether the authorised signers list was shrunk recently after
## applying `addVote()`
t.authRemoved
proc authSignersThreshold*(t: var Ballot): int =
## Returns the minimum number of authorised signers needed for authorising
## an address by vote. This is currently
## ::
## 1 + half of the number of authorised signers
##
1 + (t.authSig.len div 2)
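# Worked example (illustrative only): the `1 + n div 2` rule yields a strict
# majority of the currently authorised signers.
doAssert 1 + (4 div 2) == 3   # 4 signers -> 3 votes needed
doAssert 1 + (5 div 2) == 3   # 5 signers -> 3 votes needed
doAssert 1 + (6 div 2) == 4   # 6 signers -> 4 votes needed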
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc isAuthSigner*(t: var Ballot; addresses: var seq[EthAddress]): bool =
## Check whether all `addresses` entries are authorised signers.
##
## Using this function is preferable to building the sorted list via
## `authSigners()` and checking membership there; this function tests
## membership directly and runs in `O(n)` for `n` addresses.
for a in addresses:
if a notin t.authSig:
return false
true
proc isAuthSigner*(t: var Ballot; address: EthAddress): bool =
## Check whether `address` is an authorised signer
address in t.authSig
proc delVote*(t: var Ballot; signer, address: EthAddress) {.
gcsafe, raises: [KeyError].} =
## Remove a particular previously added vote.
if address in t.votes:
if signer in t.votes[address].signers:
if t.votes[address].signers.len <= 1:
t.votes.del(address)
else:
t.votes[address].signers.del(signer)
proc flushVotes*(t: var Ballot) =
## Reset/flush pending votes, authorised signers remain the same.
t.votes.clear
# clique/snapshot.go(141): func (s *Snapshot) validVote(address [..]
proc isValidVote*(t: var Ballot; address: EthAddress; authorize: bool): bool =
## Check whether voting would have an effect in `addVote()`
if address in t.authSig: not authorize else: authorize
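# Minimal sketch (illustrative only, hypothetical helper name): the same
# decision written out as a truth table over the two inputs.
proc sketchIsValidVote(isSigner, authorize: bool): bool =
  if isSigner: not authorize else: authorize

doAssert sketchIsValidVote(isSigner = true,  authorize = false)  # drop an existing signer
doAssert sketchIsValidVote(isSigner = false, authorize = true)   # add a new signer
doAssert not sketchIsValidVote(isSigner = true,  authorize = true)
doAssert not sketchIsValidVote(isSigner = false, authorize = false)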
proc addVote*(t: var Ballot; vote: Vote) {.
gcsafe, raises: [KeyError].} =
## Add a new vote collecting the signers for the particular voting address.
##
## Unless it is the first vote for this address, the authorisation type
## `true` or `false` of the vote must match the previous one. For the first
## vote, the authorisation type `true` is accepted if the address is not an
## authorised signer, and `false` if it is an authorised signer. Otherwise
## the vote is ignored.
##
## If the number of signers for the particular address is at least
## `authSignersThreshold()`, the status of this address will change as
## follows.
## * If the authorisation type is `true`, the address is added
## to the list of authorised signers.
## * If the authorisation type is `false`, the address is removed
## from the list of authorised signers.
t.authRemoved = false
var
numVotes = 0
authOk = vote.authorize
# clique/snapshot.go(147): if !s.validVote(address, [..]
if not t.isValidVote(vote.address, vote.authorize):
# Corner case: touch votes for this account
if t.votes.hasKey(vote.address):
let refVote = t.votes[vote.address]
numVotes = refVote.signers.len
authOk = refVote.authorize
elif not t.votes.hasKey(vote.address):
# Collect initial vote
t.votes[vote.address] = Tally(
authorize: vote.authorize,
signers: {vote.signer: vote}.toTable)
numVotes = 1
elif t.votes[vote.address].authorize == vote.authorize:
# Collect additional vote
t.votes[vote.address].signers[vote.signer] = vote
numVotes = t.votes[vote.address].signers.len
else:
return
# clique/snapshot.go(262): if tally := snap.Tally[header.Coinbase]; [..]
# Vote passed, update the list of authorised signers if enough votes
if numVotes < t.authSignersThreshold:
return
var obsolete = @[vote.address]
if authOk:
# Has minimum votes, so add it
t.authSig[vote.address] = true
else:
# clique/snapshot.go(266): delete(snap.Signers, [..]
t.authSig.del(vote.address)
t.authRemoved = true
# Not a signer anymore => remove it everywhere
for key,value in t.votes.mpairs:
if vote.address in value.signers:
if 1 < value.signers.len:
value.signers.del(vote.address)
else:
obsolete.add key
for key in obsolete:
t.votes.del(key)
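# Minimal sketch (illustrative only, simplified types and hypothetical names):
# votes from distinct authorised signers accumulate until the `1 + n div 2`
# threshold is met, at which point the voted address flips status.
import std/sets

proc sketchVotePasses(signers: HashSet[string]; votesFor: seq[string]): bool =
  let threshold = 1 + signers.len div 2
  var seen: HashSet[string]
  for s in votesFor:
    if s in signers:       # only authorised signers may vote
      seen.incl s          # duplicate votes from one signer count once
  threshold <= seen.len

doAssert not sketchVotePasses(["A", "B", "C", "D"].toHashSet, @["A", "B", "B"])
doAssert sketchVotePasses(["A", "B", "C", "D"].toHashSet, @["A", "B", "C"])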
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,182 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Snapshot Processor for Clique PoA Consensus Protocol
## ====================================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
std/times,
chronicles,
eth/common,
stew/results,
".."/[clique_cfg, clique_defs],
"."/[ballot, snapshot_desc]
{.push raises: [].}
logScope:
topics = "clique PoA snapshot-apply"
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
template pairWalkIj(first, last: int; offTop: Positive; code: untyped) =
if first <= last:
for n in first .. last - offTop:
let
i {.inject.} = n
j {.inject.} = n + 1
code
else:
for n in first.countdown(last + offTop):
let
i {.inject.} = n
j {.inject.} = n - 1
code
template doWalkIt(first, last: int; code: untyped) =
if first <= last:
for n in first .. last:
let it {.inject.} = n
code
else:
for n in first.countdown(last):
let it {.inject.} = n
code
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
# clique/snapshot.go(185): func (s *Snapshot) apply(headers [..]
proc snapshotApplySeq*(s: Snapshot; headers: var seq[BlockHeader],
first, last: int): CliqueOkResult
{.gcsafe, raises: [CatchableError].} =
## Initialises an authorization snapshot by applying the `headers` slice
## `first .. last` to the argument snapshot descriptor `s`.
# Sanity check that the headers can be applied
if headers[first].blockNumber != s.blockNumber + 1:
return err((errInvalidVotingChain,""))
# clique/snapshot.go(191): for i := 0; i < len(headers)-1; i++ {
first.pairWalkIj(last, 1):
if headers[j].blockNumber != headers[i].blockNumber+1:
return err((errInvalidVotingChain,""))
# Iterate through the headers and create a new snapshot
let
start = getTime()
var
logged = start
# clique/snapshot.go(206): for i, header := range headers [..]
first.doWalkIt(last):
let
# headersIndex => also used for logging at the end of this loop
headersIndex = it
header = headers[headersIndex]
number = header.blockNumber
# Remove any votes on checkpoint blocks
if (number mod s.cfg.epoch).isZero:
# Note that the correctness of the authorised accounts list is verified in
# clique/clique.verifyCascadingFields(),
# see clique/clique.go(355): if number%c.config.Epoch == 0 {
# This means, the account list passed with the epoch header is verified
# to be the same as the one we already have.
#
# clique/snapshot.go(210): snap.Votes = nil
s.ballot.flushVotes
# Delete the oldest signer from the recent list to allow it to sign again
block:
let limit = s.ballot.authSignersThreshold.u256
if limit <= number:
s.recents.del(number - limit)
# Resolve the authorization key and check against signers
let signer = s.cfg.ecRecover(header)
if signer.isErr:
return err((errEcRecover,$signer.error))
if not s.ballot.isAuthSigner(signer.value):
return err((errUnauthorizedSigner,""))
for recent in s.recents.values:
if recent == signer.value:
return err((errRecentlySigned,""))
s.recents[number] = signer.value
# Header authorized, discard any previous vote from the signer
# clique/snapshot.go(233): for i, vote := range snap.Votes {
s.ballot.delVote(signer = signer.value, address = header.coinbase)
# Tally up the new vote from the signer
# clique/snapshot.go(244): var authorize bool
var authOk = false
if header.nonce == NONCE_AUTH:
authOk = true
elif header.nonce != NONCE_DROP:
return err((errInvalidVote,""))
let vote = Vote(address: header.coinbase,
signer: signer.value,
blockNumber: number,
authorize: authOk)
# clique/snapshot.go(253): if snap.cast(header.Coinbase, authorize) {
s.ballot.addVote(vote)
# clique/snapshot.go(269): if limit := uint64(len(snap.Signers)/2 [..]
if s.ballot.isAuthSignersListShrunk:
# Signer list shrunk, delete any leftover recent caches
let limit = s.ballot.authSignersThreshold.u256
if limit <= number:
# Pop off least block number from the list
let item = number - limit
s.recents.del(item)
# If we're taking too much time (ecrecover), notify the user once in a while
if s.cfg.logInterval < getTime() - logged:
debug "Reconstructing voting history",
processed = headersIndex,
total = headers.len,
elapsed = getTime() - start
logged = getTime()
let sinceStart = getTime() - start
if s.cfg.logInterval < sinceStart:
debug "Reconstructed voting history",
processed = headers.len,
elapsed = sinceStart
# clique/snapshot.go(303): snap.Number += uint64(len(headers))
doAssert headers[last].blockNumber == s.blockNumber+(1+(last-first).abs).u256
s.blockNumber = headers[last].blockNumber
s.blockHash = headers[last].blockHash
ok()
proc snapshotApply*(s: Snapshot; headers: var seq[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [CatchableError].} =
if headers.len == 0:
return ok()
s.snapshotApplySeq(headers, 0, headers.len - 1)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,190 +0,0 @@
# Nimbus
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
##
## Snapshot Structure for Clique PoA Consensus Protocol
## ====================================================
##
## For details see
## `EIP-225 <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
## and
## `go-ethereum <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md>`_
##
import
std/tables,
chronicles,
eth/rlp,
results,
../../../db/[core_db, storage_types],
../clique_cfg,
../clique_defs,
../clique_helpers,
./ballot
export tables
type
SnapshotResult* = ##\
## Snapshot/error result type
Result[Snapshot,CliqueError]
AddressHistory* = Table[BlockNumber,EthAddress]
SnapshotData* = object
blockNumber: BlockNumber ## block number where snapshot was created on
blockHash: Hash256 ## block hash where snapshot was created on
recents: AddressHistory ## recent signers for spam protections
# clique/snapshot.go(58): Recents map[uint64]common.Address [..]
ballot: Ballot ## Votes => authorised signers
# clique/snapshot.go(50): type Snapshot struct [..]
Snapshot* = ref object ## Snapshot is the state of the authorization
## voting at a given point in time.
cfg: CliqueCfg ## parameters to fine tune behavior
data*: SnapshotData ## real snapshot
{.push raises: [].}
logScope:
topics = "clique PoA snapshot"
# ------------------------------------------------------------------------------
# Private functions needed to support RLP conversion
# ------------------------------------------------------------------------------
template logTxt(info: static[string]): static[string] =
"Clique " & info
proc append[K,V](rw: var RlpWriter; tab: Table[K,V]) =
rw.startList(tab.len)
for key,value in tab.pairs:
rw.append((key,value))
proc read[K,V](rlp: var Rlp; Q: type Table[K,V]): Q {.gcsafe, raises: [RlpError].} =
for w in rlp.items:
let (key,value) = w.read((K,V))
result[key] = value
# ------------------------------------------------------------------------------
# Private constructor helper
# ------------------------------------------------------------------------------
# clique/snapshot.go(72): func newSnapshot(config [..]
proc initSnapshot(s: Snapshot; cfg: CliqueCfg;
number: BlockNumber; hash: Hash256; signers: openArray[EthAddress]) =
## Initialise a new snapshot.
s.cfg = cfg
s.data.blockNumber = number
s.data.blockHash = hash
s.data.recents = initTable[BlockNumber,EthAddress]()
s.data.ballot.initBallot(signers)
# ------------------------------------------------------------------------------
# Public Constructor
# ------------------------------------------------------------------------------
proc newSnapshot*(cfg: CliqueCfg; header: BlockHeader): Snapshot =
## Create a new snapshot for the given header. The header need not be on the
## block chain, yet. The trusted signer list is derived from the
## `extra data` field of the header.
new result
let signers = header.extraData.extraDataAddresses
result.initSnapshot(cfg, header.blockNumber, header.blockHash, signers)
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc cfg*(s: Snapshot): CliqueCfg =
## Getter
s.cfg
proc blockNumber*(s: Snapshot): BlockNumber =
## Getter
s.data.blockNumber
proc blockHash*(s: Snapshot): Hash256 =
## Getter
s.data.blockHash
proc recents*(s: Snapshot): var AddressHistory =
## Retrieves the list of recently added addresses
s.data.recents
proc ballot*(s: Snapshot): var Ballot =
## Retrieves the ballot box descriptor with the votes
s.data.ballot
# ------------------------------------------------------------------------------
# Public setters
# ------------------------------------------------------------------------------
proc `blockNumber=`*(s: Snapshot; number: BlockNumber) =
## Setter
s.data.blockNumber = number
proc `blockHash=`*(s: Snapshot; hash: Hash256) =
## Setter
s.data.blockHash = hash
# ------------------------------------------------------------------------------
# Public load/store support
# ------------------------------------------------------------------------------
# clique/snapshot.go(88): func loadSnapshot(config [..]
proc loadSnapshot*(cfg: CliqueCfg; hash: Hash256):
Result[Snapshot,CliqueError] =
## Load an existing snapshot from the database.
var
s = Snapshot(cfg: cfg)
try:
let rc = s.cfg.db.newKvt().get(hash.cliqueSnapshotKey.toOpenArray)
if rc.isOk:
s.data = rc.value.decode(SnapshotData)
else:
if rc.error.error != KvtNotFound:
error logTxt "get() failed", error=($$rc.error)
return err((errSnapshotLoad,""))
except RlpError as e:
return err((errSnapshotLoad, $e.name & ": " & e.msg))
ok(s)
# clique/snapshot.go(104): func (s *Snapshot) store(db [..]
proc storeSnapshot*(cfg: CliqueCfg; s: Snapshot): CliqueOkResult =
## Insert the snapshot into the database.
let
key = s.data.blockHash.cliqueSnapshotKey
val = rlp.encode(s.data)
db = s.cfg.db.newKvt(offSite = true) # bypass block chain txs
defer: db.forget()
let rc = db.put(key.toOpenArray, val)
if rc.isErr:
error logTxt "put() failed", `error`=($$rc.error)
db.saveOffSite()
cfg.nSnaps.inc
cfg.snapsData += val.len.uint
ok()
# ------------------------------------------------------------------------------
# Public deep copy
# ------------------------------------------------------------------------------
proc cloneSnapshot*(s: Snapshot): Snapshot =
## Clone the snapshot
Snapshot(
cfg: s.cfg, # copy ref
data: s.data) # copy data
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -16,7 +16,6 @@ import
../../transaction, ../../transaction,
../../vm_state, ../../vm_state,
../../vm_types, ../../vm_types,
../clique,
../dao, ../dao,
./calculate_reward, ./calculate_reward,
./executor_helpers, ./executor_helpers,


@ -1,155 +0,0 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
pkg/[chronos,
stew/results,
chronicles,
eth/keys],
".."/[config,
constants],
"."/[
chain,
tx_pool,
validate],
"."/clique/[
clique_desc,
clique_cfg,
clique_sealer],
../utils/utils,
../common/[common, context]
type
EngineState* = enum
EngineStopped,
EngineRunning,
EnginePostMerge
SealingEngineRef* = ref SealingEngineObj
SealingEngineObj = object of RootObj
state: EngineState
engineLoop: Future[void]
chain*: ChainRef
ctx: EthContext
signer: EthAddress
txPool: TxPoolRef
proc validateSealer*(conf: NimbusConf, ctx: EthContext, chain: ChainRef): Result[void, string] =
if conf.engineSigner == ZERO_ADDRESS:
return err("signer address should not zero, use --engine-signer to set signer address")
let res = ctx.am.getAccount(conf.engineSigner)
if res.isErr:
return err("signer address not in registered accounts, use --import-key/account to register the account")
let acc = res.get()
if not acc.unlocked:
return err("signer account not unlocked, please unlock it first via rpc/password file")
let com = chain.com
if com.consensus != ConsensusType.POA:
return err("currently only PoA engine is supported")
ok()
proc generateBlock(engine: SealingEngineRef,
outBlock: var EthBlock): Result[void, string] =
let bundle = engine.txPool.assembleBlock().valueOr:
return err(error)
outBlock = bundle.blk
if engine.chain.com.consensus == ConsensusType.POS:
# Stop the block generator if we reach TTD
engine.state = EnginePostMerge
if engine.state != EnginePostMerge:
# Post merge, Clique should not be executing
let sealRes = engine.chain.clique.seal(outBlock)
if sealRes.isErr:
return err("error sealing block header: " & $sealRes.error)
debug "generated block",
blockNumber = outBlock.header.blockNumber,
blockHash = blockHash(outBlock.header)
ok()
proc sealingLoop(engine: SealingEngineRef): Future[void] {.async.} =
let clique = engine.chain.clique
proc signerFunc(signer: EthAddress, message: openArray[byte]):
Result[RawSignature, cstring] {.gcsafe.} =
let
hashData = keccakHash(message)
ctx = engine.ctx
acc = ctx.am.getAccount(signer).tryGet()
rawSign = sign(acc.privateKey, SkMessage(hashData.data)).toRaw
ok(rawSign)
clique.authorize(engine.signer, signerFunc)
# convert times.Duration to chronos.Duration
let period = chronos.seconds(clique.cfg.period.int64)
while engine.state == EngineRunning:
# the sealing engine will tick every `cliquePeriod` seconds
await sleepAsync(period)
if engine.state != EngineRunning:
break
# deviation from 'correct' sealing engine:
# - no queue for chain reorgs
# - no async lock/guard against race with sync algo
var blk: EthBlock
let blkRes = engine.generateBlock(blk)
if blkRes.isErr:
error "sealing engine generateBlock error", msg=blkRes.error
break
let res = engine.chain.persistBlocks([blk.header], [
BlockBody(transactions: blk.txs, uncles: blk.uncles)
])
if res == ValidationResult.Error:
error "sealing engine: persistBlocks error"
break
discard engine.txPool.smartHead(blk.header) # add transactions update jobs
info "block generated", number=blk.header.blockNumber
proc new*(_: type SealingEngineRef,
chain: ChainRef,
ctx: EthContext,
signer: EthAddress,
txPool: TxPoolRef,
initialState: EngineState): SealingEngineRef =
SealingEngineRef(
chain: chain,
ctx: ctx,
signer: signer,
txPool: txPool,
state: initialState
)
proc start*(engine: SealingEngineRef) =
## Starts sealing engine.
if engine.state == EngineStopped:
engine.state = EngineRunning
engine.engineLoop = sealingLoop(engine)
info "sealing engine started"
proc stop*(engine: SealingEngineRef) {.async.} =
## Stop sealing engine from producing more blocks.
if engine.state == EngineRunning:
engine.state = EngineStopped
await engine.engineLoop.cancelAndWait()
info "sealing engine stopped"


@ -20,7 +20,6 @@ import
../../vm_state, ../../vm_state,
../../vm_types, ../../vm_types,
../eip4844, ../eip4844,
../clique/[clique_sealer, clique_desc, clique_cfg],
../pow/difficulty, ../pow/difficulty,
../executor, ../executor,
../casper, ../casper,
@ -84,12 +83,6 @@ proc prepareHeader(dh: TxChainRef; parent: BlockHeader, timestamp: EthTime)
dh.prepHeader.timestamp, parent) dh.prepHeader.timestamp, parent)
dh.prepHeader.coinbase = dh.miner dh.prepHeader.coinbase = dh.miner
dh.prepHeader.mixDigest.reset dh.prepHeader.mixDigest.reset
of ConsensusType.POA:
discard dh.com.poa.prepare(parent, dh.prepHeader)
# beware POA header.coinbase != signerAddress
# but BaseVMState.minerAddress == signerAddress
# - minerAddress is extracted from header.extraData
# - header.coinbase is from clique engine
of ConsensusType.POS: of ConsensusType.POS:
dh.com.pos.prepare(dh.prepHeader) dh.com.pos.prepare(dh.prepHeader)
@ -98,8 +91,6 @@ proc prepareForSeal(dh: TxChainRef; header: var BlockHeader) {.gcsafe, raises: [
of ConsensusType.POW: of ConsensusType.POW:
# do nothing, tx pool was designed with POW in mind # do nothing, tx pool was designed with POW in mind
discard discard
of ConsensusType.POA:
dh.com.poa.prepareForSeal(dh.prepHeader, header)
of ConsensusType.POS: of ConsensusType.POS:
dh.com.pos.prepareForSeal(header) dh.com.pos.prepareForSeal(header)
@ -107,12 +98,6 @@ proc getTimestamp(dh: TxChainRef, parent: BlockHeader): EthTime =
case dh.com.consensus case dh.com.consensus
of ConsensusType.POW: of ConsensusType.POW:
EthTime.now() EthTime.now()
of ConsensusType.POA:
let timestamp = parent.timestamp + dh.com.poa.cfg.period
if timestamp < EthTime.now():
EthTime.now()
else:
timestamp
of ConsensusType.POS: of ConsensusType.POS:
dh.com.pos.timestamp dh.com.pos.timestamp


@ -25,8 +25,6 @@ import
./core/eip4844, ./core/eip4844,
./core/block_import, ./core/block_import,
./db/core_db/persistent, ./db/core_db/persistent,
./core/clique/clique_desc,
./core/clique/clique_sealer,
./sync/protocol, ./sync/protocol,
./sync/handlers ./sync/handlers
@ -49,7 +47,7 @@ proc importBlocks(conf: NimbusConf, com: CommonRef) =
proc basicServices(nimbus: NimbusNode,
conf: NimbusConf,
com: CommonRef) =
nimbus.txPool = TxPoolRef.new(com, conf.engineSigner)
nimbus.txPool = TxPoolRef.new(com, ZERO_ADDRESS)
# txPool must be informed of active head
# so it can know the latest account state
@ -208,40 +206,6 @@ proc localServices(nimbus: NimbusNode, conf: NimbusConf,
nimbus.setupRpc(conf, com, protocols)
if conf.engineSigner != ZERO_ADDRESS and not com.forkGTE(MergeFork):
let res = nimbus.ctx.am.getAccount(conf.engineSigner)
if res.isErr:
error "Failed to get account",
msg = res.error,
hint = "--key-store or --import-key"
quit(QuitFailure)
let rs = validateSealer(conf, nimbus.ctx, nimbus.chainRef)
if rs.isErr:
fatal "Engine signer validation error", msg = rs.error
quit(QuitFailure)
proc signFunc(signer: EthAddress, message: openArray[byte]): Result[RawSignature, cstring] {.gcsafe.} =
let
hashData = keccakHash(message)
acc = nimbus.ctx.am.getAccount(signer).tryGet()
rawSign = sign(acc.privateKey, SkMessage(hashData.data)).toRaw
ok(rawSign)
nimbus.chainRef.clique.authorize(conf.engineSigner, signFunc)
# disable sealing engine if beacon engine enabled
if not com.forkGTE(MergeFork):
nimbus.sealingEngine = SealingEngineRef.new(
nimbus.chainRef, nimbus.ctx, conf.engineSigner,
nimbus.txPool, EngineStopped
)
# only run sealing engine if there is a signer
if conf.engineSigner != ZERO_ADDRESS:
nimbus.sealingEngine.start()
# metrics server
if conf.metricsEnabled:
info "Starting metrics HTTP server", address = conf.metricsAddress, port = conf.metricsPort

View File

@ -12,7 +12,6 @@ import
eth/p2p,
metrics/chronos_httpserver,
./rpc/rpc_server,
./core/sealer,
./core/chain,
./core/tx_pool,
./sync/peers,
@ -29,7 +28,6 @@ export
p2p,
chronos_httpserver,
rpc_server,
sealer,
chain,
tx_pool,
peers,
@ -50,7 +48,6 @@ type
engineApiServer*: NimbusHttpServerRef
ethNode*: EthereumNode
state*: NimbusState
sealingEngine*: SealingEngineRef
ctx*: EthContext
chainRef*: ChainRef
txPool*: TxPoolRef
@ -71,8 +68,6 @@ proc stop*(nimbus: NimbusNode, conf: NimbusConf) {.async, gcsafe.} =
await nimbus.httpServer.stop()
if nimbus.engineApiServer.isNil.not:
await nimbus.engineApiServer.stop()
if conf.engineSigner != ZERO_ADDRESS and nimbus.sealingEngine.isNil.not:
await nimbus.sealingEngine.stop()
if conf.maxPeers > 0:
await nimbus.networkLoop.cancelAndWait()
if nimbus.peerManager.isNil.not:

View File

@ -17,7 +17,7 @@ import
eth/p2p/[private/p2p_types, peer_pool],
stew/byteutils,
"."/[protocol, types],
../core/[chain, clique/clique_sealer, eip4844, gaslimit, withdrawals],
../core/[chain, eip4844, gaslimit, withdrawals],
../core/pow/difficulty,
../constants,
../utils/utils,
@ -151,15 +151,6 @@ proc validateDifficulty(ctx: LegacySyncRef,
let com = ctx.chain.com
case consensusType
of ConsensusType.POA:
let rc = ctx.chain.clique.calcDifficulty(parentHeader)
if rc.isErr:
return false
if header.difficulty < rc.get():
trace "provided header difficulty is too low",
expect=rc.get(), get=header.difficulty
return false
of ConsensusType.POW:
let calcDiffc = com.calcDifficulty(header.timestamp, parentHeader)
if header.difficulty < calcDiffc:
@ -212,16 +203,6 @@ proc validateHeader(ctx: LegacySyncRef, header: BlockHeader,
if not ctx.validateDifficulty(header, parentHeader, consensusType):
return false
if consensusType == ConsensusType.POA:
let period = com.cliquePeriod
# Timestamp diff between blocks is lower than PERIOD (clique)
if parentHeader.timestamp + period > header.timestamp:
trace "invalid timestamp diff (lower than period)",
parent=parentHeader.timestamp,
header=header.timestamp,
period
return false
var res = com.validateGasLimitOrBaseFee(header, parentHeader)
if res.isErr:
trace "validate gaslimit error",

View File

@ -17,7 +17,6 @@ import
from ../nimbus/common/chain_config import
MainNet,
GoerliNet,
SepoliaNet,
HoleskyNet
@ -73,10 +72,9 @@ proc processU256(val: string, o: var UInt256): ConfigStatus =
o = parse(val, UInt256)
result = Success
proc processNetId(val: string, o: var NetworkId): ConfigStatus =
func processNetId(val: string, o: var NetworkId): ConfigStatus =
case val.toLowerAscii()
of "main": o = MainNet
of "goerli": o = GoerliNet
of "sepolia": o = SepoliaNet of "sepolia": o = SepoliaNet
of "holesky": o = HoleskyNet of "holesky": o = HoleskyNet

View File

@ -1,360 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
std/[algorithm, os, sequtils, strformat, strutils, times],
chronicles,
eth/keys,
stint,
unittest2,
../nimbus/core/[chain,
clique,
clique/clique_snapshot,
clique/clique_desc,
clique/clique_helpers
],
../nimbus/common/[common,context],
../nimbus/utils/[ec_recover, utils],
../nimbus/[config, constants],
./test_clique/pool,
./replay/undump_blocks_gz
const
baseDir = [".", "tests", ".." / "tests", $DirSep] # path containing repo
repoDir = ["test_clique", "replay", "status"] # alternative repos
goerliCapture = "goerli68161.txt.gz"
groupReplayTransactions = 7
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
proc getBlockHeader(ap: TesterPool; number: BlockNumber): BlockHeader =
## Shortcut => db/core_db.getBlockHeader()
doAssert ap.db.getBlockHeader(number, result)
proc ppSecs(elapsed: Duration): string =
result = $elapsed.inSeconds
let ns = elapsed.inNanoseconds mod 1_000_000_000
if ns != 0:
# to rounded decimal seconds
let ds = (ns + 5_000_000i64) div 10_000_000i64
result &= &".{ds:02}"
result &= "s"
proc ppRow(elapsed: Duration): string =
let ms = elapsed.inMilliSeconds + 500
"x".repeat(ms div 1000)
proc findFilePath(file: string): string =
result = "?unknown?" / file
for dir in baseDir:
for repo in repoDir:
let path = dir / repo / file
if path.fileExists:
return path
proc setTraceLevel =
discard
when defined(chronicles_runtime_filtering) and loggingEnabled:
setLogLevel(LogLevel.TRACE)
proc setErrorLevel =
discard
when defined(chronicles_runtime_filtering) and loggingEnabled:
setLogLevel(LogLevel.ERROR)
# ------------------------------------------------------------------------------
# Test Runners
# ------------------------------------------------------------------------------
# clique/snapshot_test.go(99): func TestClique(t *testing.T) {
proc runCliqueSnapshot(noisy = true; postProcessOk = false;
testIds = {0'u16 .. 999'u16}; skipIds = {0'u16}-{0'u16}) =
## Clique PoA Snapshot
## ::
## Tests that Clique signer voting is evaluated correctly for various
## simple and complex scenarios, as well as that a few special corner
## cases fail correctly.
##
let postProcessInfo = if postProcessOk: ", Transaction Finaliser Applied"
else: ", Without Finaliser"
suite &"Clique PoA Snapshot{postProcessInfo}":
var pool = newVoterPool()
setErrorLevel()
if noisy:
pool.noisy = true
setTraceLevel()
# clique/snapshot_test.go(379): for i, tt := range tests {
for voterSample in voterSamples.filterIt(it.id.uint16 in testIds):
let tt = voterSample
test &"Snapshots {tt.id:2}: {tt.info.substr(0,50)}...":
pool.say "\n"
# Noisily skip this test
if tt.id.uint16 in skipIds:
skip()
else:
# Assemble a chain of headers from the cast votes
# see clique/snapshot_test.go(407): config := *params.TestChainConfig
pool
.resetVoterChain(tt.signers, tt.epoch, tt.runBack)
# see clique/snapshot_test.go(425): for j, block := range blocks {
.appendVoter(tt.votes)
.commitVoterChain(postProcessOk)
# see clique/snapshot_test.go(477): if err != nil {
if tt.failure != cliqueNoError[0]:
# Note that clique/snapshot_test.go does not verify _here_ against
# the scheduled test error -- rather this voting error is supposed
# to happen earlier (processed at clique/snapshot_test.go(467)) when
# assembling the block chain (sounds counter intuitive to the author
# of this source file as the scheduled errors are _clique_ related).
check pool.failed[1][0] == tt.failure
else:
let
expected = tt.results.mapIt("@" & it).sorted
snapResult = pool.pp(pool.cliqueSigners).sorted
pool.say "*** snap state=", pool.pp(pool.snapshot,16)
pool.say " result=[", snapResult.join(",") & "]"
pool.say " expected=[", expected.join(",") & "]"
# Verify the final list of signers against the expected ones
check snapResult == expected
proc runGoerliReplay(noisy = true; showElapsed = false,
captureFile = goerliCapture,
startAtBlock = 0u64; stopAfterBlock = 0u64) =
var
pool = newVoterPool()
cache: array[groupReplayTransactions,(seq[BlockHeader],seq[BlockBody])]
cInx = 0
stoppedOk = false
let
fileInfo = captureFile.splitFile.name.split(".")[0]
filePath = captureFile.findFilePath
pool.verifyFrom = startAtBlock
setErrorLevel()
if noisy:
pool.noisy = true
setTraceLevel()
let stopThreshold = if stopAfterBlock == 0u64: uint64.high.u256
else: stopAfterBlock.u256
suite &"Replay Goerli chain from {fileInfo} capture":
for w in filePath.undumpBlocksGz:
if w[0][0].blockNumber == 0.u256:
# Verify Genesis
doAssert w[0][0] == pool.getBlockHeader(0.u256)
else:
# Condense in cache
cache[cInx] = w
cInx.inc
# Handy for partial tests
if stopThreshold < cache[cInx-1][0][0].blockNumber:
stoppedOk = true
break
# Run from cache if complete set
if cache.len <= cInx:
cInx = 0
let
first = cache[0][0][0].blockNumber
last = cache[^1][0][^1].blockNumber
blkRange = &"#{first}..#{last}"
info = if first <= startAtBlock.u256 and startAtBlock.u256 <= last:
&", verification #{startAtBlock}.."
else:
""
test &"Goerli Blocks {blkRange} ({cache.len} transactions{info})":
let start = getTime()
for (headers,bodies) in cache:
let addedPersistBlocks = pool.chain.persistBlocks(headers,bodies)
check addedPersistBlocks == ValidationResult.Ok
if addedPersistBlocks != ValidationResult.Ok: return
if showElapsed and startAtBlock.u256 <= last:
let
elpd = getTime() - start
info = &"{elpd.ppSecs:>7} {pool.cliqueSignersLen} {elpd.ppRow}"
echo &"\n elapsed {blkRange:<17} {info}"
# Rest from cache
if 0 < cInx:
let
first = cache[0][0][0].blockNumber
last = cache[cInx-1][0][^1].blockNumber
blkRange = &"#{first}..#{last}"
info = if first <= startAtBlock.u256 and startAtBlock.u256 <= last:
&", Verification #{startAtBlock}.."
else:
""
test &"Goerli Blocks {blkRange} ({cache.len} transactions{info})":
let start = getTime()
for (headers,bodies) in cache:
let addedPersistBlocks = pool.chain.persistBlocks(headers,bodies)
check addedPersistBlocks == ValidationResult.Ok
if addedPersistBlocks != ValidationResult.Ok: return
if showElapsed and startAtBlock.u256 <= last:
let
elpsd = getTime() - start
info = &"{elpsd.ppSecs:>7} {pool.cliqueSignersLen} {elpsd.ppRow}"
echo &"\n elapsed {blkRange:<17} {info}"
if stoppedOk:
test &"Runner stopped after reaching #{stopThreshold}":
discard
proc runGoerliBaybySteps(noisy = true;
captureFile = goerliCapture,
stopAfterBlock = 0u64) =
var
pool = newVoterPool()
stoppedOk = false
setErrorLevel()
if noisy:
pool.noisy = true
setTraceLevel()
let
fileInfo = captureFile.splitFile.name.split(".")[0]
filePath = captureFile.findFilePath
stopThreshold = if stopAfterBlock == 0u64: 20.u256
else: stopAfterBlock.u256
suite &"Replay Goerli chain from {fileInfo} capture, single blockwise":
for w in filePath.undumpBlocksGz:
if stoppedOk:
break
if w[0][0].blockNumber == 0.u256:
# Verify Genesis
doAssert w[0][0] == pool.getBlockHeader(0.u256)
else:
for n in 0 ..< w[0].len:
let
header = w[0][n]
body = w[1][n]
var
parents = w[0][0 ..< n]
test &"Goerli Block #{header.blockNumber} + {parents.len} parents":
check pool.chain.clique.cliqueSnapshot(header,parents).isOk
let addedPersistBlocks = pool.chain.persistBlocks(@[header],@[body])
check addedPersistBlocks == ValidationResult.Ok
if addedPersistBlocks != ValidationResult.Ok: return
# Handy for partial tests
if stopThreshold <= header.blockNumber:
stoppedOk = true
break
if stoppedOk:
test &"Runner stopped after reaching #{stopThreshold}":
discard
proc cliqueMiscTests() =
let
prvKeyFile = "private.key".findFilePath
suite "clique misc":
test "signer func":
let
engineSigner = "658bdf435d810c91414ec09147daa6db62406379"
privateKey = prvKeyFile
conf = makeConfig(@["--engine-signer:" & engineSigner, "--import-key:" & privateKey])
ctx = newEthContext()
check ctx.am.importPrivateKey(string conf.importKey).isOk()
check ctx.am.getAccount(conf.engineSigner).isOk()
proc signFunc(signer: EthAddress, message: openArray[byte]): Result[RawSignature, cstring] {.gcsafe.} =
let
hashData = keccakHash(message)
acc = ctx.am.getAccount(conf.engineSigner).tryGet()
rawSign = sign(acc.privateKey, SkMessage(hashData.data)).toRaw
ok(rawSign)
let signerFn: CliqueSignerFn = signFunc
var header: BlockHeader
header.extraData.setLen(EXTRA_VANITY)
header.extraData.add 0.byte.repeat(EXTRA_SEAL)
let signature = signerFn(conf.engineSigner, header.encodeSealHeader).get()
let extraLen = header.extraData.len
if EXTRA_SEAL < extraLen:
header.extraData.setLen(extraLen - EXTRA_SEAL)
header.extraData.add signature
let resAddr = ecRecover(header)
check resAddr.isOk
check resAddr.value == conf.engineSigner
# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------
proc cliqueMain*(noisy = defined(debug)) =
noisy.runCliqueSnapshot(true)
noisy.runCliqueSnapshot(false)
noisy.runGoerliBaybySteps
noisy.runGoerliReplay(startAtBlock = 31100u64)
cliqueMiscTests()
when isMainModule:
let
skipIDs = {999}
# A new capture file can be generated using
# `test_clique/indiump.dumpGroupNl()`
# placed at the end of
# `p2p/chain/persist_blocks.persistBlocks()`.
captureFile = goerliCapture
#captureFile = "dump-stream.out.gz"
proc goerliReplay(noisy = true;
showElapsed = true;
captureFile = captureFile;
startAtBlock = 0u64;
stopAfterBlock = 0u64) =
runGoerliReplay(
noisy = noisy,
showElapsed = showElapsed,
captureFile = captureFile,
startAtBlock = startAtBlock,
stopAfterBlock = stopAfterBlock)
# local path is: nimbus-eth1/tests
let noisy = defined(debug)
noisy.runCliqueSnapshot(true)
noisy.runCliqueSnapshot(false)
noisy.runGoerliBaybySteps
false.runGoerliReplay(startAtBlock = 31100u64)
#noisy.goerliReplay(startAtBlock = 31100u64)
#noisy.goerliReplay(startAtBlock = 194881u64, stopAfterBlock = 198912u64)
cliqueMiscTests()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -1,503 +0,0 @@
# Nimbus
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
std/[algorithm, sequtils, strformat, strutils, tables],
eth/keys,
ethash,
secp256k1/abi,
stew/objects,
../../nimbus/core/[chain, clique], # must be early (compilation annoyance)
../../nimbus/common/common,
../../nimbus/core/clique/[clique_desc, clique_genvote,
clique_helpers, clique_snapshot],
../../nimbus/core/clique/snapshot/[ballot, snapshot_desc],
../../nimbus/[config, constants],
./voter_samples as vs
export
vs, snapshot_desc
const
prngSeed = 42
## The `TestSpecs` sample depends on this seed,
type
XSealKey = array[EXTRA_SEAL,byte]
XSealValue = object
blockNumber: uint64
account: string
TesterPool* = ref object ## Pool to maintain currently active tester accounts,
## mapped from textual names used in the tests below
## to actual Ethereum private keys capable of signing
## transactions.
prng: uint32 ## random state
accounts: Table[string,PrivateKey] ## accounts table
networkId: NetworkId
boot: NetworkParams ## imported Genesis configuration
batch: seq[seq[BlockHeader]] ## collect header chains
chain: ChainRef
names: Table[EthAddress,string] ## reverse lookup for debugging
xSeals: Table[XSealKey,XSealValue] ## collect signatures for debugging
noisy*: bool
# ------------------------------------------------------------------------------
# Private Prng (Clique keeps generated addresses sorted)
# ------------------------------------------------------------------------------
proc posixPrngInit(state: var uint32; seed: uint32) =
state = seed
proc posixPrngRand(state: var uint32): byte =
## POSIX.1-2001 example of a rand() implementation, see manual page rand(3).
##
## Clique relies on the even/odd position of an address after sorting. For
## address generation, the Nim PRNG was used which seems to have changed
## with Nim 1.6.11 (Linux, Windows only.)
##
## The `TestSpecs` sample depends on `prngSeed` and `posixPrngRand()`.
state = state * 1103515245 + 12345;
let val = (state shr 16) and 32767 # mod 2^15
(val shr 8).byte # Extract second byte
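# Illustrative usage sketch only, not part of the removed module: the PRNG
# above is what makes the generated test keys reproducible. `prngSeed` (42)
# and SkRawSecretKeySize are the constants used elsewhere in this file.
var state: uint32
state.posixPrngInit(prngSeed)
var raw: array[SkRawSecretKeySize, byte]
for i in 0 ..< raw.len:
  raw[i] = state.posixPrngRand()   # same seed -> same byte stream -> same key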
# ------------------------------------------------------------------------------
# Private Helpers
# ------------------------------------------------------------------------------
proc getBlockHeader(ap: TesterPool; number: BlockNumber): BlockHeader =
## Shortcut => db/core_db.getBlockHeader()
doAssert ap.chain.clique.db.getBlockHeader(number, result)
proc getBlockHeader(ap: TesterPool; hash: Hash256): BlockHeader =
## Shortcut => db/core_db.getBlockHeader()
doAssert ap.chain.clique.db.getBlockHeader(hash, result)
proc isZero(a: openArray[byte]): bool =
result = true
for w in a:
if w != 0:
return false
proc rand(ap: TesterPool): byte =
ap.prng.posixPrngRand().byte
proc newPrivateKey(ap: TesterPool): PrivateKey =
## Roughly modelled after `random(PrivateKey,getRng()[])` with
## non-secure but reproducible PRNG
var data{.noinit.}: array[SkRawSecretKeySize,byte]
for n in 0 ..< data.len:
data[n] = ap.rand
# verify generated key, see keys.random(PrivateKey) from eth/keys.nim
var dataPtr0 = cast[ptr byte](unsafeAddr data[0])
doAssert secp256k1_ec_seckey_verify(
secp256k1_context_no_precomp, dataPtr0) == 1
# Convert to PrivateKey
PrivateKey.fromRaw(data).value
proc privateKey(ap: TesterPool; account: string): PrivateKey =
## Return private key for given tester `account`
if account != "":
if account in ap.accounts:
result = ap.accounts[account]
else:
result = ap.newPrivateKey
ap.accounts[account] = result
let address = result.toPublicKey.toCanonicalAddress
ap.names[address] = account
# ------------------------------------------------------------------------------
# Private pretty printer call backs
# ------------------------------------------------------------------------------
proc findName(ap: TesterPool; address: EthAddress): string =
## Find name for a particular address
if address notin ap.names:
ap.names[address] = &"X{ap.names.len+1}"
ap.names[address]
proc findSignature(ap: TesterPool; sig: openArray[byte]): XSealValue =
## Find a previously registered signature
if sig.len == XSealKey.len:
let key = toArray(XSealKey.len,sig)
if key in ap.xSeals:
result = ap.xSeals[key]
proc pp(ap: TesterPool; v: BlockNonce): string =
## Pretty print nonce
if v == NONCE_AUTH:
"AUTH"
elif v == NONCE_DROP:
"DROP"
else:
&"0x{v.toHex}"
proc pp(ap: TesterPool; v: EthAddress): string =
## Pretty print address
if v.isZero:
result = "@0"
else:
let a = ap.findName(v)
if a == "":
result = &"@{v}"
else:
result = &"@{a}"
proc pp*(ap: TesterPool; v: openArray[EthAddress]): seq[string] =
## Pretty print address list
toSeq(v).mapIt(ap.pp(it))
proc pp(ap: TesterPool; v: Blob): string =
## Visualise `extraData` field
if v.len < EXTRA_VANITY + EXTRA_SEAL or
((v.len - (EXTRA_VANITY + EXTRA_SEAL)) mod EthAddress.len) != 0:
result = &"0x{v.toHex}[{v.len}]"
else:
var data = v
#
# extra vanity prefix
let vanity = data[0 ..< EXTRA_VANITY]
data = data[EXTRA_VANITY ..< data.len]
result = if vanity.isZero: "0u256+" else: &"{vanity.toHex}+"
#
# list of addresses
if EthAddress.len + EXTRA_SEAL <= data.len:
var glue = "["
while EthAddress.len + EXTRA_SEAL <= data.len:
let address = toArray(EthAddress.len,data[0 ..< EthAddress.len])
data = data[EthAddress.len ..< data.len]
result &= &"{glue}{ap.pp(address)}"
glue = ","
result &= "]+"
#
# signature
let val = ap.findSignature(data)
if val.account != "":
result &= &"<#{val.blockNumber},{val.account}>"
elif data.isZero:
result &= &"<0>"
else:
let sig = SkSignature.fromRaw(data)
if sig.isOk:
result &= &"<{sig.value.toHex}>"
else:
result &= &"0x{data.toHex}[{data.len}]"
proc pp(ap: TesterPool; v: Vote): string =
proc authorized(b: bool): string =
if b: "authorise" else: "de-authorise"
"(" &
&"address={ap.pp(v.address)}" &
&",signer={ap.pp(v.signer)}" &
&",blockNumber=#{v.blockNumber}" &
&",{authorized(v.authorize)}" & ")"
proc pp(ap: TesterPool; h: AddressHistory): string =
toSeq(h.keys)
.sorted
.mapIt("#" & $it & ":" & ap.pp(h[it.u256]))
.join(",")
proc votesList(ap: TesterPool; s: Snapshot; sep: string): string =
proc s3Cmp(a, b: (string,string,Vote)): int =
result = cmp(a[0], b[0])
if result == 0:
result = cmp(a[1], b[1])
let votes = s.ballot.votesInternal
votes.mapIt((ap.pp(it[0]),ap.pp(it[1]),it[2]))
.sorted(cmp = s3Cmp)
.mapIt(ap.pp(it[2]))
.join(sep)
proc signersList(ap: TesterPool; s: Snapshot): string =
ap.pp(s.ballot.authSigners).sorted.join(",")
proc pp*(ap: TesterPool; s: Snapshot; delim: string): string =
## Pretty print descriptor
let
p1 = if 0 < delim.len: delim else: ";"
p2 = if 0 < delim.len and delim[0] == '\n': delim & ' '.repeat(7) else: ";"
"(" &
&"blockNumber=#{s.blockNumber}" &
&"{p1}recents=" & "{" & ap.pp(s.recents) & "}" &
&"{p1}signers=" & "{" & ap.signersList(s) & "}" &
&"{p1}votes=[" & ap.votesList(s,p2) & "])"
proc pp*(ap: TesterPool; s: Snapshot; indent = 0): string =
## Pretty print descriptor
let delim = if 0 < indent: "\n" & ' '.repeat(indent) else: " "
ap.pp(s, delim)
proc pp(ap: TesterPool; v: BlockHeader; delim: string): string =
## Pretty print block header
let sep = if 0 < delim.len: delim else: ";"
&"(blockNumber=#{v.blockNumber}" &
&"{sep}parentHash={v.parentHash}" &
&"{sep}selfHash={v.blockHash}" &
&"{sep}stateRoot={v.stateRoot}" &
&"{sep}coinbase={ap.pp(v.coinbase)}" &
&"{sep}nonce={ap.pp(v.nonce)}" &
&"{sep}extraData={ap.pp(v.extraData)})"
proc pp(ap: TesterPool; v: BlockHeader; indent = 3): string =
## Pretty print block header, NL delimited, indented fields
let delim = if 0 < indent: "\n" & ' '.repeat(indent) else: " "
ap.pp(v, delim)
# ------------------------------------------------------------------------------
# Private: Constructor helpers
# ------------------------------------------------------------------------------
proc resetChainDb(ap: TesterPool; extraData: Blob; debug = false) =
## Setup new block chain with bespoke genesis
# new genesis block
if 0 < extraData.len:
ap.boot.genesis.extraData = extraData
let com = CommonRef.new(
newCoreDbRef DefaultDbMemory,
networkId = ap.networkId,
params = ap.boot)
ap.chain = newChain(com)
com.initializeEmptyDb()
ap.noisy = debug
proc initTesterPool(ap: TesterPool): TesterPool {.discardable.} =
result = ap
result.prng.posixPrngInit(prngSeed)
result.batch = @[newSeq[BlockHeader]()]
result.accounts = initTable[string,PrivateKey]()
result.xSeals = initTable[XSealKey,XSealValue]()
result.names = initTable[EthAddress,string]()
result.resetChainDb(@[])
# ------------------------------------------------------------------------------
# Public: pretty printer support
# ------------------------------------------------------------------------------
proc say*(t: TesterPool; v: varargs[string,`$`]) =
if t.noisy:
stderr.write v.join & "\n"
proc sayHeaderChain*(ap: TesterPool; indent = 0): TesterPool {.discardable.} =
result = ap
let pfx = ' '.repeat(indent)
var top = if 0 < ap.batch[^1].len: ap.batch[^1][^1]
else: ap.getBlockHeader(0.u256)
ap.say pfx, " top header: " & ap.pp(top, 16+indent)
while not top.blockNumber.isZero:
top = ap.getBlockHeader(top.parentHash)
ap.say pfx, "parent header: " & ap.pp(top, 16+indent)
# ------------------------------------------------------------------------------
# Public: Constructor
# ------------------------------------------------------------------------------
proc newVoterPool*(networkId = GoerliNet): TesterPool =
TesterPool(
networkId: networkId,
boot: networkParams(networkId)
).initTesterPool
# ------------------------------------------------------------------------------
# Public: getter
# ------------------------------------------------------------------------------
proc chain*(ap: TesterPool): ChainRef =
## Getter
ap.chain
proc clique*(ap: TesterPool): Clique =
## Getter
ap.chain.clique
proc db*(ap: TesterPool): CoreDbRef =
## Getter
ap.clique.db
proc cliqueSigners*(ap: TesterPool): seq[EthAddress] =
## Getter
ap.clique.cliqueSigners
proc cliqueSignersLen*(ap: TesterPool): int =
## Getter
ap.clique.cliqueSignersLen
proc snapshot*(ap: TesterPool): Snapshot =
## Getter
ap.clique.snapshot
proc failed*(ap: TesterPool): CliqueFailed =
## Getter
ap.clique.failed
# ------------------------------------------------------------------------------
# Public: setter
# ------------------------------------------------------------------------------
proc `verifyFrom=`*(ap: TesterPool; verifyFrom: uint64) =
## Setter, block number where `Clique` should start
ap.chain.verifyFrom = verifyFrom
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
# clique/snapshot_test.go(62): func (ap *testerAccountPool) address(account [..]
proc address*(ap: TesterPool; account: string): EthAddress =
## retrieves the Ethereum address of a tester account by label, creating
## a new account if no previous one exists yet.
if account != "":
result = ap.privateKey(account).toPublicKey.toCanonicalAddress
# ------------------------------------------------------------------------------
# Public: set up & manage voter database
# ------------------------------------------------------------------------------
proc resetVoterChain*(ap: TesterPool; signers: openArray[string];
epoch = 0; runBack = true): TesterPool {.discardable.} =
## Reset the batch list for voter headers and update genesis block
result = ap
ap.batch = @[newSeq[BlockHeader]()]
# clique/snapshot_test.go(384): signers := make([]common.Address, [..]
let signers = signers.mapIt(ap.address(it)).sorted(EthAscending)
var extraData = 0.byte.repeat(EXTRA_VANITY)
# clique/snapshot_test.go(399): for j, signer := range signers {
for signer in signers:
extraData.add signer.toSeq
# clique/snapshot_test.go(397):
extraData.add 0.byte.repeat(EXTRA_SEAL)
# store modified genesis block and epoch
ap.resetChainDb(extraData, ap.noisy)
ap.clique.cfg.epoch = epoch
ap.clique.applySnapsMinBacklog = runBack
# clique/snapshot_test.go(415): blocks, _ := core.GenerateChain(&config, [..]
proc appendVoter*(ap: TesterPool;
voter: TesterVote): TesterPool {.discardable.} =
## Append a voter header to the block chain batch list
result = ap
doAssert 0 < ap.batch.len # see initTesterPool() and resetVoterChain()
let parent = if ap.batch[^1].len == 0:
ap.getBlockHeader(0.u256)
else:
ap.batch[^1][^1]
let header = ap.chain.clique.cliqueGenvote(
voter = ap.address(voter.voted),
seal = ap.privateKey(voter.signer),
parent = parent,
elapsed = EthTime(100),
voteInOk = voter.auth,
outOfTurn = voter.noTurn,
checkPoint = voter.checkpoint.mapIt(ap.address(it)).sorted(EthAscending))
if 0 < voter.checkpoint.len:
doAssert (header.blockNumber mod ap.clique.cfg.epoch).isZero
# Register for debugging
let
extraLen = header.extraData.len
extraSeal = header.extraData[extraLen - EXTRA_SEAL ..< extraLen]
ap.xSeals[toArray(XSealKey.len,extraSeal)] = XSealValue(
blockNumber: header.blockNumber.truncate(uint64),
account: voter.signer)
if voter.newbatch:
ap.batch.add @[]
ap.batch[^1].add header
proc appendVoter*(ap: TesterPool;
voters: openArray[TesterVote]): TesterPool {.discardable.} =
## Append a list of voter headers to the block chain batch list
result = ap
for voter in voters:
ap.appendVoter(voter)
proc commitVoterChain*(ap: TesterPool; postProcessOk = false;
stopFaultyHeader = false): TesterPool {.discardable.} =
## Write the headers from the voter header batch list to the block chain DB.
##
## If `postProcessOk` is set, an additional verification step is added at
## the end of each transaction.
##
## if `stopFaultyHeader` is set, the function stops immediately on error.
## Otherwise the offending block is removed, the rest of the batch is
## adjusted and applied again repeatedly.
result = ap
var reChainOk = false
for n in 0 ..< ap.batch.len:
block forLoop:
var headers = ap.batch[n]
while true:
if headers.len == 0:
break forLoop # continue with for loop
ap.say &"*** transaction ({n}) list: [",
headers.mapIt(&"#{it.blockNumber}").join(", "), "]"
# Realign rest of transaction to existing block chain
if reChainOk:
var parent = ap.chain.clique.db.getCanonicalHead
for i in 0 ..< headers.len:
headers[i].parentHash = parent.blockHash
headers[i].blockNumber = parent.blockNumber + 1
parent = headers[i]
# Perform transaction into the block chain
let bodies = BlockBody().repeat(headers.len)
if ap.chain.persistBlocks(headers,bodies) == ValidationResult.OK:
break
if stopFaultyHeader:
return
# If the offending block is the last one of the last transaction,
# then there is nothing to do.
let culprit = headers.filterIt(ap.failed[0] == it.blockHash)
doAssert culprit.len == 1
let number = culprit[0].blockNumber
if n + 1 == ap.batch.len and number == headers[^1].blockNumber:
return
# Remove offending block and try again for the rest
ap.say "*** persistBlocks failed, omitting block #", culprit
let prevLen = headers.len
headers = headers.filterIt(number != it.blockNumber)
doAssert headers.len < prevLen
reChainOk = true
if ap.noisy:
ap.say "*** snapshot argument: #", headers[^1].blockNumber
ap.sayHeaderChain(8)
when false: # all addresses are typically pp-mappable
ap.say " address map: ", toSeq(ap.names.pairs)
.mapIt(&"@{it[1]}:{it[0]}")
.sorted
.join("\n" & ' '.repeat(23))
if postProcessOk:
discard ap.clique.cliqueSnapshot(headers[^1])
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
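# Illustrative sketch only, not part of the removed module: the typical call
# pattern for the TesterPool API above, as exercised by the snapshot tests.
# `signers` and `votes` stand for one of the TestSpecs samples.
let pool = newVoterPool()
pool.resetVoterChain(signers, epoch = 3, runBack = true) # bespoke genesis + signer set
  .appendVoter(votes)                                    # build the vote headers
  .commitVoterChain(postProcessOk = true)                # persist into the chain DB
echo pool.pp(pool.cliqueSigners)                         # resulting authorised signers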

View File

@ -1 +0,0 @@
9c647b8b7c4e7c3490668fb6c11473619db80c93704c70893d3813af4090c39c

View File

@ -1,421 +0,0 @@
# Nimbus
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
# Test cases from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-225.md
import
../../nimbus/core/clique/clique_defs
type
TesterVote* = object ## VoterBlock represents a single block signed by a
## particular account, where the account may or may not
## have cast a Clique vote.
signer*: string ##\
## Account that signed this particular block
voted*: string ##\
## Optional value if the signer voted on adding/removing someone
auth*: bool ##\
## Whether the vote was to authorize (or deauthorize)
checkpoint*: seq[string] ##\
## List of authorized signers if this is an epoch block
noTurn*: bool ##\
## Initialise `NOTURN` if `true`, otherwise use `INTURN`. This is not
## part of the Go ref test implementation. The flag is used here to avoid
## what is implemented as the `fakeDiff` kludge in the Go ref test implementation.
##
## Note that the `noTurn` value depends on the sort order of the
## calculated authorised signers account address list. These account
## addresses in turn (no pun intended) depend on the private keys of
## these accounts. Now, the private keys are generated on-the-fly by a
## PRNG which re-seeded the same for each test. So the sort order is
## predictable and the correct value of the `noTurn` flag can be set
## by experimenting with the tests (and/or referring to earlier
## working test specs.)
newbatch*: bool
TestSpecs* = object ## Defining genesis and the various voting scenarios
## to test (see `votes`.)
id*: int ##\
## Test id
info*: string ##\
## Test description
epoch*: int ##\
## Number of blocks in an epoch (unset = 30000)
runBack*: bool ##\
## Set `applySnapsMinBacklog` flag
signers*: seq[string] ##\
## Initial list of authorized signers in the genesis
votes*: seq[TesterVote] ##\
## Chain of signed blocks, potentially influencing auths
results*: seq[string] ##\
## Final list of authorized signers after all blocks
failure*: CliqueErrorType ##\
## Failure if some block is invalid according to the rules
const
# Define the various voting scenarios to test
voterSamples* = [
# clique/snapshot_test.go(108): {
TestSpecs(
id: 1,
info: "Single signer, no votes cast",
signers: @["A"],
votes: @[TesterVote(signer: "A")],
results: @["A"]),
TestSpecs(
id: 2,
info: "Single signer, voting to add two others (only accept first, "&
"second needs 2 votes)",
signers: @["A"],
votes: @[TesterVote(signer: "A", voted: "B", auth: true),
TesterVote(signer: "B"),
TesterVote(signer: "A", voted: "C", auth: true)],
results: @["A", "B"]),
TestSpecs(
id: 3,
info: "Two signers, voting to add three others (only accept first " &
"two, third needs 3 votes already)",
signers: @["A", "B"],
votes: @[TesterVote(signer: "A", voted: "C", auth: true),
TesterVote(signer: "B", voted: "C", auth: true),
TesterVote(signer: "A", voted: "D", auth: true, noTurn: true),
TesterVote(signer: "B", voted: "D", auth: true),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A", voted: "E", auth: true, noTurn: true),
TesterVote(signer: "B", voted: "E", auth: true, noTurn: true)],
results: @["A", "B", "C", "D"]),
TestSpecs(
id: 4,
info: "Single signer, dropping itself (weird, but one less " &
"cornercase by explicitly allowing this)",
signers: @["A"],
votes: @[TesterVote(signer: "A", voted: "A")]),
TestSpecs(
id: 5,
info: "Two signers, actually needing mutual consent to drop either " &
"of them (not fulfilled)",
signers: @["A", "B"],
votes: @[TesterVote(signer: "A", voted: "B")],
results: @["A", "B"]),
TestSpecs(
id: 6,
info: "Two signers, actually needing mutual consent to drop either " &
"of them (fulfilled)",
signers: @["A", "B"],
votes: @[TesterVote(signer: "A", voted: "B"),
TesterVote(signer: "B", voted: "B")],
results: @["A"]),
TestSpecs(
id: 7,
info: "Three signers, two of them deciding to drop the third",
signers: @["A", "B", "C"],
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", voted: "C", noTurn: true)],
results: @["A", "B"]),
TestSpecs(
id: 8,
info: "Four signers, consensus of two not being enough to drop anyone",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", voted: "C", noTurn: true)],
results: @["A", "B", "C", "D"]),
TestSpecs(
id: 9,
info: "Four signers, consensus of three already being enough to " &
"drop someone",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "D", noTurn: true),
TesterVote(signer: "B", voted: "D", noTurn: true),
TesterVote(signer: "C", voted: "D", noTurn: true)],
results: @["A", "B", "C"]),
TestSpecs(
id: 10,
info: "Authorizations are counted once per signer per target",
signers: @["A", "B"],
votes: @[TesterVote(signer: "A", voted: "C", auth: true),
TesterVote(signer: "B"),
TesterVote(signer: "A", voted: "C", auth: true),
TesterVote(signer: "B"),
TesterVote(signer: "A", voted: "C", auth: true)],
results: @["A", "B"]),
TestSpecs(
id: 11,
info: "Authorizing multiple accounts concurrently is permitted",
signers: @["A", "B"],
votes: @[TesterVote(signer: "A", voted: "C", auth: true),
TesterVote(signer: "B"),
TesterVote(signer: "A", voted: "D", auth: true),
TesterVote(signer: "B"),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "D", auth: true),
TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", voted: "C", auth: true, noTurn: true)],
results: @["A", "B", "C", "D"]),
TestSpecs(
id: 12,
info: "Deauthorizations are counted once per signer per target",
signers: @["A", "B"],
votes: @[TesterVote(signer: "A", voted: "B"),
TesterVote(signer: "B"),
TesterVote(signer: "A", voted: "B"),
TesterVote(signer: "B"),
TesterVote(signer: "A", voted: "B")],
results: @["A", "B"]),
TestSpecs(
id: 13,
info: "Deauthorizing multiple accounts concurrently is permitted",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A", voted: "D", noTurn: true),
TesterVote(signer: "B"),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "D", noTurn: true),
TesterVote(signer: "C", voted: "D", noTurn: true),
TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", voted: "C", noTurn: true)],
results: @["A", "B"]),
TestSpecs(
id: 14,
info: "Votes from deauthorized signers are discarded immediately " &
"(deauth votes)",
signers: @["A", "B", "C"],
votes: @[TesterVote(signer: "C", voted: "B", noTurn: true),
TesterVote(signer: "A", voted: "C"),
TesterVote(signer: "B", voted: "C", noTurn: true),
TesterVote(signer: "A", voted: "B", noTurn: true)],
results: @["A", "B"]),
TestSpecs(
id: 15,
info: "Votes from deauthorized signers are discarded immediately " &
"(auth votes)",
signers: @["A", "B", "C"],
votes: @[TesterVote(signer: "C", voted: "D", auth: true, noTurn: true),
TesterVote(signer: "A", voted: "C"),
TesterVote(signer: "B", voted: "C", noTurn: true),
TesterVote(signer: "A", voted: "D", auth: true, noTurn: true)],
results: @["A", "B"]),
TestSpecs(
id: 16,
info: "Cascading changes are not allowed, only the account being " &
"voted on may change",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A", voted: "D", noTurn: true),
TesterVote(signer: "B", voted: "C"),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "D", noTurn: true),
TesterVote(signer: "C", voted: "D", noTurn: true)],
results: @["A", "B", "C"]),
TestSpecs(
id: 17,
info: "Changes reaching consensus out of bounds (via a deauth) " &
"execute on touch",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A", voted: "D", noTurn: true),
TesterVote(signer: "B", voted: "C"),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "D", noTurn: true),
TesterVote(signer: "C", voted: "D", noTurn: true),
TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "C", voted: "C", auth: true, noTurn: true)],
results: @["A", "B"]),
TestSpecs(
id: 18,
info: "Changes reaching consensus out of bounds (via a deauth) " &
"may go out of consensus on first touch",
signers: @["A", "B", "C", "D"],
votes: @[TesterVote(signer: "A", voted: "C", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A", voted: "D", noTurn: true),
TesterVote(signer: "B", voted: "C"),
TesterVote(signer: "C", noTurn: true),
TesterVote(signer: "A"),
TesterVote(signer: "B", voted: "D", noTurn: true),
TesterVote(signer: "C", voted: "D", noTurn: true),
TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", voted: "C", auth: true, noTurn: true)],
results: @["A", "B", "C"]),
TestSpecs(
id: 19,
info: "Ensure that pending votes don't survive authorization status " &
"changes. This corner case can only appear if a signer is " &
"quickly added, removed and then readded (or the inverse), " &
"while one of the original voters dropped. If a past vote is " &
"left cached in the system somewhere, this will interfere " &
"with the final signer outcome.",
signers: @["A", "B", "C", "D", "E"],
votes: @[
# Authorize F, 3 votes needed
TesterVote(signer: "A", voted: "F", auth: true, noTurn: true),
TesterVote(signer: "B", voted: "F", auth: true),
TesterVote(signer: "C", voted: "F", auth: true, noTurn: true),
# Deauthorize F, 4 votes needed (leave A's previous vote "unchanged")
TesterVote(signer: "D", voted: "F", noTurn: true),
TesterVote(signer: "E", voted: "F", noTurn: true),
TesterVote(signer: "B", voted: "F", noTurn: true),
TesterVote(signer: "C", voted: "F"),
# Almost authorize F, 2/3 votes needed
TesterVote(signer: "D", voted: "F", auth: true),
TesterVote(signer: "E", voted: "F", auth: true, noTurn: true),
# Deauthorize A, 3 votes needed
TesterVote(signer: "B", voted: "A", noTurn: true),
TesterVote(signer: "C", voted: "A"),
TesterVote(signer: "D", voted: "A", noTurn: true),
# Finish authorizing F, 3/3 votes needed
TesterVote(signer: "B", voted: "F", auth: true, noTurn: true)],
results: @["B", "C", "D", "E", "F"]),
TestSpecs(
id: 20,
info: "Epoch transitions reset all votes to allow chain checkpointing",
epoch: 3,
signers: @["A", "B"],
votes: @[TesterVote(signer: "A", voted: "C", auth: true),
TesterVote(signer: "B"),
TesterVote(signer: "A", checkpoint: @["A", "B"]),
TesterVote(signer: "B", voted: "C", auth: true)],
results: @["A", "B"]),
TestSpecs(
id: 21,
info: "An unauthorized signer should not be able to sign blocks",
signers: @["A"],
votes: @[TesterVote(signer: "B", noTurn: true)],
failure: errUnauthorizedSigner),
TestSpecs(
id: 22,
info: "An authorized signer that signed recenty should not be able " &
"to sign again",
signers: @["A", "B"],
votes: @[TesterVote(signer: "A"),
TesterVote(signer: "A")],
failure: errRecentlySigned),
TestSpecs(
id: 23,
info: "Recent signatures should not reset on checkpoint blocks " &
"imported in a batch ",
epoch: 3,
signers: @["A", "B", "C"],
votes: @[TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "A", checkpoint: @["A", "B", "C"],
noTurn: true),
TesterVote(signer: "A", noTurn: true)],
# Setting the `runBack` flag forces the snapshot handler to search for
# a checkpoint before entry 3. So the checkpoint will be ignored for
# re-setting the system so that address `A` of block #3 is found in the
# list of recent signers (see documentation of the flag
# `applySnapsMinBacklog` for the `Clique` descriptor.)
#
# As far as I understand, there was no awareness of the transaction batch
# in the Go implementation -- jordan.
runBack: true,
failure: errRecentlySigned),
# The last test does not differ from the previous one with the current
# test environment.
TestSpecs(
id: 24,
info: "Recent signatures (revisted) should not reset on checkpoint " &
"blocks imported in a batch " &
"(https://github.com/ethereum/go-ethereum/issues/17593). "&
"Whilst this seems overly specific and weird, it was a "&
"Rinkeby consensus split.",
epoch: 3,
signers: @["A", "B", "C"],
votes: @[TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "A", checkpoint: @["A", "B", "C"],
noTurn: true),
TesterVote(signer: "A", newbatch: true, noTurn: true)],
# Setting the `runBack` flag forces the snapshot handler to search for
# a checkpoint before entry 3. So the checkpoint will be ignored for
# re-setting the system so that address `A` of block #3 is found in the
# list of recent signers (see documentation of the flag
# `applySnapsMinBacklog` for the `Clique` descriptor.)
#
# As far as I understand, there was no awareness of the transaction batch
# in the Go implementation -- jordan.
runBack: true,
failure: errRecentlySigned),
# Not found in Go reference implementation
TestSpecs(
id: 25,
info: "Test 23/24 with using the most recent <epoch> checkpoint",
epoch: 3,
signers: @["A", "B", "C"],
votes: @[TesterVote(signer: "A", noTurn: true),
TesterVote(signer: "B", noTurn: true),
TesterVote(signer: "A", checkpoint: @["A", "B", "C"],
noTurn: true),
TesterVote(signer: "A", noTurn: true)],
results: @["A", "B", "C"])]
static:
# For convenience, make sure that IDs are increasing
for n in 1 ..< voterSamples.len:
if voterSamples[n-1].id < voterSamples[n].id:
continue
echo "voterSamples[", n, "] == ", voterSamples[n].id, " expected ",
voterSamples[n-1].id + 1, " or greater"
doAssert voterSamples[n-1].id < voterSamples[n].id
# End

View File

@ -86,12 +86,12 @@ proc configurationMain*() =
let conf = makeConfig(@["--custom-network:" & genesisFile]) let conf = makeConfig(@["--custom-network:" & genesisFile])
check conf.networkId == 123.NetworkId check conf.networkId == 123.NetworkId
test "network-id not set, goerli set": test "network-id not set, sepolia set":
let conf = makeConfig(@["--network:goerli"]) let conf = makeConfig(@["--network:sepolia"])
check conf.networkId == GoerliNet check conf.networkId == SepoliaNet
test "network-id set, goerli set": test "network-id set, sepolia set":
let conf = makeConfig(@["--network:goerli", "--network:123"]) let conf = makeConfig(@["--network:sepolia", "--network:123"])
check conf.networkId == 123.NetworkId
test "rpc-api":

View File

@ -81,21 +81,6 @@ let
# will run over all avail files in parent folder
files: @["00000.era1"]) # on external repo
# Goerli will be abandoned in future
goerliSample = CaptureSpecs(
builtIn: true,
name: "goerli",
network: GoerliNet,
files: @["goerli68161.txt.gz"]) # on local replay folder
goerliSampleEx = CaptureSpecs(
builtIn: true,
name: "goerli",
network: GoerliNet,
files: @[
"goerli482304.txt.gz", # on nimbus-eth1-blobs/replay
"goerli482305-504192.txt.gz"])
# ------------------
# Supposed to run mostly on defaults, object name tag: m=memory, r=rocksDB
@ -142,35 +127,12 @@ let
dbType = AristoDbRocks)
# Goerli will be abandoned in future
goerliTest0m* = goerliSample
.cloneWith(
name = "-am-some",
numBlocks = 1_000)
goerliTest1m* = goerliSample
.cloneWith(
name = "-am",
numBlocks = high(int))
goerliTest2m* = goerliSampleEx
.cloneWith(
name = "-ex-am",
numBlocks = high(int))
goerliTest3r* = goerliSampleEx
.cloneWith(
name = "-ex-ar",
numBlocks = high(int),
dbType = AristoDbRocks)
# ------------------
allSamples* = [
mainTest0m, mainTest1m,
mainTest2r, mainTest3r, mainTest4r,
mainTest5m, mainTest6r,
goerliTest0m, goerliTest1m, goerliTest2m, goerliTest3r
mainTest5m, mainTest6r
]
# End

View File

@ -141,7 +141,6 @@ template runGenesisTimeIdTests() =
proc forkIdMain*() =
suite "Fork ID tests":
runTest(MainNet, "MainNet")
runTest(GoerliNet, "GoerliNet")
runTest(SepoliaNet, "SepoliaNet")
runTest(HoleskyNet, "HoleskyNet")
test "Genesis Time Fork ID":

View File

@ -36,10 +36,6 @@ proc genesisTest() =
let b = makeGenesis(MainNet)
check(b.blockHash == "D4E56740F876AEF8C010B86A40D5F56745A118D0906A34E69AEC8C0DB1CB8FA3".toDigest)
test "Correct goerlinet hash":
let b = makeGenesis(GoerliNet)
check(b.blockHash == "bf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a".toDigest)
test "Correct sepolia hash": test "Correct sepolia hash":
let b = makeGenesis(SepoliaNet) let b = makeGenesis(SepoliaNet)
check b.blockHash == "25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9".toDigest check b.blockHash == "25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9".toDigest
@ -60,18 +56,6 @@ proc customGenesisTest() =
check cgb.config.consensusType == ConsensusType.POW
check cgc.config.consensusType == ConsensusType.POW
test "calaveras.json":
var cg: NetworkParams
check loadNetworkParams("calaveras.json".findFilePath, cg)
let com = CommonRef.new(newCoreDbRef DefaultDbMemory, params = cg)
let stateRoot = "664c93de37eb4a72953ea42b8c046cdb64c9f0b0bca5505ade8d970d49ebdb8c".toDigest
let genesisHash = "eb9233d066c275efcdfed8037f4fc082770176aefdbcb7691c71da412a5670f2".toDigest
check com.genesisHeader.stateRoot == stateRoot
check com.genesisHeader.blockHash == genesisHash
check com.consensus == ConsensusType.POA
check com.cliquePeriod == 30
check com.cliqueEpoch == 30000
test "Devnet4.json (aka Kintsugi in all but chainId)": test "Devnet4.json (aka Kintsugi in all but chainId)":
var cg: NetworkParams var cg: NetworkParams
check loadNetworkParams("devnet4.json".findFilePath, cg) check loadNetworkParams("devnet4.json".findFilePath, cg)