diff --git a/nimbus/p2p/chain.nim b/nimbus/p2p/chain.nim index e831a08ba..0388a8455 100644 --- a/nimbus/p2p/chain.nim +++ b/nimbus/p2p/chain.nim @@ -3,8 +3,8 @@ import ../db/db_chain, ../genesis, ../utils, - ../utils/difficulty, ../vm_state, + ./clique, ./executor, ./validate, ./validate/epoch_hash_cache, @@ -37,8 +37,17 @@ type db: BaseChainDB forkIds: array[ChainFork, ForkID] blockZeroHash: KeccakHash - cacheByEpoch: EpochHashCache - extraValidation: bool + + extraValidation: bool ##\ + ## Trigger extra validation, currently with `persistBlocksin()` only. + + cacheByEpoch: EpochHashCache ##\ + ## Objects cache to speed up lookup in validation functions. + + poa: Clique ##\ + ## For non-PoA networks (when `db.config.poaEngine` is `false`), + ## this descriptor is ignored. + func toChainFork(c: ChainConfig, number: BlockNumber): ChainFork = if number >= c.londonBlock: London @@ -115,6 +124,11 @@ proc newChain*(db: BaseChainDB, extraValidation = false): Chain = result.forkIds = calculateForkIds(db.config, genesisCRC) result.extraValidation = extraValidation + # Initalise the PoA state regardless of whether it is needed on the current + # network. For non-PoA networks (when `db.config.poaEngine` is `false`), + # this descriptor is ignored. + result.poa = db.newCliqueCfg.newClique + if extraValidation: result.cacheByEpoch.initEpochHashCache @@ -164,7 +178,12 @@ method persistBlocks*(c: Chain; headers: openarray[BlockHeader]; (header, body) = (headers[i], bodies[i]) parentHeader = c.db.getBlockHeader(header.parentHash) vmState = newBaseVMState(parentHeader.stateRoot, header, c.db) - validationResult = processBlock(c.db, header, body, vmState) + + # The following processing function call will update the PoA state which + # is passed as second function argument. The PoA state is ignored for + # non-PoA networks (in which case `vmState.processBlock(header,body)` + # would also be correct but not vice versa.) + validationResult = vmState.processBlock(c.poa, header, body) when not defined(release): if validationResult == ValidationResult.Error and diff --git a/nimbus/p2p/clique.nim b/nimbus/p2p/clique.nim index 20387496e..00d25e8bc 100644 --- a/nimbus/p2p/clique.nim +++ b/nimbus/p2p/clique.nim @@ -19,643 +19,19 @@ ## import - std/[random, sequtils, strformat, tables, times], - ../constants, - ../db/[db_chain, state_db], - ../utils, - ./clique/[clique_cfg, clique_defs, clique_utils, ec_recover, recent_snaps], - ./gaslimit, - chronicles, - chronos, - eth/[common, keys, rlp], - nimcrypto - -export - clique_cfg, - clique_defs - -type - CliqueSyncDefect* = object of Defect - ## Defect raised with lock/unlock problem - - # clique/clique.go(142): type SignerFn func(signer [..] - CliqueSignerFn* = ## Hashes and signs the data to be signed by - ## a backing account - proc(signer: EthAddress; - message: openArray[byte]): Result[Hash256,cstring] {.gcsafe.} - - Proposals = Table[EthAddress,bool] - - # clique/clique.go(172): type Clique struct { [..] - Clique* = object ## Clique is the proof-of-authority consensus engine - ## proposed to support the Ethereum testnet following - ## the Ropsten attacks. 
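# For orientation, the `Chain` wiring introduced above would be driven roughly
# as follows (sketch only; `db`, `headers` and `bodies` are assumed to be set
# up elsewhere, and `newChain` creates the PoA descriptor internally whether
# or not the network actually runs clique):
#
#   let chain = newChain(db, extraValidation = true)
#   let res   = chain.persistBlocks(headers, bodies)  # PoA-aware processBlock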
- cfg: CliqueCfg ## Consensus engine parameters to fine tune behaviour - - recents: RecentSnaps ## Snapshots for recent block to speed up reorgs - # signatures => see CliqueCfg - - proposals: Proposals ## Current list of proposals we are pushing - - signer: EthAddress ## Ethereum address of the signing key - signFn: CliqueSignerFn ## Signer function to authorize hashes with - lock: AsyncLock ## Protects the signer fields - - stopSealReq: bool ## Stop running `seal()` function - stopVHeaderReq: bool ## Stop running `verifyHeader()` function - - fakeDiff: bool ## Testing only: skip difficulty verifications - debug: bool ## debug mode + ./clique/[clique_cfg, clique_defs, clique_desc, clique_miner], + chronicles {.push raises: [Defect].} logScope: topics = "clique PoA" -# ------------------------------------------------------------------------------ -# Private Helpers -# ------------------------------------------------------------------------------ - -template doExclusively(c: var Clique; action: untyped) = - waitFor c.lock.acquire - action - c.lock.release - -template syncExceptionWrap(action: untyped) = - try: - action - except: - raise (ref CliqueSyncDefect)(msg: getCurrentException().msg) - -# ------------------------------------------------------------------------------ -# Private functions -# ------------------------------------------------------------------------------ - -# clique/clique.go(145): func ecrecover(header [..] -proc ecrecover(c: var Clique; - header: BlockHeader): Result[EthAddress,CliqueError] {. - gcsafe, raises: [Defect,CatchableError].} = - ## ecrecover extracts the Ethereum account address from a signed header. - c.cfg.signatures.getEcRecover(header) - - -# clique/clique.go(369): func (c *Clique) snapshot(chain [..] -proc snapshot(c: var Clique; blockNumber: BlockNumber; hash: Hash256; - parents: openArray[Blockheader]): Result[Snapshot,CliqueError] {. - gcsafe, raises: [Defect,CatchableError].} = - ## snapshot retrieves the authorization snapshot at a given point in time. - c.recents.getRecentSnaps: - RecentArgs(blockHash: hash, - blockNumber: blockNumber, - parents: toSeq(parents)) - - -# clique/clique.go(463): func (c *Clique) verifySeal(chain [..] -proc verifySeal(c: var Clique; header: BlockHeader; - parents: openArray[BlockHeader]): CliqueResult {. - gcsafe, raises: [Defect,CatchableError].} = - ## Check whether the signature contained in the header satisfies the - ## consensus protocol requirements. The method accepts an optional list of - ## parent headers that aren't yet part of the local blockchain to generate - ## the snapshots from. 
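# The turn-ness rule enforced by `verifySeal()` below follows EIP-225: with
# the authorised signers kept sorted, block N is "in turn" for the signer at
# offset N mod SIGNER_COUNT.  A stand-alone sketch, assuming a plain sorted
# seq[EthAddress] signer list:
#
#   proc isInTurn(signers: seq[EthAddress]; blockNumber: BlockNumber;
#                 signer: EthAddress): bool =
#     for idx, account in signers:
#       if account == signer:
#         return (blockNumber mod signers.len.u256) == idx.u256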
- - # Verifying the genesis block is not supported - if header.blockNumber.isZero: - return err((errUnknownBlock,"")) - - # Retrieve the snapshot needed to verify this header and cache it - var snap = c.snapshot(header.blockNumber-1, header.parentHash, parents) - if snap.isErr: - return err(snap.error) - - # Resolve the authorization key and check against signers - let signer = c.ecrecover(header) - if signer.isErr: - return err(signer.error) - - if not snap.value.isSigner(signer.value): - return err((errUnauthorizedSigner,"")) - - let seen = snap.value.recent(signer.value) - if seen.isOk: - # Signer is among recents, only fail if the current block does not - # shift it out - if header.blockNumber - snap.value.signersThreshold.u256 < seen.value: - return err((errRecentlySigned,"")) - - # Ensure that the difficulty corresponds to the turn-ness of the signer - if not c.fakeDiff: - if snap.value.inTurn(header.blockNumber, signer.value): - if header.difficulty != DIFF_INTURN: - return err((errWrongDifficulty,"")) - else: - if header.difficulty != DIFF_NOTURN: - return err((errWrongDifficulty,"")) - - return ok() - - -# clique/clique.go(314): func (c *Clique) verifyCascadingFields(chain [..] -proc verifyCascadingFields(c: var Clique; header: BlockHeader; - parents: openArray[BlockHeader]): CliqueResult {. - gcsafe, raises: [Defect,CatchableError].} = - ## Verify all the header fields that are not standalone, rather depend on a - ## batch of previous headers. The caller may optionally pass in a batch of - ## parents (ascending order) to avoid looking those up from the database. - ## This is useful for concurrently verifying a batch of new headers. - - # The genesis block is the always valid dead-end - if header.blockNumber.isZero: - return err((errZeroBlockNumberRejected,"")) - - # Ensure that the block's timestamp isn't too close to its parent - var parent: BlockHeader - if 0 < parents.len: - parent = parents[^1] - else: - let rc = c.cfg.dbChain.getBlockHeaderResult(header.blockNumber-1) - if rc.isErr: - return err((errUnknownAncestor,"")) - parent = rc.value - - if parent.blockNumber != header.blockNumber-1 or - parent.hash != header.parentHash: - return err((errUnknownAncestor,"")) - - if header.timestamp < parent.timestamp + c.cfg.period: - return err((errInvalidTimestamp,"")) - - # Verify that the gasUsed is <= gasLimit - if header.gasLimit < header.gasUsed: - return err((errCliqueExceedsGasLimit, - &"invalid gasUsed: have {header.gasUsed}, " & - &"gasLimit {header.gasLimit}")) - - let rc = c.cfg.dbChain.validateGasLimitOrBaseFee(header, parent) - if rc.isErr: - return err((errCliqueGasLimitOrBaseFee, rc.error)) - - # Retrieve the snapshot needed to verify this header and cache it - var snap = c.snapshot(header.blockNumber-1, header.parentHash, parents) - if snap.isErr: - return err(snap.error) - - # If the block is a checkpoint block, verify the signer list - if (header.blockNumber mod c.cfg.epoch.u256) == 0: - let - signersList = snap.value.signers - extraList = header.extraData.extraDataAddresses - if signersList != extraList: - return err((errMismatchingCheckpointSigners,"")) - - # All basic checks passed, verify the seal and return - return c.verifySeal(header, parents) - - -# clique/clique.go(246): func (c *Clique) verifyHeader(chain [..] -proc verifyHeader(c: var Clique; header: BlockHeader; - parents: openArray[BlockHeader]): CliqueResult {. 
- gcsafe, raises: [Defect,CatchableError].} = - ## Check whether a header conforms to the consensus rules.The caller may - ## optionally pass in a batch of parents (ascending order) to avoid looking - ## those up from the database. This is useful for concurrently verifying - ## a batch of new headers. - if header.blockNumber.isZero: - return err((errUnknownBlock,"")) - - # Don't waste time checking blocks from the future - if getTime() < header.timestamp: - return err((errFutureBlock,"")) - - # Checkpoint blocks need to enforce zero beneficiary - let isCheckPoint = (header.blockNumber mod c.cfg.epoch.u256) == 0 - if isCheckPoint and not header.coinbase.isZero: - return err((errInvalidCheckpointBeneficiary,"")) - - # Nonces must be 0x00..0 or 0xff..f, zeroes enforced on checkpoints - if header.nonce != NONCE_AUTH and header.nonce != NONCE_DROP: - return err((errInvalidVote,"")) - if isCheckPoint and header.nonce != NONCE_DROP: - return err((errInvalidCheckpointVote,"")) - - # Check that the extra-data contains both the vanity and signature - if header.extraData.len < EXTRA_VANITY: - return err((errMissingVanity,"")) - if header.extraData.len < EXTRA_VANITY + EXTRA_SEAL: - return err((errMissingSignature,"")) - - # Ensure that the extra-data contains a signer list on checkpoint, - # but none otherwise - let signersBytes = header.extraData.len - EXTRA_VANITY - EXTRA_SEAL - if not isCheckPoint and signersBytes != 0: - return err((errExtraSigners,"")) - - if isCheckPoint and (signersBytes mod EthAddress.len) != 0: - return err((errInvalidCheckpointSigners,"")) - - # Ensure that the mix digest is zero as we do not have fork protection - # currently - if not header.mixDigest.isZero: - return err((errInvalidMixDigest,"")) - - # Ensure that the block does not contain any uncles which are meaningless - # in PoA - if header.ommersHash != EMPTY_UNCLE_HASH: - return err((errInvalidUncleHash,"")) - - # Ensure that the block's difficulty is meaningful (may not be correct at - # this point) - if not header.blockNumber.isZero: - if header.difficulty.isZero or - (header.difficulty != DIFF_INTURN and - header.difficulty != DIFF_NOTURN): - return err((errInvalidDifficulty,"")) - - # verify that the gas limit is <= 2^63-1 - when header.gasLimit.typeof isnot int64: - if int64.high < header.gasLimit: - return err((errCliqueExceedsGasLimit, - &"invalid gasLimit: have {header.gasLimit}, must be int64")) - - # If all checks passed, validate any special fields for hard forks - let rc = c.cfg.dbChain.config.verifyForkHashes(header) - if rc.isErr: - return err(rc.error) - - # All basic checks passed, verify cascading fields - return c.verifyCascadingFields(header, parents) - - -# clique/clique.go(681): func calcDifficulty(snap [..] -proc calcDifficulty(snap: var Snapshot; signer: EthAddress): DifficultyInt = - if snap.inTurn(snap.blockNumber + 1, signer): - DIFF_INTURN - else: - DIFF_NOTURN - -# ------------------------------------------------------------------------------ -# Public functions -# ------------------------------------------------------------------------------ - -# clique/clique.go(191): func New(config [..] -proc initClique*(c: var Clique; cfg: CliqueCfg) = - ## Initialiser for Clique proof-of-authority consensus engine with the - ## initial signers set to the ones provided by the user. 
- c.cfg = cfg - c.recents = initRecentSnaps(cfg) - c.proposals = initTable[EthAddress,bool]() - c.lock = newAsyncLock() - -proc initClique*(cfg: CliqueCfg): Clique = - result.initClique(cfg) - - -proc setDebug*(c: var Clique; debug: bool) = - ## Set debugging mode on/off and set the `fakeDiff` flag `true` - c.fakeDiff = true - c.debug = debug - c.recents.setDebug(debug) - - -# clique/clique.go(212): func (c *Clique) Author(header [..] -proc author*(c: var Clique; - header: BlockHeader): Result[EthAddress,CliqueError] {. - gcsafe, raises: [Defect,CatchableError].} = - ## For the Consensus Engine, `author()` retrieves the Ethereum address of the - ## account that minted the given block, which may be different from the - ## header's coinbase if a consensus engine is based on signatures. - ## - ## This implementation returns the Ethereum address recovered from the - ## signature in the header's extra-data section. - c.ecrecover(header) - - -# clique/clique.go(217): func (c *Clique) VerifyHeader(chain [..] -proc verifyHeader*(c: var Clique; header: BlockHeader): CliqueResult {. - gcsafe, raises: [Defect,CatchableError].} = - ## For the Consensus Engine, `verifyHeader()` checks whether a header - ## conforms to the consensus rules of a given engine. Verifying the seal - ## may be done optionally here, or explicitly via the `verifySeal()` method. - ## - ## This implementation checks whether a header conforms to the consensus - ## rules. - c.verifyHeader(header, @[]) - -# clique/clique.go(224): func (c *Clique) VerifyHeader(chain [..] -proc verifyHeaders*(c: var Clique; headers: openArray[BlockHeader]): - Future[seq[CliqueResult]] {.async,gcsafe.} = - ## For the Consensus Engine, `verifyHeader()` s similar to VerifyHeader, but - ## verifies a batch of headers concurrently. This method is accompanied - ## by a `stopVerifyHeader()` method that can abort the operations. - ## - ## This implementation checks whether a header conforms to the consensus - ## rules. It verifies a batch of headers. If running in the background, - ## the process can be stopped by calling the `stopVerifyHeader()` function. - syncExceptionWrap: - c.doExclusively: - c.stopVHeaderReq = false - for n in 0 ..< headers.len: - c.doExclusively: - let isStopRequest = c.stopVHeaderReq - if isStopRequest: - result.add cliqueResultErr((errCliqueStopped,"")) - break - result.add c.verifyHeader(headers[n], headers[0 ..< n]) - c.doExclusively: - c.stopVHeaderReq = false - -proc stopVerifyHeader*(c: var Clique): bool {.discardable.} = - ## Activate the stop flag for running `verifyHeader()` function. - ## Returns `true` if the stop flag could be activated. - syncExceptionWrap: - c.doExclusively: - if not c.stopVHeaderReq: - c.stopVHeaderReq = true - result = true - - -# clique/clique.go(450): func (c *Clique) VerifyUncles(chain [..] -proc verifyUncles*(c: var Clique; ethBlock: EthBlock): CliqueResult = - ## For the Consensus Engine, `verifyUncles()` verifies that the given - ## block's uncles conform to the consensus rules of a given engine. - ## - ## This implementation always returns an error for existing uncles as this - ## consensus mechanism doesn't permit uncles. - if 0 < ethBlock.uncles.len: - return err((errCliqueUnclesNotAllowed,"")) - result = ok() - - -# clique/clique.go(506): func (c *Clique) Prepare(chain [..] -proc prepare*(c: var Clique; header: var BlockHeader): CliqueResult {. 
- gcsafe, raises: [Defect,CatchableError].} = - ## For the Consensus Engine, `prepare()` initializes the consensus fields - ## of a block header according to the rules of a particular engine. The - ## changes are executed inline. - ## - ## This implementation prepares all the consensus fields of the header for - ## running the transactions on top. - - # If the block isn't a checkpoint, cast a random vote (good enough for now) - header.coinbase.reset - header.nonce.reset - - # Assemble the voting snapshot to check which votes make sense - var snap = c.snapshot(header.blockNumber-1, header.parentHash, @[]) - if snap.isErr: - return err(snap.error) - - if (header.blockNumber mod c.cfg.epoch) != 0: - c.doExclusively: - # Gather all the proposals that make sense voting on - var addresses: seq[EthAddress] - for (address,authorize) in c.proposals.pairs: - if snap.value.validVote(address, authorize): - addresses.add address - - # If there's pending proposals, cast a vote on them - if 0 < addresses.len: - header.coinbase = addresses[c.cfg.prng.rand(addresses.len-1)] - header.nonce = if header.coinbase in c.proposals: NONCE_AUTH - else: NONCE_DROP - - # Set the correct difficulty - header.difficulty = snap.value.calcDifficulty(c.signer) - - # Ensure the extra data has all its components - header.extraData.setLen(EXTRA_VANITY) - if (header.blockNumber mod c.cfg.epoch) == 0: - header.extraData.add snap.value.signers.mapIt(toSeq(it)).concat - header.extraData.add 0.byte.repeat(EXTRA_SEAL) - - # Mix digest is reserved for now, set to empty - header.mixDigest.reset - - # Ensure the timestamp has the correct delay - let parent = c.cfg.dbChain.getBlockHeaderResult(header.blockNumber-1) - if parent.isErr: - return err((errUnknownAncestor,"")) - - header.timestamp = parent.value.timestamp + c.cfg.period - if header.timestamp < getTime(): - header.timestamp = getTime() - - return ok() - - -# clique/clique.go(571): func (c *Clique) Finalize(chain [..] -proc finalize*(c: var Clique; header: BlockHeader; db: AccountStateDB) = - ## For the Consensus Engine, `finalize()` runs any post-transaction state - ## modifications (e.g. block rewards) but does not assemble the block. - ## - ## Note: The block header and state database might be updated to reflect any - ## consensus rules that happen at finalization (e.g. block rewards). - ## - ## Not implemented here, raises `AssertionDefect` - raiseAssert "Not implemented" - # - # ## This implementation ensures no uncles are set, nor block rewards given. - # # No block rewards in PoA, so the state remains as is and uncles are dropped - # let deleteEmptyObjectsOk = c.cfg.config.eip158block <= header.blockNumber - # header.stateRoot = db.intermediateRoot(deleteEmptyObjectsOk) - # header.ommersHash = EMPTY_UNCLE_HASH - -# clique/clique.go(579): func (c *Clique) FinalizeAndAssemble(chain [..] -proc finalizeAndAssemble*(c: var Clique; header: BlockHeader; - db: AccountStateDB; txs: openArray[Transaction]; - receipts: openArray[Receipt]): - Result[EthBlock,CliqueError] = - ## For the Consensus Engine, `finalizeAndAssemble()` runs any - ## post-transaction state modifications (e.g. block rewards) and assembles - ## the final block. - ## - ## Note: The block header and state database might be updated to reflect any - ## consensus rules that happen at finalization (e.g. block rewards). - ## - ## Not implemented here, raises `AssertionDefect` - raiseAssert "Not implemented" - # ## Ensuring no uncles are set, nor block rewards given, and returns the - # ## final block. 
- # - # # Finalize block - # c.finalize(header, state, txs, uncles) - # - # # Assemble and return the final block for sealing - # return types.NewBlock(header, txs, nil, receipts, - # trie.NewStackTrie(nil)), nil - - -# clique/clique.go(589): func (c *Clique) Authorize(signer [..] -proc authorize*(c: var Clique; signer: EthAddress; signFn: CliqueSignerFn) = - ## Injects private key into the consensus engine to mint new blocks with. - syncExceptionWrap: - c.doExclusively: - c.signer = signer - c.signFn = signFn - - -# clique/clique.go(724): func CliqueRLP(header [..] -proc cliqueRlp*(header: BlockHeader): seq[byte] = - ## Returns the rlp bytes which needs to be signed for the proof-of-authority - ## sealing. The RLP to sign consists of the entire header apart from the 65 - ## byte signature contained at the end of the extra data. - ## - ## Note, the method requires the extra data to be at least 65 bytes, - ## otherwise it panics. This is done to avoid accidentally using both forms - ## (signature present or not), which could be abused to produce different - ##hashes for the same header. - header.encodeSealHeader - - -# clique/clique.go(688): func SealHash(header *types.Header) common.Hash { -proc sealHash*(header: BlockHeader): Hash256 = - ## For the Consensus Engine, `sealHash()` returns the hash of a block prior - ## to it being sealed. - ## - ## This implementation returns the hash of a block prior to it being sealed. - header.hashSealHeader - - -# clique/clique.go(599): func (c *Clique) Seal(chain [..] -proc seal*(c: var Clique; ethBlock: EthBlock): - Future[Result[EthBlock,CliqueError]] {.async,gcsafe.} = - ## For the Consensus Engine, `seal()` generates a new sealing request for - ## the given input block and pushes the result into the given channel. - ## - ## Note, the method returns immediately and will send the result async. More - ## than one result may also be returned depending on the consensus algorithm. - ## - ## This implementation attempts to create a sealed block using the local - ## signing credentials. If running in the background, the process can be - ## stopped by calling the `stopSeal()` function. 
- c.doExclusively: - c.stopSealReq = false - var header = ethBlock.header - - # Sealing the genesis block is not supported - if header.blockNumber.isZero: - return err((errUnknownBlock,"")) - - # For 0-period chains, refuse to seal empty blocks (no reward but would spin - # sealing) - if c.cfg.period.isZero and ethBlock.txs.len == 0: - info $nilCliqueSealNoBlockYet - return err((nilCliqueSealNoBlockYet,"")) - - # Don't hold the signer fields for the entire sealing procedure - c.doExclusively: - let - signer = c.signer - signFn = c.signFn - - # Bail out if we're unauthorized to sign a block - var snap = c.snapshot(header.blockNumber-1, header.parentHash, @[]) - if snap.isErr: - return err(snap.error) - if not snap.value.isSigner(signer): - return err((errUnauthorizedSigner,"")) - - # If we're amongst the recent signers, wait for the next block - let seen = snap.value.recent(signer) - if seen.isOk: - # Signer is among recents, only wait if the current block does not - # shift it out - if header.blockNumber < seen.value + snap.value.signersThreshold.u256: - info $nilCliqueSealSignedRecently - return err((nilCliqueSealSignedRecently,"")) - - # Sweet, the protocol permits us to sign the block, wait for our time - var delay = header.timestamp - getTime() - if header.difficulty == DIFF_NOTURN: - # It's not our turn explicitly to sign, delay it a bit - let wiggle = snap.value.signersThreshold.int64 * WIGGLE_TIME - # Kludge for limited rand() argument range - if wiggle.inSeconds < (int.high div 1000).int64: - let rndWiggleMs = c.cfg.prng.rand(wiggle.inMilliSeconds.int) - delay += initDuration(milliseconds = rndWiggleMs) - else: - let rndWiggleSec = c.cfg.prng.rand((wiggle.inSeconds and int.high).int) - delay += initDuration(seconds = rndWiggleSec) - - trace "Out-of-turn signing requested", - wiggle = $wiggle - - # Sign all the things! - let sigHash = signFn(signer,header.cliqueRlp) - if sigHash.isErr: - return err((errCliqueSealSigFn,$sigHash.error)) - let extraLen = header.extraData.len - if EXTRA_SEAL < extraLen: - header.extraData.setLen(extraLen - EXTRA_SEAL) - header.extraData.add sigHash.value.data - - # Wait until sealing is terminated or delay timeout. - trace "Waiting for slot to sign and propagate", - delay = $delay - - # FIXME: double check - let timeOutTime = getTime() + delay - while getTime() < timeOutTime: - c.doExclusively: - let isStopRequest = c.stopVHeaderReq - if isStopRequest: - warn "Sealing result is not read by miner", - sealhash = sealHash(header) - return err((errCliqueStopped,"")) - poll() - - c.doExclusively: - c.stopSealReq = false - return ok(ethBlock.withHeader(header)) - -proc stopSeal*(c: var Clique): bool {.discardable.} = - ## Activate the stop flag for running `seal()` function. - ## Returns `true` if the stop flag could be activated. - syncExceptionWrap: - c.doExclusively: - if not c.stopSealReq: - c.stopSealReq = true - result =true - - -# clique/clique.go(673): func (c *Clique) CalcDifficulty(chain [..] -proc calcDifficulty(c: var Clique; - parent: BlockHeader): Result[DifficultyInt,CliqueError] {. - gcsafe, raises: [Defect,CatchableError].} = - ## For the Consensus Engine, `calcDifficulty()` is the difficulty adjustment - ## algorithm. It returns the difficulty that a new block should have. 
- ## - ## This implementation returns the difficulty that a new block should have: - ## * DIFF_NOTURN(2) if BLOCK_NUMBER % SIGNER_COUNT != SIGNER_INDEX - ## * DIFF_INTURN(1) if BLOCK_NUMBER % SIGNER_COUNT == SIGNER_INDEX - var snap = c.snapshot(parent.blockNumber, parent.blockHash, @[]) - if snap.isErr: - return err(snap.error) - return ok(snap.value.calcDifficulty(c.signer)) - - -# # clique/clique.go(710): func (c *Clique) SealHash(header [..] -# proc sealHash(c: var Clique; header: BlockHeader): Hash256 = -# ## SealHash returns the hash of a block prior to it being sealed. -# header.encodeSigHeader.keccakHash - -# ------------------------------------------------------------------------------ -# Test interface -# ------------------------------------------------------------------------------ - -proc snapshotInternal*(c: var Clique; number: BlockNumber; hash: Hash256; - parent: openArray[Blockheader]): auto {. - gcsafe, raises: [Defect,CatchableError].} = - c.snapshot(number, hash, parent) - -proc cfgInternal*(c: var Clique): CliqueCfg = - c.cfg - -proc pp*(rc: var Result[Snapshot,CliqueError]; indent = 0): string = - if rc.isOk: - rc.value.pp(indent) - else: - "(error: " & rc.error.pp & ")" +export + clique_cfg, + clique_defs, + clique_desc, + clique_miner.snapshot # ------------------------------------------------------------------------------ # End diff --git a/nimbus/p2p/clique/clique_cfg.nim b/nimbus/p2p/clique/clique_cfg.nim index 4a9ae753d..30aa92303 100644 --- a/nimbus/p2p/clique/clique_cfg.nim +++ b/nimbus/p2p/clique/clique_cfg.nim @@ -49,7 +49,7 @@ type string {.gcsafe,raises: [Defect,CatchableError].} CliqueCfg* = ref object - dbChain*: BaseChainDB + db*: BaseChainDB signatures*: EcRecover ## Recent block signatures to speed up mining period*: Duration ## time between blocks to enforce prng*: Rand ## PRNG state for internal random generator @@ -65,10 +65,10 @@ type # Public functions # ------------------------------------------------------------------------------ -proc newCliqueCfg*(dbChain: BaseChainDB; period = BLOCK_PERIOD; +proc newCliqueCfg*(db: BaseChainDB; period = BLOCK_PERIOD; epoch = 0.u256): CliqueCfg = CliqueCfg( - dbChain: dbChain, + db: db, period: period, bcEpoch: if epoch.isZero: EPOCH_LENGTH.u256 else: epoch, signatures: initEcRecover(), diff --git a/nimbus/p2p/clique/clique_desc.nim b/nimbus/p2p/clique/clique_desc.nim new file mode 100644 index 000000000..e8931534b --- /dev/null +++ b/nimbus/p2p/clique/clique_desc.nim @@ -0,0 +1,152 @@ +# Nimbus +# Copyright (c) 2018 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. + +## +## Descriptor Objects for Clique PoA Consensus Protocol +## ==================================================== +## +## For details see +## `EIP-225 `_ +## and +## `go-ethereum `_ +## + +import + std/[tables], + ../../db/db_chain, + ../../constants, + ./clique_cfg, + ./clique_defs, + ./recent_snaps, + chronos, + eth/[common, keys, rlp] + +type + # clique/clique.go(142): type SignerFn func(signer [..] 
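# Putting the configuration above and the descriptor below together, a Clique
# instance is normally created as in this sketch (the period/epoch values are
# illustrative; `db` is an already initialised BaseChainDB):
#
#   let cfg = db.newCliqueCfg(period = initDuration(seconds = 15),
#                             epoch  = 30000.u256)
#   let c   = cfg.newClique            # same as db.newCliqueCfg.newClique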
+ CliqueSignerFn* = ## Hashes and signs the data to be signed by + ## a backing account + proc(signer: EthAddress; + message: openArray[byte]): Result[Hash256,cstring] {.gcsafe.} + + Proposals = Table[EthAddress,bool] + + # clique/clique.go(172): type Clique struct { [..] + Clique* = ref object ## Clique is the proof-of-authority consensus engine + ## proposed to support the Ethereum testnet following + ## the Ropsten attacks. + cCfg: CliqueCfg ## Common engine parameters to fine tune behaviour + + cRecents: RecentSnaps ## Snapshots for recent block to speed up reorgs + # signatures => see CliqueCfg + + cProposals: Proposals ## Cu1rrent list of proposals we are pushing + + signer*: EthAddress ## Ethereum address of the signing key + signFn*: CliqueSignerFn ## Signer function to authorize hashes with + cLock: AsyncLock ## Protects the signer fields + + stopSealReq*: bool ## Stop running `seal()` function + stopVHeaderReq*: bool ## Stop running `verifyHeader()` function + + cFakeDiff: bool ## Testing only: skip difficulty verifications + cDebug: bool ## debug mode + +{.push raises: [Defect].} + +# ------------------------------------------------------------------------------ +# Public constructor +# ------------------------------------------------------------------------------ + +# clique/clique.go(191): func New(config [..] +proc newClique*(cfg: CliqueCfg): Clique = + ## Initialiser for Clique proof-of-authority consensus engine with the + ## initial signers set to the ones provided by the user. + Clique(cCfg: cfg, + cRecents: initRecentSnaps(cfg), + cProposals: initTable[EthAddress,bool](), + cLock: newAsyncLock()) + +# ------------------------------------------------------------------------------ +# Public debug/pretty print +# ------------------------------------------------------------------------------ + +proc pp*(rc: var Result[Snapshot,CliqueError]; indent = 0): string = + if rc.isOk: + rc.value.pp(indent) + else: + "(error: " & rc.error.pp & ")" + +# ------------------------------------------------------------------------------ +# Public getters +# ------------------------------------------------------------------------------ + +proc cfg*(c: Clique): auto {.inline.} = + ## Getter + c.cCfg + +proc db*(c: Clique): BaseChainDB {.inline.} = + ## Getter + c.cCfg.db + +proc recents*(c: Clique): var RecentSnaps {.inline.} = + ## Getter + c.cRecents + +proc proposals*(c: Clique): var Proposals {.inline.} = + ## Getter + c.cProposals + +proc debug*(c: Clique): auto {.inline.} = + ## Getter + c.cDebug + +proc fakeDiff*(c: Clique): auto {.inline.} = + ## Getter + c.cFakeDiff + +# ------------------------------------------------------------------------------ +# Public setters +# ------------------------------------------------------------------------------ + +proc `db=`*(c: Clique; db: BaseChainDB) {.inline.} = + ## Setter, re-set database + c.cCfg.db = db + c.cProposals = initTable[EthAddress,bool]() + c.cRecents = c.cCfg.initRecentSnaps + c.cRecents.debug = c.cDebug + # note that the signatures[] cache need not be flushed + +proc `debug=`*(c: Clique; debug: bool) = + ## Set debugging mode on/off and set the `fakeDiff` flag `true` + c.cFakeDiff = true + c.cDebug = debug + c.cRecents.debug = debug + +# ------------------------------------------------------------------------------ +# Public lock/unlock +# ------------------------------------------------------------------------------ + +proc lock*(c: Clique) {.inline, raises: [Defect,CatchableError].} = + ## Lock descriptor + waitFor 
c.cLock.acquire + +proc unLock*(c: Clique) {.inline, raises: [Defect,AsyncLockError].} = + ## Unlock descriptor + c.cLock.release + +template doExclusively*(c: Clique; action: untyped) = + ## Handy helper + c.lock + action + c.unlock + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/nimbus/p2p/clique/clique_miner.nim b/nimbus/p2p/clique/clique_miner.nim new file mode 100644 index 000000000..f906984d2 --- /dev/null +++ b/nimbus/p2p/clique/clique_miner.nim @@ -0,0 +1,594 @@ +# Nimbus +# Copyright (c) 2018 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. + +## +## Mining Support for Clique PoA Consensus Protocol +## ================================================ +## +## Note that mining in currently unsupported by `NIMBUS` +## +## For details see +## `EIP-225 `_ +## and +## `go-ethereum `_ +## + +import + std/[random, sequtils, strformat, tables, times], + ../../constants, + ../../db/state_db, + ../../utils, + ../gaslimit, + ./clique_cfg, + ./clique_defs, + ./clique_desc, + ./clique_utils, + ./ec_recover, + ./recent_snaps, + chronicles, + chronos, + eth/[common, keys, rlp], + nimcrypto + +{.push raises: [Defect].} + +logScope: + topics = "clique PoA Mining" + +type + CliqueSyncDefect* = object of Defect + ## Defect raised with lock/unlock problem + +proc snapshot*(c: Clique; blockNumber: BlockNumber; hash: Hash256; + parents: openArray[Blockheader]): Result[Snapshot,CliqueError] {. + gcsafe, raises: [Defect,CatchableError].} + +# ------------------------------------------------------------------------------ +# Private Helpers +# ------------------------------------------------------------------------------ + +template syncExceptionWrap(action: untyped) = + try: + action + except: + raise (ref CliqueSyncDefect)(msg: getCurrentException().msg) + +# ------------------------------------------------------------------------------ +# Private functions +# ------------------------------------------------------------------------------ + +# clique/clique.go(145): func ecrecover(header [..] +proc ecrecover(c: Clique; header: BlockHeader): Result[EthAddress,CliqueError] + {.gcsafe, raises: [Defect,CatchableError].} = + ## ecrecover extracts the Ethereum account address from a signed header. + c.cfg.signatures.getEcRecover(header) + + +# clique/clique.go(463): func (c *Clique) verifySeal(chain [..] +proc verifySeal(c: Clique; header: BlockHeader; + parents: openArray[BlockHeader]): CliqueResult + {.gcsafe, raises: [Defect,CatchableError].} = + ## Check whether the signature contained in the header satisfies the + ## consensus protocol requirements. The method accepts an optional list of + ## parent headers that aren't yet part of the local blockchain to generate + ## the snapshots from. 
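# Worked example for the recent-signer check in the body below, assuming
# `signersThreshold` is EIP-225's SIGNER_LIMIT = floor(SIGNER_COUNT/2) + 1:
# with 7 authorised signers the limit is 4, so a signer last seen at block
# 100 is rejected for blocks 101..103 (blockNumber - 4 < 100) and accepted
# again from block 104 onwards.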
+ + # Verifying the genesis block is not supported + if header.blockNumber.isZero: + return err((errUnknownBlock,"")) + + # Retrieve the snapshot needed to verify this header and cache it + var snap = c.snapshot(header.blockNumber-1, header.parentHash, parents) + if snap.isErr: + return err(snap.error) + + # Resolve the authorization key and check against signers + let signer = c.ecrecover(header) + if signer.isErr: + return err(signer.error) + + if not snap.value.isSigner(signer.value): + return err((errUnauthorizedSigner,"")) + + let seen = snap.value.recent(signer.value) + if seen.isOk: + # Signer is among recents, only fail if the current block does not + # shift it out + if header.blockNumber - snap.value.signersThreshold.u256 < seen.value: + return err((errRecentlySigned,"")) + + # Ensure that the difficulty corresponds to the turn-ness of the signer + if not c.fakeDiff: + if snap.value.inTurn(header.blockNumber, signer.value): + if header.difficulty != DIFF_INTURN: + return err((errWrongDifficulty,"")) + else: + if header.difficulty != DIFF_NOTURN: + return err((errWrongDifficulty,"")) + + return ok() + + +# clique/clique.go(314): func (c *Clique) verifyCascadingFields(chain [..] +proc verifyCascadingFields(c: Clique; header: BlockHeader; + parents: openArray[BlockHeader]): CliqueResult + {.gcsafe, raises: [Defect,CatchableError].} = + ## Verify all the header fields that are not standalone, rather depend on a + ## batch of previous headers. The caller may optionally pass in a batch of + ## parents (ascending order) to avoid looking those up from the database. + ## This is useful for concurrently verifying a batch of new headers. + + # The genesis block is the always valid dead-end + if header.blockNumber.isZero: + return err((errZeroBlockNumberRejected,"")) + + # Ensure that the block's timestamp isn't too close to its parent + var parent: BlockHeader + if 0 < parents.len: + parent = parents[^1] + else: + let rc = c.db.getBlockHeaderResult(header.blockNumber-1) + if rc.isErr: + return err((errUnknownAncestor,"")) + parent = rc.value + + if parent.blockNumber != header.blockNumber-1 or + parent.hash != header.parentHash: + return err((errUnknownAncestor,"")) + + if header.timestamp < parent.timestamp + c.cfg.period: + return err((errInvalidTimestamp,"")) + + # Verify that the gasUsed is <= gasLimit + if header.gasLimit < header.gasUsed: + return err((errCliqueExceedsGasLimit, + &"invalid gasUsed: have {header.gasUsed}, " & + &"gasLimit {header.gasLimit}")) + + let rc = c.db.validateGasLimitOrBaseFee(header, parent) + if rc.isErr: + return err((errCliqueGasLimitOrBaseFee, rc.error)) + + # Retrieve the snapshot needed to verify this header and cache it + var snap = c.snapshot(header.blockNumber-1, header.parentHash, parents) + if snap.isErr: + return err(snap.error) + + # If the block is a checkpoint block, verify the signer list + if (header.blockNumber mod c.cfg.epoch.u256) == 0: + let + signersList = snap.value.signers + extraList = header.extraData.extraDataAddresses + if signersList != extraList: + return err((errMismatchingCheckpointSigners,"")) + + # All basic checks passed, verify the seal and return + return c.verifySeal(header, parents) + + +# clique/clique.go(246): func (c *Clique) verifyHeader(chain [..] 
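# Extra-data layout assumed by the length checks in `verifyHeader()` below
# (sizes as fixed by EIP-225):
#
#   | 32 byte vanity | N * 20 byte signer addresses | 65 byte seal |
#
# so a checkpoint header carries (extraData.len - EXTRA_VANITY - EXTRA_SEAL)
# div 20 signer addresses, while any other header must leave that middle
# section empty.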
+proc verifyHeader(c: Clique; header: BlockHeader; + parents: openArray[BlockHeader]): CliqueResult + {.gcsafe, raises: [Defect,CatchableError].} = + ## Check whether a header conforms to the consensus rules.The caller may + ## optionally pass in a batch of parents (ascending order) to avoid looking + ## those up from the database. This is useful for concurrently verifying + ## a batch of new headers. + if header.blockNumber.isZero: + return err((errUnknownBlock,"")) + + # Don't waste time checking blocks from the future + if getTime() < header.timestamp: + return err((errFutureBlock,"")) + + # Checkpoint blocks need to enforce zero beneficiary + let isCheckPoint = (header.blockNumber mod c.cfg.epoch.u256) == 0 + if isCheckPoint and not header.coinbase.isZero: + return err((errInvalidCheckpointBeneficiary,"")) + + # Nonces must be 0x00..0 or 0xff..f, zeroes enforced on checkpoints + if header.nonce != NONCE_AUTH and header.nonce != NONCE_DROP: + return err((errInvalidVote,"")) + if isCheckPoint and header.nonce != NONCE_DROP: + return err((errInvalidCheckpointVote,"")) + + # Check that the extra-data contains both the vanity and signature + if header.extraData.len < EXTRA_VANITY: + return err((errMissingVanity,"")) + if header.extraData.len < EXTRA_VANITY + EXTRA_SEAL: + return err((errMissingSignature,"")) + + # Ensure that the extra-data contains a signer list on checkpoint, + # but none otherwise + let signersBytes = header.extraData.len - EXTRA_VANITY - EXTRA_SEAL + if not isCheckPoint and signersBytes != 0: + return err((errExtraSigners,"")) + + if isCheckPoint and (signersBytes mod EthAddress.len) != 0: + return err((errInvalidCheckpointSigners,"")) + + # Ensure that the mix digest is zero as we do not have fork protection + # currently + if not header.mixDigest.isZero: + return err((errInvalidMixDigest,"")) + + # Ensure that the block does not contain any uncles which are meaningless + # in PoA + if header.ommersHash != EMPTY_UNCLE_HASH: + return err((errInvalidUncleHash,"")) + + # Ensure that the block's difficulty is meaningful (may not be correct at + # this point) + if not header.blockNumber.isZero: + if header.difficulty.isZero or + (header.difficulty != DIFF_INTURN and + header.difficulty != DIFF_NOTURN): + return err((errInvalidDifficulty,"")) + + # verify that the gas limit is <= 2^63-1 + when header.gasLimit.typeof isnot int64: + if int64.high < header.gasLimit: + return err((errCliqueExceedsGasLimit, + &"invalid gasLimit: have {header.gasLimit}, must be int64")) + + # If all checks passed, validate any special fields for hard forks + let rc = c.db.config.verifyForkHashes(header) + if rc.isErr: + return err(rc.error) + + # All basic checks passed, verify cascading fields + return c.verifyCascadingFields(header, parents) + + +# clique/clique.go(681): func calcDifficulty(snap [..] +proc calcDifficulty(snap: var Snapshot; signer: EthAddress): DifficultyInt = + if snap.inTurn(snap.blockNumber + 1, signer): + DIFF_INTURN + else: + DIFF_NOTURN + +# ------------------------------------------------------------------------------ +# Public functions +# ------------------------------------------------------------------------------ + +# clique/clique.go(369): func (c *Clique) snapshot(chain [..] +proc snapshot*(c: Clique; blockNumber: BlockNumber; hash: Hash256; + parents: openArray[Blockheader]): Result[Snapshot,CliqueError] + {.gcsafe, raises: [Defect,CatchableError].} = + ## snapshot retrieves the authorization snapshot at a given point in time. 
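# Typical call pattern for `snapshot()` (sketch; `header` is any header whose
# parent is already known locally):
#
#   var snap = c.snapshot(header.blockNumber-1, header.parentHash, @[])
#   if snap.isOk:
#     echo "authorised signers: ", snap.value.signers.len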
+ c.recents.getRecentSnaps: + RecentArgs(blockHash: hash, + blockNumber: blockNumber, + parents: toSeq(parents)) + +# clique/clique.go(212): func (c *Clique) Author(header [..] +proc author*(c: Clique; header: BlockHeader): Result[EthAddress,CliqueError] + {.gcsafe, raises: [Defect,CatchableError].} = + ## For the Consensus Engine, `author()` retrieves the Ethereum address of the + ## account that minted the given block, which may be different from the + ## header's coinbase if a consensus engine is based on signatures. + ## + ## This implementation returns the Ethereum address recovered from the + ## signature in the header's extra-data section. + c.ecrecover(header) + + +# clique/clique.go(217): func (c *Clique) VerifyHeader(chain [..] +proc verifyHeader*(c: Clique; header: BlockHeader): CliqueResult + {.gcsafe, raises: [Defect,CatchableError].} = + ## For the Consensus Engine, `verifyHeader()` checks whether a header + ## conforms to the consensus rules of a given engine. Verifying the seal + ## may be done optionally here, or explicitly via the `verifySeal()` method. + ## + ## This implementation checks whether a header conforms to the consensus + ## rules. + c.verifyHeader(header, @[]) + +# clique/clique.go(224): func (c *Clique) VerifyHeader(chain [..] +proc verifyHeaders*(c: Clique; headers: openArray[BlockHeader]): + Future[seq[CliqueResult]] {.async,gcsafe.} = + ## For the Consensus Engine, `verifyHeader()` s similar to VerifyHeader, but + ## verifies a batch of headers concurrently. This method is accompanied + ## by a `stopVerifyHeader()` method that can abort the operations. + ## + ## This implementation checks whether a header conforms to the consensus + ## rules. It verifies a batch of headers. If running in the background, + ## the process can be stopped by calling the `stopVerifyHeader()` function. + syncExceptionWrap: + c.doExclusively: + c.stopVHeaderReq = false + for n in 0 ..< headers.len: + c.doExclusively: + let isStopRequest = c.stopVHeaderReq + if isStopRequest: + result.add cliqueResultErr((errCliqueStopped,"")) + break + result.add c.verifyHeader(headers[n], headers[0 ..< n]) + c.doExclusively: + c.stopVHeaderReq = false + +proc stopVerifyHeader*(c: Clique): bool {.discardable.} = + ## Activate the stop flag for running `verifyHeader()` function. + ## Returns `true` if the stop flag could be activated. + syncExceptionWrap: + c.doExclusively: + if not c.stopVHeaderReq: + c.stopVHeaderReq = true + result = true + + +# clique/clique.go(450): func (c *Clique) VerifyUncles(chain [..] +proc verifyUncles*(c: Clique; ethBlock: EthBlock): CliqueResult = + ## For the Consensus Engine, `verifyUncles()` verifies that the given + ## block's uncles conform to the consensus rules of a given engine. + ## + ## This implementation always returns an error for existing uncles as this + ## consensus mechanism doesn't permit uncles. + if 0 < ethBlock.uncles.len: + return err((errCliqueUnclesNotAllowed,"")) + result = ok() + + +# clique/clique.go(506): func (c *Clique) Prepare(chain [..] +proc prepare*(c: Clique; header: var BlockHeader): CliqueResult + {.gcsafe, raises: [Defect,CatchableError].} = + ## For the Consensus Engine, `prepare()` initializes the consensus fields + ## of a block header according to the rules of a particular engine. The + ## changes are executed inline. + ## + ## This implementation prepares all the consensus fields of the header for + ## running the transactions on top. 
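# Voting recap for the body below: a non-checkpoint header casts at most one
# vote by putting the proposed address into `coinbase` and setting `nonce` to
# NONCE_AUTH (authorise) or NONCE_DROP (drop), while checkpoint headers
# (blockNumber mod epoch == 0) must leave both fields zero.  No dedicated
# propose call is defined here, so a vote would be queued straight into the
# proposals table (illustrative; `someAddress` is a placeholder):
#
#   c.doExclusively:
#     c.proposals[someAddress] = true   # true => authorise, false => drop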
+ + # If the block isn't a checkpoint, cast a random vote (good enough for now) + header.coinbase.reset + header.nonce.reset + + # Assemble the voting snapshot to check which votes make sense + var snap = c.snapshot(header.blockNumber-1, header.parentHash, @[]) + if snap.isErr: + return err(snap.error) + + if (header.blockNumber mod c.cfg.epoch) != 0: + c.doExclusively: + # Gather all the proposals that make sense voting on + var addresses: seq[EthAddress] + for (address,authorize) in c.proposals.pairs: + if snap.value.validVote(address, authorize): + addresses.add address + + # If there's pending proposals, cast a vote on them + if 0 < addresses.len: + header.coinbase = addresses[c.cfg.prng.rand(addresses.len-1)] + header.nonce = if header.coinbase in c.proposals: NONCE_AUTH + else: NONCE_DROP + + # Set the correct difficulty + header.difficulty = snap.value.calcDifficulty(c.signer) + + # Ensure the extra data has all its components + header.extraData.setLen(EXTRA_VANITY) + if (header.blockNumber mod c.cfg.epoch) == 0: + header.extraData.add snap.value.signers.mapIt(toSeq(it)).concat + header.extraData.add 0.byte.repeat(EXTRA_SEAL) + + # Mix digest is reserved for now, set to empty + header.mixDigest.reset + + # Ensure the timestamp has the correct delay + let parent = c.db.getBlockHeaderResult(header.blockNumber-1) + if parent.isErr: + return err((errUnknownAncestor,"")) + + header.timestamp = parent.value.timestamp + c.cfg.period + if header.timestamp < getTime(): + header.timestamp = getTime() + + return ok() + + +# clique/clique.go(571): func (c *Clique) Finalize(chain [..] +proc finalize*(c: Clique; header: BlockHeader; db: AccountStateDB) = + ## For the Consensus Engine, `finalize()` runs any post-transaction state + ## modifications (e.g. block rewards) but does not assemble the block. + ## + ## Note: The block header and state database might be updated to reflect any + ## consensus rules that happen at finalization (e.g. block rewards). + ## + ## Not implemented here, raises `AssertionDefect` + raiseAssert "Not implemented" + # + # ## This implementation ensures no uncles are set, nor block rewards given. + # # No block rewards in PoA, so the state remains as is and uncles are dropped + # let deleteEmptyObjectsOk = c.cfg.config.eip158block <= header.blockNumber + # header.stateRoot = db.intermediateRoot(deleteEmptyObjectsOk) + # header.ommersHash = EMPTY_UNCLE_HASH + +# clique/clique.go(579): func (c *Clique) FinalizeAndAssemble(chain [..] +proc finalizeAndAssemble*(c: Clique; header: BlockHeader; + db: AccountStateDB; txs: openArray[Transaction]; + receipts: openArray[Receipt]): + Result[EthBlock,CliqueError] = + ## For the Consensus Engine, `finalizeAndAssemble()` runs any + ## post-transaction state modifications (e.g. block rewards) and assembles + ## the final block. + ## + ## Note: The block header and state database might be updated to reflect any + ## consensus rules that happen at finalization (e.g. block rewards). + ## + ## Not implemented here, raises `AssertionDefect` + raiseAssert "Not implemented" + # ## Ensuring no uncles are set, nor block rewards given, and returns the + # ## final block. + # + # # Finalize block + # c.finalize(header, state, txs, uncles) + # + # # Assemble and return the final block for sealing + # return types.NewBlock(header, txs, nil, receipts, + # trie.NewStackTrie(nil)), nil + + +# clique/clique.go(589): func (c *Clique) Authorize(signer [..] 
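# Typical wiring for `authorize()` below (sketch; `mySigner` is the signing
# address and `mySignFn` a key-backed callback of type CliqueSignerFn, both
# placeholders):
#
#   c.authorize(mySigner, mySignFn)
#   let sealed = waitFor c.seal(candidateBlock)  # Result[EthBlock,CliqueError]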
+proc authorize*(c: Clique; signer: EthAddress; signFn: CliqueSignerFn) = + ## Injects private key into the consensus engine to mint new blocks with. + syncExceptionWrap: + c.doExclusively: + c.signer = signer + c.signFn = signFn + + +# clique/clique.go(724): func CliqueRLP(header [..] +proc cliqueRlp*(header: BlockHeader): seq[byte] = + ## Returns the rlp bytes which needs to be signed for the proof-of-authority + ## sealing. The RLP to sign consists of the entire header apart from the 65 + ## byte signature contained at the end of the extra data. + ## + ## Note, the method requires the extra data to be at least 65 bytes, + ## otherwise it panics. This is done to avoid accidentally using both forms + ## (signature present or not), which could be abused to produce different + ##hashes for the same header. + header.encodeSealHeader + + +# clique/clique.go(688): func SealHash(header *types.Header) common.Hash { +proc sealHash*(header: BlockHeader): Hash256 = + ## For the Consensus Engine, `sealHash()` returns the hash of a block prior + ## to it being sealed. + ## + ## This implementation returns the hash of a block prior to it being sealed. + header.hashSealHeader + + +# clique/clique.go(599): func (c *Clique) Seal(chain [..] +proc seal*(c: Clique; ethBlock: EthBlock): + Future[Result[EthBlock,CliqueError]] {.async,gcsafe.} = + ## For the Consensus Engine, `seal()` generates a new sealing request for + ## the given input block and pushes the result into the given channel. + ## + ## Note, the method returns immediately and will send the result async. More + ## than one result may also be returned depending on the consensus algorithm. + ## + ## This implementation attempts to create a sealed block using the local + ## signing credentials. If running in the background, the process can be + ## stopped by calling the `stopSeal()` function. 
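# Delay arithmetic used further down: besides waiting until the header's own
# timestamp, an out-of-turn (DIFF_NOTURN) signer adds a random wiggle of up
# to signersThreshold * WIGGLE_TIME so that competing signers spread out and
# the in-turn signer normally wins the race (WIGGLE_TIME corresponds to the
# 500 ms wiggle suggested by EIP-225).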
+ c.doExclusively: + c.stopSealReq = false + var header = ethBlock.header + + # Sealing the genesis block is not supported + if header.blockNumber.isZero: + return err((errUnknownBlock,"")) + + # For 0-period chains, refuse to seal empty blocks (no reward but would spin + # sealing) + if c.cfg.period.isZero and ethBlock.txs.len == 0: + info $nilCliqueSealNoBlockYet + return err((nilCliqueSealNoBlockYet,"")) + + # Don't hold the signer fields for the entire sealing procedure + c.doExclusively: + let + signer = c.signer + signFn = c.signFn + + # Bail out if we're unauthorized to sign a block + var snap = c.snapshot(header.blockNumber-1, header.parentHash, @[]) + if snap.isErr: + return err(snap.error) + if not snap.value.isSigner(signer): + return err((errUnauthorizedSigner,"")) + + # If we're amongst the recent signers, wait for the next block + let seen = snap.value.recent(signer) + if seen.isOk: + # Signer is among recents, only wait if the current block does not + # shift it out + if header.blockNumber < seen.value + snap.value.signersThreshold.u256: + info $nilCliqueSealSignedRecently + return err((nilCliqueSealSignedRecently,"")) + + # Sweet, the protocol permits us to sign the block, wait for our time + var delay = header.timestamp - getTime() + if header.difficulty == DIFF_NOTURN: + # It's not our turn explicitly to sign, delay it a bit + let wiggle = snap.value.signersThreshold.int64 * WIGGLE_TIME + # Kludge for limited rand() argument range + if wiggle.inSeconds < (int.high div 1000).int64: + let rndWiggleMs = c.cfg.prng.rand(wiggle.inMilliSeconds.int) + delay += initDuration(milliseconds = rndWiggleMs) + else: + let rndWiggleSec = c.cfg.prng.rand((wiggle.inSeconds and int.high).int) + delay += initDuration(seconds = rndWiggleSec) + + trace "Out-of-turn signing requested", + wiggle = $wiggle + + # Sign all the things! + let sigHash = signFn(signer,header.cliqueRlp) + if sigHash.isErr: + return err((errCliqueSealSigFn,$sigHash.error)) + let extraLen = header.extraData.len + if EXTRA_SEAL < extraLen: + header.extraData.setLen(extraLen - EXTRA_SEAL) + header.extraData.add sigHash.value.data + + # Wait until sealing is terminated or delay timeout. + trace "Waiting for slot to sign and propagate", + delay = $delay + + # FIXME: double check + let timeOutTime = getTime() + delay + while getTime() < timeOutTime: + c.doExclusively: + let isStopRequest = c.stopVHeaderReq + if isStopRequest: + warn "Sealing result is not read by miner", + sealhash = sealHash(header) + return err((errCliqueStopped,"")) + poll() + + c.doExclusively: + c.stopSealReq = false + return ok(ethBlock.withHeader(header)) + +proc stopSeal*(c: Clique): bool {.discardable.} = + ## Activate the stop flag for running `seal()` function. + ## Returns `true` if the stop flag could be activated. + syncExceptionWrap: + c.doExclusively: + if not c.stopSealReq: + c.stopSealReq = true + result =true + + +# clique/clique.go(673): func (c *Clique) CalcDifficulty(chain [..] +proc calcDifficulty(c: Clique; + parent: BlockHeader): Result[DifficultyInt,CliqueError] + {.gcsafe, raises: [Defect,CatchableError].} = + ## For the Consensus Engine, `calcDifficulty()` is the difficulty adjustment + ## algorithm. It returns the difficulty that a new block should have. 
+ ## + ## This implementation returns the difficulty that a new block should have: + ## * DIFF_NOTURN(2) if BLOCK_NUMBER % SIGNER_COUNT != SIGNER_INDEX + ## * DIFF_INTURN(1) if BLOCK_NUMBER % SIGNER_COUNT == SIGNER_INDEX + var snap = c.snapshot(parent.blockNumber, parent.blockHash, @[]) + if snap.isErr: + return err(snap.error) + return ok(snap.value.calcDifficulty(c.signer)) + + +# # clique/clique.go(710): func (c *Clique) SealHash(header [..] +# proc sealHash(c: Clique; header: BlockHeader): Hash256 = +# ## SealHash returns the hash of a block prior to it being sealed. +# header.encodeSigHeader.keccakHash + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/nimbus/p2p/clique/clique_utils.nim b/nimbus/p2p/clique/clique_utils.nim index 7eb562c0c..48e14ccc5 100644 --- a/nimbus/p2p/clique/clique_utils.nim +++ b/nimbus/p2p/clique/clique_utils.nim @@ -23,11 +23,8 @@ import std/[algorithm, strformat, times], ../../chain_config, - ../../config, ../../constants, ../../db/db_chain, - ../../errors, - ../../forks, ../../utils, ./clique_defs, eth/[common, rlp], @@ -85,12 +82,12 @@ proc extraDataAddresses*(extraData: Blob): seq[EthAddress] = addrOffset += EthAddress.len -proc getBlockHeaderResult*(c: BaseChainDB; +proc getBlockHeaderResult*(db: BaseChainDB; number: BlockNumber): Result[BlockHeader,void] {. gcsafe, raises: [Defect,RlpError].} = ## Slightly re-phrased dbChain.getBlockHeader(..) command var header: BlockHeader - if c.getBlockHeader(number, header): + if db_chain.getBlockHeader(db, number, header): return ok(header) err() diff --git a/nimbus/p2p/clique/recent_snaps.nim b/nimbus/p2p/clique/recent_snaps.nim index 46cd2ac6a..4b7acaa5e 100644 --- a/nimbus/p2p/clique/recent_snaps.nim +++ b/nimbus/p2p/clique/recent_snaps.nim @@ -102,7 +102,7 @@ proc canDiskCheckPointOk(d: RecentDesc): # checkpoint trusted and snapshot it. if FULL_IMMUTABILITY_THRESHOLD < d.local.headers.len: return true - if d.cfg.dbChain.getBlockHeaderResult(d.args.blockNumber - 1).isErr: + if d.cfg.db.getBlockHeaderResult(d.args.blockNumber - 1).isErr: return true # ------------------------------------------------------------------------------ @@ -123,7 +123,7 @@ proc tryStoreDiskCheckPoint(d: RecentDesc; snap: var Snapshot): bool {.gcsafe, raises: [Defect,RlpError].} = if d.canDiskCheckPointOk: # clique/clique.go(395): checkpoint := chain.GetHeaderByNumber [..] 
- let checkPoint = d.cfg.dbChain.getBlockHeaderResult(d.args.blockNumber) + let checkPoint = d.cfg.db.getBlockHeaderResult(d.args.blockNumber) if checkPoint.isErr: return false let @@ -178,7 +178,7 @@ proc initRecentSnaps*(rs: var RecentSnaps; else: # No explicit parents (or no more left), reach out to the database - let rc = d.cfg.dbChain.getBlockHeaderResult(d.args.blockNumber) + let rc = d.cfg.db.getBlockHeaderResult(d.args.blockNumber) if rc.isErr: return err((errUnknownAncestor,"")) header = rc.value @@ -210,8 +210,8 @@ proc initRecentSnaps*(rs: var RecentSnaps; if rc.isErr: return err(rc.error) trace "Stored voting snapshot to disk", - blockNumber = d.blockNumber, - blockHash = hash + blockNumber = snap.blockNumber, + blockHash = snap.blockHash # clique/clique.go(438): c.recents.Add(snap.Hash, snap) return ok(snap) @@ -223,9 +223,6 @@ proc initRecentSnaps*(rs: var RecentSnaps; proc initRecentSnaps*(cfg: CliqueCfg): RecentSnaps {.gcsafe,raises: [Defect].} = result.initRecentSnaps(cfg) -proc setDebug*(rs: var RecentSnaps; debug: bool) = - ## Set debugging mode on/off - rs.debug = debug proc getRecentSnaps*(rs: var RecentSnaps; args: RecentArgs): auto {. gcsafe, raises: [Defect,CatchableError].} = @@ -237,6 +234,11 @@ proc getRecentSnaps*(rs: var RecentSnaps; args: RecentArgs): auto {. args: args, local: LocalArgs()) + +proc `debug=`*(rs: var RecentSnaps; debug: bool) = + ## Setter, debugging mode on/off + rs.debug = debug + # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/p2p/clique/snapshot.nim b/nimbus/p2p/clique/snapshot.nim index 2f7642994..5a1e3dd5f 100644 --- a/nimbus/p2p/clique/snapshot.nim +++ b/nimbus/p2p/clique/snapshot.nim @@ -25,7 +25,7 @@ const import std/[algorithm, sequtils, strformat, strutils, tables, times], - ../../db/[storage_types, db_chain], + ../../db/storage_types, ../../utils/lru_cache, ./clique_cfg, ./clique_defs, @@ -38,8 +38,8 @@ type AddressHistory = Table[BlockNumber,EthAddress] SnapshotData* = object - blockNumber: BlockNumber ## truncated block num where snapshot was created - blockHash: Hash256 ## block hash where snapshot was created + blockNumber: BlockNumber ## block number where snapshot was created on + blockHash: Hash256 ## block hash where snapshot was created on recents: AddressHistory ## recent signers for spam protections # clique/snapshot.go(58): Recents map[uint64]common.Address [..] @@ -168,16 +168,20 @@ proc blockNumber*(s: var Snapshot): BlockNumber = ## Getter s.data.blockNumber +proc blockHash*(s: var Snapshot): Hash256 = + ## Getter + s.data.blockHash + + # clique/snapshot.go(88): func loadSnapshot(config [..] proc loadSnapshot*(s: var Snapshot; cfg: CliqueCfg; hash: Hash256): CliqueResult {.gcsafe, raises: [Defect].} = ## Load an existing snapshot from the database. try: - let - key = hash.cliqueSnapshotKey - value = cfg.dbChain.db.get(key.toOpenArray) - s.data = value.decode(SnapshotData) s.cfg = cfg + s.data = s.cfg.db.db + .get(hash.cliqueSnapshotKey.toOpenArray) + .decode(SnapshotData) except CatchableError as e: return err((errSnapshotLoad,e.msg)) result = ok() @@ -187,10 +191,8 @@ proc loadSnapshot*(s: var Snapshot; cfg: CliqueCfg; proc storeSnapshot*(s: var Snapshot): CliqueResult {.gcsafe,raises: [Defect].} = ## Insert the snapshot into the database. 
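# Load/store round trip implied by the two procs in this hunk (sketch; `cfg`
# is a CliqueCfg and `someHash` the hash of a block whose snapshot was stored
# earlier):
#
#   var snap: Snapshot
#   if snap.loadSnapshot(cfg, someHash).isOk:
#     doAssert snap.storeSnapshot.isOk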
try: - let - key = s.data.blockHash.cliqueSnapshotKey - value = rlp.encode(s.data) - s.cfg.dbChain.db.put(key.toOpenArray, value) + s.cfg.db.db + .put(s.data.blockHash.cliqueSnapshotKey.toOpenArray, rlp.encode(s.data)) except CatchableError as e: return err((errSnapshotStore,e.msg)) result = ok() diff --git a/nimbus/p2p/executor.nim b/nimbus/p2p/executor.nim index 3e02d0d1c..f37f329f0 100644 --- a/nimbus/p2p/executor.nim +++ b/nimbus/p2p/executor.nim @@ -1,210 +1,26 @@ -import options, sets, - eth/[common, bloom, trie/db], chronicles, nimcrypto, - ../db/[db_chain, accounts_cache], - ../utils, ../constants, ../transaction, - ../vm_state, ../vm_types, - ./dao, ./validate, ../config, ../forks, - ../transaction/call_evm -proc eip1559TxNormalization(tx: Transaction): Transaction = - result = tx - if tx.txType < TxEip1559: - result.maxPriorityFee = tx.gasPrice - result.maxFee = tx.gasPrice +# Nimbus +# Copyright (c) 2018 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. -proc processTransaction*(tx: Transaction, sender: EthAddress, vmState: BaseVMState, fork: Fork): GasInt = - ## Process the transaction, write the results to db. - ## Returns amount of ETH to be rewarded to miner - trace "Sender", sender - trace "txHash", rlpHash = tx.rlpHash +import + ./executor/[ + calculate_reward, + executor_helpers, + process_block, + process_transaction] - var tx = eip1559TxNormalization(tx) - var priorityFee: GasInt - if fork >= FkLondon: - # priority fee is capped because the base fee is filled first - let baseFee = vmState.blockHeader.baseFee.truncate(GasInt) - priorityFee = min(tx.maxPriorityFee, tx.maxFee - baseFee) - # signer pays both the priority fee and the base fee - # tx.gasPrice now is the effective gasPrice - tx.gasPrice = priorityFee + baseFee - - let miner = vmState.coinbase() - if validateTransaction(vmState, tx, sender, fork): - result = txCallEvm(tx, sender, vmState, fork) - - # miner fee - if fork >= FkLondon: - # miner only receives the priority fee; - # note that the base fee is not given to anyone (it is burned) - let txFee = result.u256 * priorityFee.u256 - vmState.accountDb.addBalance(miner, txFee) - else: - let txFee = result.u256 * tx.gasPrice.u256 - vmState.accountDb.addBalance(miner, txFee) - - vmState.cumulativeGasUsed += result - - vmState.mutateStateDB: - for deletedAccount in vmState.selfDestructs: - db.deleteAccount deletedAccount - - if fork >= FkSpurious: - vmState.touchedAccounts.incl(miner) - # EIP158/161 state clearing - for account in vmState.touchedAccounts: - if db.accountExists(account) and db.isEmptyAccount(account): - debug "state clearing", account - db.deleteAccount(account) - - if vmState.generateWitness: - vmState.accountDb.collectWitnessData() - vmState.accountDb.persist(clearCache = false) - -type - # TODO: these types need to be removed - # once eth/bloom and eth/common sync'ed - Bloom = common.BloomFilter - LogsBloom = bloom.BloomFilter - -# TODO: move these three receipt procs below somewhere else more appropriate -func logsBloom(logs: openArray[Log]): LogsBloom = - for log in logs: - result.incl log.address - for topic in log.topics: - result.incl topic - -func createBloom*(receipts: openArray[Receipt]): Bloom = - var bloom: 
LogsBloom - for rec in receipts: - bloom.value = bloom.value or logsBloom(rec.logs).value - result = bloom.value.toByteArrayBE - -proc makeReceipt*(vmState: BaseVMState, fork: Fork, txType: TxType): Receipt = - var rec: Receipt - if fork < FkByzantium: - rec.isHash = true - rec.hash = vmState.accountDb.rootHash - else: - rec.isHash = false - rec.status = vmState.status - - rec.receiptType = txType - rec.cumulativeGasUsed = vmState.cumulativeGasUsed - rec.logs = vmState.getAndClearLogEntries() - rec.bloom = logsBloom(rec.logs).value.toByteArrayBE - rec - -func eth(n: int): Uint256 {.compileTime.} = - n.u256 * pow(10.u256, 18) - -const - eth5 = 5.eth - eth3 = 3.eth - eth2 = 2.eth - blockRewards*: array[Fork, Uint256] = [ - eth5, # FkFrontier - eth5, # FkHomestead - eth5, # FkTangerine - eth5, # FkSpurious - eth3, # FkByzantium - eth2, # FkConstantinople - eth2, # FkPetersburg - eth2, # FkIstanbul - eth2, # FkBerlin - eth2 # FkLondon - ] - -proc calculateReward(fork: Fork, header: BlockHeader, body: BlockBody, vmState: BaseVMState) = - # PoA consensus engine have no reward for miner - if vmState.consensusEnginePoA: return - - let blockReward = blockRewards[fork] - var mainReward = blockReward - - for uncle in body.uncles: - var uncleReward = uncle.blockNumber.u256 + 8.u256 - uncleReward -= header.blockNumber.u256 - uncleReward = uncleReward * blockReward - uncleReward = uncleReward div 8.u256 - vmState.mutateStateDB: - db.addBalance(uncle.coinbase, uncleReward) - mainReward += blockReward div 32.u256 - - vmState.mutateStateDB: - db.addBalance(header.coinbase, mainReward) - -proc processBlock*(chainDB: BaseChainDB, header: BlockHeader, body: BlockBody, vmState: BaseVMState): ValidationResult = - var dbTx = chainDB.db.beginTransaction() - defer: dbTx.dispose() - - if chainDB.config.daoForkSupport and header.blockNumber == chainDB.config.daoForkBlock: - vmState.mutateStateDB: - db.applyDAOHardFork() - - if body.transactions.calcTxRoot != header.txRoot: - debug "Mismatched txRoot", blockNumber=header.blockNumber - return ValidationResult.Error - - let fork = chainDB.config.toFork(vmState.blockNumber) - - if header.txRoot != BLANK_ROOT_HASH: - if body.transactions.len == 0: - debug "No transactions in body", blockNumber=header.blockNumber - return ValidationResult.Error - else: - trace "Has transactions", blockNumber = header.blockNumber, blockHash = header.blockHash - - vmState.receipts = newSeq[Receipt](body.transactions.len) - vmState.cumulativeGasUsed = 0 - for txIndex, tx in body.transactions: - var sender: EthAddress - if tx.getSender(sender): - discard processTransaction(tx, sender, vmState, fork) - else: - debug "Could not get sender", txIndex, tx - return ValidationResult.Error - vmState.receipts[txIndex] = makeReceipt(vmState, fork, tx.txType) - - if vmState.cumulativeGasUsed != header.gasUsed: - debug "gasUsed neq cumulativeGasUsed", - gasUsed=header.gasUsed, - cumulativeGasUsed=vmState.cumulativeGasUsed - return ValidationResult.Error - - if header.ommersHash != EMPTY_UNCLE_HASH: - let h = chainDB.persistUncles(body.uncles) - if h != header.ommersHash: - debug "Uncle hash mismatch" - return ValidationResult.Error - - calculateReward(fork, header, body, vmState) - - # Reward beneficiary - vmState.mutateStateDB: - if vmState.generateWitness: - db.collectWitnessData() - db.persist(ClearCache in vmState.flags) - - let stateDb = vmState.accountDb - if header.stateRoot != stateDb.rootHash: - debug "wrong state root in block", blockNumber=header.blockNumber, expected=header.stateRoot, 
actual=stateDb.rootHash, arrivedFrom=chainDB.getCanonicalHead().stateRoot - return ValidationResult.Error - - let bloom = createBloom(vmState.receipts) - if header.bloom != bloom: - debug "wrong bloom in block", blockNumber=header.blockNumber - return ValidationResult.Error - - let receiptRoot = calcReceiptRoot(vmState.receipts) - if header.receiptRoot != receiptRoot: - debug "wrong receiptRoot in block", blockNumber=header.blockNumber, actual=receiptRoot, expected=header.receiptRoot - return ValidationResult.Error - - # `applyDeletes = false` - # If the trie pruning activated, each of the block will have its own state trie keep intact, - # rather than destroyed by trie pruning. But the current block will still get a pruned trie. - # If trie pruning deactivated, `applyDeletes` have no effects. - dbTx.commit(applyDeletes = false) +export + calculate_reward.blockRewards, + executor_helpers.createBloom, + process_block, + process_transaction #[ diff --git a/nimbus/p2p/executor/calculate_reward.nim b/nimbus/p2p/executor/calculate_reward.nim new file mode 100644 index 000000000..3f23b5ae3 --- /dev/null +++ b/nimbus/p2p/executor/calculate_reward.nim @@ -0,0 +1,57 @@ +# Nimbus +# Copyright (c) 2018 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. + +import + ../../db/accounts_cache, + ../../forks, + ../../vm_state, + ../../vm_types, + ./executor_helpers, + eth/common + + +func eth(n: int): Uint256 {.compileTime.} = + n.u256 * pow(10.u256, 18) + +const + eth5 = 5.eth + eth3 = 3.eth + eth2 = 2.eth + blockRewards*: array[Fork, Uint256] = [ + eth5, # FkFrontier + eth5, # FkHomestead + eth5, # FkTangerine + eth5, # FkSpurious + eth3, # FkByzantium + eth2, # FkConstantinople + eth2, # FkPetersburg + eth2, # FkIstanbul + eth2, # FkBerlin + eth2 # FkLondon + ] + +proc calculateReward*(vmState: BaseVMState; + header: BlockHeader; body: BlockBody) = + let blockReward = blockRewards[vmState.getFork] + var mainReward = blockReward + + for uncle in body.uncles: + var uncleReward = uncle.blockNumber.u256 + 8.u256 + uncleReward -= header.blockNumber.u256 + uncleReward = uncleReward * blockReward + uncleReward = uncleReward div 8.u256 + vmState.mutateStateDB: + db.addBalance(uncle.coinbase, uncleReward) + mainReward += blockReward div 32.u256 + + vmState.mutateStateDB: + db.addBalance(header.coinbase, mainReward) + +# End diff --git a/nimbus/p2p/executor/executor_helpers.nim b/nimbus/p2p/executor/executor_helpers.nim new file mode 100644 index 000000000..b01feac49 --- /dev/null +++ b/nimbus/p2p/executor/executor_helpers.nim @@ -0,0 +1,57 @@ +# Nimbus +# Copyright (c) 2018 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. 
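# A minimal, self-contained sketch of the reward arithmetic implemented by
# calculateReward() above: the miner receives the base block reward plus
# blockReward/32 per included uncle, and each uncle receives
# (uncleNumber + 8 - blockNumber) * blockReward / 8. Plain uint64 units are
# used here for readability; the production code works on Uint256 wei values
# and the names below are illustrative only.
proc sketchRewards(blockNumber: uint64; uncleNumbers: openArray[uint64];
                   blockReward: uint64): tuple[miner: uint64, uncles: seq[uint64]] =
  var mainReward = blockReward
  for uncleNumber in uncleNumbers:
    # header validation guarantees uncles are at most 7 generations old,
    # so the subtraction below cannot underflow
    let uncleReward = (uncleNumber + 8 - blockNumber) * blockReward div 8
    result.uncles.add uncleReward
    mainReward += blockReward div 32
  result.miner = mainReward

when isMainModule:
  # one uncle two blocks back, base reward of 2 units scaled by 1000
  doAssert sketchRewards(100, [98'u64], 2000) ==
    (miner: 2062'u64, uncles: @[1500'u64])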
+ +import + ../../config, + ../../db/accounts_cache, + ../../forks, + ../../vm_state, + ../../vm_types, + eth/[common, bloom] + +type + # TODO: these types need to be removed + # once eth/bloom and eth/common sync'ed + Bloom = common.BloomFilter + LogsBloom = bloom.BloomFilter + +# TODO: move these three receipt procs below somewhere else more appropriate +func logsBloom(logs: openArray[Log]): LogsBloom = + for log in logs: + result.incl log.address + for topic in log.topics: + result.incl topic + +func createBloom*(receipts: openArray[Receipt]): Bloom = + var bloom: LogsBloom + for rec in receipts: + bloom.value = bloom.value or logsBloom(rec.logs).value + result = bloom.value.toByteArrayBE + +proc getFork*(vmState: BaseVMState): Fork {.inline.} = + ## Shortcut for configured fork, deliberately not naming it toFork() + vmState.chainDB.config.toFork(vmState.blockNumber) + +proc makeReceipt*(vmState: BaseVMState; txType: TxType): Receipt = + var rec: Receipt + if vmState.getFork < FkByzantium: + rec.isHash = true + rec.hash = vmState.accountDb.rootHash + else: + rec.isHash = false + rec.status = vmState.status + + rec.receiptType = txType + rec.cumulativeGasUsed = vmState.cumulativeGasUsed + rec.logs = vmState.getAndClearLogEntries() + rec.bloom = logsBloom(rec.logs).value.toByteArrayBE + rec + +# End diff --git a/nimbus/p2p/executor/process_block.nim b/nimbus/p2p/executor/process_block.nim new file mode 100644 index 000000000..1979de30b --- /dev/null +++ b/nimbus/p2p/executor/process_block.nim @@ -0,0 +1,169 @@ +# Nimbus +# Copyright (c) 2018 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. 
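# A minimal sketch of the bloom folding done by createBloom() above: every
# receipt carries a 256-byte logs bloom, and the block bloom is simply the
# bitwise OR of all of them. Plain byte arrays stand in for the
# eth/common.BloomFilter and eth/bloom types; the helpers below are
# illustrative only.
type SketchBloom = array[256, byte]

proc orInto(acc: var SketchBloom; other: SketchBloom) =
  for i in 0 ..< acc.len:
    acc[i] = acc[i] or other[i]

proc foldBlooms(receiptBlooms: openArray[SketchBloom]): SketchBloom =
  # result is zero-initialised; OR in each receipt bloom
  for b in receiptBlooms:
    result.orInto(b)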
+ +import + ../../config, + ../../constants, + ../../db/[db_chain, accounts_cache], + ../../transaction, + ../../utils, + ../../vm_state, + ../../vm_types, + ../clique, + ../dao, + ./calculate_reward, + ./executor_helpers, + ./process_transaction, + ./update_poastate, + chronicles, + eth/[common, trie/db], + nimcrypto + +# ------------------------------------------------------------------------------ +# Private functions +# ------------------------------------------------------------------------------ + +proc procBlkPreamble(vmState: BaseVMState; dbTx: DbTransaction; + header: BlockHeader, body: BlockBody): bool = + if vmState.chainDB.config.daoForkSupport and + vmState.chainDB.config.daoForkBlock == header.blockNumber: + vmState.mutateStateDB: + db.applyDAOHardFork() + + if body.transactions.calcTxRoot != header.txRoot: + debug "Mismatched txRoot", + blockNumber = header.blockNumber + return false + + if header.txRoot != BLANK_ROOT_HASH: + if body.transactions.len == 0: + debug "No transactions in body", + blockNumber = header.blockNumber + return false + else: + trace "Has transactions", + blockNumber = header.blockNumber, + blockHash = header.blockHash + vmState.receipts = newSeq[Receipt](body.transactions.len) + vmState.cumulativeGasUsed = 0 + for txIndex, tx in body.transactions: + var sender: EthAddress + if tx.getSender(sender): + discard tx.processTransaction(sender, vmState) + else: + debug "Could not get sender", + txIndex, tx + return false + vmState.receipts[txIndex] = vmState.makeReceipt(tx.txType) + + if vmState.cumulativeGasUsed != header.gasUsed: + debug "gasUsed neq cumulativeGasUsed", + gasUsed = header.gasUsed, + cumulativeGasUsed = vmState.cumulativeGasUsed + return false + + if header.ommersHash != EMPTY_UNCLE_HASH: + let h = vmState.chainDB.persistUncles(body.uncles) + if h != header.ommersHash: + debug "Uncle hash mismatch" + return false + + return true + + +proc procBlkEpilogue(vmState: BaseVMState; dbTx: DbTransaction; + header: BlockHeader, body: BlockBody): bool = + # Reward beneficiary + vmState.mutateStateDB: + if vmState.generateWitness: + db.collectWitnessData() + db.persist(ClearCache in vmState.flags) + + let stateDb = vmState.accountDb + if header.stateRoot != stateDb.rootHash: + debug "wrong state root in block", + blockNumber = header.blockNumber, + expected = header.stateRoot, + actual = stateDb.rootHash, + arrivedFrom = vmState.chainDB.getCanonicalHead().stateRoot + return false + + let bloom = createBloom(vmState.receipts) + if header.bloom != bloom: + debug "wrong bloom in block", + blockNumber = header.blockNumber + return false + + let receiptRoot = calcReceiptRoot(vmState.receipts) + if header.receiptRoot != receiptRoot: + debug "wrong receiptRoot in block", + blockNumber = header.blockNumber, + actual = receiptRoot, + expected = header.receiptRoot + return false + + return true + +# ------------------------------------------------------------------------------ +# Public functions +# ------------------------------------------------------------------------------ + +proc processBlock*(vmState: BaseVMState; + header: BlockHeader, body: BlockBody): ValidationResult = + ## Processes `(header,body)` pair for a non-PoA network + if vmState.chainDB.config.poaEngine: + # PoA consensus engine unsupported, see the other version of + # processBlock() below + debug "Unsupported PoA request" + return ValidationResult.Error + + var dbTx = vmState.chainDB.db.beginTransaction() + defer: dbTx.dispose() + + if not vmState.procBlkPreamble(dbTx, header, body): + 
return ValidationResult.Error + + vmState.calculateReward(header, body) + + if not vmState.procBlkEpilogue(dbTx, header, body): + return ValidationResult.Error + + # `applyDeletes = false` + # If the trie pruning activated, each of the block will have its own state + # trie keep intact, rather than destroyed by trie pruning. But the current + # block will still get a pruned trie. If trie pruning deactivated, + # `applyDeletes` have no effects. + dbTx.commit(applyDeletes = false) + + +proc processBlock*(vmState: BaseVMState; poa: Clique; + header: BlockHeader, body: BlockBody): ValidationResult = + ## Processes `(header,body)` pair for a any network regardless of PoA or not + var dbTx = vmState.chainDB.db.beginTransaction() + defer: dbTx.dispose() + + if not vmState.procBlkPreamble(dbTx, header, body): + return ValidationResult.Error + + # PoA consensus engine have no reward for miner + if not vmState.chainDB.config.poaEngine: + vmState.calculateReward(header, body) + elif not vmState.updatePoaState(header, body): + debug "PoA update failed" + return ValidationResult.Error + + if not vmState.procBlkEpilogue(dbTx, header, body): + return ValidationResult.Error + + dbTx.commit(applyDeletes = false) + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/nimbus/p2p/executor/process_transaction.nim b/nimbus/p2p/executor/process_transaction.nim new file mode 100644 index 000000000..4569bd934 --- /dev/null +++ b/nimbus/p2p/executor/process_transaction.nim @@ -0,0 +1,84 @@ +# Nimbus +# Copyright (c) 2018 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. + +import + std/[sets], + ../../db/accounts_cache, + ../../forks, + ../../transaction/call_evm, + ../../vm_state, + ../../vm_types, + ../validate, + ./executor_helpers, + chronicles, + eth/common + + +proc eip1559TxNormalization(tx: Transaction): Transaction = + result = tx + if tx.txType < TxEip1559: + result.maxPriorityFee = tx.gasPrice + result.maxFee = tx.gasPrice + + +proc processTransaction*(tx: Transaction, sender: EthAddress, vmState: BaseVMState, fork: Fork): GasInt = + ## Process the transaction, write the results to db. 
+ ## Returns amount of ETH to be rewarded to miner + trace "Sender", sender + trace "txHash", rlpHash = tx.rlpHash + + var tx = eip1559TxNormalization(tx) + var priorityFee: GasInt + if fork >= FkLondon: + # priority fee is capped because the base fee is filled first + let baseFee = vmState.blockHeader.baseFee.truncate(GasInt) + priorityFee = min(tx.maxPriorityFee, tx.maxFee - baseFee) + # signer pays both the priority fee and the base fee + # tx.gasPrice now is the effective gasPrice + tx.gasPrice = priorityFee + baseFee + + let miner = vmState.coinbase() + if validateTransaction(vmState, tx, sender, fork): + result = txCallEvm(tx, sender, vmState, fork) + + # miner fee + if fork >= FkLondon: + # miner only receives the priority fee; + # note that the base fee is not given to anyone (it is burned) + let txFee = result.u256 * priorityFee.u256 + vmState.accountDb.addBalance(miner, txFee) + else: + let txFee = result.u256 * tx.gasPrice.u256 + vmState.accountDb.addBalance(miner, txFee) + + vmState.cumulativeGasUsed += result + + vmState.mutateStateDB: + for deletedAccount in vmState.selfDestructs: + db.deleteAccount deletedAccount + + if fork >= FkSpurious: + vmState.touchedAccounts.incl(miner) + # EIP158/161 state clearing + for account in vmState.touchedAccounts: + if db.accountExists(account) and db.isEmptyAccount(account): + debug "state clearing", account + db.deleteAccount(account) + + if vmState.generateWitness: + vmState.accountDb.collectWitnessData() + vmState.accountDb.persist(clearCache = false) + + +proc processTransaction*(tx: Transaction, + sender: EthAddress, vmState: BaseVMState): GasInt = + tx.processTransaction(sender, vmState, vmState.getFork) + +# End diff --git a/nimbus/p2p/executor/update_poastate.nim b/nimbus/p2p/executor/update_poastate.nim new file mode 100644 index 000000000..1f3dd10af --- /dev/null +++ b/nimbus/p2p/executor/update_poastate.nim @@ -0,0 +1,20 @@ +# Nimbus +# Copyright (c) 2018 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. 
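# A minimal sketch of the EIP-1559 fee handling in processTransaction()
# above: the priority fee is capped so that baseFee + tip never exceeds
# maxFee, the sender is charged the resulting effective gas price, the miner
# receives only the tip, and the base fee portion is burned. Plain int64
# stands in for GasInt; the names below are illustrative only.
proc splitFees(maxFee, maxPriorityFee, baseFee: int64):
              tuple[effectiveGasPrice, minerTip: int64] =
  let priorityFee = min(maxPriorityFee, maxFee - baseFee)
  result = (effectiveGasPrice: baseFee + priorityFee, minerTip: priorityFee)

when isMainModule:
  # maxFee 100, tip cap 3, base fee 98 (gwei): the tip is capped at 2
  doAssert splitFees(100, 3, 98) == (effectiveGasPrice: 100'i64, minerTip: 2'i64)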
+ +import + ../../vm_types, + eth/[common] + + +proc updatePoaState*(vmState: BaseVMState; + header: BlockHeader; body: BlockBody): bool = + true + +# End diff --git a/nimbus/tracer.nim b/nimbus/tracer.nim index b744d8500..b2eb7db01 100644 --- a/nimbus/tracer.nim +++ b/nimbus/tracer.nim @@ -2,8 +2,7 @@ import db/[db_chain, accounts_cache, capturedb], eth/common, utils, json, constants, vm_state, vm_types, transaction, p2p/executor, eth/trie/db, nimcrypto, strutils, - chronicles, rpc/hexstrings, launcher, - ./config + chronicles, rpc/hexstrings, launcher when defined(geth): import db/geth_db @@ -104,7 +103,6 @@ proc traceTransaction*(chainDB: BaseChainDB, header: BlockHeader, beforeRoot: Hash256 let - fork = chainDB.config.toFork(header.blockNumber) miner = vmState.coinbase() for idx, tx in body.transactions: @@ -120,7 +118,7 @@ proc traceTransaction*(chainDB: BaseChainDB, header: BlockHeader, stateDiff["beforeRoot"] = %($stateDb.rootHash) beforeRoot = stateDb.rootHash - gasUsed = processTransaction(tx, sender, vmState, fork) + gasUsed = processTransaction(tx, sender, vmState) if idx == txIndex: after.captureAccount(stateDb, sender, senderName) @@ -176,7 +174,7 @@ proc dumpBlockState*(db: BaseChainDB, header: BlockHeader, body: BlockBody, dump for idx, uncle in body.uncles: before.captureAccount(stateBefore, uncle.coinbase, uncleName & $idx) - discard captureChainDB.processBlock(header, body, vmState) + discard vmState.processBlock(header, body) var stateAfter = vmState.accountDb @@ -220,11 +218,10 @@ proc traceBlock*(chainDB: BaseChainDB, header: BlockHeader, body: BlockBody, tra doAssert(body.transactions.len != 0) var gasUsed = GasInt(0) - let fork = chainDB.config.toFork(header.blockNumber) for tx in body.transactions: let sender = tx.getSender - gasUsed = gasUsed + processTransaction(tx, sender, vmState, fork) + gasUsed = gasUsed + processTransaction(tx, sender, vmState) result = vmState.getTracingResult() result["gas"] = %gasUsed diff --git a/premix/debug.nim b/premix/debug.nim index be3363a9d..640e04e70 100644 --- a/premix/debug.nim +++ b/premix/debug.nim @@ -23,7 +23,7 @@ proc executeBlock(blockEnv: JsonNode, memoryDB: TrieDatabaseRef, blockNumber: Ui defer: transaction.dispose() let vmState = newBaseVMState(parent.stateRoot, header, chainDB) - validationResult = processBlock(chainDB, header, body, vmState) + validationResult = vmState.processBlock(header, body) if validationResult != ValidationResult.OK: error "block validation error", validationResult diff --git a/premix/dumper.nim b/premix/dumper.nim index dae22c782..f1325dc0f 100644 --- a/premix/dumper.nim +++ b/premix/dumper.nim @@ -27,7 +27,7 @@ proc dumpDebug(chainDB: BaseChainDB, blockNumber: Uint256) = vmState = newBaseVMState(parent.stateRoot, header, captureChainDB) captureChainDB.setHead(parent, true) - discard processBlock(captureChainDB, header, body, vmState) + discard vmState.processBlock(header, body) transaction.rollback() dumpDebuggingMetaData(captureChainDB, header, body, vmState, false) diff --git a/premix/hunter.nim b/premix/hunter.nim index 4ae071126..8e6540111 100644 --- a/premix/hunter.nim +++ b/premix/hunter.nim @@ -96,7 +96,7 @@ proc huntProblematicBlock(blockNumber: Uint256): ValidationResult = defer: transaction.dispose() let vmState = newHunterVMState(parentBlock.header.stateRoot, thisBlock.header, chainDB) - validationResult = processBlock(chainDB, thisBlock.header, thisBlock.body, vmState) + validationResult = vmState.processBlock(thisBlock.header, thisBlock.body) if validationResult != 
ValidationResult.OK: transaction.rollback() diff --git a/premix/regress.nim b/premix/regress.nim index cb84d887f..0384810ae 100644 --- a/premix/regress.nim +++ b/premix/regress.nim @@ -31,7 +31,7 @@ proc validateBlock(chainDB: BaseChainDB, blockNumber: BlockNumber): BlockNumber let vmState = newBaseVMState(parent.stateRoot, headers[i], chainDB) - validationResult = processBlock(chainDB, headers[i], bodies[i], vmState) + validationResult = vmState.processBlock(headers[i], bodies[i]) if validationResult != ValidationResult.OK: error "block validation error", validationResult, blockNumber = blockNumber + i.u256 diff --git a/tests/test_blockchain_json.nim b/tests/test_blockchain_json.nim index 710202ab2..27960d108 100644 --- a/tests/test_blockchain_json.nim +++ b/tests/test_blockchain_json.nim @@ -12,7 +12,7 @@ import stew/endians2, nimcrypto, ./test_helpers, ./test_allowed_to_fail, ../premix/parser, test_config, - ../nimbus/[vm_state, utils, vm_types, errors, transaction, constants, forks], + ../nimbus/[vm_state, utils, vm_types, errors, constants, forks], ../nimbus/db/[db_chain, accounts_cache], ../nimbus/utils/header, ../nimbus/p2p/[executor, validate], @@ -247,7 +247,7 @@ proc importBlock(tester: var Tester, chainDB: BaseChainDB, transactions: result.txs, uncles: result.uncles ) - let res = processBlock(chainDB, result.header, body, tester.vmState) + let res = tester.vmState.processBlock(result.header, body) if res == ValidationResult.Error: if not (tb.hasException or (not tb.goodBlock)): raise newException(ValidationError, "process block validation") diff --git a/tests/test_clique.nim b/tests/test_clique.nim index 7553bede7..54f87e90f 100644 --- a/tests/test_clique.nim +++ b/tests/test_clique.nim @@ -9,17 +9,37 @@ # according to those terms. import - std/[algorithm, sequtils, strformat, strutils], - ../nimbus/p2p/[clique, clique/snapshot], + std/[algorithm, os, sequtils, strformat, strutils], + ../nimbus/config, + ../nimbus/db/db_chain, + ../nimbus/p2p/[chain, clique, clique/snapshot], ../nimbus/utils, - ./test_clique/pool, - eth/keys, + ./test_clique/[pool, undump], + eth/[common, keys], stint, unittest2 +let + goerliCapture = "test_clique" / "goerli51840.txt.gz" + +# ------------------------------------------------------------------------------ +# Helpers +# ------------------------------------------------------------------------------ + +proc db(ap: TesterPool): auto = + ## Getter + ap.clique.db + +proc getBlockHeader(ap: TesterPool; number: BlockNumber): BlockHeader = + ## Shortcut => db/db_chain.getBlockHeader() + doAssert ap.db.getBlockHeader(number, result) + +# ------------------------------------------------------------------------------ +# Test Runners +# ------------------------------------------------------------------------------ # clique/snapshot_test.go(99): func TestClique(t *testing.T) { -proc cliqueMain*(noisy = defined(debug)) = +proc runCliqueSnapshot(noisy = true) = ## Clique PoA Snapshot ## :: ## Tests that Clique signer voting is evaluated correctly for various @@ -75,7 +95,73 @@ proc cliqueMain*(noisy = defined(debug)) = check snapResult == expected -when isMainModule: - cliqueMain() +proc runGoerliReplay(noisy = true; + dir = "tests"; stopAfterBlock = uint64.high) = + var + pool = GoerliNet.newVoterPool + xChain = pool.db.newChain + cache: array[7,(seq[BlockHeader],seq[BlockBody])] + cInx = 0 + stoppedOk = false + suite "Replay Goerli Chain": + + for w in (dir / goerliCapture).undumpNextGroup: + + if w[0][0].blockNumber == 0.u256: + # Verify Genesis + doAssert 
w[0][0] == pool.getBlockHeader(0.u256) + + else: + # Condense in cache + cache[cInx] = w + cInx.inc + + # Handy for partial tests + if stopAfterBlock <= cache[cInx-1][0][0].blockNumber.truncate(uint64): + stoppedOk = true + break + + # Run from cache if complete set + if cache.len <= cInx: + cInx = 0 + let + first = cache[0][0][0].blockNumber + last = cache[^1][0][^1].blockNumber + test &"Goerli Blocks #{first}..#{last} ({cache.len} transactions)": + for (headers,bodies) in cache: + let addedPersistBlocks = xChain.persistBlocks(headers, bodies) + check addedPersistBlocks == ValidationResult.Ok + if addedPersistBlocks != ValidationResult.Ok: return + + # Rest from cache + if 0 < cInx: + let + first = cache[0][0][0].blockNumber + last = cache[cInx-1][0][^1].blockNumber + test &"Goerli Blocks #{first}..#{last} ({cInx} transactions)": + for (headers,bodies) in cache: + let addedPersistBlocks = xChain.persistBlocks(headers, bodies) + check addedPersistBlocks == ValidationResult.Ok + if addedPersistBlocks != ValidationResult.Ok: return + + if stoppedOk: + test &"Runner stooped after reaching #{stopAfterBlock}": + discard + +# ------------------------------------------------------------------------------ +# Main function(s) +# ------------------------------------------------------------------------------ + +proc cliqueMain*(noisy = defined(debug)) = + noisy.runCliqueSnapshot + noisy.runGoerliReplay + +when isMainModule: + let noisy = defined(debug) + noisy.runCliqueSnapshot + noisy.runGoerliReplay(dir = ".", stopAfterBlock = 1000) + +# ------------------------------------------------------------------------------ # End +# ------------------------------------------------------------------------------ diff --git a/tests/test_clique/goerli51840.txt.gz b/tests/test_clique/goerli51840.txt.gz new file mode 100644 index 000000000..dfceb9811 Binary files /dev/null and b/tests/test_clique/goerli51840.txt.gz differ diff --git a/tests/test_clique/gunzip.nim b/tests/test_clique/gunzip.nim new file mode 100644 index 000000000..9706f49c7 --- /dev/null +++ b/tests/test_clique/gunzip.nim @@ -0,0 +1,246 @@ +# Nimbus +# Copyright (c) 2018-2019 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. 
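# A small sketch of the batching pattern used by runGoerliReplay() above:
# undumped (headers, bodies) groups are collected into a bounded cache and
# only handed to persistBlocks() when the cache is full, with a final flush
# for whatever remains. The generic flushEvery() below is illustrative only.
import std/sequtils

proc flushEvery[T](items: seq[T]; batchSize: int;
                   flush: proc(batch: seq[T])) =
  var cache: seq[T]
  for it in items:
    cache.add it
    if batchSize <= cache.len:
      flush(cache)
      cache.setLen(0)
  if 0 < cache.len:              # the "rest from cache" case
    flush(cache)

when isMainModule:
  flushEvery(toSeq(1 .. 10), 4, proc(batch: seq[int]) = echo batch)
  # prints @[1, 2, 3, 4], @[5, 6, 7, 8], @[9, 10]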
+ +import + std/strutils, + stew/results, + zlib + +type + GUnzip = object + mz: ZStream + + # fields used in explode() + inCache: string + inCount: uint + outBuf: array[4096,char] + outCount: uint + outDoneOK: bool + + # fields used by nextChunk() + gzIn: File + gzOpenOK: bool + gzMax: int64 + gzCount: int64 + gzName: string + + # fields used by nextLine() + lnList: seq[string] + lnInx: int + +{.push raises: [Defect].} + +# ------------------------------------------------------------------------------ +# Private deflate helpers: +# ------------------------------------------------------------------------------ + +proc explode(state: var GUnzip; data: openArray[char]; + start, dataLen: int): Result[string,ZError] = + var + inBuf = state.inCache & data[start ..< start + dataLen].join + outData = "" + rc: ZError + + state.mz.next_in = cast[ptr cuchar](inBuf[0].addr) + state.mz.total_in = 0 + state.mz.avail_in = inBuf.len.cuint + + while not state.outDoneOK and 0 < state.mz.avail_in: + state.mz.next_out = cast[ptr cuchar](state.outBuf[0].addr) + state.mz.avail_out = state.outBuf.len.cuint + state.mz.total_out = 0 + + # Save inpust state to compare with later on + let availIn = state.mz.avail_in + + # Deflate current block next_in[] => next_out[] + rc = state.mz.inflate(Z_SYNC_FLUSH) + if rc == Z_STREAM_END: + state.outDoneOK = true + rc = state.mz.inflateEnd + if rc != Z_OK: + break + + # Append processed data + if 0 < state.mz.total_out: + outData &= toOpenArray(state.outBuf, 0, state.mz.total_out-1).join + state.outCount += state.mz.total_out.uint + + # Stop unless state change + if state.mz.avail_in == availIn and + state.mz.avail_out == state.outBuf.len.cuint: + break + + # Cache left-over for next gzExplode() session + state.inCount += state.mz.total_in.uint + state.inCache = + if state.mz.total_in.int < inBuf.len - 1: + inBuf[state.mz.total_in.int ..< inBuf.len] + else: + "" + + # Return code + if rc != Z_OK: + err(rc) + else: + ok(outData) + +# ------------------------------------------------------------------------------ +# Public +# ------------------------------------------------------------------------------ + +proc open*(state: var GUnzip; fileName: string): + Result[void,ZError] {.gcsafe, raises: [Defect,IOError].} = + ## Open gzipped file with path `fileName` and prepare for deflating and + ## extraction. 
+ + # Clear descriptor + if state.gzOpenOK: + state.gzIn.close + state.reset + + var + strBuf = 1024.newString + start = 10 + rc = state.mz.inflateInit2(Z_RAW_DEFLATE) + doAssert rc == Z_OK + + state.gzIn = fileName.open(fmRead) + state.gzOpenOK = true + state.gzMax = state.gzIn.getFileSize + state.gzCount = state.gzIn.readChars(strBuf, 0, strBuf.len) + + # Parse GZIP header (RFC 1952) + doAssert 18 < state.gzCount + doAssert (strBuf[0].ord == 0x1f and # magic number + strBuf[1].ord == 0x8b and # magic number + strBuf[2].ord == 0x08) # deflate + doAssert (strBuf[3].ord and 0xf7) == 0 # unsupported flags + if (strBuf[3].ord and 8) == 8: # FNAME + let endPos = strBuf.find(0.chr, start) + state.gzName = strBuf[start ..< endPos] + start = endPos + 1 + + # Cut off trailor + state.gzMax -= 8 + if state.gzMax < state.gzCount: + state.gzCount = state.gzMax + + # Store unused data for the next read + state.inCache = strBuf[start ..< state.gzCount] + return ok() + + +proc close*(state: var GUnzip) {.gcsafe.} = + ## Close any open files and free resources + if state.gzOpenOK: + state.gzIn.close + state.reset + + +proc nextChunk*(state: var GUnzip): + Result[string,ZError] {.gcsafe, raises: [Defect,IOError].} = + ## Fetch next unzipped data chunk, return and empty string if input + ## is exhausted. + var strBuf = 4096.newString + result = ok("") + + while state.gzCount < state.gzMax: + var strLen = state.gzIn.readChars(strBuf, 0, strBuf.len) + if state.gzMax < state.gzCount + strLen: + strLen = (state.gzMax - state.gzCount).int + state.gzCount += strLen + + result = state.explode(strBuf, 0, strLen) + if result.isErr: + state.close + return + if result.value != "": + return + + +proc nextChunkOk*(state: var GUnzip): bool {.inline,gcsafe.} = + ## True if there is another chunk of data so that `nextChunk()` might + ## fetch another non-empty unzipped data chunk. + state.gzCount < state.gzMax + + +proc nextLine*(state: var GUnzip): + Result[string,ZError] {.gcsafe, raises: [Defect,IOError].} = + ## Assume that the `state` argument descriptor referes to a gzipped text + ## file with lines separated by a newline character. Then fetch the next + ## unzipped line and return it. + ## + ## If all lines are exhausted, the error `Z_STREAM_END` is returned. See + ## function `nextLineOk()` for inquiry whether there would be a next + ## unzipped line, at all. + + # Return next item from list (but spare the last) + if state.lnInx + 1 < state.lnList.len: + result = ok(state.lnList[state.lnInx]) + state.lnInx += 1 + + elif not state.nextChunkOk: + result = err(Z_STREAM_END) + + else: + # Need to refill, concatenate old last item with new first + if state.lnInx + 1 == state.lnList.len: + state.lnList = @[state.lnList[state.lnInx]] + + # First encounter => initialise + else: + state.lnList = @[""] + + # Fetch at least two lines + while state.nextChunkOk and state.lnList.len < 2: + let rc = state.nextChunk + if rc.isErr: + return rc + var q = rc.value.split('\n') + q[0] = state.lnList[0] & q[0] + state.lnList = q + + result = ok(state.lnList[0]) + state.lnInx = 1 + + +proc nextLineOk*(state: var GUnzip): bool {.inline,gcsafe.} = + ## True if there is another unzipped line available with `nextLine()`. 
+ state.nextChunkOk or state.lnInx + 1 < state.lnList.len + + +iterator gunzipLines*(state: var GUnzip): + (int,string) {.gcsafe, raises: [Defect,IOError].} = + ## Iterate over all lines of gzipped text file `fileName` and return + ## the pair `(line-number,line-text)` + var lno = 0 + while state.nextLineOk: + let rc = state.nextLine + if rc.isErr: + break + lno.inc + yield (lno,rc.value) + + +iterator gunzipLines*(fileName: string): + (int,string) {.gcsafe, raises: [Defect,IOError].} = + ## Open a gzipped text file, iterate over its lines (using the other + ## version of `gunzipLines()`) and close it. + var state: GUnzip + doAssert state.open(fileName).isOk + defer: state.close + + for w in state.gunzipLines: + yield w + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/tests/test_clique/pool.nim b/tests/test_clique/pool.nim index 9a54db890..ac0c20656 100644 --- a/tests/test_clique/pool.nim +++ b/tests/test_clique/pool.nim @@ -13,9 +13,9 @@ import ../../nimbus/[config, chain_config, constants, genesis, utils], ../../nimbus/db/db_chain, ../../nimbus/p2p/clique, - ../../nimbus/p2p/clique/[clique_defs, clique_utils], + ../../nimbus/p2p/clique/clique_utils, ./voter_samples as vs, - eth/[common, keys, rlp, trie/db], + eth/[common, keys, p2p, rlp, trie/db], ethash, secp256k1_abi, stew/objects @@ -51,9 +51,9 @@ type # Private Helpers # ------------------------------------------------------------------------------ -proc chain(ap: TesterPool): BaseChainDB = +proc chain(ap: TesterPool): auto = ## Getter - ap.engine.cfgInternal.dbChain + ap.engine.db proc getBlockHeader(ap: TesterPool; number: BlockNumber): BlockHeader = ## Shortcut => db/db_chain.getBlockHeader() @@ -98,14 +98,13 @@ proc privateKey(ap: TesterPool; account: string): PrivateKey = proc resetChainDb(ap: TesterPool; extraData: Blob) = ## Setup new block chain with bespoke genesis - ap.engine.cfgInternal.dbChain = BaseChainDB( - db: newMemoryDb(), - config: ap.boot.config) + ap.engine.db = BaseChainDB(db: newMemoryDb(), config: ap.boot.config) + ap.engine.db.populateProgress # new genesis block var g = ap.boot.genesis if 0 < extraData.len: g.extraData = extraData - g.commit(ap.engine.cfgInternal.dbChain) + g.commit(ap.engine.db) # ------------------------------------------------------------------------------ # Private pretty printer call backs @@ -184,6 +183,9 @@ proc ppBlockHeader(ap: TesterPool; v: BlockHeader; delim: string): string = ## Pretty print block header let sep = if 0 < delim.len: delim else: ";" &"(blockNumber=#{v.blockNumber.truncate(uint64)}" & + &"{sep}parentHash={v.parentHash}" & + &"{sep}selfHash={v.hash}" & + &"{sep}stateRoot={v.stateRoot}" & &"{sep}coinbase={ap.ppAddress(v.coinbase)}" & &"{sep}nonce={ap.ppNonce(v.nonce)}" & &"{sep}extraData={ap.ppExtraData(v.extraData)})" @@ -200,18 +202,16 @@ proc initPrettyPrinters(pp: var PrettyPrinters; ap: TesterPool) = proc initTesterPool(ap: TesterPool): TesterPool {.discardable.} = result = ap - result.boot.config.poaEngine = true result.prng = initRand(prngSeed) result.batch = @[newSeq[BlockHeader]()] result.accounts = initTable[string,PrivateKey]() result.xSeals = initTable[XSealKey,XSealValue]() result.names = initTable[EthAddress,string]() - result.engine = newCliqueCfg( - dbChain = BaseChainDB(), - period = initDuration(seconds = 1)) - .initClique - result.engine.setDebug(false) - 
result.engine.cfgInternal.prettyPrint.initPrettyPrinters(result) + result.engine = BaseChainDB( + db: newMemoryDb(), + config: ap.boot.config).newCliqueCfg.newClique + result.engine.debug = false + result.engine.cfg.prettyPrint.initPrettyPrinters(result) result.resetChainDb(@[]) # ------------------------------------------------------------------------------ @@ -220,13 +220,13 @@ proc initTesterPool(ap: TesterPool): TesterPool {.discardable.} = proc getPrettyPrinters*(t: TesterPool): var PrettyPrinters = ## Mixin for pretty printers, see `clique/clique_cfg.pp()` - t.engine.cfgInternal.prettyPrint + t.engine.cfg.prettyPrint proc setDebug*(ap: TesterPool; debug=true): TesterPool {.inline,discardable,} = ## Set debugging mode on/off result = ap ap.debug = debug - ap.engine.setDebug(debug) + ap.engine.debug = debug proc say*(t: TesterPool; v: varargs[string,`$`]) = if t.debug: @@ -298,25 +298,33 @@ proc snapshot*(ap: TesterPool; number: BlockNumber; hash: Hash256; .sorted .join("\n" & ' '.repeat(23)) - ap.engine.snapshotInternal(number, hash, parent) + ap.engine.snapshot(number, hash, parent) + +proc clique*(ap: TesterPool): Clique = + ## Getter + ap.engine # ------------------------------------------------------------------------------ # Public: Constructor # ------------------------------------------------------------------------------ -proc newVoterPool*(genesisTemplate = ""): TesterPool = - new result - if genesisTemplate == "": - let networkId = getConfiguration().net.networkId - result.boot.genesis = defaultGenesisBlockForNetwork(networkId) - else: - # Find genesis block - doAssert genesisTemplate.loadCustomGenesis(result.boot) - result.initTesterPool - proc newVoterPool*(customGenesis: CustomGenesis): TesterPool = TesterPool(boot: customGenesis).initTesterPool +proc newVoterPool*(id: NetworkId): TesterPool = + CustomGenesis( + config: chainConfig(id), + genesis: defaultGenesisBlockForNetwork(id)).newVoterPool + +proc newVoterPool*(genesisTemplate = ""): TesterPool = + if genesisTemplate == "": + return getConfiguration().net.networkId.newVoterPool + + # Find genesis block from template + new result + doAssert genesisTemplate.loadCustomGenesis(result.boot) + result.initTesterPool + # ------------------------------------------------------------------------------ # Public: set up & manage voter database # ------------------------------------------------------------------------------ @@ -351,7 +359,7 @@ proc resetVoterChain*(ap: TesterPool; signers: openArray[string]; # store modified genesis block and epoch ap.resetChainDb(extraData) - ap.engine.cfgInternal.epoch = epoch.uint + ap.engine.cfg.epoch = epoch.uint # clique/snapshot_test.go(415): blocks, _ := core.GenerateChain(&config, [..] @@ -389,7 +397,7 @@ proc appendVoter*(ap: TesterPool; # clique/snapshot_test.go(432): if auths := tt.votes[j].checkpoint; [..] 
if 0 < voter.checkpoint.len: - doAssert (header.blockNumber mod ap.engine.cfgInternal.epoch) == 0 + doAssert (header.blockNumber mod ap.engine.cfg.epoch) == 0 ap.checkpoint(header,voter.checkpoint) # Generate the signature, embed it into the header and the block diff --git a/tests/test_clique/undump.nim b/tests/test_clique/undump.nim new file mode 100644 index 000000000..90631129c --- /dev/null +++ b/tests/test_clique/undump.nim @@ -0,0 +1,138 @@ +# Nimbus +# Copyright (c) 2018-2019 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. + +import + std/[sequtils, strformat, strutils], + ../../nimbus/db/db_chain, + ../../nimbus/utils, + ./gunzip, + eth/[common, rlp], + nimcrypto, + stew/results + +# ------------------------------------------------------------------------------ +# Private helpers +# ------------------------------------------------------------------------------ + +template say(args: varargs[untyped]) = + # echo args + discard + +proc toByteSeq(s: string): seq[byte] = + nimcrypto.fromHex(s) + +# ------------------------------------------------------------------------------ +# Public capture +# ------------------------------------------------------------------------------ + +proc dumpGroupBegin*(headers: openArray[BlockHeader]): string = + & "transaction #{headers[0].blockNumber} {headers.len}" + +proc dumpGroupBlock*(header: BlockHeader; body: BlockBody): string = + &"block {rlp.encode(header).toHex} {rlp.encode(body).toHex}" + +proc dumpGroupEnd*: string = + "commit" + + +proc dumpGroupEndNl*: string = + dumpGroupEnd() & "\n\n" + +proc dumpGroupBlockNl*(header: BlockHeader; body: BlockBody): string = + dumpGroupBlock(header, body) & "\n" + +proc dumpGroupBeginNl*(db: var BaseChainDB; + headers: openArray[BlockHeader]): string = + if headers[0].blockNumber == 1.u256: + let + h0 = db.getBlockHeader(0.u256) + b0 = db.getBlockBody(h0.hash) + result = "" & + dumpGroupBegin(@[h0]) & "\n" & + dumpGroupBlockNl(h0,b0) & + dumpGroupEndNl() + + result &= dumpGroupBegin(headers) & "\n" + + +proc dumpGroupNl*(db: var BaseChainDB; headers: openArray[BlockHeader]; + bodies: openArray[BlockBody]): string = + db.dumpGroupBeginNl(headers) & + toSeq(countup(0, headers.len-1)) + .mapIt(dumpGroupBlockNl(headers[it], bodies[it])) + .join & + dumpGroupEndNl() + +# ------------------------------------------------------------------------------ +# Public undump +# ------------------------------------------------------------------------------ + +iterator undumpNextGroup*(gzFile: string): (seq[BlockHeader],seq[BlockBody]) = + var + headerQ: seq[BlockHeader] + bodyQ: seq[BlockBody] + line = "" + lno = 0 + current = 0u + start = 0u + top = 0u + waitFor = "transaction" + + for lno,line in gzFile.gunzipLines: + if line.len == 0 or line[0] == '#': + continue + var flds = line.split + if 0 < flds.len and (waitFor == "" or waitFor == flds[0]): + case flds[0] + of "transaction": + let flds1Len = flds[1].len + if flds.len == 3 and + 0 < flds1Len and flds[1][0] == '#' and + 0 < flds[2].len: + start = flds[1][1 ..< flds1Len].parseUInt + top = start + flds[2].parseUInt + current = start + waitFor = "" + headerQ.reset + bodyQ.reset + continue + else: + echo &"*** Ignoring line({lno}): {line}." 
+ waitFor = "transaction" + of "block": + if flds.len == 3 and + 0 < flds[1].len and + 0 < flds[2].len and + start <= current and current < top: + var + rlpHeader = flds[1].rlpFromHex + rlpBody = flds[2].rlpFromHex + headerQ.add rlpHeader.read(BlockHeader) + bodyQ.add rlpBody.read(BlockBody) + current.inc + continue + else: + echo &"*** Ignoring line({lno}): {line}." + waitFor = "transaction" + of "commit": + if current == top: + say &"*** commit({lno}) #{start}..{top-1}" + else: + echo &"*** commit({lno}) error, current({current}) should be {top}" + yield (headerQ, bodyQ) + waitFor = "transaction" + continue + + echo &"*** Ignoring line({lno}): {line}." + waitFor = "transaction" + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/tests/test_rpc.nim b/tests/test_rpc.nim index 1abd8a610..c04920fb5 100644 --- a/tests/test_rpc.nim +++ b/tests/test_rpc.nim @@ -14,7 +14,8 @@ import ../nimbus/rpc/[common, p2p, hexstrings, rpc_types, rpc_utils], ../nimbus/[constants, vm_state, config, genesis, utils, transaction], ../nimbus/db/[accounts_cache, db_chain, storage_types, state_db], - ../nimbus/p2p/[chain, executor], ../nimbus/utils/difficulty, + ../nimbus/p2p/[chain, executor, executor/executor_helpers], + ../nimbus/utils/difficulty, ./rpcclient/test_hexstrings, ./test_helpers, ./macro_assembler # Perform checks for hex string validation @@ -41,7 +42,6 @@ proc setupEnv(chain: BaseChainDB, signer, ks2: EthAddress, conf: NimbusConfigura acc = conf.getAccount(signer).tryGet() blockNumber = 1.toBlockNumber parentHash = parent.blockHash - fork = chain.config.toFork(blockNumber) const code = evmByteCode: PUSH4 "0xDEADBEEF" # PUSH @@ -84,8 +84,8 @@ proc setupEnv(chain: BaseChainDB, signer, ks2: EthAddress, conf: NimbusConfigura vmState.cumulativeGasUsed = 0 for txIndex, tx in txs: let sender = tx.getSender() - discard processTransaction(tx, sender, vmState, fork) - vmState.receipts[txIndex] = makeReceipt(vmState, fork, tx.txType) + discard processTransaction(tx, sender, vmState) + vmState.receipts[txIndex] = makeReceipt(vmState, tx.txType) let receiptRoot = chain.persistReceipts(vmState.receipts)
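# A minimal sketch of the capture format consumed by undumpNextGroup() above:
# each group starts with a "transaction #<first-block> <count>" line, is
# followed by <count> "block <rlp-header-hex> <rlp-body-hex>" lines, and is
# closed by a "commit" line. The parser below handles the group header line
# only and is illustrative; RLP decoding of the block lines is done with
# eth/rlp in the real code.
import std/[options, strutils]

proc parseGroupHeader(line: string): Option[tuple[first, count: uint]] =
  let flds = line.split
  if flds.len == 3 and flds[0] == "transaction" and
     1 < flds[1].len and flds[1][0] == '#':
    try:
      return some((first: flds[1][1 .. ^1].parseUInt, count: flds[2].parseUInt))
    except ValueError:
      discard

when isMainModule:
  doAssert parseGroupHeader("transaction #51841 32").get ==
    (first: 51841'u, count: 32'u)
  doAssert parseGroupHeader("commit").isNone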