saner source code grouping
parent e78110202b
commit ac2cb82a2b
@@ -8,8 +8,11 @@
# those terms.

import
chronicles, eth/[common, rlp], stew/io2,
./p2p/chain, ./db/[db_chain, select_backend]
chronicles,
eth/rlp, stew/io2,
./chain,
../db/select_backend,
../common/common

type
# trick the rlp decoder

@@ -17,14 +20,14 @@ type
EthHeader = object
header: BlockHeader

proc importRlpBlock*(blocksRlp: openArray[byte]; chainDB: BaseChainDB; importFile: string = ""): bool =
proc importRlpBlock*(blocksRlp: openArray[byte]; com: CommonRef; importFile: string = ""): bool =
var
# the encoded rlp can contains one or more blocks
rlp = rlpFromBytes(blocksRlp)
chain = newChain(chainDB, extraValidation = true)
chain = newChain(com, extraValidation = true)
errorCount = 0
let
head = chainDB.getCanonicalHead()
head = com.db.getCanonicalHead()

while rlp.hasData:
try:

@@ -52,12 +55,11 @@ proc importRlpBlock*(blocksRlp: openArray[byte]; chainDB: BaseChainDB; importFil
return errorCount == 0

proc importRlpBlock*(importFile: string; chainDB: BaseChainDB): bool =
proc importRlpBlock*(importFile: string; com: CommonRef): bool =
let res = io2.readAllBytes(importFile)
if res.isErr:
error "failed to import",
fileName = importFile
return false

importRlpBlock(res.get, chainDB, importFile)

importRlpBlock(res.get, com, importFile)
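For orientation, a minimal sketch of driving the reworked entry point; only the `importRlpBlock` signatures above are taken from the diff, while the module path and the way the `CommonRef` instance is obtained are assumptions:

```nim
# Hypothetical call site -- the import path and the origin of `com` are assumed.
import ./core/block_import   # module providing importRlpBlock (path assumed)

proc importDump(com: CommonRef; fileName: string): bool =
  # Reads one or more RLP-encoded blocks from `fileName` and persists them
  # through a chain built on top of the common descriptor.
  importRlpBlock(fileName, com)
```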
@@ -9,11 +9,10 @@
# according to those terms.

import
./chain/[chain_desc, chain_misc, persist_blocks]
./chain/[chain_desc, persist_blocks]

export
chain_desc,
chain_misc,
persist_blocks

# End
@@ -9,50 +9,22 @@
# according to those terms.

import
../../chain_config,
../../db/db_chain,
../../genesis,
../../utils,
../../utils/pow,
../../chain_config,
../../common/common,
../../utils/utils,
../pow,
../clique,
../validate,
chronicles,
eth/[common],
stew/endians2,
stint
stew/endians2

export
common

type
ChainFork* = enum
## `ChainFork` has extra forks not in the EVM fork list. These are the
## unique `DAOFork`, and Glacier forks `MuirGlacier` and `ArrowGlacier`.
## At the Glacier forks, only block difficulty calculation changed.
Frontier,
Homestead,
DAOFork,
Tangerine,
Spurious,
Byzantium,
Constantinople,
Petersburg,
Istanbul,
MuirGlacier,
Berlin,
London,
ArrowGlacier,
GrayGlacier,
MergeFork,
Shanghai,
Cancun

Chain* = ref object of RootRef
db: BaseChainDB
forkIds: array[ChainFork, ForkID]

blockZeroHash: KeccakHash ##\
## Overload cache for `genesisHash()` method

blockZeroStateRoot: KeccakHash
ChainRef* = ref object of RootRef
com: CommonRef
## common block chain configuration
## used throughout entire app

validateBlock: bool ##\
## If turn off, `persistBlocks` will always return

@@ -75,105 +47,24 @@ type

{.push raises: [Defect].}

# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------

func toNextFork(n: BlockNumber): uint64 =
if n == high(BlockNumber):
result = 0'u64
else:
result = n.truncate(uint64)

func toNextFork(n: Option[BlockNumber]): uint64 =
if n.isSome:
n.get.truncate(uint64)
else:
0'u64

proc isBlockAfterTtd*(c: Chain, header: BlockHeader): bool
{.gcsafe, raises: [Defect,CatchableError].} =
isBlockAfterTtd(c.db, header)

func getNextFork(c: ChainConfig, fork: ChainFork): uint64 =
let next: array[ChainFork, uint64] = [
0'u64,
toNextFork(c.homesteadBlock),
toNextFork(c.daoForkBlock),
toNextFork(c.eip150Block),
toNextFork(c.eip158Block),
toNextFork(c.byzantiumBlock),
toNextFork(c.constantinopleBlock),
toNextFork(c.petersburgBlock),
toNextFork(c.istanbulBlock),
toNextFork(c.muirGlacierBlock),
toNextFork(c.berlinBlock),
toNextFork(c.londonBlock),
toNextFork(c.arrowGlacierBlock),
toNextFork(c.grayGlacierBlock),
toNextFork(c.mergeForkBlock),
toNextFork(c.shanghaiBlock),
toNextFork(c.cancunBlock),
]

if fork == high(ChainFork):
result = 0
return

result = next[fork]
for x in fork..high(ChainFork):
if result != next[x]:
result = next[x]
break

func calculateForkId(c: ChainConfig, fork: ChainFork,
prevCRC: uint32, prevFork: uint64): ForkID =
result.nextFork = c.getNextFork(fork)

if result.nextFork != prevFork:
result.crc = crc32(prevCRC, toBytesBE(prevFork))
else:
result.crc = prevCRC

func calculateForkIds(c: ChainConfig,
genesisCRC: uint32): array[ChainFork, ForkID] =
var prevCRC = genesisCRC
var prevFork = c.getNextFork(Frontier)

for fork in ChainFork:
result[fork] = calculateForkId(c, fork, prevCRC, prevFork)
prevFork = result[fork].nextFork
prevCRC = result[fork].crc

proc setForkId(c: Chain)
{. raises: [Defect,CatchableError].} =
let blockZero = c.db.toGenesisHeader
c.blockZeroHash = blockZero.blockHash
c.blockZeroStateRoot = blockZero.stateRoot
let genesisCRC = crc32(0, c.blockZeroHash.data)
c.forkIds = calculateForkIds(c.db.config, genesisCRC)

# ------------------------------------------------------------------------------
# Private constructor helper
# ------------------------------------------------------------------------------

proc initChain(c: Chain; db: BaseChainDB; poa: Clique; extraValidation: bool)
proc initChain(c: ChainRef; com: CommonRef; poa: Clique; extraValidation: bool)
{.gcsafe, raises: [Defect,CatchableError].} =
## Constructor for the `Chain` descriptor object. For most applications,
## the `poa` argument is transparent and should be initilaised on the fly
## which is available below.
c.db = db
c.com = com

if not db.config.daoForkSupport:
db.config.daoForkBlock = db.config.homesteadBlock
c.validateBlock = true
c.extraValidation = extraValidation
c.setForkId()

# Initalise the PoA state regardless of whether it is needed on the current
# network. For non-PoA networks (when `db.config.poaEngine` is `false`),
# this descriptor is ignored.
c.poa = db.newClique
c.poa = com.newClique

# Always initialise the PoW epoch cache even though it migh no be used
# unless `extraValidation` is set `true`.

@@ -183,95 +74,91 @@ proc initChain(c: Chain; db: BaseChainDB; poa: Clique; extraValidation: bool)
# Public constructors
# ------------------------------------------------------------------------------

proc newChain*(db: BaseChainDB; poa: Clique; extraValidation: bool): Chain
proc newChain*(com: CommonRef; poa: Clique; extraValidation: bool): ChainRef
{.gcsafe, raises: [Defect,CatchableError].} =
## Constructor for the `Chain` descriptor object. For most applications,
## the `poa` argument is transparent and should be initilaised on the fly
## which is available below. The argument `extraValidation` enables extra
## block chain validation if set `true`.
new result
result.initChain(db, poa, extraValidation)
result.initChain(com, poa, extraValidation)

proc newChain*(db: BaseChainDB, extraValidation: bool): Chain
proc newChain*(com: CommonRef, extraValidation: bool): ChainRef
{.gcsafe, raises: [Defect,CatchableError].} =
## Constructor for the `Chain` descriptor object with default initialisation
## for the PoA handling. The argument `extraValidation` enables extra block
## chain validation if set `true`.
new result
result.initChain(db, db.newClique, extraValidation)
result.initChain(com, com.newClique, extraValidation)

proc newChain*(db: BaseChainDB): Chain
proc newChain*(com: CommonRef): ChainRef
{.gcsafe, raises: [Defect,CatchableError].} =
## Constructor for the `Chain` descriptor object. All sub-object descriptors
## are initialised with defaults. So is extra block chain validation
## * `enabled` for PoA networks (such as Goerli)
## * `disabled` for nopn-PaA networks
## * `disabled` for non-PaA networks
new result
result.initChain(db, db.newClique, db.config.poaEngine)
result.initChain(com, com.newClique, com.consensus == ConsensusType.POA)

# ------------------------------------------------------------------------------
# Public `Chain` getters
# ------------------------------------------------------------------------------

proc clique*(c: Chain): var Clique =
proc clique*(c: ChainRef): var Clique =
## Getter
c.poa

proc pow*(c: Chain): PowRef =
proc pow*(c: ChainRef): PowRef =
## Getter
c.pow

proc db*(c: Chain): BaseChainDB =
proc db*(c: ChainRef): ChainDBRef =
## Getter
c.db
c.com.db

proc validateBlock*(c: Chain): bool =
proc com*(c: ChainRef): CommonRef =
## Getter
c.com

proc validateBlock*(c: ChainRef): bool =
## Getter
c.validateBlock

proc extraValidation*(c: Chain): bool =
proc extraValidation*(c: ChainRef): bool =
## Getter
c.extraValidation

proc forkIds*(c: Chain): array[ChainFork,ForkID] =
## Getter
c.forkIds

proc verifyFrom*(c: Chain): BlockNumber =
proc verifyFrom*(c: ChainRef): BlockNumber =
## Getter
c.verifyFrom

proc currentBlock*(c: Chain): BlockHeader
proc currentBlock*(c: ChainRef): BlockHeader
{.gcsafe, raises: [Defect,CatchableError].} =
## currentBlock retrieves the current head block of the canonical chain.
## Ideally the block should be retrieved from the blockchain's internal cache.
## but now it's enough to retrieve it from database
c.db.getCanonicalHead()

func genesisHash*(c: Chain): Hash256 =
## Getter
c.blockZeroHash

# ------------------------------------------------------------------------------
# Public `Chain` setters
# ------------------------------------------------------------------------------
proc `validateBlock=`*(c: Chain; validateBlock: bool) =
proc `validateBlock=`*(c: ChainRef; validateBlock: bool) =
## Setter. If set `true`, the assignment value `validateBlock` enables
## block execution, else it will always return ValidationResult.OK
c.validateBlock = validateBlock

proc `extraValidation=`*(c: Chain; extraValidation: bool) =
proc `extraValidation=`*(c: ChainRef; extraValidation: bool) =
## Setter. If set `true`, the assignment value `extraValidation` enables
## extra block chain validation.
c.extraValidation = extraValidation

proc `verifyFrom=`*(c: Chain; verifyFrom: BlockNumber) =
proc `verifyFrom=`*(c: ChainRef; verifyFrom: BlockNumber) =
## Setter. The assignment value `verifyFrom` defines the first block where
## validation should start if the `Clique` field `extraValidation` was set
## `true`.
c.verifyFrom = verifyFrom

proc `verifyFrom=`*(c: Chain; verifyFrom: uint64) =
proc `verifyFrom=`*(c: ChainRef; verifyFrom: uint64) =
## Variant of `verifyFrom=`
c.verifyFrom = verifyFrom.u256
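To see how the constructor overloads above fit together after the `BaseChainDB` → `CommonRef` switch, here is a small hedged sketch; the `newChain` signatures and getters are from the hunks above, whereas the way the `CommonRef` itself gets built is outside this diff and is left abstract:

```nim
# Sketch only: `com` is taken as a parameter because constructing a CommonRef
# (network id, database, chain config) is not part of this change.
proc buildChains(com: CommonRef) =
  # PoA handling picked automatically; extra validation is switched on when
  # com.consensus == ConsensusType.POA (third overload above).
  let byConsensus = newChain(com)

  # Explicit variant with extra block chain validation enabled.
  let strict = newChain(com, extraValidation = true)

  # The getters now route through the common descriptor.
  doAssert strict.com == com
  doAssert byConsensus.db == com.db
```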
@@ -9,7 +9,6 @@
# according to those terms.

import
../../db/db_chain,
../../vm_state,
../../vm_types,
../clique,

@@ -18,7 +17,6 @@ import
./chain_desc,
./chain_helpers,
chronicles,
eth/[common, trie/db],
stew/endians2,
stint

@@ -41,21 +39,24 @@ type
# Private
# ------------------------------------------------------------------------------

proc persistBlocksImpl(c: Chain; headers: openArray[BlockHeader];
proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
bodies: openArray[BlockBody],
flags: PersistBlockFlags = {}): ValidationResult
# wildcard exception, wrapped below in public section
{.inline, raises: [Exception].} =
c.db.highestBlock = headers[^1].blockNumber

let transaction = c.db.db.beginTransaction()
defer: transaction.dispose()

var cliqueState = c.clique.cliqueSave
defer: c.clique.cliqueRestore(cliqueState)

let td = some(c.db.getScore(headers[0].parentHash))
c.com.hardForkTransition(headers[0].blockNumber, td)

# Note that `0 < headers.len`, assured when called from `persistBlocks()`
let vmState = BaseVMState()
if not vmState.init(headers[0], c.db):
if not vmState.init(headers[0], c.com):
debug "Cannot initialise VmState",
fromBlock = headers[0].blockNumber,
toBlock = headers[^1].blockNumber

@@ -69,6 +70,9 @@ proc persistBlocksImpl(c: Chain; headers: openArray[BlockHeader];
let
(header, body) = (headers[i], bodies[i])

let td = some(c.db.getScore(header.parentHash))
c.com.hardForkTransition(header.blockNumber, td)

if not vmState.reinit(header):
debug "Cannot update VmState",
blockNumber = header.blockNumber,

@@ -91,8 +95,8 @@ proc persistBlocksImpl(c: Chain; headers: openArray[BlockHeader];

if c.validateBlock and c.extraValidation and
c.verifyFrom <= header.blockNumber:
let isBlockAfterTtd = c.isBlockAfterTtd(header)
if c.db.config.poaEngine and not isBlockAfterTtd:

if c.com.consensus == ConsensusType.POA:
var parent = if 0 < i: @[headers[i-1]] else: @[]
let rc = c.clique.cliqueVerify(header,parent)
if rc.isOk:

@@ -104,11 +108,10 @@ proc persistBlocksImpl(c: Chain; headers: openArray[BlockHeader];
msg = $rc.error
return ValidationResult.Error
else:
let res = c.db.validateHeaderAndKinship(
let res = c.com.validateHeaderAndKinship(
header,
body,
checkSealOK = false, # TODO: how to checkseal from here
ttdReached = isBlockAfterTtd,
pow = c.pow)
if res.isErr:
debug "block validation error",

@@ -116,7 +119,8 @@ proc persistBlocksImpl(c: Chain; headers: openArray[BlockHeader];
return ValidationResult.Error

if NoPersistHeader notin flags:
discard c.db.persistHeaderToDb(header)
let ttd = c.com.ttd
discard c.db.persistHeaderToDb(header, ttd)

if NoSaveTxs notin flags:
discard c.db.persistTransactions(header.blockNumber, body.transactions)

@@ -127,7 +131,7 @@ proc persistBlocksImpl(c: Chain; headers: openArray[BlockHeader];
# update currentBlock *after* we persist it
# so the rpc return consistent result
# between eth_blockNumber and eth_syncing
c.db.currentBlock = header.blockNumber
c.com.syncCurrent = header.blockNumber

transaction.commit()

@@ -135,7 +139,7 @@ proc persistBlocksImpl(c: Chain; headers: openArray[BlockHeader];
# Public `ChainDB` methods
# ------------------------------------------------------------------------------

proc insertBlockWithoutSetHead*(c: Chain, header: BlockHeader,
proc insertBlockWithoutSetHead*(c: ChainRef, header: BlockHeader,
body: BlockBody): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =

@@ -144,7 +148,7 @@ proc insertBlockWithoutSetHead*(c: Chain, header: BlockHeader,
if result == ValidationResult.OK:
c.db.persistHeaderToDbWithoutSetHead(header)

proc setCanonical*(c: Chain, header: BlockHeader): ValidationResult
proc setCanonical*(c: ChainRef, header: BlockHeader): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =

if header.parentHash == Hash256():

@@ -162,7 +166,7 @@ proc setCanonical*(c: Chain, header: BlockHeader): ValidationResult
if result == ValidationResult.OK:
discard c.db.setHead(header.blockHash)

proc setCanonical*(c: Chain, blockHash: Hash256): ValidationResult
proc setCanonical*(c: ChainRef, blockHash: Hash256): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
var header: BlockHeader
if not c.db.getBlockHeader(blockHash, header):

@@ -172,7 +176,7 @@ proc setCanonical*(c: Chain, blockHash: Hash256): ValidationResult

setCanonical(c, header)

proc persistBlocks*(c: Chain; headers: openArray[BlockHeader];
proc persistBlocks*(c: ChainRef; headers: openArray[BlockHeader];
bodies: openArray[BlockBody]): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
# Run the VM here
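A short usage sketch for the persist entry points above; the `persistBlocks` signature is taken from the hunks, while the surrounding driver proc is an assumption:

```nim
# Hedged sketch: `c` is an already constructed ChainRef, headers/bodies are
# assumed to be decoded and of equal length.
proc storeSegment(c: ChainRef;
                  headers: seq[BlockHeader];
                  bodies: seq[BlockBody]): bool =
  # Runs the blocks through the VM and persists them; hard-fork transitions
  # are now resolved via c.com inside persistBlocksImpl (see hunks above).
  c.persistBlocks(headers, bodies) == ValidationResult.OK
```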
@@ -20,10 +20,8 @@

import
std/[sequtils, times],
../db/db_chain,
./clique/[clique_cfg, clique_defs, clique_desc, clique_verify],
./clique/snapshot/[ballot, snapshot_desc],
eth/common,
stew/results

{.push raises: [Defect].}

@@ -46,7 +44,7 @@ type
# Public
# ------------------------------------------------------------------------------

proc newClique*(db: BaseChainDB): Clique =
proc newClique*(com: CommonRef): Clique =
## Constructor for a new Clique proof-of-authority consensus engine. The
## initial state of the engine is `empty`, there are no authorised signers.
##

@@ -54,11 +52,11 @@ proc newClique*(db: BaseChainDB): Clique =
## will be taken from chain_config. Otherwise, default value in `newCliqueCfg`
## will be used

let cfg = db.newCliqueCfg
if db.config.cliquePeriod > 0:
cfg.period = initDuration(seconds = db.config.cliquePeriod)
if db.config.cliqueEpoch > 0:
cfg.epoch = db.config.cliqueEpoch
let cfg = com.newCliqueCfg
if com.cliquePeriod > 0:
cfg.period = initDuration(seconds = com.cliquePeriod)
if com.cliqueEpoch > 0:
cfg.epoch = com.cliqueEpoch
cfg.newClique

proc cliqueSave*(c: var Clique): CliqueState =
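The constructor above now reads the PoA period/epoch straight from the common descriptor; a minimal sketch of standing up the engine and handing it to a chain (only `newClique(com)` and the three-argument `newChain` come from this diff, the wrapper proc is illustrative):

```nim
# Sketch: build the PoA engine first, then pass it to the chain descriptor
# (this mirrors what the parameterless newChain(com) overload does internally).
proc makePoaChain(com: CommonRef): ChainRef =
  let poa = com.newClique          # period/epoch picked up from `com`
  newChain(com, poa, extraValidation = true)
```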
@@ -20,21 +20,22 @@

import
std/[random, times],
eth/common,
ethash,
stew/results,
stint,
../../db/db_chain,
../../common/common,
../../utils/ec_recover,
./clique_defs

export
common

const
prngSeed = 42

type
CliqueCfg* = ref object of RootRef
db*: BaseChainDB ##\
## All purpose (incl. blockchain) database.
com*: CommonRef ##\
## Configuration, database.

nSnaps*: uint64 ##\
## Number of snapshots stored on disk (for logging troublesshoting)

@@ -78,9 +79,9 @@ type
# Public constructor
# ------------------------------------------------------------------------------

proc newCliqueCfg*(db: BaseChainDB): CliqueCfg =
proc newCliqueCfg*(com: CommonRef): CliqueCfg =
result = CliqueCfg(
db: db,
com: com,
epoch: EPOCH_LENGTH,
period: BLOCK_PERIOD,
ckpInterval: CHECKPOINT_INTERVAL,
@@ -20,13 +20,11 @@

import
std/tables,
../../db/db_chain,
../../constants,
./clique_cfg,
./clique_defs,
./snapshot/snapshot_desc,
chronicles,
eth/[common, keys, rlp],
eth/[keys, rlp],
stew/[keyed_queue, results]

const

@@ -161,9 +159,9 @@ proc cfg*(c: Clique): CliqueCfg =
## Getter
c.cfg

proc db*(c: Clique): BaseChainDB =
proc com*(c: Clique): CommonRef =
## Getter
c.cfg.db
c.cfg.com

proc applySnapsMinBacklog*(c: Clique): bool =
## Getter.

@@ -182,9 +180,9 @@ proc applySnapsMinBacklog*(c: Clique): bool =
# Public setters
# ------------------------------------------------------------------------------

proc `db=`*(c: Clique; db: BaseChainDB) =
proc `com=`*(c: Clique; com: CommonRef) =
## Setter, re-set database
c.cfg.db = db
c.cfg.com = com
c.proposals = initTable[EthAddress,bool]()

proc `snapshot=`*(c: Clique; snaps: Snapshot) =
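The accessor shape is unchanged, only the backing descriptor is renamed; a tiny sketch of re-pointing an engine through the new `com=` setter shown above (the wrapper proc is illustrative):

```nim
# Sketch: swap the CommonRef behind an existing Clique engine.
proc rebind(c: Clique; other: CommonRef) =
  c.com = other            # the setter above also clears the proposals table
  doAssert c.com == other
```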
@@ -179,7 +179,7 @@ proc cliqueGenvote*(
##
## [..]
##
## | var db: BaseChainDB = ...
## | var db: ChainDBRef = ...
## | var c = db.newChain
##
##

@@ -198,7 +198,7 @@ proc cliqueGenvote*(
## [..]
##
c.clique_genvote(voter, seal,
parent = c.cfg.db.getCanonicalHead,
parent = c.cfg.com.db.getCanonicalHead,
elapsed = elapsed,
voteInOk = voteInOk,
outOfTurn = outOfTurn,
@@ -21,7 +21,7 @@
import
std/[algorithm, times],
../../constants,
../../utils,
../../utils/utils,
./clique_defs,
eth/[common, rlp],
stew/[objects, results],
@@ -21,10 +21,9 @@
import
std/[sequtils, strutils],
chronicles,
eth/[common, keys],
eth/[keys],
stew/[keyed_queue, results],
stint,
"../.."/[constants, db/db_chain, utils/prettify],
"../.."/[utils/prettify],
"."/[clique_cfg, clique_defs, clique_desc],
./snapshot/[snapshot_apply, snapshot_desc]

@@ -222,7 +221,7 @@ proc findSnapshot(d: var LocalSnaps): bool
return false

# No explicit parents (or no more parents left), reach out to the database
elif not d.c.cfg.db.getBlockHeader(hash, header):
elif not d.c.cfg.com.db.getBlockHeader(hash, header):
d.trail.error = (errUnknownAncestor,"")
return false

@@ -378,7 +377,7 @@ proc cliqueSnapshotSeq*(c: Clique; hash: Hash256;
return ok(rc.value)

var header: BlockHeader
if not c.cfg.db.getBlockHeader(hash, header):
if not c.cfg.com.db.getBlockHeader(hash, header):
return err((errUnknownHash,""))

# Avoid deep copy, sequence will not be changed by `updateSnapshot()`
@@ -22,10 +22,7 @@

import
std/[strformat, times],
../../chain_config,
../../constants,
../../db/db_chain,
../../utils,
../../utils/utils,
../gaslimit,
./clique_cfg,
./clique_defs,

@@ -34,7 +31,6 @@ import
./clique_snapshot,
./snapshot/[ballot, snapshot_desc],
chronicles,
eth/common,
stew/results

{.push raises: [Defect].}

@@ -52,21 +48,19 @@ proc verifyForkHashes(c: Clique; header: BlockHeader): CliqueOkResult
## Verify that blocks conforming to network hard-forks do have the correct
## hashes, to avoid clients going off on different chains.

if c.db.config.eip150Block.isZero or
c.db.config.eip150Block != header.blockNumber:
return ok()
if c.com.eip150Block.isSome and
c.com.eip150Block.get == header.blockNumber:

# If the homestead reprice hash is set, validate it
let
eip150 = c.db.config.eip150Hash
hash = header.blockHash
# If the homestead reprice hash is set, validate it
let
eip150 = c.com.eip150Hash
hash = header.blockHash

if eip150 == hash:
return ok()

err((errCliqueGasRepriceFork,
&"Homestead gas reprice fork: have {eip150}, want {hash}"))
if eip150 != hash:
return err((errCliqueGasRepriceFork,
&"Homestead gas reprice fork: have {eip150}, want {hash}"))

return ok()

proc signersThreshold*(s: Snapshot): int =
## Minimum number of authorised signers needed.

@@ -160,7 +154,7 @@ proc verifyCascadingFields(c: Clique; header: BlockHeader;
var parent: BlockHeader
if 0 < parents.len:
parent = parents[^1]
elif not c.db.getBlockHeader(header.blockNumber-1, parent):
elif not c.com.db.getBlockHeader(header.blockNumber-1, parent):
return err((errUnknownAncestor,""))

if parent.blockNumber != header.blockNumber-1 or

@@ -183,7 +177,7 @@ proc verifyCascadingFields(c: Clique; header: BlockHeader;
# EIP-1559/London fork.
block:
# clique/clique.go(337): if !chain.Config().IsLondon(header.Number) {
let rc = c.db.validateGasLimitOrBaseFee(header, parent)
let rc = c.com.validateGasLimitOrBaseFee(header, parent)
if rc.isErr:
return err((errCliqueGasLimitOrBaseFee, rc.error))
@@ -26,7 +26,7 @@ import
../clique_helpers,
./ballot,
chronicles,
eth/[common, rlp, trie/db],
eth/[rlp],
stew/results

export tables

@@ -144,7 +144,8 @@ proc loadSnapshot*(cfg: CliqueCfg; hash: Hash256):
## Load an existing snapshot from the database.
var s = Snapshot(cfg: cfg)
try:
let rlpData = s.cfg.db.db.get(hash.cliqueSnapshotKey.toOpenArray)
let db = s.cfg.com.db.db
let rlpData = db.get(hash.cliqueSnapshotKey.toOpenArray)

# The following check is only needed for Github/CI for 64bit Windows (not
# reproducible on my local Win7 -- jordan). What normally happens when

@@ -167,7 +168,8 @@ proc storeSnapshot*(cfg: CliqueCfg; s: Snapshot): CliqueOkResult =
let
key = s.data.blockHash.cliqueSnapshotKey
val = rlp.encode(s.data)
s.cfg.db.db.put(key.toOpenArray, val)
db = s.cfg.com.db.db
db.put(key.toOpenArray, val)

cfg.nSnaps.inc
cfg.snapsData += val.len.uint
@ -10,51 +10,17 @@
|
|||
|
||||
import
|
||||
../../db/accounts_cache,
|
||||
../../forks,
|
||||
../../common/common,
|
||||
../../vm_state,
|
||||
../../vm_types,
|
||||
./executor_helpers,
|
||||
eth/common
|
||||
|
||||
|
||||
func eth(n: int): UInt256 {.compileTime.} =
|
||||
n.u256 * pow(10.u256, 18)
|
||||
|
||||
const
|
||||
eth5 = 5.eth
|
||||
eth3 = 3.eth
|
||||
eth2 = 2.eth
|
||||
eth0 = 0.u256
|
||||
|
||||
# Note than the `blockRewards` were previously exported but nowhere
|
||||
# used otherwise.
|
||||
blockRewards: array[Fork, UInt256] = [
|
||||
eth5, # FkFrontier
|
||||
eth5, # FkHomestead
|
||||
eth5, # FkTangerine
|
||||
eth5, # FkSpurious
|
||||
eth3, # FkByzantium
|
||||
eth2, # FkConstantinople
|
||||
eth2, # FkPetersburg
|
||||
eth2, # FkIstanbul
|
||||
eth2, # FkBerlin
|
||||
eth2, # FkLondon
|
||||
eth0, # FkParis
|
||||
eth0, # FkShanghai
|
||||
eth0, # FkCancun
|
||||
]
|
||||
../../vm_types
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
|
||||
proc calculateReward*(vmState: BaseVMState; account: EthAddress;
|
||||
number: BlockNumber; uncles: openArray[BlockHeader])
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
|
||||
var blockReward: UInt256
|
||||
safeExecutor("getFork"):
|
||||
blockReward = blockRewards[vmState.getForkUnsafe]
|
||||
|
||||
let blockReward = vmState.com.blockReward()
|
||||
var mainReward = blockReward
|
||||
|
||||
for uncle in uncles:
|
|
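The local per-fork reward table is dropped in favour of a lookup on the common descriptor. Two small notes in code form: the arithmetic the deleted `eth()` helper performed, and the replacement call path (`vmState.com.blockReward()` is the call shown in the hunk; treating its result as the base miner reward in wei is an inference from the surrounding code):

```nim
import stint

# What the removed compile-time helper computed: whole ether expressed in wei.
func eth(n: int): UInt256 =
  n.u256 * pow(10.u256, 18)

doAssert eth(5) == 5_000_000_000_000_000_000'u64.u256   # 5 ETH in wei

# After this change the same figure comes from the common descriptor instead:
#   let blockReward = vmState.com.blockReward()
```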
@ -10,12 +10,11 @@
|
|||
|
||||
import
|
||||
std/[strformat],
|
||||
../../chain_config,
|
||||
../../common/common,
|
||||
../../db/accounts_cache,
|
||||
../../forks,
|
||||
../../vm_state,
|
||||
../../vm_types,
|
||||
eth/[common, bloom]
|
||||
eth/[bloom]
|
||||
|
||||
type
|
||||
ExecutorError* = object of CatchableError
|
||||
|
@ -32,7 +31,6 @@ type
|
|||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# TODO: move these three receipt procs below somewhere else more appropriate
|
||||
func logsBloom(logs: openArray[Log]): LogsBloom =
|
||||
for log in logs:
|
||||
result.incl log.address
|
||||
|
@ -60,27 +58,16 @@ func createBloom*(receipts: openArray[Receipt]): Bloom =
|
|||
bloom.value = bloom.value or logsBloom(rec.logs).value
|
||||
result = bloom.value.toByteArrayBE
|
||||
|
||||
proc getForkUnsafe*(vmState: BaseVMState): Fork
|
||||
{.gcsafe, raises: [Exception].} =
|
||||
## Shortcut for configured fork, deliberately not naming it toFork(). This
|
||||
## function may throw an `Exception` and must be wrapped.
|
||||
vmState.chainDB.config.toFork(vmState.blockNumber)
|
||||
|
||||
proc makeReceipt*(vmState: BaseVMState; txType: TxType): Receipt
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
|
||||
proc getFork(vmState: BaseVMState): Fork
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
safeExecutor("getFork"):
|
||||
result = vmState.getForkUnsafe
|
||||
|
||||
var rec: Receipt
|
||||
if vmState.getFork < FkByzantium:
|
||||
rec.isHash = true
|
||||
rec.hash = vmState.stateDB.rootHash
|
||||
else:
|
||||
if vmState.com.forkGTE(Byzantium):
|
||||
rec.isHash = false
|
||||
rec.status = vmState.status
|
||||
else:
|
||||
rec.isHash = true
|
||||
rec.hash = vmState.stateDB.rootHash
|
||||
|
||||
rec.receiptType = txType
|
||||
rec.cumulativeGasUsed = vmState.cumulativeGasUsed
|
|
@ -9,10 +9,11 @@
|
|||
# according to those terms.
|
||||
|
||||
import
|
||||
../../common/common,
|
||||
../../constants,
|
||||
../../db/[db_chain, accounts_cache],
|
||||
../../db/accounts_cache,
|
||||
../../transaction,
|
||||
../../utils,
|
||||
../../utils/utils,
|
||||
../../vm_state,
|
||||
../../vm_types,
|
||||
../clique,
|
||||
|
@ -21,7 +22,6 @@ import
|
|||
./executor_helpers,
|
||||
./process_transaction,
|
||||
chronicles,
|
||||
eth/[common, trie/db],
|
||||
stew/results
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
@ -34,8 +34,8 @@ proc procBlkPreamble(vmState: BaseVMState;
|
|||
header: BlockHeader; body: BlockBody): bool
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
|
||||
if vmState.chainDB.config.daoForkSupport and
|
||||
vmState.chainDB.config.daoForkBlock == header.blockNumber:
|
||||
if vmState.com.daoForkSupport and
|
||||
vmState.com.daoForkBlock.get == header.blockNumber:
|
||||
vmState.mutateStateDB:
|
||||
db.applyDAOHardFork()
|
||||
|
||||
|
@ -73,14 +73,13 @@ proc procBlkPreamble(vmState: BaseVMState;
|
|||
return false
|
||||
|
||||
if header.ommersHash != EMPTY_UNCLE_HASH:
|
||||
let h = vmState.chainDB.persistUncles(body.uncles)
|
||||
let h = vmState.com.db.persistUncles(body.uncles)
|
||||
if h != header.ommersHash:
|
||||
debug "Uncle hash mismatch"
|
||||
return false
|
||||
|
||||
true
|
||||
|
||||
|
||||
proc procBlkEpilogue(vmState: BaseVMState;
|
||||
header: BlockHeader; body: BlockBody): bool
|
||||
{.gcsafe, raises: [Defect,RlpError].} =
|
||||
|
@ -96,7 +95,7 @@ proc procBlkEpilogue(vmState: BaseVMState;
|
|||
blockNumber = header.blockNumber,
|
||||
expected = header.stateRoot,
|
||||
actual = stateDb.rootHash,
|
||||
arrivedFrom = vmState.chainDB.getCanonicalHead().stateRoot
|
||||
arrivedFrom = vmState.com.db.getCanonicalHead().stateRoot
|
||||
return false
|
||||
|
||||
let bloom = createBloom(vmState.receipts)
|
||||
|
@ -126,19 +125,20 @@ proc processBlockNotPoA*(
|
|||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## Processes `(header,body)` pair for a non-PoA network, only. This function
|
||||
## will fail when applied to a PoA network like `Goerli`.
|
||||
if vmState.chainDB.config.poaEngine:
|
||||
if vmState.com.consensus == ConsensusType.POA:
|
||||
# PoA consensus engine unsupported, see the other version of
|
||||
# processBlock() below
|
||||
debug "Unsupported PoA request"
|
||||
return ValidationResult.Error
|
||||
|
||||
var dbTx = vmState.chainDB.db.beginTransaction()
|
||||
var dbTx = vmState.com.db.db.beginTransaction()
|
||||
defer: dbTx.dispose()
|
||||
|
||||
if not vmState.procBlkPreamble(header, body):
|
||||
return ValidationResult.Error
|
||||
|
||||
if not vmState.ttdReached: # EIP-3675: no reward for miner
|
||||
# EIP-3675: no reward for miner in POA/POS
|
||||
if vmState.com.consensus == ConsensusType.POW:
|
||||
vmState.calculateReward(header, body)
|
||||
|
||||
if not vmState.procBlkEpilogue(header, body):
|
||||
|
@ -177,16 +177,14 @@ proc processBlock*(
|
|||
# debug "PoA update failed"
|
||||
# return ValidationResult.Error
|
||||
|
||||
var dbTx = vmState.chainDB.db.beginTransaction()
|
||||
var dbTx = vmState.com.db.db.beginTransaction()
|
||||
defer: dbTx.dispose()
|
||||
|
||||
if not vmState.procBlkPreamble(header, body):
|
||||
return ValidationResult.Error
|
||||
|
||||
let disableReward = vmState.chainDB.config.poaEngine or
|
||||
vmState.ttdReached # EIP-3675: no reward for miner
|
||||
|
||||
if not disableReward:
|
||||
# EIP-3675: no reward for miner in POA/POS
|
||||
if vmState.com.consensus == ConsensusType.POW:
|
||||
vmState.calculateReward(header, body)
|
||||
|
||||
if not vmState.procBlkEpilogue(header, body):
|
|
@ -10,8 +10,8 @@
|
|||
|
||||
import
|
||||
std/[sets],
|
||||
../../common/common,
|
||||
../../db/accounts_cache,
|
||||
../../forks,
|
||||
../../transaction/call_evm,
|
||||
../../transaction,
|
||||
../../vm_state,
|
||||
|
@ -19,7 +19,6 @@ import
|
|||
../validate,
|
||||
./executor_helpers,
|
||||
chronicles,
|
||||
eth/common,
|
||||
stew/results
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
@ -28,7 +27,7 @@ import
|
|||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc eip1559BaseFee(header: BlockHeader; fork: Fork): UInt256 =
|
||||
proc eip1559BaseFee(header: BlockHeader; fork: EVMFork): UInt256 =
|
||||
## Actually, `baseFee` should be 0 for pre-London headers already. But this
|
||||
## function just plays safe. In particular, the `test_general_state_json.nim`
|
||||
## module modifies this block header `baseFee` field unconditionally :(.
|
||||
|
@ -40,7 +39,7 @@ proc processTransactionImpl(
|
|||
tx: Transaction; ## Transaction to validate
|
||||
sender: EthAddress; ## tx.getSender or tx.ecRecover
|
||||
header: BlockHeader; ## Header for the block containing the current tx
|
||||
fork: Fork): Result[GasInt,void]
|
||||
fork: EVMFork): Result[GasInt,void]
|
||||
# wildcard exception, wrapped below
|
||||
{.gcsafe, raises: [Exception].} =
|
||||
## Modelled after `https://eips.ethereum.org/EIPS/eip-1559#specification`_
|
||||
|
@ -115,7 +114,7 @@ proc processTransaction*(
|
|||
tx: Transaction; ## Transaction to validate
|
||||
sender: EthAddress; ## tx.getSender or tx.ecRecover
|
||||
header: BlockHeader; ## Header for the block containing the current tx
|
||||
fork: Fork): Result[GasInt,void]
|
||||
fork: EVMFork): Result[GasInt,void]
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## Process the transaction, write the results to accounts db. The function
|
||||
## returns the amount of gas burned if executed.
|
||||
|
@ -130,9 +129,7 @@ proc processTransaction*(
|
|||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## Variant of `processTransaction()` with `*fork* derived
|
||||
## from the `vmState` argument.
|
||||
var fork: Fork
|
||||
safeExecutor("processTransaction"):
|
||||
fork = vmState.getForkUnsafe
|
||||
let fork = vmState.com.toEVMFork(header.blockNumber)
|
||||
vmState.processTransaction(tx, sender, header, fork)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
|
@ -9,12 +9,10 @@
|
|||
# according to those terms.
|
||||
|
||||
import
|
||||
../common/common,
|
||||
std/strformat,
|
||||
stew/results,
|
||||
eth/[common, eip1559],
|
||||
../db/db_chain,
|
||||
../constants,
|
||||
../chain_config
|
||||
eth/[eip1559]
|
||||
|
||||
export
|
||||
eip1559
|
||||
|
@ -42,10 +40,10 @@ proc validateGasLimit(header: BlockHeader; limit: GasInt): Result[void, string]
|
|||
return err("invalid gas limit below 5000")
|
||||
ok()
|
||||
|
||||
proc validateGasLimit(c: BaseChainDB; header: BlockHeader): Result[void, string]
|
||||
proc validateGasLimit(com: CommonRef; header: BlockHeader): Result[void, string]
|
||||
{.raises: [Defect].} =
|
||||
let parent = try:
|
||||
c.getBlockHeader(header.parentHash)
|
||||
com.db.getBlockHeader(header.parentHash)
|
||||
except CatchableError:
|
||||
return err "Parent block not in database"
|
||||
header.validateGasLimit(parent.gasLimit)
|
||||
|
@ -55,22 +53,22 @@ proc validateGasLimit(c: BaseChainDB; header: BlockHeader): Result[void, string]
|
|||
# ------------------------------------------------------------------------------
|
||||
|
||||
# consensus/misc/eip1559.go(55): func CalcBaseFee(config [..]
|
||||
proc calcEip1599BaseFee*(c: ChainConfig; parent: BlockHeader): UInt256 =
|
||||
proc calcEip1599BaseFee*(com: CommonRef; parent: BlockHeader): UInt256 =
|
||||
## calculates the basefee of the header.
|
||||
|
||||
# If the current block is the first EIP-1559 block, return the
|
||||
# initial base fee.
|
||||
if c.isLondon(parent.blockNumber):
|
||||
if com.isLondon(parent.blockNumber):
|
||||
eip1559.calcEip1599BaseFee(parent.gasLimit, parent.gasUsed, parent.baseFee)
|
||||
else:
|
||||
EIP1559_INITIAL_BASE_FEE
|
||||
|
||||
# consensus/misc/eip1559.go(32): func VerifyEip1559Header(config [..]
|
||||
proc verifyEip1559Header(c: ChainConfig;
|
||||
proc verifyEip1559Header(com: CommonRef;
|
||||
parent, header: BlockHeader): Result[void, string]
|
||||
{.raises: [Defect].} =
|
||||
## Verify that the gas limit remains within allowed bounds
|
||||
let limit = if c.isLondon(parent.blockNumber):
|
||||
let limit = if com.isLondon(parent.blockNumber):
|
||||
parent.gasLimit
|
||||
else:
|
||||
parent.gasLimit * EIP1559_ELASTICITY_MULTIPLIER
|
||||
|
@ -84,7 +82,7 @@ proc verifyEip1559Header(c: ChainConfig;
|
|||
return err("Post EIP-1559 header expected to have base fee")
|
||||
|
||||
# Verify the baseFee is correct based on the parent header.
|
||||
var expectedBaseFee = c.calcEip1599BaseFee(parent)
|
||||
var expectedBaseFee = com.calcEip1599BaseFee(parent)
|
||||
if headerBaseFee != expectedBaseFee:
|
||||
try:
|
||||
return err(&"invalid baseFee: have {expectedBaseFee}, "&
|
||||
|
@ -97,20 +95,20 @@ proc verifyEip1559Header(c: ChainConfig;
|
|||
|
||||
return ok()
|
||||
|
||||
proc validateGasLimitOrBaseFee*(c: BaseChainDB;
|
||||
proc validateGasLimitOrBaseFee*(com: CommonRef;
|
||||
header, parent: BlockHeader): Result[void, string]
|
||||
{.gcsafe, raises: [Defect].} =
|
||||
|
||||
if not c.config.isLondon(header.blockNumber):
|
||||
if not com.isLondon(header.blockNumber):
|
||||
# Verify BaseFee not present before EIP-1559 fork.
|
||||
if not header.baseFee.isZero:
|
||||
return err("invalid baseFee before London fork: have " & $header.baseFee & ", want <0>")
|
||||
let rc = c.validateGasLimit(header)
|
||||
let rc = com.validateGasLimit(header)
|
||||
if rc.isErr:
|
||||
return rc
|
||||
else:
|
||||
let rc = c.config.verifyEip1559Header(parent = parent,
|
||||
header = header)
|
||||
let rc = com.verifyEip1559Header(parent = parent,
|
||||
header = header)
|
||||
if rc.isErr:
|
||||
return rc
|
||||
|
|
@ -14,7 +14,7 @@
|
|||
|
||||
import
|
||||
std/[options, strutils],
|
||||
../utils,
|
||||
../utils/utils,
|
||||
./pow/[pow_cache, pow_dataset],
|
||||
eth/[common, keys, p2p, rlp],
|
||||
ethash,
|
|
@ -1,6 +1,9 @@
|
|||
import
|
||||
times, eth/common, stint,
|
||||
../constants, ../chain_config
|
||||
times,
|
||||
../../common/common
|
||||
|
||||
export
|
||||
common
|
||||
|
||||
const
|
||||
ExpDiffPeriod = 100000.u256
|
||||
|
@ -168,21 +171,21 @@ template calcDifficultyGrayGlacier*(timeStamp: EthTime, parent: BlockHeader): Di
|
|||
## Offset the bomb a total of 11.4M blocks.
|
||||
makeDifficultyCalculator(11_400_000, timeStamp, parent)
|
||||
|
||||
func calcDifficulty*(c: ChainConfig, timeStamp: EthTime, parent: BlockHeader): DifficultyInt =
|
||||
let next = parent.blockNumber + bigOne
|
||||
if next >= c.grayGlacierBlock:
|
||||
func calcDifficulty*(com: CommonRef, timeStamp: EthTime, parent: BlockHeader): DifficultyInt =
|
||||
let next = com.toFork(parent.blockNumber + bigOne)
|
||||
if next >= GrayGlacier:
|
||||
result = calcDifficultyGrayGlacier(timeStamp, parent)
|
||||
elif next >= c.arrowGlacierBlock:
|
||||
elif next >= ArrowGlacier:
|
||||
result = calcDifficultyArrowGlacier(timeStamp, parent)
|
||||
elif next >= c.londonBlock:
|
||||
elif next >= London:
|
||||
result = calcDifficultyLondon(timeStamp, parent)
|
||||
elif next >= c.muirGlacierBlock:
|
||||
elif next >= MuirGlacier:
|
||||
result = calcDifficultyMuirGlacier(timeStamp, parent)
|
||||
elif next >= c.constantinopleBlock:
|
||||
elif next >= Constantinople:
|
||||
result = calcDifficultyConstantinople(timeStamp, parent)
|
||||
elif next >= c.byzantiumBlock:
|
||||
elif next >= Byzantium:
|
||||
result = calcDifficultyByzantium(timeStamp, parent)
|
||||
elif next >= c.homesteadBlock:
|
||||
elif next >= Homestead:
|
||||
result = calcDifficultyHomestead(timeStamp, parent)
|
||||
else:
|
||||
result = calcDifficultyFrontier(timeStamp, parent)
|
|
@ -7,10 +7,9 @@
|
|||
|
||||
|
||||
import
|
||||
strformat, times, options,
|
||||
eth/[common, rlp],
|
||||
./difficulty, ../constants,
|
||||
../chain_config
|
||||
std/[strformat, times],
|
||||
eth/[rlp],
|
||||
./difficulty
|
||||
|
||||
export BlockHeader
|
||||
|
||||
|
@ -55,7 +54,7 @@ func computeGasLimit*(parentGasUsed, parentGasLimit, gasFloor, gasCeil: GasInt):
|
|||
|
||||
return limit
|
||||
|
||||
proc generateHeaderFromParentHeader*(config: ChainConfig, parent: BlockHeader,
|
||||
proc generateHeaderFromParentHeader*(com: CommonRef, parent: BlockHeader,
|
||||
coinbase: EthAddress, timestamp: Option[EthTime],
|
||||
gasLimit: GasInt, extraData: Blob, baseFee: Option[UInt256]): BlockHeader =
|
||||
|
||||
|
@ -71,7 +70,7 @@ proc generateHeaderFromParentHeader*(config: ChainConfig, parent: BlockHeader,
|
|||
result = BlockHeader(
|
||||
timestamp: lcTimestamp,
|
||||
blockNumber: (parent.blockNumber + 1),
|
||||
difficulty: config.calcDifficulty(lcTimestamp, parent),
|
||||
difficulty: com.calcDifficulty(lcTimestamp, parent),
|
||||
gasLimit: gasLimit,
|
||||
stateRoot: parent.stateRoot,
|
||||
coinbase: coinbase,
|
|
@ -9,25 +9,25 @@
|
|||
# according to those terms.
|
||||
|
||||
import
|
||||
std/[times, tables, typetraits, options],
|
||||
std/[times, tables, typetraits],
|
||||
pkg/[chronos,
|
||||
stew/results,
|
||||
chronicles,
|
||||
eth/common,
|
||||
eth/keys,
|
||||
eth/rlp],
|
||||
"."/[config,
|
||||
db/db_chain,
|
||||
p2p/chain,
|
||||
constants,
|
||||
utils/header],
|
||||
"."/p2p/clique/[clique_defs,
|
||||
".."/[config,
|
||||
constants],
|
||||
"."/[
|
||||
chain,
|
||||
tx_pool,
|
||||
validate],
|
||||
"."/clique/[clique_defs,
|
||||
clique_desc,
|
||||
clique_cfg,
|
||||
clique_sealer],
|
||||
./p2p/[gaslimit, validate],
|
||||
"."/[chain_config, utils, context],
|
||||
"."/utils/tx_pool
|
||||
../utils/utils,
|
||||
../common/[common, context]
|
||||
|
||||
|
||||
from web3/ethtypes as web3types import nil
|
||||
from web3/engine_api_types import PayloadAttributesV1, ExecutionPayloadV1
|
||||
|
@ -47,12 +47,12 @@ type
|
|||
SealingEngineObj = object of RootObj
|
||||
state: EngineState
|
||||
engineLoop: Future[void]
|
||||
chain*: Chain
|
||||
chain*: ChainRef
|
||||
ctx: EthContext
|
||||
signer: EthAddress
|
||||
txPool: TxPoolRef
|
||||
|
||||
proc validateSealer*(conf: NimbusConf, ctx: EthContext, chain: Chain): Result[void, string] =
|
||||
proc validateSealer*(conf: NimbusConf, ctx: EthContext, chain: ChainRef): Result[void, string] =
|
||||
if conf.engineSigner == ZERO_ADDRESS:
|
||||
return err("signer address should not zero, use --engine-signer to set signer address")
|
||||
|
||||
|
@ -64,8 +64,8 @@ proc validateSealer*(conf: NimbusConf, ctx: EthContext, chain: Chain): Result[vo
|
|||
if not acc.unlocked:
|
||||
return err("signer account not unlocked, please unlock it first via rpc/password file")
|
||||
|
||||
let chainConf = chain.db.config
|
||||
if not chainConf.poaEngine:
|
||||
let com = chain.com
|
||||
if com.consensus != ConsensusType.POA:
|
||||
return err("currently only PoA engine is supported")
|
||||
|
||||
ok()
|
||||
|
@ -83,7 +83,7 @@ proc prepareBlock(engine: SealingEngineRef,
|
|||
|
||||
var blk = engine.txPool.ethBlock()
|
||||
|
||||
if engine.chain.isBlockAfterTtd(blk.header):
|
||||
if engine.chain.com.isBlockAfterTtd(blk.header):
|
||||
blk.header.difficulty = DifficultyInt.zero
|
||||
blk.header.mixDigest = prevRandao
|
||||
blk.header.nonce = default(BlockNonce)
|
||||
|
@ -251,7 +251,7 @@ proc generateExecutionPayload*(engine: SealingEngineRef,
|
|||
return ok()
|
||||
|
||||
proc new*(_: type SealingEngineRef,
|
||||
chain: Chain,
|
||||
chain: ChainRef,
|
||||
ctx: EthContext,
|
||||
signer: EthAddress,
|
||||
txPool: TxPoolRef,
|
|
@ -228,7 +228,7 @@
|
|||
## A piece of code using this pool architecture could look like as follows:
|
||||
## ::
|
||||
## # see also unit test examples, e.g. "Block packer tests"
|
||||
## var db: BaseChainDB # to be initialised
|
||||
## var db: ChainDBRef # to be initialised
|
||||
## var txs: seq[Transaction] # to be initialised
|
||||
##
|
||||
## proc mineThatBlock(blk: EthBlock) # external function
|
||||
|
@ -424,7 +424,6 @@
|
|||
|
||||
import
|
||||
std/[sequtils, tables],
|
||||
../db/db_chain,
|
||||
./tx_pool/[tx_chain, tx_desc, tx_info, tx_item],
|
||||
./tx_pool/tx_tabs,
|
||||
./tx_pool/tx_tasks/[
|
||||
|
@ -435,9 +434,9 @@ import
|
|||
tx_packer,
|
||||
tx_recover],
|
||||
chronicles,
|
||||
eth/[common, keys],
|
||||
eth/keys,
|
||||
stew/[keyed_queue, results],
|
||||
stint
|
||||
../common/common
|
||||
|
||||
export
|
||||
TxItemRef,
|
||||
|
@ -504,12 +503,12 @@ proc setHead(xp: TxPoolRef; val: BlockHeader)
|
|||
# Public constructor/destructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc new*(T: type TxPoolRef; db: BaseChainDB; miner: EthAddress): T
|
||||
proc new*(T: type TxPoolRef; com: CommonRef; miner: EthAddress): T
|
||||
{.gcsafe,raises: [Defect,CatchableError].} =
|
||||
## Constructor, returns a new tx-pool descriptor. The `miner` argument is
|
||||
## the fee beneficiary for informational purposes only.
|
||||
new result
|
||||
result.init(db,miner)
|
||||
result.init(com, miner)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, task manager, pool actions serialiser
|
||||
|
@ -616,6 +615,15 @@ proc ethBlock*(xp: TxPoolRef): EthBlock
|
|||
##
|
||||
## Note that this getter runs *ad hoc* all the txs through the VM in
|
||||
## order to build the block.
|
||||
|
||||
# do hardfork transition
|
||||
# this transition will leak into outside txpool
|
||||
# but it's ok, other subsystem will likely gladly accept the
|
||||
# the transition, until proven otherwise
|
||||
let parentHash = xp.chain.vmState.parent.blockHash
|
||||
let td = some(xp.chain.com.db.getScore(parentHash))
|
||||
xp.chain.com.hardForkTransition(xp.chain.vmState.parent.blockNumber+1, td)
|
||||
|
||||
xp.packerVmExec # updates vmState
|
||||
result.header = xp.chain.getHeader # uses updated vmState
|
||||
for (_,nonceList) in xp.txDB.packingOrderAccounts(txItemPacked):
|
|
@ -14,18 +14,16 @@
|
|||
|
||||
import
|
||||
std/[sets, times],
|
||||
../../chain_config,
|
||||
../../common/common,
|
||||
../../constants,
|
||||
../../db/[accounts_cache, db_chain],
|
||||
../../forks,
|
||||
../../p2p/executor,
|
||||
../../utils,
|
||||
../../utils/difficulty,
|
||||
../../db/accounts_cache,
|
||||
../../core/executor,
|
||||
../../utils/utils,
|
||||
../../core/pow/difficulty,
|
||||
../../vm_state,
|
||||
../../vm_types,
|
||||
./tx_chain/[tx_basefee, tx_gaslimits],
|
||||
./tx_item,
|
||||
eth/[common]
|
||||
./tx_item
|
||||
|
||||
export
|
||||
TxChainGasLimits,
|
||||
|
@ -59,7 +57,7 @@ type
|
|||
## State cache of the transaction environment for creating a new\
|
||||
## block. This state is typically synchrionised with the canonical\
|
||||
## block chain head when updated.
|
||||
db: BaseChainDB ## Block chain database
|
||||
com: CommonRef ## Block chain config
|
||||
miner: EthAddress ## Address of fee beneficiary
|
||||
lhwm: TxChainGasLimitsPc ## Hwm/lwm gas limit percentage
|
||||
|
||||
|
@ -93,7 +91,7 @@ proc resetTxEnv(dh: TxChainRef; parent: BlockHeader; fee: Option[UInt256])
|
|||
prevRandao= dh.prevRandao,
|
||||
difficulty= dh.calcDifficulty(timestamp, parent),
|
||||
miner = dh.miner,
|
||||
chainDB = dh.db)
|
||||
com = dh.com)
|
||||
|
||||
dh.txEnv.txRoot = EMPTY_ROOT_HASH
|
||||
dh.txEnv.stateRoot = dh.txEnv.vmState.parent.stateRoot
|
||||
|
@ -102,28 +100,29 @@ proc update(dh: TxChainRef; parent: BlockHeader)
|
|||
{.gcsafe,raises: [Defect,CatchableError].} =
|
||||
|
||||
let
|
||||
acc = AccountsCache.init(dh.db.db, parent.stateRoot, dh.db.pruneTrie)
|
||||
fee = if FkLondon <= dh.db.config.toFork(parent.blockNumber + 1):
|
||||
some(dh.db.config.baseFeeGet(parent).uint64.u256)
|
||||
db = dh.com.db
|
||||
acc = AccountsCache.init(db.db, parent.stateRoot, dh.com.pruneTrie)
|
||||
fee = if dh.com.isLondon(parent.blockNumber + 1):
|
||||
some(dh.com.baseFeeGet(parent).uint64.u256)
|
||||
else:
|
||||
UInt256.none()
|
||||
|
||||
# Keep a separate accounts descriptor positioned at the sync point
|
||||
dh.roAcc = ReadOnlyStateDB(acc)
|
||||
|
||||
dh.limits = dh.db.gasLimitsGet(parent, dh.lhwm)
|
||||
dh.limits = dh.com.gasLimitsGet(parent, dh.lhwm)
|
||||
dh.resetTxEnv(parent, fee)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, constructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc new*(T: type TxChainRef; db: BaseChainDB; miner: EthAddress): T
|
||||
proc new*(T: type TxChainRef; com: CommonRef; miner: EthAddress): T
|
||||
{.gcsafe,raises: [Defect,CatchableError].} =
|
||||
## Constructor
|
||||
new result
|
||||
|
||||
result.db = db
|
||||
result.com = com
|
||||
result.miner = miner
|
||||
result.lhwm.lwmTrg = TRG_THRESHOLD_PER_CENT
|
||||
result.lhwm.hwmMax = MAX_THRESHOLD_PER_CENT
|
||||
|
@ -132,10 +131,10 @@ proc new*(T: type TxChainRef; db: BaseChainDB; miner: EthAddress): T
|
|||
result.calcDifficulty = proc(timeStamp: EthTime, parent: BlockHeader):
|
||||
DifficultyInt {.gcsafe, raises:[].} =
|
||||
try:
|
||||
db.config.calcDifficulty(timestamp, parent)
|
||||
com.calcDifficulty(timestamp, parent)
|
||||
except:
|
||||
0.u256
|
||||
result.update(db.getCanonicalHead)
|
||||
result.update(com.db.getCanonicalHead)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
|
@ -193,13 +192,9 @@ proc clearAccounts*(dh: TxChainRef)
|
|||
# Public functions, getters
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc db*(dh: TxChainRef): BaseChainDB =
|
||||
proc com*(dh: TxChainRef): CommonRef =
|
||||
## Getter
|
||||
dh.db
|
||||
|
||||
proc config*(dh: TxChainRef): ChainConfig =
|
||||
## Getter, shortcut for `dh.db.config`
|
||||
dh.db.config
|
||||
dh.com
|
||||
|
||||
proc head*(dh: TxChainRef): BlockHeader =
|
||||
## Getter
|
||||
|
@ -229,9 +224,9 @@ proc baseFee*(dh: TxChainRef): GasPrice =
|
|||
else:
|
||||
0.GasPrice
|
||||
|
||||
proc nextFork*(dh: TxChainRef): Fork =
|
||||
proc nextFork*(dh: TxChainRef): EVMFork =
|
||||
## Getter, fork of next block
|
||||
dh.db.config.toFork(dh.txEnv.vmState.blockNumber)
|
||||
dh.com.toEVMFork(dh.txEnv.vmState.blockNumber)
|
||||
|
||||
proc gasUsed*(dh: TxChainRef): GasInt =
|
||||
## Getter, accumulated gas burned for collected blocks
|
||||
|
@ -270,7 +265,7 @@ proc `baseFee=`*(dh: TxChainRef; val: GasPrice) =
|
|||
## Setter, temorarily overwrites parameter until next `head=` update. This
|
||||
## function would be called in exceptional cases only as this parameter is
|
||||
## determined by the `head=` update.
|
||||
if 0 < val or FkLondon <= dh.db.config.toFork(dh.txEnv.vmState.blockNumber):
|
||||
if 0 < val or dh.com.isLondon(dh.txEnv.vmState.blockNumber):
|
||||
dh.txEnv.vmState.fee = some(val.uint64.u256)
|
||||
else:
|
||||
dh.txEnv.vmState.fee = UInt256.none()
|
||||
|
@ -288,7 +283,7 @@ proc `lhwm=`*(dh: TxChainRef; val: TxChainGasLimitsPc) =
|
|||
if dh.lhwm != val:
|
||||
dh.lhwm = val
|
||||
let parent = dh.txEnv.vmState.parent
|
||||
dh.limits = dh.db.gasLimitsGet(parent, dh.limits.gasLimit, dh.lhwm)
|
||||
dh.limits = dh.com.gasLimitsGet(parent, dh.limits.gasLimit, dh.lhwm)
|
||||
dh.txEnv.vmState.gasLimit = if dh.maxMode: dh.limits.maxLimit
|
||||
else: dh.limits.trgLimit
|
||||
|
|
@ -13,11 +13,10 @@
|
|||
##
|
||||
|
||||
import
|
||||
../../../chain_config,
|
||||
../../../common/common,
|
||||
../../../constants,
|
||||
../../../forks,
|
||||
../tx_item,
|
||||
eth/[common, eip1559]
|
||||
eth/eip1559
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
|
@ -28,7 +27,7 @@ const
|
|||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc baseFeeGet*(config: ChainConfig; parent: BlockHeader): GasPrice =
|
||||
proc baseFeeGet*(com: CommonRef; parent: BlockHeader): GasPrice =
|
||||
## Calculates the `baseFee` of the head assuming this is the parent of a
|
||||
## new block header to generate. This function is derived from
|
||||
## `p2p/gaslimit.calcEip1599BaseFee()` which in turn has its origins on
|
||||
|
@ -36,8 +35,8 @@ proc baseFeeGet*(config: ChainConfig; parent: BlockHeader): GasPrice =
|
|||
|
||||
# Note that the baseFee is calculated for the next header
|
||||
let
|
||||
parentFork = config.toFork(parent.blockNumber)
|
||||
nextFork = config.toFork(parent.blockNumber + 1)
|
||||
parentFork = com.toEVMFork(parent.blockNumber)
|
||||
nextFork = com.toEVMFork(parent.blockNumber + 1)
|
||||
|
||||
if nextFork < FkLondon:
|
||||
return 0.GasPrice
|
|
@ -14,11 +14,10 @@
|
|||
|
||||
import
|
||||
std/[math],
|
||||
../../../chain_config,
|
||||
../../../db/db_chain,
|
||||
../../../common/common,
|
||||
../../../constants,
|
||||
../../../utils/header,
|
||||
eth/[common, eip1559]
|
||||
../../pow/header,
|
||||
eth/[eip1559]
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
|
@ -91,12 +90,12 @@ proc setPreLondonLimits(gl: var TxChainGasLimits) =
|
|||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc gasLimitsGet*(db: BaseChainDB; parent: BlockHeader; parentLimit: GasInt;
|
||||
proc gasLimitsGet*(com: CommonRef; parent: BlockHeader; parentLimit: GasInt;
|
||||
pc: TxChainGasLimitsPc): TxChainGasLimits =
|
||||
## Calculate gas limits for the next block header.
|
||||
result.gasLimit = parentLimit
|
||||
|
||||
if isLondon(db.config, parent.blockNumber+1):
|
||||
if com.isLondon(parent.blockNumber+1):
|
||||
result.setPostLondonLimits
|
||||
else:
|
||||
result.setPreLondonLimits
|
||||
|
@ -109,9 +108,9 @@ proc gasLimitsGet*(db: BaseChainDB; parent: BlockHeader; parentLimit: GasInt;
|
|||
result.trgLimit, (result.maxLimit * pc.hwmMax + 50) div 100)
|
||||
|
||||
# override trgLimit, see https://github.com/status-im/nimbus-eth1/issues/1032
|
||||
if isLondon(db.config, parent.blockNumber+1):
|
||||
if com.isLondon(parent.blockNumber+1):
|
||||
var parentGasLimit = parent.gasLimit
|
||||
if not isLondon(db.config, parent.blockNumber):
|
||||
if not com.isLondon(parent.blockNumber):
|
||||
# Bump by 2x
|
||||
parentGasLimit = parent.gasLimit * EIP1559_ELASTICITY_MULTIPLIER
|
||||
result.trgLimit = calcGasLimit1559(parentGasLimit, desiredLimit = pc.gasCeil)
|
||||
|
@ -122,10 +121,10 @@ proc gasLimitsGet*(db: BaseChainDB; parent: BlockHeader; parentLimit: GasInt;
|
|||
gasFloor = pc.gasFloor,
|
||||
gasCeil = pc.gasCeil)
|
||||
|
||||
proc gasLimitsGet*(db: BaseChainDB; parent: BlockHeader;
|
||||
proc gasLimitsGet*(com: CommonRef; parent: BlockHeader;
|
||||
pc: TxChainGasLimitsPc): TxChainGasLimits =
|
||||
## Variant of `gasLimitsGet()`
|
||||
db.gasLimitsGet(parent, parent.gasLimit, pc)
|
||||
com.gasLimitsGet(parent, parent.gasLimit, pc)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
|
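For reference, the stepping behind gas-limit targeting is bounded by the protocol's GasLimitBoundDivisor (1024): each block may move the limit only about a thousandth of its current value toward the requested ceiling. A rough stand-alone sketch of that idea (not the repo's calcGasLimit1559, and uint64 stands in for GasInt):

proc nextGasLimit(parentGasLimit, desiredLimit: uint64): uint64 =
  ## Move by at most parent div 1024 - 1 per block toward the desired ceiling.
  let maxDelta = parentGasLimit div 1024 - 1
  if parentGasLimit < desiredLimit:
    result = min(desiredLimit, parentGasLimit + maxDelta)
  else:
    result = max(desiredLimit, parentGasLimit - maxDelta)

when isMainModule:
  echo nextGasLimit(30_000_000, 36_000_000)   # 30029295, so convergence takes many blocks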
@ -14,13 +14,13 @@
|
|||
|
||||
import
|
||||
std/[times],
|
||||
../../db/db_chain,
|
||||
../../common/common,
|
||||
./tx_chain,
|
||||
./tx_info,
|
||||
./tx_item,
|
||||
./tx_tabs,
|
||||
./tx_tabs/tx_sender, # for verify()
|
||||
eth/[common, keys]
|
||||
eth/keys
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
|
@ -126,13 +126,13 @@ const
|
|||
# Public functions, constructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc init*(xp: TxPoolRef; db: BaseChainDB; miner: EthAddress)
|
||||
proc init*(xp: TxPoolRef; com: CommonRef; miner: EthAddress)
|
||||
{.gcsafe,raises: [Defect,CatchableError].} =
|
||||
## Constructor, returns new tx-pool descriptor. The `miner` argument is
|
||||
## the fee beneficiary for informational purposes only.
|
||||
xp.startDate = getTime().utc.toTime
|
||||
|
||||
xp.chain = TxChainRef.new(db, miner)
|
||||
xp.chain = TxChainRef.new(com, miner)
|
||||
xp.txDB = TxTabsRef.new
|
||||
|
||||
xp.lifeTime = txItemLifeTime
|
|
@ -14,8 +14,7 @@
|
|||
|
||||
import
|
||||
std/[hashes, sequtils, strutils, times],
|
||||
../ec_recover,
|
||||
../utils_defs,
|
||||
../../utils/[ec_recover, utils_defs],
|
||||
./tx_info,
|
||||
eth/[common, keys],
|
||||
stew/results
|
|
@ -13,17 +13,17 @@
|
|||
##
|
||||
|
||||
import
|
||||
../../../forks,
|
||||
../../../p2p/validate,
|
||||
../../../common/common,
|
||||
../../../transaction,
|
||||
../../../vm_state,
|
||||
../../../vm_types,
|
||||
../../validate,
|
||||
../tx_chain,
|
||||
../tx_desc,
|
||||
../tx_item,
|
||||
../tx_tabs,
|
||||
chronicles,
|
||||
eth/[common, keys]
|
||||
eth/keys
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
|
@ -15,13 +15,13 @@
|
|||
|
||||
import
|
||||
std/[tables],
|
||||
../../../db/db_chain,
|
||||
../../../common/common,
|
||||
../tx_chain,
|
||||
../tx_desc,
|
||||
../tx_info,
|
||||
../tx_item,
|
||||
chronicles,
|
||||
eth/[common, keys],
|
||||
eth/keys,
|
||||
stew/keyed_queue
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
@ -48,12 +48,14 @@ logScope:
|
|||
# use it as a stack/lifo as the ordering is reversed
|
||||
proc insert(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256)
|
||||
{.gcsafe,raises: [Defect,CatchableError].} =
|
||||
for tx in xp.chain.db.getBlockBody(blockHash).transactions:
|
||||
let db = xp.chain.com.db
|
||||
for tx in db.getBlockBody(blockHash).transactions:
|
||||
kq.addTxs[tx.itemID] = tx
|
||||
|
||||
proc remove(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256)
|
||||
{.gcsafe,raises: [Defect,CatchableError].} =
|
||||
for tx in xp.chain.db.getBlockBody(blockHash).transactions:
|
||||
let db = xp.chain.com.db
|
||||
for tx in db.getBlockBody(blockHash).transactions:
|
||||
kq.remTxs[tx.itemID] = true
|
||||
|
||||
proc new(T: type TxHeadDiffRef): T =
|
||||
|
@ -118,16 +120,17 @@ proc headDiff*(xp: TxPoolRef;
|
|||
curHead = xp.chain.head
|
||||
curHash = curHead.blockHash
|
||||
newHash = newHead.blockHash
|
||||
db = xp.chain.com.db
|
||||
|
||||
var ignHeader: BlockHeader
|
||||
if not xp.chain.db.getBlockHeader(newHash, ignHeader):
|
||||
if not db.getBlockHeader(newHash, ignHeader):
|
||||
# sanity check
|
||||
warn "Tx-pool head forward for non-existing header",
|
||||
newHead = newHash,
|
||||
newNumber = newHead.blockNumber
|
||||
return err(txInfoErrForwardHeadMissing)
|
||||
|
||||
if not xp.chain.db.getBlockHeader(curHash, ignHeader):
|
||||
if not db.getBlockHeader(curHash, ignHeader):
|
||||
# This can happen if a `setHead()` is performed, where we have discarded
|
||||
# the old head from the chain.
|
||||
if curHead.blockNumber <= newHead.blockNumber:
|
||||
|
@ -175,7 +178,7 @@ proc headDiff*(xp: TxPoolRef;
|
|||
tmpHead = curBranchHead # cache value for error logging
|
||||
tmpHash = curBranchHash
|
||||
curBranchHash = curBranchHead.parentHash # decrement block number
|
||||
if not xp.chain.db.getBlockHeader(curBranchHash, curBranchHead):
|
||||
if not db.getBlockHeader(curBranchHash, curBranchHead):
|
||||
error "Unrooted old chain seen by tx-pool",
|
||||
curBranchHead = tmpHash,
|
||||
curBranchNumber = tmpHead.blockNumber
|
||||
|
@ -203,7 +206,7 @@ proc headDiff*(xp: TxPoolRef;
|
|||
tmpHead = newBranchHead # cache value for error logging
|
||||
tmpHash = newBranchHash
|
||||
newBranchHash = newBranchHead.parentHash # decrement block number
|
||||
if not xp.chain.db.getBlockHeader(newBranchHash, newBranchHead):
|
||||
if not db.getBlockHeader(newBranchHash, newBranchHead):
|
||||
error "Unrooted new chain seen by tx-pool",
|
||||
newBranchHead = tmpHash,
|
||||
newBranchNumber = tmpHead.blockNumber
|
||||
|
@ -219,7 +222,7 @@ proc headDiff*(xp: TxPoolRef;
|
|||
tmpHead = curBranchHead # cache value for error logging
|
||||
tmpHash = curBranchHash
|
||||
curBranchHash = curBranchHead.parentHash
|
||||
if not xp.chain.db.getBlockHeader(curBranchHash, curBranchHead):
|
||||
if not db.getBlockHeader(curBranchHash, curBranchHead):
|
||||
error "Unrooted old chain seen by tx-pool",
|
||||
curBranchHead = tmpHash,
|
||||
curBranchNumber = tmpHead.blockNumber
|
||||
|
@ -230,7 +233,7 @@ proc headDiff*(xp: TxPoolRef;
|
|||
tmpHead = newBranchHead # cache value for error logging
|
||||
tmpHash = newBranchHash
|
||||
newBranchHash = newBranchHead.parentHash
|
||||
if not xp.chain.db.getBlockHeader(newBranchHash, newBranchHead):
|
||||
if not db.getBlockHeader(newBranchHash, newBranchHead):
|
||||
error "Unrooted new chain seen by tx-pool",
|
||||
newBranchHead = tmpHash,
|
||||
newBranchNumber = tmpHead.blockNumber
|
|
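All of the hunks above do the same thing with the re-homed db handle: walk a branch head backwards via parentHash until the old and new branches meet at a common ancestor. A toy, self-contained version of that walk-back, with a Table standing in for the block database (the names are made up for the example, not tx_head API):

import std/tables

type Header = object
  num: int
  hash, parent: string

proc commonAncestor(db: Table[string, Header]; a, b: string): string =
  var x = db[a]
  var y = db[b]
  while x.num > y.num: x = db[x.parent]   # level the longer branch first
  while y.num > x.num: y = db[y.parent]
  while x.hash != y.hash:                 # then step both back in lock-step
    x = db[x.parent]
    y = db[y.parent]
  result = x.hash

when isMainModule:
  var db = initTable[string, Header]()
  for t in [(0, "g", ""), (1, "a1", "g"), (2, "a2", "a1"),
            (1, "b1", "g"), (2, "b2", "b1"), (3, "b3", "b2")]:
    db[t[1]] = Header(num: t[0], hash: t[1], parent: t[2])
  echo commonAncestor(db, "a2", "b3")     # -> g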
@ -14,9 +14,9 @@
|
|||
|
||||
import
|
||||
std/[sets, tables],
|
||||
../../../db/[accounts_cache, db_chain],
|
||||
../../../forks,
|
||||
../../../p2p/[dao, executor, validate],
|
||||
../../../db/accounts_cache,
|
||||
../../../common/common,
|
||||
"../.."/[dao, executor, validate],
|
||||
../../../transaction/call_evm,
|
||||
../../../transaction,
|
||||
../../../vm_state,
|
||||
|
@ -29,7 +29,7 @@ import
|
|||
./tx_bucket,
|
||||
./tx_classify,
|
||||
chronicles,
|
||||
eth/[common, keys, rlp, trie, trie/db],
|
||||
eth/[keys, rlp, trie, trie/db],
|
||||
stew/[sorted_set]
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
@ -161,8 +161,8 @@ proc vmExecInit(xp: TxPoolRef): TxPackerStateRef
|
|||
|
||||
xp.chain.maxMode = (packItemsMaxGasLimit in xp.pFlags)
|
||||
|
||||
if xp.chain.config.daoForkSupport and
|
||||
xp.chain.config.daoForkBlock == xp.chain.head.blockNumber + 1:
|
||||
if xp.chain.com.daoForkSupport and
|
||||
xp.chain.com.daoForkBlock.get == xp.chain.head.blockNumber + 1:
|
||||
xp.chain.vmState.mutateStateDB:
|
||||
db.applyDAOHardFork()
|
||||
|
||||
|
@ -212,10 +212,9 @@ proc vmExecCommit(pst: TxPackerStateRef)
|
|||
let
|
||||
xp = pst.xp
|
||||
vmState = xp.chain.vmState
|
||||
disableReward = vmState.chainDB.config.poaEngine or
|
||||
vmState.ttdReached # EIP-3675: no reward for miner
|
||||
|
||||
if not disableReward:
|
||||
# EIP-3675: no reward for miner in POA/POS
|
||||
if vmState.com.consensus == ConsensusType.POW:
|
||||
let
|
||||
number = xp.chain.head.blockNumber + 1
|
||||
uncles: seq[BlockHeader] = @[] # no uncles yet
|
||||
|
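The change above collapses the old "PoA engine or TTD reached" test into a single consensus-type check. A minimal sketch of that gate, with a stand-in enum rather than the repo's ConsensusType:

type ConsensusKind = enum ckPoW, ckPoA, ckPoS

proc minerRewardDue(consensus: ConsensusKind): bool =
  # EIP-3675: only proof-of-work blocks still pay a block reward.
  consensus == ckPoW

when isMainModule:
  doAssert minerRewardDue(ckPoW)
  doAssert not minerRewardDue(ckPoS)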
@ -251,7 +250,8 @@ proc vmExecCommit(pst: TxPackerStateRef)
|
|||
proc packerVmExec*(xp: TxPoolRef) {.gcsafe,raises: [Defect,CatchableError].} =
|
||||
## Rebuild `packed` bucket by selection items from the `staged` bucket
|
||||
## after executing them in the VM.
|
||||
let dbTx = xp.chain.db.db.beginTransaction
|
||||
let db = xp.chain.com.db
|
||||
let dbTx = db.db.beginTransaction
|
||||
defer: dbTx.dispose()
|
||||
|
||||
var pst = xp.vmExecInit
|
|
@ -10,14 +10,14 @@
|
|||
|
||||
import
|
||||
std/[sequtils, sets, times],
|
||||
../constants,
|
||||
../db/[db_chain, accounts_cache],
|
||||
../transaction,
|
||||
../utils/[difficulty, header, pow],
|
||||
".."/[vm_state, vm_types, forks, errors],
|
||||
../db/accounts_cache,
|
||||
".."/[transaction, common/common],
|
||||
".."/[vm_state, vm_types, errors],
|
||||
"."/[dao, gaslimit, withdrawals],
|
||||
./pow/[difficulty, header],
|
||||
./pow,
|
||||
chronicles,
|
||||
eth/[common, rlp],
|
||||
eth/[rlp],
|
||||
nimcrypto/utils,
|
||||
stew/[objects, results, endians2]
|
||||
|
||||
|
@ -76,16 +76,17 @@ proc validateSeal(pow: PowRef; header: BlockHeader): Result[void,string] =
|
|||
|
||||
ok()
|
||||
|
||||
proc validateHeader(db: BaseChainDB; header, parentHeader: BlockHeader;
|
||||
proc validateHeader(com: CommonRef; header, parentHeader: BlockHeader;
|
||||
numTransactions: int; checkSealOK: bool;
|
||||
ttdReached: bool; pow: PowRef): Result[void,string] =
|
||||
pow: PowRef): Result[void,string] =
|
||||
|
||||
template inDAOExtraRange(blockNumber: BlockNumber): bool =
|
||||
# EIP-799
|
||||
# Blocks with block numbers in the range [1_920_000, 1_920_009]
|
||||
# MUST have DAOForkBlockExtra
|
||||
let DAOHigh = db.config.daoForkBlock + DAOForkExtraRange.u256
|
||||
db.config.daoForkBlock <= blockNumber and
|
||||
let daoForkBlock = com.daoForkBlock.get
|
||||
let DAOHigh = daoForkBlock + DAOForkExtraRange.u256
|
||||
daoForkBlock <= blockNumber and
|
||||
blockNumber < DAOHigh
|
||||
|
||||
if header.extraData.len > 32:
|
||||
|
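The rewritten check above is the DAO extra-data window: the ten blocks starting at the fork block must, on a daoForkSupport chain, carry the special extraData marker. A stand-alone sketch of just the window test, with plain integers in place of BlockNumber/UInt256:

const DAOForkExtraRange = 10

proc inDAOExtraRange(daoForkBlock, blockNumber: int): bool =
  daoForkBlock <= blockNumber and blockNumber < daoForkBlock + DAOForkExtraRange

when isMainModule:
  doAssert inDAOExtraRange(1_920_000, 1_920_000)
  doAssert inDAOExtraRange(1_920_000, 1_920_009)
  doAssert not inDAOExtraRange(1_920_000, 1_920_010)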
@ -103,11 +104,11 @@ proc validateHeader(db: BaseChainDB; header, parentHeader: BlockHeader;
|
|||
if header.timestamp.toUnix <= parentHeader.timestamp.toUnix:
|
||||
return err("timestamp must be strictly later than parent")
|
||||
|
||||
if db.config.daoForkSupport and inDAOExtraRange(header.blockNumber):
|
||||
if com.daoForkSupport and inDAOExtraRange(header.blockNumber):
|
||||
if header.extraData != daoForkBlockExtraData:
|
||||
return err("header extra data should be marked DAO")
|
||||
|
||||
if ttdReached:
|
||||
if com.consensus == ConsensusType.POS:
|
||||
# EIP-4399 and EIP-3675
|
||||
# no need to check mixDigest because EIP-4399 override this field
|
||||
# checking rule
|
||||
|
@ -121,14 +122,14 @@ proc validateHeader(db: BaseChainDB; header, parentHeader: BlockHeader;
|
|||
if header.ommersHash != EMPTY_UNCLE_HASH:
|
||||
return err("Invalid ommers hash in a post-merge block")
|
||||
else:
|
||||
let calcDiffc = db.config.calcDifficulty(header.timestamp, parentHeader)
|
||||
let calcDiffc = com.calcDifficulty(header.timestamp, parentHeader)
|
||||
if header.difficulty < calcDiffc:
|
||||
return err("provided header difficulty is too low")
|
||||
|
||||
if checkSealOK:
|
||||
return pow.validateSeal(header)
|
||||
|
||||
db.validateWithdrawals(header)
|
||||
com.validateWithdrawals(header)
|
||||
|
||||
func validateUncle(currBlock, uncle, uncleParent: BlockHeader):
|
||||
Result[void,string] =
|
||||
|
@ -147,7 +148,7 @@ func validateUncle(currBlock, uncle, uncleParent: BlockHeader):
|
|||
result = ok()
|
||||
|
||||
|
||||
proc validateUncles(chainDB: BaseChainDB; header: BlockHeader;
|
||||
proc validateUncles(com: CommonRef; header: BlockHeader;
|
||||
uncles: seq[BlockHeader]; checkSealOK: bool;
|
||||
pow: PowRef): Result[void,string] =
|
||||
let hasUncles = uncles.len > 0
|
||||
|
@ -171,6 +172,7 @@ proc validateUncles(chainDB: BaseChainDB; header: BlockHeader;
|
|||
else:
|
||||
uncleSet.incl uncleHash
|
||||
|
||||
let chainDB = com.db
|
||||
let recentAncestorHashes = try:
|
||||
chainDB.getAncestorsHashes(MAX_UNCLE_DEPTH + 1, header)
|
||||
except CatchableError as err:
|
||||
|
@ -225,7 +227,7 @@ proc validateUncles(chainDB: BaseChainDB; header: BlockHeader;
|
|||
if result.isErr:
|
||||
return
|
||||
|
||||
result = chainDB.validateGasLimitOrBaseFee(uncle, uncleParent)
|
||||
result = com.validateGasLimitOrBaseFee(uncle, uncleParent)
|
||||
if result.isErr:
|
||||
return
|
||||
|
||||
|
@ -241,7 +243,7 @@ proc validateTransaction*(
|
|||
sender: EthAddress; ## tx.getSender or tx.ecRecover
|
||||
maxLimit: GasInt; ## gasLimit from block header
|
||||
baseFee: UInt256; ## baseFee from block header
|
||||
fork: Fork): bool =
|
||||
fork: EVMFork): bool =
|
||||
let
|
||||
balance = roDB.getBalance(sender)
|
||||
nonce = roDB.getNonce(sender)
|
||||
|
@ -343,7 +345,7 @@ proc validateTransaction*(
|
|||
tx: Transaction; ## tx to validate
|
||||
sender: EthAddress; ## tx.getSender or tx.ecRecover
|
||||
header: BlockHeader; ## Header for the block containing the current tx
|
||||
fork: Fork): bool =
|
||||
fork: EVMFork): bool =
|
||||
## Variant of `validateTransaction()`
|
||||
let
|
||||
roDB = vmState.readOnlyStateDB
|
||||
|
@ -356,25 +358,25 @@ proc validateTransaction*(
|
|||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc validateHeaderAndKinship*(
|
||||
chainDB: BaseChainDB;
|
||||
com: CommonRef;
|
||||
header: BlockHeader;
|
||||
uncles: seq[BlockHeader];
|
||||
numTransactions: int;
|
||||
checkSealOK: bool;
|
||||
ttdReached: bool;
|
||||
pow: PowRef): Result[void, string] =
|
||||
if header.isGenesis:
|
||||
if header.extraData.len > 32:
|
||||
return err("BlockHeader.extraData larger than 32 bytes")
|
||||
return ok()
|
||||
|
||||
let chainDB = com.db
|
||||
let parent = try:
|
||||
chainDB.getBlockHeader(header.parentHash)
|
||||
except CatchableError as err:
|
||||
return err("Failed to load block header from DB")
|
||||
|
||||
result = chainDB.validateHeader(
|
||||
header, parent, numTransactions, checkSealOK, ttdReached, pow)
|
||||
result = com.validateHeader(
|
||||
header, parent, numTransactions, checkSealOK, pow)
|
||||
if result.isErr:
|
||||
return
|
||||
|
||||
|
@ -384,32 +386,30 @@ proc validateHeaderAndKinship*(
|
|||
if not chainDB.exists(header.stateRoot):
|
||||
return err("`state_root` was not found in the db.")
|
||||
|
||||
if not ttdReached:
|
||||
result = chainDB.validateUncles(header, uncles, checkSealOK, pow)
|
||||
if com.consensus != ConsensusType.POS:
|
||||
result = com.validateUncles(header, uncles, checkSealOK, pow)
|
||||
|
||||
if result.isOk:
|
||||
result = chainDB.validateGasLimitOrBaseFee(header, parent)
|
||||
result = com.validateGasLimitOrBaseFee(header, parent)
|
||||
|
||||
proc validateHeaderAndKinship*(
|
||||
chainDB: BaseChainDB;
|
||||
com: CommonRef;
|
||||
header: BlockHeader;
|
||||
body: BlockBody;
|
||||
checkSealOK: bool;
|
||||
ttdReached: bool;
|
||||
pow: PowRef): Result[void, string] {.gcsafe,raises: [CatchableError, Defect].} =
|
||||
|
||||
chainDB.validateHeaderAndKinship(
|
||||
header, body.uncles, body.transactions.len, checkSealOK, ttdReached, pow)
|
||||
com.validateHeaderAndKinship(
|
||||
header, body.uncles, body.transactions.len, checkSealOK, pow)
|
||||
|
||||
proc validateHeaderAndKinship*(
|
||||
chainDB: BaseChainDB;
|
||||
com: CommonRef;
|
||||
ethBlock: EthBlock;
|
||||
checkSealOK: bool;
|
||||
ttdReached: bool;
|
||||
pow: PowRef): Result[void,string] =
|
||||
chainDB.validateHeaderAndKinship(
|
||||
com.validateHeaderAndKinship(
|
||||
ethBlock.header, ethBlock.uncles, ethBlock.txs.len,
|
||||
checkSealOK, ttdReached, pow)
|
||||
checkSealOK, pow)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
|
@ -10,12 +10,11 @@
|
|||
|
||||
import
|
||||
stew/results,
|
||||
eth/common,
|
||||
../db/db_chain
|
||||
../common/common
|
||||
|
||||
# https://eips.ethereum.org/EIPS/eip-4895
|
||||
func validateWithdrawals*(
|
||||
c: BaseChainDB, header: BlockHeader
|
||||
com: CommonRef, header: BlockHeader
|
||||
): Result[void, string] {.raises: [Defect].} =
|
||||
if header.withdrawalsRoot.isSome:
|
||||
return err("Withdrawals not yet implemented")
|
|
@ -9,23 +9,19 @@
|
|||
# according to those terms.
|
||||
|
||||
import
|
||||
../constants, ../forks,
|
||||
../db/accounts_cache,
|
||||
../utils,
|
||||
./code_stream,
|
||||
".."/[db/accounts_cache],
|
||||
"."/[code_stream, memory, message, stack, state],
|
||||
"."/[transaction_tracer, types],
|
||||
./interpreter/[gas_meter, gas_costs, op_codes],
|
||||
./memory,
|
||||
./message,
|
||||
./stack,
|
||||
./state,
|
||||
./transaction_tracer,
|
||||
./types,
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/[common, keys],
|
||||
options,
|
||||
../common/[common, evmforks],
|
||||
../utils/utils,
|
||||
chronicles, chronos,
|
||||
eth/[keys],
|
||||
sets
|
||||
|
||||
export
|
||||
common
|
||||
|
||||
logScope:
|
||||
topics = "vm computation"
|
||||
|
||||
|
@ -103,7 +99,7 @@ template getChainId*(c: Computation): uint =
|
|||
when evmc_enabled:
|
||||
UInt256.fromEvmc(c.host.getTxContext().chain_id).truncate(uint)
|
||||
else:
|
||||
c.vmState.chainDB.config.chainId.uint
|
||||
c.vmState.com.chainId.uint
|
||||
|
||||
template getOrigin*(c: Computation): EthAddress =
|
||||
when evmc_enabled:
|
|
@ -1,4 +1,8 @@
|
|||
import eth/common, stint, evmc/evmc, ../utils
|
||||
import
|
||||
eth/common,
|
||||
stint,
|
||||
evmc/evmc,
|
||||
../utils/utils
|
||||
|
||||
const
|
||||
evmc_native* {.booldefine.} = false
|
(binary image file: 104 KiB before and after)
|
@ -8,7 +8,7 @@
|
|||
import
|
||||
math, eth/common/eth_types,
|
||||
./utils/[macros_gen_opcodes, utils_numeric],
|
||||
./op_codes, ../../forks, ../../errors
|
||||
./op_codes, ../../common/evmforks, ../../errors
|
||||
|
||||
when defined(evmc_enabled):
|
||||
import evmc/evmc
|
||||
|
@ -141,7 +141,7 @@ when defined(evmc_enabled):
|
|||
gasRefund*: int16
|
||||
|
||||
# Table of gas cost specification for storage instructions per EVM revision.
|
||||
func storageCostSpec(): array[Fork, StorageCostSpec] {.compileTime.} =
|
||||
func storageCostSpec(): array[EVMFork, StorageCostSpec] {.compileTime.} =
|
||||
# Legacy cost schedule.
|
||||
const revs = [
|
||||
FkFrontier, FkHomestead, FkTangerine,
|
||||
|
@ -194,9 +194,9 @@ when defined(evmc_enabled):
|
|||
e[EVMC_STORAGE_MODIFIED_RESTORED] = StorageStoreCost(gasCost: c.warmAccess,
|
||||
gasRefund: c.reset - c.warm_access)
|
||||
|
||||
proc storageStoreCost(): array[Fork, array[evmc_storage_status, StorageStoreCost]] {.compileTime.} =
|
||||
proc storageStoreCost(): array[EVMFork, array[evmc_storage_status, StorageStoreCost]] {.compileTime.} =
|
||||
const tbl = storageCostSpec()
|
||||
for rev in Fork:
|
||||
for rev in EVMFork:
|
||||
let c = tbl[rev]
|
||||
if not c.netCost: # legacy
|
||||
legacySStoreCost(result[rev], c)
|
||||
|
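These cost tables are ordinary Nim arrays indexed by the fork enum and filled at compile time. A self-contained miniature of the same pattern, using a made-up enum and the well-known SLOAD base costs rather than the repo's EVMFork/gas types:

type
  MiniFork = enum mfFrontier, mfTangerine, mfIstanbul, mfBerlin
  MiniCost = object
    sloadCost: int

func buildCosts(): array[MiniFork, MiniCost] {.compileTime.} =
  for fork in MiniFork:
    result[fork] = MiniCost(sloadCost: 50)        # Frontier default
  result[mfTangerine].sloadCost = 200             # EIP-150
  result[mfIstanbul].sloadCost = 800              # EIP-1884
  result[mfBerlin].sloadCost = 2100               # EIP-2929, cold access

const miniCosts = buildCosts()

when isMainModule:
  echo miniCosts[mfBerlin].sloadCost              # 2100, resolved at compile time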
@ -206,7 +206,7 @@ when defined(evmc_enabled):
|
|||
const
|
||||
SstoreCost* = storageStoreCost()
|
||||
|
||||
template gasCosts(fork: Fork, prefix, ResultGasCostsName: untyped) =
|
||||
template gasCosts(fork: EVMFork, prefix, ResultGasCostsName: untyped) =
|
||||
|
||||
## Generate the gas cost for each forks and store them in a const
|
||||
## named `ResultGasCostsName`
|
||||
|
@ -810,7 +810,7 @@ const
|
|||
BerlinGasFees = IstanbulGasFees.berlinGasFees
|
||||
LondonGasFees = BerlinGasFees.londonGasFees
|
||||
|
||||
gasFees*: array[Fork, GasFeeSchedule] = [
|
||||
gasFees*: array[EVMFork, GasFeeSchedule] = [
|
||||
FkFrontier: BaseGasFees,
|
||||
FkHomestead: HomesteadGasFees,
|
||||
FkTangerine: TangerineGasFees,
|
||||
|
@ -835,7 +835,7 @@ gasCosts(FkIstanbul, istanbul, IstanbulGasCosts)
|
|||
gasCosts(FkBerlin, berlin, BerlinGasCosts)
|
||||
gasCosts(FkLondon, london, LondonGasCosts)
|
||||
|
||||
proc forkToSchedule*(fork: Fork): GasCosts =
|
||||
proc forkToSchedule*(fork: EVMFork): GasCosts =
|
||||
if fork < FkHomestead:
|
||||
BaseGasCosts
|
||||
elif fork < FkTangerine:
|
|
@ -17,7 +17,7 @@ const
|
|||
import
|
||||
../code_stream,
|
||||
../computation,
|
||||
../../forks,
|
||||
../../common/evmforks,
|
||||
./gas_costs,
|
||||
./gas_meter,
|
||||
./op_codes,
|
||||
|
@ -27,7 +27,7 @@ import
|
|||
macros
|
||||
|
||||
export
|
||||
Fork, Op,
|
||||
EVMFork, Op,
|
||||
oph_defs,
|
||||
gas_meter
|
||||
|
||||
|
@ -43,7 +43,7 @@ template handleStopDirective(k: var Vm2Ctx) =
|
|||
k.cpt.traceOpCodeEnded(Stop, k.cpt.opIndex)
|
||||
|
||||
|
||||
template handleFixedGasCostsDirective(fork: Fork; op: Op; k: var Vm2Ctx) =
|
||||
template handleFixedGasCostsDirective(fork: EVMFork; op: Op; k: var Vm2Ctx) =
|
||||
if k.cpt.tracingEnabled:
|
||||
k.cpt.opIndex = k.cpt.traceOpCodeStarted(op)
|
||||
|
||||
|
@ -54,7 +54,7 @@ template handleFixedGasCostsDirective(fork: Fork; op: Op; k: var Vm2Ctx) =
|
|||
k.cpt.traceOpCodeEnded(op, k.cpt.opIndex)
|
||||
|
||||
|
||||
template handleOtherDirective(fork: Fork; op: Op; k: var Vm2Ctx) =
|
||||
template handleOtherDirective(fork: EVMFork; op: Op; k: var Vm2Ctx) =
|
||||
if k.cpt.tracingEnabled:
|
||||
k.cpt.opIndex = k.cpt.traceOpCodeStarted(op)
|
||||
|
||||
|
@ -79,8 +79,8 @@ proc toCaseStmt(forkArg, opArg, k: NimNode): NimNode =
|
|||
# Inner case/switch => Fork
|
||||
let branchOnFork = quote do: `forkArg`
|
||||
var forkCaseSubExpr = nnkCaseStmt.newTree(branchOnFork)
|
||||
for fork in Fork:
|
||||
let asFork = quote do: Fork(`fork`)
|
||||
for fork in EVMFork:
|
||||
let asFork = quote do: EVMFork(`fork`)
|
||||
|
||||
let branchStmt = block:
|
||||
if op == Stop:
|
||||
|
@ -120,11 +120,11 @@ proc toCaseStmt(forkArg, opArg, k: NimNode): NimNode =
|
|||
# Public macros/functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
macro genOptimisedDispatcher*(fork: Fork; op: Op; k: Vm2Ctx): untyped =
|
||||
macro genOptimisedDispatcher*(fork: EVMFork; op: Op; k: Vm2Ctx): untyped =
|
||||
result = fork.toCaseStmt(op, k)
|
||||
|
||||
|
||||
template genLowMemDispatcher*(fork: Fork; op: Op; k: Vm2Ctx) =
|
||||
template genLowMemDispatcher*(fork: EVMFork; op: Op; k: Vm2Ctx) =
|
||||
if op == Stop:
|
||||
handleStopDirective(k)
|
||||
break
|
||||
|
@ -151,7 +151,7 @@ when isMainModule and isChatty:
|
|||
|
||||
import ../types
|
||||
|
||||
proc optimised(c: Computation, fork: Fork) {.compileTime.} =
|
||||
proc optimised(c: Computation, fork: EVMFork) {.compileTime.} =
|
||||
var desc: Vm2Ctx
|
||||
while true:
|
||||
genOptimisedDispatcher(fork, desc.cpt.instr, desc)
|
|
@ -19,7 +19,7 @@ const
|
|||
|
||||
import
|
||||
strformat,
|
||||
../../forks,
|
||||
../../common/evmforks,
|
||||
./op_codes,
|
||||
./op_handlers/[oph_defs,
|
||||
oph_arithmetic, oph_hash, oph_envinfo, oph_blockdata,
|
||||
|
@ -45,7 +45,7 @@ const
|
|||
# Helper
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc mkOpTable(selected: Fork): array[Op,Vm2OpExec] {.compileTime.} =
|
||||
proc mkOpTable(selected: EVMFork): array[Op,Vm2OpExec] {.compileTime.} =
|
||||
|
||||
# Collect selected <fork> entries
|
||||
for (subList,subName) in allHandlersList:
|
||||
|
@ -106,8 +106,8 @@ const
|
|||
#
|
||||
vmOpHandlers* = ## Op handler records matrix indexed `fork` x `op`
|
||||
block:
|
||||
var rc: array[Fork, array[Op, vmOpHandlersRec]]
|
||||
for fork in Fork:
|
||||
var rc: array[EVMFork, array[Op, vmOpHandlersRec]]
|
||||
for fork in EVMFork:
|
||||
var tab = fork.mkOpTable
|
||||
for op in Op:
|
||||
rc[fork][op].name = tab[op].name
|
||||
|
@ -121,15 +121,15 @@ const
|
|||
|
||||
when isMainModule and isChatty:
|
||||
|
||||
proc opHandlersRun(fork: Fork; op: Op; d: var Vm2Ctx) {.used.} =
|
||||
proc opHandlersRun(fork: EVMFork; op: Op; d: var Vm2Ctx) {.used.} =
|
||||
## Given a particular `fork` and an `op`-code, run the associated handler
|
||||
vmOpHandlers[fork][op].run(d)
|
||||
|
||||
proc opHandlersName(fork: Fork; op: Op): string {.used.} =
|
||||
proc opHandlersName(fork: EVMFork; op: Op): string {.used.} =
|
||||
## Get name (or ID) of op handler
|
||||
vmOpHandlers[fork][op].name
|
||||
|
||||
proc opHandlersInfo(fork: Fork; op: Op): string {.used.} =
|
||||
proc opHandlersInfo(fork: EVMFork; op: Op): string {.used.} =
|
||||
## Get some op handler info
|
||||
vmOpHandlers[fork][op].info
|
||||
|
|
@ -15,7 +15,7 @@
|
|||
import
|
||||
../../../constants,
|
||||
../../../errors,
|
||||
../../../forks,
|
||||
../../../common/evmforks,
|
||||
../../computation,
|
||||
../../memory,
|
||||
../../stack,
|
|
@ -15,8 +15,8 @@
|
|||
import
|
||||
../../../constants,
|
||||
../../../errors,
|
||||
../../../forks,
|
||||
../../../utils,
|
||||
../../../common/evmforks,
|
||||
../../../utils/utils,
|
||||
../../computation,
|
||||
../../memory,
|
||||
../../stack,
|
|
@ -14,7 +14,7 @@
|
|||
|
||||
import
|
||||
../../types,
|
||||
../../../forks,
|
||||
../../../common/evmforks,
|
||||
../op_codes,
|
||||
eth/common/eth_types
|
||||
|
||||
|
@ -37,7 +37,7 @@ type
|
|||
|
||||
Vm2OpExec* = tuple ## op code handler entry
|
||||
opCode: Op ## index back-reference
|
||||
forks: set[Fork] ## forks applicable for this operation
|
||||
forks: set[EVMFork] ## forks applicable for this operation
|
||||
name: string ## handler name
|
||||
info: string ## handter info, explainer
|
||||
exec: Vm2OpHanders
|
||||
|
@ -52,7 +52,7 @@ const
|
|||
|
||||
# similar to: toSeq(Fork).mapIt({it}).foldl(a+b)
|
||||
Vm2OpAllForks* =
|
||||
{Fork.low .. Fork.high}
|
||||
{EVMFork.low .. EVMFork.high}
|
||||
|
||||
Vm2OpHomesteadAndLater* = ## Set of all fork symbols
|
||||
Vm2OpAllForks - {FkFrontier}
|
|
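Vm2OpAllForks and the derived sets are plain Nim enum sets: the full fork range minus whatever forks a handler does not cover. Tiny stand-alone version with a made-up enum (not EVMFork):

type OpFork = enum ofFrontier, ofHomestead, ofBerlin, ofLondon

const
  AllForks          = {OpFork.low .. OpFork.high}
  HomesteadAndLater = AllForks - {ofFrontier}

when isMainModule:
  doAssert ofLondon in HomesteadAndLater
  doAssert ofFrontier notin HomesteadAndLater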
@ -68,7 +68,8 @@ const
|
|||
invalidOp: Vm2OpFn = proc(k: var Vm2Ctx) =
|
||||
raise newException(InvalidInstruction,
|
||||
"Invalid instruction, received an opcode " &
|
||||
"not implemented in the current fork.")
|
||||
"not implemented in the current fork. " &
|
||||
$k.cpt.fork & " " & $k.cpt.instr)
|
||||
|
||||
# -----------
|
||||
|
|
@ -14,24 +14,12 @@ const
|
|||
lowMemoryCompileTime {.used.} = lowmem > 0
|
||||
|
||||
import
|
||||
../constants,
|
||||
../utils,
|
||||
../db/accounts_cache,
|
||||
./code_stream,
|
||||
./computation,
|
||||
std/[macros, sets, strformat],
|
||||
".."/[constants, utils/utils, db/accounts_cache],
|
||||
"."/[code_stream, computation],
|
||||
"."/[message, precompiles, state, types],
|
||||
./interpreter/[op_dispatcher, gas_costs],
|
||||
./message,
|
||||
./precompiles,
|
||||
./state,
|
||||
./types,
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/[common, keys],
|
||||
macros,
|
||||
options,
|
||||
sets,
|
||||
stew/byteutils,
|
||||
strformat
|
||||
pkg/[chronicles, chronos, eth/keys, stew/byteutils]
|
||||
|
||||
logScope:
|
||||
topics = "vm opcode"
|
||||
|
@ -46,7 +34,7 @@ const
|
|||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc selectVM(c: Computation, fork: Fork, shouldPrepareTracer: bool) {.gcsafe.} =
|
||||
proc selectVM(c: Computation, fork: EVMFork, shouldPrepareTracer: bool) {.gcsafe.} =
|
||||
## Op code execution handler main loop.
|
||||
var desc: Vm2Ctx
|
||||
desc.cpt = c
|
||||
|
@ -282,7 +270,7 @@ else:
|
|||
# In the long run I'd like to make some clever macro/template to
|
||||
# eliminate the duplication between the synchronous and
|
||||
# asynchronous versions. But for now let's stick with this for
|
||||
# simplicity.
|
||||
# simplicity.
|
||||
#
|
||||
# Also, I've based this on the recursive one (above), which I think
|
||||
# is okay because the "async" pragma is going to rewrite this whole
|
|
@ -8,7 +8,7 @@
|
|||
import
|
||||
sequtils,
|
||||
chronicles, eth/common/eth_types,
|
||||
../errors, ../validation,
|
||||
../errors, ./validation,
|
||||
./interpreter/utils/utils_numeric
|
||||
|
||||
type
|
|
@ -9,10 +9,12 @@
|
|||
# according to those terms.
|
||||
|
||||
import
|
||||
./types, ../forks,
|
||||
std/[math, macros],
|
||||
"."/[types, blake2b_f, blscurve],
|
||||
./interpreter/[gas_meter, gas_costs, utils/utils_numeric],
|
||||
../errors, stint, eth/[keys, common], chronicles, macros,
|
||||
math, nimcrypto/[ripemd, sha2, utils], bncurve/[fields, groups], ./blake2b_f, ./blscurve
|
||||
../errors, eth/[common, keys], chronicles,
|
||||
nimcrypto/[ripemd, sha2, utils], bncurve/[fields, groups],
|
||||
../common/evmforks
|
||||
|
||||
type
|
||||
PrecompileAddresses* = enum
|
||||
|
@ -201,7 +203,7 @@ proc modExpInternal(computation: Computation, baseLen, expLen, modLen: int, T: t
|
|||
computation.output = newSeq[byte](modLen)
|
||||
computation.output[^output.len..^1] = output[0..^1]
|
||||
|
||||
proc modExpFee(c: Computation, baseLen, expLen, modLen: UInt256, fork: Fork): GasInt =
|
||||
proc modExpFee(c: Computation, baseLen, expLen, modLen: UInt256, fork: EVMFork): GasInt =
|
||||
template data: untyped {.dirty.} =
|
||||
c.msg.data
|
||||
|
||||
|
@ -256,7 +258,7 @@ proc modExpFee(c: Computation, baseLen, expLen, modLen: UInt256, fork: Fork): Ga
|
|||
if fork >= FkBerlin and result < 200.GasInt:
|
||||
result = 200.GasInt
|
||||
|
||||
proc modExp*(c: Computation, fork: Fork = FkByzantium) =
|
||||
proc modExp*(c: Computation, fork: EVMFork = FkByzantium) =
|
||||
## Modular exponentiation precompiled contract
|
||||
## Yellow Paper Appendix E
|
||||
## EIP-198 - https://github.com/ethereum/EIPs/blob/master/EIPS/eip-198.md
|
||||
|
@ -296,7 +298,7 @@ proc modExp*(c: Computation, fork: Fork = FkByzantium) =
|
|||
else:
|
||||
raise newException(EVMError, "The Nimbus VM doesn't support modular exponentiation with numbers larger than uint8192")
|
||||
|
||||
proc bn256ecAdd*(computation: Computation, fork: Fork = FkByzantium) =
|
||||
proc bn256ecAdd*(computation: Computation, fork: EVMFork = FkByzantium) =
|
||||
let gasFee = if fork < FkIstanbul: GasECAdd else: GasECAddIstanbul
|
||||
computation.gasMeter.consumeGas(gasFee, reason = "ecAdd Precompile")
|
||||
|
||||
|
@ -315,7 +317,7 @@ proc bn256ecAdd*(computation: Computation, fork: Fork = FkByzantium) =
|
|||
|
||||
computation.output = @output
|
||||
|
||||
proc bn256ecMul*(computation: Computation, fork: Fork = FkByzantium) =
|
||||
proc bn256ecMul*(computation: Computation, fork: EVMFork = FkByzantium) =
|
||||
let gasFee = if fork < FkIstanbul: GasECMul else: GasECMulIstanbul
|
||||
computation.gasMeter.consumeGas(gasFee, reason="ecMul Precompile")
|
||||
|
||||
|
@ -335,7 +337,7 @@ proc bn256ecMul*(computation: Computation, fork: Fork = FkByzantium) =
|
|||
|
||||
computation.output = @output
|
||||
|
||||
proc bn256ecPairing*(computation: Computation, fork: Fork = FkByzantium) =
|
||||
proc bn256ecPairing*(computation: Computation, fork: EVMFork = FkByzantium) =
|
||||
let msglen = len(computation.msg.data)
|
||||
if msglen mod 192 != 0:
|
||||
raise newException(ValidationError, "Invalid input length")
|
||||
|
@ -676,7 +678,7 @@ proc blsMapG2*(c: Computation) =
|
|||
if not encodePoint(p, c.output):
|
||||
raise newException(ValidationError, "blsMapG2 encodePoint error")
|
||||
|
||||
proc getMaxPrecompileAddr(fork: Fork): PrecompileAddresses =
|
||||
proc getMaxPrecompileAddr(fork: EVMFork): PrecompileAddresses =
|
||||
if fork < FkByzantium: paIdentity
|
||||
elif fork < FkIstanbul: paPairing
|
||||
# EIP 2537: disabled
|
||||
|
@ -684,7 +686,7 @@ proc getMaxPrecompileAddr(fork: Fork): PrecompileAddresses =
|
|||
# elif fork < FkBerlin: paBlake2bf
|
||||
else: PrecompileAddresses.high
|
||||
|
||||
proc execPrecompiles*(computation: Computation, fork: Fork): bool {.inline.} =
|
||||
proc execPrecompiles*(computation: Computation, fork: EVMFork): bool {.inline.} =
|
||||
for i in 0..18:
|
||||
if computation.msg.codeAddress[i] != 0: return
|
||||
|
|
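The loop above is the cheap pre-check for precompile dispatch: only addresses whose first 19 bytes are all zero can name a precompile, so the last byte alone selects the handler. A stand-alone sketch of that test (the type alias and the upper bound are illustrative, not the repo's definitions):

type Address = array[20, byte]

proc precompileIndex(address: Address; maxPrecompile: byte): int =
  ## 1-based precompile number, or 0 if the address cannot be a precompile.
  for i in 0 .. 18:
    if address[i] != 0:
      return 0
  if address[19] == 0 or address[19] > maxPrecompile:
    return 0
  result = int(address[19])

when isMainModule:
  var ecRecoverAddr: Address       # all zero bytes ...
  ecRecoverAddr[19] = 0x01         # ... except the last one
  doAssert precompileIndex(ecRecoverAddr, 0x09) == 1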
@ -6,8 +6,9 @@
|
|||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
import
|
||||
chronicles, strformat, strutils, sequtils, macros, eth/common,
|
||||
../errors, ../validation
|
||||
std/[strformat, strutils, sequtils, macros],
|
||||
chronicles, eth/common,
|
||||
../errors, ./validation
|
||||
|
||||
logScope:
|
||||
topics = "vm stack"
|
|
@ -11,14 +11,12 @@
|
|||
import
|
||||
std/[json, macros, options, sets, strformat, tables],
|
||||
../../stateless/[witness_from_tree, witness_types],
|
||||
../constants,
|
||||
../db/[db_chain, accounts_cache],
|
||||
../db/accounts_cache,
|
||||
../common/[common, evmforks],
|
||||
../errors,
|
||||
../forks,
|
||||
../utils/[ec_recover],
|
||||
./transaction_tracer,
|
||||
./types,
|
||||
eth/[common, keys]
|
||||
eth/[keys]
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
|
@ -38,25 +36,6 @@ template safeExecutor(info: string; code: untyped) =
|
|||
let e = getCurrentException()
|
||||
raise newException(VmStateError, info & "(): " & $e.name & " -- " & e.msg)
|
||||
|
||||
proc isTtdReached(db: BaseChainDB; blockHash: Hash256): bool
|
||||
{.gcsafe, raises: [Defect,RlpError].} =
|
||||
## Returns `true` iff the stored sum of difficulties has reached the
|
||||
## terminal total difficulty, see EIP3675.
|
||||
if db.config.terminalTotalDifficulty.isSome:
|
||||
return db.config.terminalTotalDifficulty.get <= db.getScore(blockHash)
|
||||
|
||||
proc getMinerAddress(chainDB: BaseChainDB; header: BlockHeader): EthAddress
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
if not chainDB.config.poaEngine or chainDB.isTtdReached(header.parentHash):
|
||||
return header.coinbase
|
||||
|
||||
let account = header.ecRecover
|
||||
if account.isErr:
|
||||
let msg = "Could not recover account address: " & $account.error
|
||||
raise newException(ValidationError, msg)
|
||||
|
||||
account.value
|
||||
|
||||
proc init(
|
||||
self: BaseVMState;
|
||||
ac: AccountsCache;
|
||||
|
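The deleted isTtdReached helper boils down to an Option comparison: no terminalTotalDifficulty configured means the merge is never considered reached, otherwise the stored difficulty sum is compared against it. Self-contained sketch with toy uint64 values (the real sums are 256-bit):

import std/options

proc ttdReached(terminalTotalDifficulty: Option[uint64];
                totalDifficulty: uint64): bool =
  terminalTotalDifficulty.isSome and
    terminalTotalDifficulty.get <= totalDifficulty

when isMainModule:
  doAssert not ttdReached(none(uint64), 100)
  doAssert not ttdReached(some(1_000'u64), 999)
  doAssert ttdReached(some(1_000'u64), 1_000)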
@ -67,21 +46,18 @@ proc init(
|
|||
prevRandao: Hash256;
|
||||
difficulty: UInt256;
|
||||
miner: EthAddress;
|
||||
chainDB: BaseChainDB;
|
||||
ttdReached: bool;
|
||||
com: CommonRef;
|
||||
tracer: TransactionTracer)
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## Initialisation helper
|
||||
self.prevHeaders = @[]
|
||||
self.name = "BaseVM"
|
||||
self.parent = parent
|
||||
self.timestamp = timestamp
|
||||
self.gasLimit = gasLimit
|
||||
self.fee = fee
|
||||
self.prevRandao = prevRandao
|
||||
self.blockDifficulty = difficulty
|
||||
self.chainDB = chainDB
|
||||
self.ttdReached = ttdReached
|
||||
self.com = com
|
||||
self.tracer = tracer
|
||||
self.logEntries = @[]
|
||||
self.stateDB = ac
|
||||
|
@ -98,7 +74,7 @@ proc init(
|
|||
prevRandao: Hash256;
|
||||
difficulty: UInt256;
|
||||
miner: EthAddress;
|
||||
chainDB: BaseChainDB;
|
||||
com: CommonRef;
|
||||
tracerFlags: set[TracerFlags])
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
var tracer: TransactionTracer
|
||||
|
@ -112,8 +88,7 @@ proc init(
|
|||
prevRandao= prevRandao,
|
||||
difficulty= difficulty,
|
||||
miner = miner,
|
||||
chainDB = chainDB,
|
||||
ttdReached= chainDB.isTtdReached(parent.blockHash),
|
||||
com = com,
|
||||
tracer = tracer)
|
||||
|
||||
# --------------
|
||||
|
@ -123,9 +98,8 @@ proc `$`*(vmState: BaseVMState): string
|
|||
if vmState.isNil:
|
||||
result = "nil"
|
||||
else:
|
||||
result = &"VMState {vmState.name}:"&
|
||||
&"\n blockNumber: {vmState.parent.blockNumber + 1}"&
|
||||
&"\n chaindb: {vmState.chaindb}"
|
||||
result = &"VMState:"&
|
||||
&"\n blockNumber: {vmState.parent.blockNumber + 1}"
|
||||
|
||||
proc new*(
|
||||
T: type BaseVMState;
|
||||
|
@ -136,9 +110,8 @@ proc new*(
|
|||
prevRandao: Hash256; ## tx env: POS block randomness
|
||||
difficulty: UInt256, ## tx env: difficulty
|
||||
miner: EthAddress; ## tx env: coinbase(PoW) or signer(PoA)
|
||||
chainDB: BaseChainDB; ## block chain database
|
||||
tracerFlags: set[TracerFlags] = {};
|
||||
pruneTrie: bool = true): T
|
||||
com: CommonRef; ## block chain config
|
||||
tracerFlags: set[TracerFlags] = {}): T
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## Create a new `BaseVMState` descriptor from a parent block header. This
|
||||
## function internally constructs a new account state cache rooted at
|
||||
|
@ -149,7 +122,7 @@ proc new*(
|
|||
## with the `parent` block header.
|
||||
new result
|
||||
result.init(
|
||||
ac = AccountsCache.init(chainDB.db, parent.stateRoot, pruneTrie),
|
||||
ac = AccountsCache.init(com.db.db, parent.stateRoot, com.pruneTrie),
|
||||
parent = parent,
|
||||
timestamp = timestamp,
|
||||
gasLimit = gasLimit,
|
||||
|
@ -157,7 +130,7 @@ proc new*(
|
|||
prevRandao = prevRandao,
|
||||
difficulty = difficulty,
|
||||
miner = miner,
|
||||
chainDB = chainDB,
|
||||
com = com,
|
||||
tracerFlags = tracerFlags)
|
||||
|
||||
proc reinit*(self: BaseVMState; ## Object descriptor
|
||||
|
@ -168,7 +141,7 @@ proc reinit*(self: BaseVMState; ## Object descriptor
|
|||
prevRandao:Hash256; ## tx env: POS block randomness
|
||||
difficulty:UInt256, ## tx env: difficulty
|
||||
miner: EthAddress; ## tx env: coinbase(PoW) or signer(PoA)
|
||||
pruneTrie: bool = true): bool
|
||||
): bool
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## Re-initialise state descriptor. The `AccountsCache` database is
|
||||
## re-initilaise only if its `rootHash` doe not point to `parent.stateRoot`,
|
||||
|
@ -181,9 +154,10 @@ proc reinit*(self: BaseVMState; ## Object descriptor
|
|||
if self.stateDB.isTopLevelClean:
|
||||
let
|
||||
tracer = self.tracer
|
||||
db = self.chainDB
|
||||
com = self.com
|
||||
db = com.db
|
||||
ac = if self.stateDB.rootHash == parent.stateRoot: self.stateDB
|
||||
else: AccountsCache.init(db.db, parent.stateRoot, pruneTrie)
|
||||
else: AccountsCache.init(db.db, parent.stateRoot, com.pruneTrie)
|
||||
self[].reset
|
||||
self.init(
|
||||
ac = ac,
|
||||
|
@ -194,8 +168,7 @@ proc reinit*(self: BaseVMState; ## Object descriptor
|
|||
prevRandao = prevRandao,
|
||||
difficulty = difficulty,
|
||||
miner = miner,
|
||||
chainDB = db,
|
||||
ttdReached = db.isTtdReached(parent.blockHash),
|
||||
com = com,
|
||||
tracer = tracer)
|
||||
return true
|
||||
# else: false
|
||||
|
@ -203,7 +176,7 @@ proc reinit*(self: BaseVMState; ## Object descriptor
|
|||
proc reinit*(self: BaseVMState; ## Object descriptor
|
||||
parent: BlockHeader; ## parent header, account sync pos.
|
||||
header: BlockHeader; ## header with tx environment data fields
|
||||
pruneTrie: bool = true): bool
|
||||
): bool
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## Variant of `reinit()`. The `parent` argument is used to sync the accounts
|
||||
## cache and the `header` is used as a container to pass the `timestamp`,
|
||||
|
@ -218,31 +191,27 @@ proc reinit*(self: BaseVMState; ## Object descriptor
|
|||
fee = header.fee,
|
||||
prevRandao= header.prevRandao,
|
||||
difficulty= header.difficulty,
|
||||
miner = self.chainDB.getMinerAddress(header),
|
||||
pruneTrie = pruneTrie)
|
||||
miner = self.com.minerAddress(header))
|
||||
|
||||
proc reinit*(self: BaseVMState; ## Object descriptor
|
||||
header: BlockHeader; ## header with tx environment data fields
|
||||
pruneTrie: bool = true): bool
|
||||
): bool
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## This is a variant of the `reinit()` function above where the field
|
||||
## `header.parentHash`, is used to fetch the `parent` BlockHeader to be
|
||||
## used in the `update()` variant, above.
|
||||
var parent: BlockHeader
|
||||
if self.chainDB.getBlockHeader(header.parentHash, parent):
|
||||
if self.com.db.getBlockHeader(header.parentHash, parent):
|
||||
return self.reinit(
|
||||
parent = parent,
|
||||
header = header,
|
||||
pruneTrie = pruneTrie)
|
||||
|
||||
header = header)
|
||||
|
||||
proc init*(
|
||||
self: BaseVMState; ## Object descriptor
|
||||
parent: BlockHeader; ## parent header, account sync position
|
||||
header: BlockHeader; ## header with tx environment data fields
|
||||
chainDB: BaseChainDB; ## block chain database
|
||||
tracerFlags: set[TracerFlags] = {},
|
||||
pruneTrie: bool = true)
|
||||
com: CommonRef; ## block chain config
|
||||
tracerFlags: set[TracerFlags] = {})
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## Variant of `new()` constructor above for in-place initalisation. The
|
||||
## `parent` argument is used to sync the accounts cache and the `header`
|
||||
|
@ -252,24 +221,23 @@ proc init*(
|
|||
## It requires the `header` argument properly initalised so that for PoA
|
||||
## networks, the miner address is retrievable via `ecRecover()`.
|
||||
self.init(
|
||||
ac = AccountsCache.init(chainDB.db, parent.stateRoot, pruneTrie),
|
||||
ac = AccountsCache.init(com.db.db, parent.stateRoot, com.pruneTrie),
|
||||
parent = parent,
|
||||
timestamp = header.timestamp,
|
||||
gasLimit = header.gasLimit,
|
||||
fee = header.fee,
|
||||
prevRandao = header.prevRandao,
|
||||
difficulty = header.difficulty,
|
||||
miner = chainDB.getMinerAddress(header),
|
||||
chainDB = chainDB,
|
||||
miner = com.minerAddress(header),
|
||||
com = com,
|
||||
tracerFlags = tracerFlags)
|
||||
|
||||
proc new*(
|
||||
T: type BaseVMState;
|
||||
parent: BlockHeader; ## parent header, account sync position
|
||||
header: BlockHeader; ## header with tx environment data fields
|
||||
chainDB: BaseChainDB; ## block chain database
|
||||
tracerFlags: set[TracerFlags] = {},
|
||||
pruneTrie: bool = true): T
|
||||
com: CommonRef; ## block chain config
|
||||
tracerFlags: set[TracerFlags] = {}): T
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## This is a variant of the `new()` constructor above where the `parent`
|
||||
## argument is used to sync the accounts cache and the `header` is used
|
||||
|
@ -281,44 +249,39 @@ proc new*(
|
|||
result.init(
|
||||
parent = parent,
|
||||
header = header,
|
||||
chainDB = chainDB,
|
||||
tracerFlags = tracerFlags,
|
||||
pruneTrie = pruneTrie)
|
||||
com = com,
|
||||
tracerFlags = tracerFlags)
|
||||
|
||||
proc new*(
|
||||
T: type BaseVMState;
|
||||
header: BlockHeader; ## header with tx environment data fields
|
||||
chainDB: BaseChainDB; ## block chain database
|
||||
tracerFlags: set[TracerFlags] = {};
|
||||
pruneTrie: bool = true): T
|
||||
com: CommonRef; ## block chain config
|
||||
tracerFlags: set[TracerFlags] = {}): T
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## This is a variant of the `new()` constructor above where the field
|
||||
## `header.parentHash`, is used to fetch the `parent` BlockHeader to be
|
||||
## used in the `new()` variant, above.
|
||||
BaseVMState.new(
|
||||
parent = chainDB.getBlockHeader(header.parentHash),
|
||||
parent = com.db.getBlockHeader(header.parentHash),
|
||||
header = header,
|
||||
chainDB = chainDB,
|
||||
tracerFlags = tracerFlags,
|
||||
pruneTrie = pruneTrie)
|
||||
com = com,
|
||||
tracerFlags = tracerFlags)
|
||||
|
||||
proc init*(
|
||||
vmState: BaseVMState;
|
||||
header: BlockHeader; ## header with tx environment data fields
|
||||
chainDB: BaseChainDB; ## block chain database
|
||||
tracerFlags: set[TracerFlags] = {};
|
||||
pruneTrie: bool = true): bool
|
||||
com: CommonRef; ## block chain config
|
||||
tracerFlags: set[TracerFlags] = {}): bool
|
||||
{.gcsafe, raises: [Defect,CatchableError].} =
|
||||
## Variant of `new()` which does not throw an exception on a dangling
|
||||
## `BlockHeader` parent hash reference.
|
||||
var parent: BlockHeader
|
||||
if chainDB.getBlockHeader(header.parentHash, parent):
|
||||
if com.db.getBlockHeader(header.parentHash, parent):
|
||||
vmState.init(
|
||||
parent = parent,
|
||||
header = header,
|
||||
chainDB = chainDB,
|
||||
tracerFlags = tracerFlags,
|
||||
pruneTrie = pruneTrie)
|
||||
com = com,
|
||||
tracerFlags = tracerFlags)
|
||||
return true
|
||||
|
||||
method coinbase*(vmState: BaseVMState): EthAddress {.base, gcsafe.} =
|
||||
|
@ -330,7 +293,7 @@ method blockNumber*(vmState: BaseVMState): BlockNumber {.base, gcsafe.} =
|
|||
vmState.parent.blockNumber + 1
|
||||
|
||||
method difficulty*(vmState: BaseVMState): UInt256 {.base, gcsafe.} =
|
||||
if vmState.ttdReached:
|
||||
if vmState.com.consensus == ConsensusType.POS:
|
||||
# EIP-4399/EIP-3675
|
||||
UInt256.fromBytesBE(vmState.prevRandao.data, allowPadding = false)
|
||||
else:
|
||||
|
@ -347,15 +310,17 @@ when defined(geth):
|
|||
|
||||
method getAncestorHash*(vmState: BaseVMState, blockNumber: BlockNumber): Hash256 {.base, gcsafe, raises: [Defect,CatchableError,Exception].} =
|
||||
var ancestorDepth = vmState.blockNumber - blockNumber - 1
|
||||
if ancestorDepth >= constants.MAX_PREV_HEADER_DEPTH:
|
||||
if ancestorDepth >= MAX_PREV_HEADER_DEPTH:
|
||||
return
|
||||
if blockNumber >= vmState.blockNumber:
|
||||
return
|
||||
|
||||
let db = vmState.com.db
|
||||
when defined(geth):
|
||||
result = vmState.chainDB.headerHash(blockNumber.truncate(uint64))
|
||||
result = db.headerHash(blockNumber.truncate(uint64))
|
||||
else:
|
||||
result = vmState.chainDB.getBlockHash(blockNumber)
|
||||
result = db.getBlockHash(blockNumber)
|
||||
|
||||
#TODO: should we use deque here?
|
||||
# someday we may revive this code when
|
||||
# we already have working miner
|
||||
|
@ -427,6 +392,6 @@ proc buildWitness*(vmState: BaseVMState): seq[byte]
|
|||
let flags = if vmState.fork >= FkSpurious: {wfEIP170} else: {}
|
||||
|
||||
# build witness from tree
|
||||
var wb = initWitnessBuilder(vmState.chainDB.db, rootHash, flags)
|
||||
var wb = initWitnessBuilder(vmState.com.db.db, rootHash, flags)
|
||||
safeExecutor("buildWitness"):
|
||||
result = wb.buildWitness(mkeys)
|
|
@ -9,9 +9,7 @@
|
|||
# according to those terms.
|
||||
|
||||
import
|
||||
../chain_config,
|
||||
../constants,
|
||||
../forks,
|
||||
../db/accounts_cache,
|
||||
../transaction,
|
||||
./computation,
|
||||
|
@ -22,12 +20,10 @@ import
|
|||
./types,
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/common,
|
||||
eth/common/eth_types,
|
||||
options,
|
||||
sets
|
||||
|
||||
proc setupTxContext*(vmState: BaseVMState, origin: EthAddress, gasPrice: GasInt, forkOverride=none(Fork)) =
|
||||
proc setupTxContext*(vmState: BaseVMState, origin: EthAddress, gasPrice: GasInt, forkOverride=none(EVMFork)) =
|
||||
## this proc will be called each time a new transaction
|
||||
## is going to be executed
|
||||
vmState.txOrigin = origin
|
||||
|
@ -36,7 +32,7 @@ proc setupTxContext*(vmState: BaseVMState, origin: EthAddress, gasPrice: GasInt,
|
|||
if forkOverride.isSome:
|
||||
forkOverride.get
|
||||
else:
|
||||
vmState.chainDB.config.toFork(vmState.blockNumber)
|
||||
vmState.com.toEVMFork(vmState.blockNumber)
|
||||
vmState.gasCosts = vmState.fork.forkToSchedule
|
||||
|
||||
|
|
@ -9,14 +9,13 @@
|
|||
# according to those terms.
|
||||
|
||||
import
|
||||
tables, eth/common,
|
||||
options, json, sets,
|
||||
chronos, stint,
|
||||
std/[tables, json, sets],
|
||||
chronos,
|
||||
json_rpc/rpcclient,
|
||||
./stack, ./memory, ./code_stream, ../forks,
|
||||
"."/[stack, memory, code_stream],
|
||||
./interpreter/[gas_costs, op_codes],
|
||||
# TODO - will be hidden at a lower layer
|
||||
../db/[db_chain, accounts_cache]
|
||||
../db/accounts_cache,
|
||||
../common/[common, evmforks]
|
||||
|
||||
when defined(evmc_enabled):
|
||||
import
|
||||
|
@ -35,15 +34,13 @@ type
|
|||
|
||||
BaseVMState* = ref object of RootObj
|
||||
prevHeaders* : seq[BlockHeader]
|
||||
chainDB* : BaseChainDB
|
||||
com* : CommonRef
|
||||
parent* : BlockHeader
|
||||
timestamp* : EthTime
|
||||
gasLimit* : GasInt
|
||||
fee* : Option[UInt256]
|
||||
prevRandao* : Hash256
|
||||
blockDifficulty*: UInt256
|
||||
ttdReached* : bool
|
||||
name* : string
|
||||
flags* : set[VMFlag]
|
||||
tracer* : TransactionTracer
|
||||
logEntries* : seq[Log]
|
||||
|
@ -55,7 +52,7 @@ type
|
|||
txOrigin* : EthAddress
|
||||
txGasPrice* : GasInt
|
||||
gasCosts* : GasCosts
|
||||
fork* : Fork
|
||||
fork* : EVMFork
|
||||
minerAddress* : EthAddress
|
||||
asyncFactory* : AsyncOperationFactory
|
||||
|
||||
|
@ -134,6 +131,6 @@ type
|
|||
|
||||
LazyDataSource* = ref object of RootObj
|
||||
ifNecessaryGetStorage*: proc(c: Computation, slot: UInt256): Future[void] {.gcsafe.}
|
||||
|
||||
|
||||
AsyncOperationFactory* = ref object of RootObj
|
||||
lazyDataSource*: LazyDataSource
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
import
|
||||
strformat,
|
||||
errors, eth/common
|
||||
../errors, eth/common
|
||||
|
||||
proc validateGte*(value: Int256 | int, minimum: int, title: string = "Value") =
|
||||
if value.i256 < minimum.i256:
|
|
@ -1,7 +1,7 @@
|
|||
import
|
||||
chronicles,
|
||||
eth/[rlp, trie/db],
|
||||
../db/[storage_types, db_chain]
|
||||
../../db/[storage_types, db_chain]
|
||||
|
||||
type
|
||||
# transitionStatus describes the status of eth1/2 transition. This switch
|
||||
|
@ -34,7 +34,7 @@ proc new*(_: type MergerRef, db: TrieDatabaseRef): MergerRef =
|
|||
status: db.readStatus()
|
||||
)
|
||||
|
||||
proc new*(_: type MergerRef, db: BaseChainDB): MergerRef =
|
||||
proc new*(_: type MergerRef, db: ChainDBRef): MergerRef =
|
||||
MergerRef.new(db.db)
|
||||
|
||||
# ReachTTD is called whenever the first NewHead message received
|
|
@ -1,6 +1,6 @@
|
|||
import
|
||||
web3/engine_api_types,
|
||||
../db/db_chain,
|
||||
../../db/db_chain,
|
||||
./merger
|
||||
|
||||
import eth/common/eth_types except BlockHeader
|
|
@ -5,7 +5,7 @@ import
|
|||
json_rpc/errors,
|
||||
eth/[trie, rlp, common, trie/db],
|
||||
stew/[objects, results, byteutils],
|
||||
../constants,
|
||||
../../constants,
|
||||
./mergetypes
|
||||
|
||||
proc computePayloadId*(headBlockHash: Hash256, params: PayloadAttributesV1): PayloadID =
|
|
@ -13,7 +13,7 @@ import
|
|||
chronos,
|
||||
eth/p2p,
|
||||
eth/p2p/peer_pool,
|
||||
../nimbus/sync/protocol
|
||||
./protocol
|
||||
|
||||
# Currently, this module only handles static peers
|
||||
# but we can extend it to handles trusted peers as well
|
|
@ -9,9 +9,12 @@
|
|||
# according to those terms.
|
||||
|
||||
import
|
||||
std/[options, times],
|
||||
std/[options, times, json, strutils],
|
||||
eth/common,
|
||||
stew/byteutils
|
||||
stew/byteutils,
|
||||
../vm_state,
|
||||
../vm_types,
|
||||
../db/accounts_cache
|
||||
|
||||
proc `$`(hash: Hash256): string =
|
||||
hash.data.toHex
|
||||
|
@ -50,3 +53,34 @@ proc debug*(h: BlockHeader): string =
|
|||
if h.withdrawalsRoot.isSome:
|
||||
result.add "withdrawalsRoot: " & $h.withdrawalsRoot.get() & "\n"
|
||||
result.add "blockHash : " & $blockHash(h) & "\n"
|
||||
|
||||
proc dumpAccount(stateDB: AccountsCache, address: EthAddress): JsonNode =
|
||||
var storage = newJObject()
|
||||
for k, v in stateDB.cachedStorage(address):
|
||||
storage[k.toHex] = %v.toHex
|
||||
|
||||
result = %{
|
||||
"nonce": %toHex(stateDB.getNonce(address)),
|
||||
"balance": %stateDB.getBalance(address).toHex(),
|
||||
"codehash": %($stateDB.getCodeHash(address)),
|
||||
"storageRoot": %($stateDB.getStorageRoot(address)),
|
||||
"storage": storage
|
||||
}
|
||||
|
||||
proc debugAccounts*(vmState: BaseVMState): string =
|
||||
var
|
||||
accounts = newJObject()
|
||||
accountList = newSeq[EthAddress]()
|
||||
|
||||
for address in vmState.stateDB.addresses:
|
||||
accountList.add address
|
||||
|
||||
for i, ac in accountList:
|
||||
accounts[ac.toHex] = dumpAccount(vmState.stateDB, ac)
|
||||
|
||||
let res = %{
|
||||
"rootHash": %($vmState.readOnlyStateDB.rootHash),
|
||||
"accounts": accounts
|
||||
}
|
||||
|
||||
res.pretty
|