Compare commits

No commits in common. "e1bb65fdfa566ab2f327e6c4895fcb710d08f3b1" and "af34f90fe40ef6fc77be6cf446817022de6d5226" have entirely different histories.

e1bb65fdfa ... af34f90fe4

@@ -113,6 +113,11 @@
  url = https://github.com/status-im/nimbus-build-system.git
  ignore = dirty
  branch = master
[submodule "vendor/ethash"]
  path = vendor/ethash
  url = https://github.com/status-im/nim-ethash
  ignore = dirty
  branch = master
[submodule "vendor/nim-evmc"]
  path = vendor/nim-evmc
  url = https://github.com/status-im/nim-evmc

@@ -12,7 +12,7 @@
import
  chronicles,
  eth/trie/trie_defs,
  ../core/casper,
  ../core/[pow, casper],
  ../db/[core_db, ledger, storage_types],
  ../utils/[utils, ec_recover],
  ".."/[constants, errors],

@@ -81,6 +81,9 @@ type
      ## installing a snapshot pivot. The default value for this field is
      ## `GENESIS_PARENT_HASH` to start at the very beginning.

    pow: PowRef
      ## Wrapper around `hashimotoLight()` and lookup cache

    pos: CasperRef
      ## Proof Of Stake descriptor

@@ -138,6 +141,9 @@ proc init(com : CommonRef,
  com.networkId = networkId
  com.syncProgress= SyncProgress()
  com.pruneHistory= pruneHistory

  # Always initialise the PoW epoch cache even though it migh no be used
  com.pow = PowRef.new
  com.pos = CasperRef.new

  # com.currentFork and com.consensusType

@@ -247,6 +253,7 @@ func clone*(com: CommonRef, db: CoreDbRef): CommonRef =
    networkId    : com.networkId,
    currentFork  : com.currentFork,
    consensusType: com.consensusType,
    pow          : com.pow,
    pos          : com.pos,
    pruneHistory : com.pruneHistory)

@@ -389,6 +396,10 @@ func startOfHistory*(com: CommonRef): Hash256 =
  ## Getter
  com.startOfHistory

func pow*(com: CommonRef): PowRef =
  ## Getter
  com.pow

func pos*(com: CommonRef): CasperRef =
  ## Getter
  com.pos

@@ -13,7 +13,8 @@
import
  ../../common/common,
  ../../utils/utils,
  ../../vm_types
  ../../vm_types,
  ../pow

export
  common

@@ -40,7 +41,7 @@ type
# Public constructors
# ------------------------------------------------------------------------------

func newChain*(com: CommonRef,
proc newChain*(com: CommonRef,
               extraValidation: bool,
               vmState = BaseVMState(nil)): ChainRef =
  ## Constructor for the `Chain` descriptor object.

@@ -64,23 +65,27 @@ func newChain*(com: CommonRef): ChainRef =
# ------------------------------------------------------------------------------
# Public `Chain` getters
# ------------------------------------------------------------------------------
func vmState*(c: ChainRef): BaseVMState =
proc vmState*(c: ChainRef): BaseVMState =
  ## Getter
  c.vmState

func db*(c: ChainRef): CoreDbRef =
proc pow*(c: ChainRef): PowRef =
  ## Getter
  c.com.pow

proc db*(c: ChainRef): CoreDbRef =
  ## Getter
  c.com.db

func com*(c: ChainRef): CommonRef =
proc com*(c: ChainRef): CommonRef =
  ## Getter
  c.com

func extraValidation*(c: ChainRef): bool =
proc extraValidation*(c: ChainRef): bool =
  ## Getter
  c.extraValidation

func verifyFrom*(c: ChainRef): BlockNumber =
proc verifyFrom*(c: ChainRef): BlockNumber =
  ## Getter
  c.verifyFrom

@@ -95,12 +100,12 @@ proc currentBlock*(c: ChainRef): BlockHeader
# Public `Chain` setters
# ------------------------------------------------------------------------------

func `extraValidation=`*(c: ChainRef; extraValidation: bool) =
proc `extraValidation=`*(c: ChainRef; extraValidation: bool) =
  ## Setter. If set `true`, the assignment value `extraValidation` enables
  ## extra block chain validation.
  c.extraValidation = extraValidation

func `verifyFrom=`*(c: ChainRef; verifyFrom: BlockNumber) =
proc `verifyFrom=`*(c: ChainRef; verifyFrom: BlockNumber) =
  ## Setter. The assignment value `verifyFrom` defines the first block where
  ## validation should start if the `Clique` field `extraValidation` was set
  ## `true`.

@@ -0,0 +1,211 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

## Block PoW Support (Verifying & Mining)
## ======================================
##

{.push raises: [].}

import
  std/[options, strutils],
  ../utils/utils,
  ./pow/pow_cache,
  eth/[common, keys, p2p, rlp],
  stew/endians2,
  ethash,
  stint

type
  PowDigest = tuple ##\
    ## Return value from the `hashimotoLight()` function
    mixDigest: Hash256
    value    : Hash256

  PowSpecs* = object ##\
    ## Relevant block header parts for PoW mining & verifying. This object
    ## might be more useful for testing and debugging than for production.
    number*    : BlockNumber
    miningHash*: Hash256
    nonce      : BlockNonce
    mixHash*   : Hash256
    difficulty : DifficultyInt

  PowHeader = object ##\
    ## Stolen from `p2p/validate.MiningHeader`
    parentHash  : Hash256
    ommersHash  : Hash256
    coinbase    : EthAddress
    stateRoot   : Hash256
    txRoot      : Hash256
    receiptsRoot: Hash256
    logsBloom   : common.BloomFilter
    difficulty  : DifficultyInt
    number      : BlockNumber
    gasLimit    : GasInt
    gasUsed     : GasInt
    timestamp   : EthTime
    extraData   : Blob

  PowRef* = ref object of RootObj ##\
    ## PoW context descriptor
    lightByEpoch: PowCacheRef ## PoW cache indexed by epoch

    # You should only create one instance of the RNG per application / library
    # Ref is used so that it can be shared between components
    rng: ref HmacDrbgContext

# ------------------------------------------------------------------------------
# Private functions: RLP support
# ------------------------------------------------------------------------------

func append(w: var RlpWriter; specs: PowSpecs) =
  ## RLP support
  w.startList(5)
  w.append(HashOrNum(isHash: false, number: specs.number))
  w.append(HashOrNum(isHash: true, hash: specs.miningHash))
  w.append(specs.nonce.toUint)
  w.append(HashOrNum(isHash: true, hash: specs.mixHash))
  w.append(specs.difficulty)

func read(rlp: var Rlp; Q: type PowSpecs): Q
    {.raises: [RlpError].} =
  ## RLP support
  rlp.tryEnterList()
  result.number     = rlp.read(HashOrNum).number
  result.miningHash = rlp.read(HashOrNum).hash
  result.nonce      = rlp.read(uint64).toBlockNonce
  result.mixHash    = rlp.read(HashOrNum).hash
  result.difficulty = rlp.read(DifficultyInt)

func rlpTextEncode(specs: PowSpecs): string =
  "specs #" & $specs.number & " " & rlp.encode(specs).toHex

func decodeRlpText(data: string): PowSpecs
    {.raises: [CatchableError].} =
  if 180 < data.len and data[0 .. 6] == "specs #":
    let hexData = data.split
    if hexData.len == 3:
      var rlpData = hexData[2].rlpFromHex
      result = rlpData.read(PowSpecs)

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

func miningHash(header: BlockHeader): Hash256 =
  ## Calculate hash from mining relevant fields of the argument `header`
  let miningHeader = PowHeader(
    parentHash:  header.parentHash,
    ommersHash:  header.ommersHash,
    coinbase:    header.coinbase,
    stateRoot:   header.stateRoot,
    txRoot:      header.txRoot,
    receiptsRoot:header.receiptsRoot,
    logsBloom:   header.logsBloom,
    difficulty:  header.difficulty,
    number:      header.number,
    gasLimit:    header.gasLimit,
    gasUsed:     header.gasUsed,
    timestamp:   header.timestamp,
    extraData:   header.extraData)

  rlpHash(miningHeader)

# ---------------

proc init(tm: PowRef; light: Option[PowCacheRef]) =
  ## Constructor
  tm.rng = newRng()

  if light.isSome:
    tm.lightByEpoch = light.get
  else:
    tm.lightByEpoch = PowCacheRef.new

# ------------------------------------------------------------------------------
# Public functions, Constructor
# ------------------------------------------------------------------------------

proc new*(T: type PowRef; cache: PowCacheRef): T =
  ## Constructor
  new result
  result.init(some(cache))

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

func getPowSpecs*(header: BlockHeader): PowSpecs =
  ## Extracts relevant parts from the `header` argument that are needed
  ## for mining or pow verification. This function might be more useful for
  ## testing and debugging than for production.
  PowSpecs(
    number:     header.number,
    miningHash: header.miningHash,
    nonce:      header.nonce,
    mixHash:    header.mixHash,
    difficulty: header.difficulty)

func getPowCacheLookup*(tm: PowRef;
                        blockNumber: BlockNumber): (uint64, Hash256)
    {.gcsafe, raises: [KeyError].} =
  ## Returns the pair `(size,digest)` derived from the lookup cache for the
  ## `hashimotoLight()` function for the given block number. The `size` is the
  ## full size of the dataset (the cache represents) as passed on to the
  ## `hashimotoLight()` function. The `digest` is a hash derived from the
  ## cache that would be passed on to `hashimotoLight()`.
  ##
  ## This function is intended for error reporting and might also be useful
  ## for testing and debugging.
  let ds = tm.lightByEpoch.get(blockNumber)
  if ds == nil:
    raise newException(KeyError, "block not found")

  result[0] = ds.size
  result[1] = withKeccakHash:
    for a in ds.data:
      h.update(a.data)

# ------------------------

func getPowDigest(tm: PowRef; blockNumber: BlockNumber;
                  powHeaderDigest: Hash256; nonce: BlockNonce): PowDigest =
  ## Calculate the expected value of `header.mixHash` using the
  ## `hashimotoLight()` library method.
  let
    ds = tm.lightByEpoch.get(blockNumber)
    u64Nonce = uint64.fromBytesBE(nonce)
  hashimotoLight(ds.size, ds.data, powHeaderDigest, u64Nonce)

func getPowDigest*(tm: PowRef; header: BlockHeader): PowDigest =
  ## Variant of `getPowDigest()`
  tm.getPowDigest(header.number, header.miningHash, header.nonce)

func getPowDigest*(tm: PowRef; specs: PowSpecs): PowDigest =
  ## Variant of `getPowDigest()`
  tm.getPowDigest(specs.number, specs.miningHash, specs.nonce)

# ------------------------------------------------------------------------------
# Public functions, debugging & testing
# ------------------------------------------------------------------------------

func dumpPowSpecs*(specs: PowSpecs): string =
  ## Text representation of `PowSpecs` argument object
  specs.rlpTextEncode

func undumpPowSpecs*(data: string): PowSpecs
    {.raises: [CatchableError].} =
  ## Recover `PowSpecs` object from text representation
  data.decodeRlpText

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

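A minimal usage sketch of the `PowRef` API added above, assuming only the constructor and `getPowDigest()` shown in this file; the import paths and the `header` argument are placeholders for whatever module layout and block header the caller already has:

  import
    eth/common,
    ./pow, ./pow/pow_cache        # placeholder paths, adjust to the caller's location

  proc sealLooksValid(header: BlockHeader): bool =
    let
      cache  = PowCacheRef.new    # epoch-indexed hashimotoLight() caches
      powRef = PowRef.new(cache)  # PoW descriptor from this module
      digest = powRef.getPowDigest(header)
    # the recomputed mix digest must match the one sealed into the header
    digest.mixDigest == header.mixHash
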
@@ -0,0 +1,116 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

## LRU Cache for Epoch Indexed Hashimoto Cache
## ============================================
##
## This module uses the eth-block number (mapped to epoch) to hold and re-use
## the cache needed for running the `hasimotoLight()` proof-of-work function.

import
  eth/common,
  ethash,
  stew/keyed_queue

{.push raises: [].}

type
  PowCacheItemRef* = ref object
    size*: uint64
    data*: seq[MDigest[512]]

  PowCacheStats* = tuple
    maxItems: int
    size: int

  PowCache* = object
    cacheMax: int
    cache: KeyedQueue[uint64,PowCacheItemRef]

  PowCacheRef* = ref PowCache

const
  nItemsMax = 10
  nItemsInit = 2

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc toKey(bn: BlockNumber): uint64 =
  bn div EPOCH_LENGTH

# ------------------------------------------------------------------------------
# Public functions, constructor
# ------------------------------------------------------------------------------

proc init*(pc: var PowCache; maxItems = nItemsMax) =
  ## Constructor for PoW cache
  pc.cacheMax = maxItems
  pc.cache.init(nItemsInit)

proc init*(T: type PowCache; maxItems = nItemsMax): T =
  ## Constructor variant
  result.init(maxItems)

proc new*(T: type PowCacheRef; maxItems = nItemsMax): T =
  ## Constructor variant
  new result
  result[].init(maxItems)

# ------------------------------------------------------------------------------
# Public functions, constructor
# ------------------------------------------------------------------------------

proc get*(pc: var PowCache; bn: BlockNumber): PowCacheItemRef =
  ## Return a cache derived from argument `blockNumber` ready to be used
  ## for the `hashimotoLight()` method.
  let
    key = bn.toKey
    rc = pc.cache.lruFetch(key)

  if rc.isOk:
    return rc.value

  let
    # note that `getDataSize()` and `getCacheSize()` depend on
    # `key * EPOCH_LENGTH` rather than the original block number.
    top = key * EPOCH_LENGTH
    pair = PowCacheItemRef(
      size: top.getDataSize,
      data: top.getCacheSize.mkcache(top.getSeedhash))

  pc.cache.lruAppend(key, pair, pc.cacheMax)

proc get*(pcr: PowCacheRef; bn: BlockNumber): PowCacheItemRef =
  ## Variant of `getCache()`
  pcr[].get(bn)

proc hasItem*(pc: var PowCache; bn: BlockNumber): bool =
  ## Returns true if there is a cache entry for argument `bn`.
  pc.cache.hasKey(bn.toKey)

proc hasItem*(pcr: PowCacheRef; bn: BlockNumber): bool =
  ## Variant of `hasItem()`
  pcr[].hasItem(bn)

# -------------------------

proc stats*(pc: var PowCache): PowCacheStats =
  ## Return current cache sizes
  result = (maxItems: pc.cacheMax, size: pc.cache.len)

proc stats*(pcr: PowCacheRef): PowCacheStats =
  ## Variant of `stats()`
  pcr[].stats

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

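A short sketch of how the epoch-keyed LRU above can be exercised on its own, using only the constructors, `get()` and `stats()` shown in this file; the block number is an arbitrary example value and the import path is a placeholder:

  import
    eth/common,
    ./pow_cache                          # placeholder path

  let
    pc   = PowCacheRef.new(maxItems = 5) # keep at most 5 epoch caches
    bn   = BlockNumber(15_000_000)       # arbitrary example block number
    item = pc.get(bn)                    # builds the hashimoto cache for bn's epoch on first use

  echo "dataset size: ", item.size, ", epochs cached: ", pc.stats.size
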
@@ -18,6 +18,7 @@ import
  ../utils/utils,
  "."/[dao, eip4844, gaslimit, withdrawals],
  ./pow/[difficulty, header],
  ./pow,
  nimcrypto/utils as cryptoutils,
  stew/objects,
  results

@@ -33,9 +34,35 @@ const
  byteutils.hexToByteArray[13](DAOForkBlockExtra).toSeq

# ------------------------------------------------------------------------------
# Private validator functions
# Pivate validator functions
# ------------------------------------------------------------------------------

proc validateSeal(pow: PowRef; header: BlockHeader): Result[void,string] =
  try:
    let (expmixHash, miningValue) = pow.getPowDigest(header)

    if expmixHash != header.mixHash:
      let
        miningHash = header.getPowSpecs.miningHash
        (size, cachedHash) = try: pow.getPowCacheLookup(header.number)
                             except KeyError: return err("Unknown block")
                             except CatchableError as e: return err(e.msg)
      return err("mixHash mismatch. actual=$1, expected=$2," &
        " blockNumber=$3, miningHash=$4, nonce=$5, difficulty=$6," &
        " size=$7, cachedHash=$8" % [
        $header.mixHash, $expmixHash, $header.number,
        $miningHash, header.nonce.toHex, $header.difficulty,
        $size, $cachedHash])

    let value = UInt256.fromBytesBE(miningValue.data)
    if value > UInt256.high div header.difficulty:
      return err("mining difficulty error")

  except CatchableError as err:
    return err(err.msg)

  ok()

proc validateHeader(
    com: CommonRef;
    blk: EthBlock;

@@ -91,6 +118,9 @@ proc validateHeader(
  if header.difficulty < calcDiffc:
    return err("provided header difficulty is too low")

  if checkSealOK:
    return com.pow.validateSeal(header)

  ? com.validateWithdrawals(header, blk.withdrawals)
  ? com.validateEip4844Header(header, parentHeader, blk.transactions)
  ? com.validateGasLimitOrBaseFee(header, parentHeader)

@@ -165,6 +195,10 @@ proc validateUncles(com: CommonRef; header: BlockHeader;
    if uncle.timestamp <= parent.timestamp:
      return err("Uncle's parent must me older")

    # Now perform VM level validation of the uncle
    if checkSealOK:
      ? com.pow.validateSeal(uncle)

    let uncleParent = try:
      chainDB.getBlockHeader(uncle.parentHash)
    except BlockNotFound:

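The `validateSeal()` helper above is private to this module; a hedged stand-alone equivalent using only the public `pow` API would look roughly like this, repeating the same mix-digest and difficulty-bound checks (the import path is a placeholder):

  import
    eth/common, stint,
    ./pow                          # placeholder path to the new pow module

  proc sealOk(powRef: PowRef; header: BlockHeader): bool =
    let digest = powRef.getPowDigest(header)
    if digest.mixDigest != header.mixHash:
      return false                 # recomputed mix digest must match the header
    # the PoW value must not exceed 2^256 div difficulty
    let value = UInt256.fromBytesBE(digest.value.data)
    value <= UInt256.high div header.difficulty
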
@@ -988,7 +988,7 @@ proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
proc safeHeader*(
    db: CoreDbRef;
      ): BlockHeader
      {.gcsafe, raises: [BlockNotFound].} =
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  db.getBlockHeader(db.safeHeaderHash)

proc finalizedHeader*(

@@ -152,8 +152,8 @@ proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef,
  result.witnessCache = Table[EthAddress, WitnessData]()
  discard result.beginSavepoint

proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef): AccountsLedgerRef =
  init(x, db, EMPTY_ROOT_HASH)
proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef, pruneTrie = true): AccountsLedgerRef =
  init(x, db, EMPTY_ROOT_HASH, pruneTrie)

# Renamed `rootHash()` => `state()`
proc state*(ac: AccountsLedgerRef): KeccakHash =

@@ -784,17 +784,6 @@ func getAccessList*(ac: AccountsLedgerRef): common.AccessList =
  doAssert(ac.savePoint.parentSavepoint.isNil)
  ac.savePoint.accessList.getAccessList()

proc getEthAccount*(ac: AccountsLedgerRef, address: EthAddress): Account =
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return emptyEthAccount

  ## Convert to legacy object, will throw an assert if that fails
  let rc = acc.statement.recast()
  if rc.isErr:
    raiseAssert "getAccount(): cannot convert account: " & $$rc.error
  rc.value

proc state*(db: ReadOnlyStateDB): KeccakHash {.borrow.}
proc getCodeHash*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
proc getStorageRoot*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}

@@ -356,11 +356,6 @@ proc getMpt*(ldg: LedgerRef): CoreDxMptRef =
  result = ldg.ac.rawTrie.CoreDxAccRef.getMpt
  ldg.ifTrackApi: debug apiTxt, api, elapsed, result

proc getEthAccount*(ldg: LedgerRef, eAddr: EthAddress): Account =
  ldg.beginTrackApi LdgGetAthAccountFn
  result = ldg.ac.getEthAccount(eAddr)
  ldg.ifTrackApi: debug apiTxt, api, elapsed, result

# ------------------------------------------------------------------------------
# Public virtual read-only methods
# ------------------------------------------------------------------------------

@@ -50,7 +50,6 @@ type
    LdgGetStorageFn = "getStorage"
    LdgGetStorageRootFn = "getStorageRoot"
    LdgGetTransientStorageFn = "getTransientStorage"
    LdgGetAthAccountFn = "getEthAccount"
    LdgInAccessListFn = "inAccessList"
    LdgIncNonceFn = "incNonce"
    LdgIsDeadAccountFn = "isDeadAccount"

@@ -0,0 +1,35 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except according to those terms.

## Read only source, import `state_db/read_write` for full functionality.
##
## Note that the writable mode is only partially supported by the `Aristo`
## backend of `CoreDb` (read-only mode is fully supported.)

import
  state_db/[base, read_only]

export
  AccountStateDB,
  ReadOnlyStateDB,
  accountExists,
  contractCollision,
  db,
  getAccount,
  getBalance,
  getCode,
  getCodeHash,
  getNonce,
  getStorage,
  getStorageRoot,
  isDeadAccount,
  isEmptyAccount,
  newAccountStateDB,
  rootHash,
  to

# End

@@ -0,0 +1,314 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  std/[sets, strformat, typetraits],
  chronicles,
  eth/[common, rlp, trie/trie_defs],
  results,
  ../../constants,
  ../../utils/utils,
  ".."/[core_db, ledger, storage_types]

logScope:
  topics = "state_db"

# aleth/geth/parity compatibility mode:
#
# affected test cases both in GST and BCT:
# - stSStoreTest\InitCollision.json
# - stRevertTest\RevertInCreateInInit.json
# - stCreate2\RevertInCreateInInitCreate2.json
#
# pyEVM sided with original Nimbus EVM
#
# implementation difference:
# Aleth/geth/parity using accounts cache.
# When contract creation happened on an existing
# but 'empty' account with non empty storage will
# get new empty storage root.
# Aleth cs. only clear the storage cache while both pyEVM
# and Nimbus will modify the state trie.
# During the next SSTORE call, aleth cs. calculate
# gas used based on this cached 'original storage value'.
# In other hand pyEVM and Nimbus will fetch
# 'original storage value' from state trie.
#
# Both Yellow Paper and EIP2200 are not clear about this
# situation but since aleth/geth/and parity implement this
# behaviour, we perhaps also need to implement it.
#
# TODO: should this compatibility mode enabled via
# compile time switch, runtime switch, or just hard coded
# it?
const
  aleth_compat = true

type
  AccountStateDB* = ref object
    trie: AccountLedger
    originalRoot: KeccakHash   # will be updated for every transaction
    when aleth_compat:
      cleared: HashSet[EthAddress]

  #MptNodeRlpBytes* = seq[byte]
  #AccountProof* = seq[MptNodeRlpBytes]
  #SlotProof* = seq[MptNodeRlpBytes]

proc db*(db: AccountStateDB): CoreDbRef =
  db.trie.db

proc rootHash*(db: AccountStateDB): KeccakHash =
  db.trie.state

proc `rootHash=`*(db: AccountStateDB, root: KeccakHash) =
  db.trie = AccountLedger.init(db.trie.db, root)

func newCoreDbAccount(
    eAddr: EthAddress;
    nonce = AccountNonce(0);
    balance = 0.u256;
      ): CoreDbAccount =
  CoreDbAccount(
    address:  eAddr,
    nonce:    nonce,
    balance:  balance,
    codeHash: EMPTY_CODE_HASH)

proc newAccountStateDB*(backingStore: CoreDbRef,
                        root: KeccakHash): AccountStateDB =
  result.new()
  result.trie = AccountLedger.init(backingStore, root)
  result.originalRoot = root
  when aleth_compat:
    result.cleared = HashSet[EthAddress]()

#proc getTrie*(db: AccountStateDB): CoreDxMptRef =
#  db.trie.mpt

#proc getSecureTrie*(db: AccountStateDB): CoreDbPhkRef =
#  db.trie.phk

proc to*(acc: CoreDbAccount; T: type Account): T =
  ## Convert to legacy object, will throw an aseert if that fails
  let rc = acc.recast()
  if rc.isErr:
    raiseAssert "getAccount(): cannot convert account: " & $$rc.error
  rc.value

proc getAccount*(db: AccountStateDB, eAddr: EthAddress): CoreDbAccount =
  db.trie.fetch(eAddr).valueOr:
    return newCoreDbAccount(eAddr)

proc setAccount*(db: AccountStateDB, acc: CoreDbAccount) =
  db.trie.merge(acc)

proc deleteAccount*(db: AccountStateDB, acc: CoreDbAccount) =
  db.trie.delete(acc.address)

proc deleteAccount*(db: AccountStateDB, eAddr: EthAddress) =
  db.trie.delete(eAddr)

proc getCodeHash*(db: AccountStateDB, eAddr: EthAddress): Hash256 =
  db.getAccount(eAddr).codeHash

proc getBalance*(db: AccountStateDB, eAddr: EthAddress): UInt256 =
  db.getAccount(eAddr).balance

proc setBalance*(db: AccountStateDB, eAddr: EthAddress, balance: UInt256) =
  var acc = db.getAccount(eAddr)
  if acc.balance != balance:
    acc.balance = balance
    db.setAccount(acc)

proc addBalance*(db: AccountStateDB, eAddr: EthAddress, delta: UInt256) =
  db.setBalance(eAddr, db.getBalance(eAddr) + delta)

#template getStorageTrie(db: AccountStateDB, account: Account): auto =
#  storageTrieForAccount(db.trie, account)

proc subBalance*(db: AccountStateDB, eAddr: EthAddress, delta: UInt256) =
  db.setBalance(eAddr, db.getBalance(eAddr) - delta)

#template createTrieKeyFromSlot(slot: UInt256): auto =
#  # Converts a number to hex big-endian representation including
#  # prefix and leading zeros:
#  slot.toBytesBE
#  # Original py-evm code:
#  # pad32(int_to_big_endian(slot))
#  # morally equivalent to toByteRange_Unnecessary but with different types

proc clearStorage*(db: AccountStateDB, eAddr: EthAddress) =
  # Flush associated storage trie (will update account record on disk)
  db.trie.distinctBase.stoDelete(eAddr).isOkOr:
    raiseAssert "clearStorage(): stoDelete() failed, " & $$error
  # Reset storage info locally so that `Aristo` would not complain when
  # updating the account record on disk
  var account = db.getAccount(eAddr)
  account.storage = CoreDbColRef(nil)
  when aleth_compat:
    db.cleared.incl eAddr

proc getStorageRoot*(db: AccountStateDB, eAddr: EthAddress): Hash256 =
  db.getAccount(eAddr).storage.state.valueOr:
    EMPTY_ROOT_HASH

proc setStorage*(
    db: AccountStateDB;
    eAddr: EthAddress;
    slot: UInt256;
    value: UInt256;
      ) =
  var
    acc = db.getAccount(eAddr)
    sto = StorageLedger.init(db.trie, acc)

  if value > 0:
    sto.merge(slot, rlp.encode(value))
  else:
    sto.delete(slot)

  # map slot hash back to slot value
  # see iterator storage below
  var
    # slotHash can be obtained from storage.merge()?
    slotHash = keccakHash(slot.toBytesBE)
  db.db.newKvt().put(
    slotHashToSlotKey(slotHash.data).toOpenArray, rlp.encode(slot)).isOkOr:
      raiseAssert "setStorage(): put(slotHash) failed: " & $$error

  # Changing the storage trie might also change the `storage` descriptor when
  # the trie changes from empty to existing or v.v.
  acc.storage = sto.getColumn()

  # No need to hold descriptors for longer than needed
  let state = acc.storage.state.valueOr:
    raiseAssert "Storage column state error: " & $$error
  if state == EMPTY_ROOT_HASH:
    acc.storage = CoreDbColRef(nil)

iterator storage*(db: AccountStateDB, eAddr: EthAddress): (UInt256, UInt256) =
  let kvt = db.db.newKvt()
  for key, value in db.trie.storage db.getAccount(eAddr):
    if key.len != 0:
      var keyData = kvt.get(slotHashToSlotKey(key).toOpenArray).valueOr:
        raiseAssert "storage(): get() failed: " & $$error
      yield (rlp.decode(keyData, UInt256), rlp.decode(value, UInt256))

proc getStorage*(
    db: AccountStateDB;
    eAddr: EthAddress;
    slot: UInt256;
      ): Result[UInt256,void] =
  let
    acc = db.getAccount(eAddr)
    data = ? StorageLedger.init(db.trie, acc).fetch(slot)
  ok rlp.decode(data, UInt256)

proc setNonce*(db: AccountStateDB, eAddr: EthAddress; nonce: AccountNonce) =
  var acc = db.getAccount(eAddr)
  if nonce != acc.nonce:
    acc.nonce = nonce
    db.setAccount(acc)

proc getNonce*(db: AccountStateDB, eAddr: EthAddress): AccountNonce =
  db.getAccount(eAddr).nonce

proc incNonce*(db: AccountStateDB, eAddr: EthAddress) {.inline.} =
  db.setNonce(eAddr, db.getNonce(eAddr) + 1)

proc setCode*(db: AccountStateDB, eAddr: EthAddress, code: openArray[byte]) =
  var acc = db.getAccount(eAddr)
  let codeHash = keccakHash(code)
  if acc.codeHash != codeHash:
    if code.len != 0:
      db.db.newKvt().put(contractHashKey(codeHash).toOpenArray, code).isOkOr:
        raiseAssert "setCode(): put() failed: " & $$error
    acc.codeHash = codeHash
    db.setAccount(acc)

proc getCode*(db: AccountStateDB, eAddr: EthAddress): seq[byte] =
  let codeHash = db.getCodeHash(eAddr)
  db.db.newKvt().get(contractHashKey(codeHash).toOpenArray).valueOr:
    EmptyBlob

proc contractCollision*(db: AccountStateDB, eAddr: EthAddress): bool =
  db.getNonce(eAddr) != 0 or
    db.getCodeHash(eAddr) != EMPTY_CODE_HASH or
      db.getStorageRoot(eAddr) != EMPTY_ROOT_HASH

proc dumpAccount*(db: AccountStateDB, eAddr: string): string =
  let pAddr = eAddr.parseAddress
  return fmt"{eAddr}: Storage: {db.getStorage(pAddr, 0.u256)}; getAccount: {db.getAccount pAddr}"

proc accountExists*(db: AccountStateDB, eAddr: EthAddress): bool =
  db.trie.fetch(eAddr).isOk

proc isEmptyAccount*(db: AccountStateDB, eAddr: EthAddress): bool =
  let acc = db.trie.fetch(eAddr).valueOr:
    return false
  acc.nonce == 0 and
    acc.balance.isZero and
      acc.codeHash == EMPTY_CODE_HASH

proc isDeadAccount*(db: AccountStateDB, eAddr: EthAddress): bool =
  let acc = db.trie.fetch(eAddr).valueOr:
    return true
  acc.nonce == 0 and
    acc.balance.isZero and
      acc.codeHash == EMPTY_CODE_HASH

#proc removeEmptyRlpNode(branch: var seq[MptNodeRlpBytes]) =
#  if branch.len() == 1 and branch[0] == emptyRlp:
#    branch.del(0)

#proc getAccountProof*(db: AccountStateDB, eAddr: EthAddress): AccountProof =
#  var branch = db.trie.phk().getBranch(eAddr)
#  removeEmptyRlpNode(branch)
#  branch

#proc getStorageProof*(db: AccountStateDB, eAddr: EthAddress, slots: seq[UInt256]): seq[SlotProof] =
#  var acc = db.getAccount(eAddr)
#  var storageTrie = StorageLedger.init(db.trie, acc)
#
#  var slotProofs = newSeqOfCap[SlotProof](slots.len())
#  for slot in slots:
#    var branch = storageTrie.phk().getBranch(createTrieKeyFromSlot(slot))
#    removeEmptyRlpNode(branch)
#    slotProofs.add(branch)
#
#  slotProofs

# Note: `state_db.getCommittedStorage()` is nowhere used.
#
#proc getCommittedStorage*(db: AccountStateDB, eAddr: EthAddress, slot: UInt256): UInt256 =
#  let tmpHash = db.rootHash
#  db.rootHash = db.originalRoot
#  db.transactionID.shortTimeReadOnly():
#    when aleth_compat:
#      if eAddr in db.cleared:
#        debug "Forced contract creation on existing account detected", eAddr
#        result = 0.u256
#      else:
#        result = db.getStorage(eAddr, slot)[0]
#    else:
#      result = db.getStorage(eAddr, slot)[0]
#  db.rootHash = tmpHash

# Note: `state_db.updateOriginalRoot()` is nowhere used.
#
#proc updateOriginalRoot*(db: AccountStateDB) =
#  ## this proc will be called for every transaction
#  db.originalRoot = db.rootHash
#  # no need to rollback or dispose
#  # transactionID, it will be handled elsewhere
#  db.transactionID = db.db.getTransactionID()
#
#  when aleth_compat:
#    db.cleared.clear()

# End

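A minimal sketch of driving the reintroduced `AccountStateDB` against an in-memory `CoreDb`, using only calls that appear in this diff (`newCoreDbRef(DefaultDbMemory)`, `newAccountStateDB`, `setBalance`, `setStorage`, `getStorage`, `ReadOnlyStateDB`); the address is a made-up constant and the import paths are placeholders:

  import
    eth/common, eth/trie/trie_defs, stint,
    ../core_db, ./base, ./read_only              # placeholder paths

  let
    coreDb = newCoreDbRef(DefaultDbMemory)            # throw-away in-memory backend
    sdb    = newAccountStateDB(coreDb, emptyRlpHash)  # start from an empty state root

  var alice: EthAddress                               # made-up address 0x00..01
  alice[19] = 1'u8

  sdb.setBalance(alice, 1000.u256)
  sdb.setStorage(alice, 1.u256, 42.u256)

  let slotVal = sdb.getStorage(alice, 1.u256).valueOr: 0.u256
  echo sdb.getBalance(alice), " ", slotVal            # 1000 42
  echo ReadOnlyStateDB(sdb).accountExists(alice)      # read-only view over the same object
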
@@ -0,0 +1,37 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)
#    or http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT)
#    or http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

import
  results,
  ../core_db,
  ./base

type
  ReadOnlyStateDB* = distinct AccountStateDB

#proc getTrie*(db: ReadOnlyStateDB): CoreDbMptRef {.borrow.}
proc db*(db: ReadOnlyStateDB): CoreDbRef {.borrow.}
proc rootHash*(db: ReadOnlyStateDB): KeccakHash {.borrow.}
proc getAccount*(db: ReadOnlyStateDB, address: EthAddress): CoreDbAccount {.borrow.}
proc getCodeHash*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
proc getBalance*(db: ReadOnlyStateDB, address: EthAddress): UInt256 {.borrow.}
proc getStorageRoot*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
proc getStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): Result[UInt256,void] {.borrow.}
proc getNonce*(db: ReadOnlyStateDB, address: EthAddress): AccountNonce {.borrow.}
proc getCode*(db: ReadOnlyStateDB, address: EthAddress): seq[byte] {.borrow.}
proc contractCollision*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc accountExists*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc isDeadAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc isEmptyAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
#proc getAccountProof*(db: ReadOnlyStateDB, address: EthAddress): AccountProof {.borrow.}
#proc getStorageProof*(db: ReadOnlyStateDB, address: EthAddress, slots: seq[UInt256]): seq[SlotProof] {.borrow.}
#proc getCommittedStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}

# End

@@ -0,0 +1,17 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)
#    or http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT)
#    or http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

import
  "."/[base, read_only]

export
  base, read_only

# End

@@ -15,7 +15,7 @@ import
  graphql, graphql/graphql as context,
  graphql/common/types, graphql/httpserver,
  graphql/instruments/query_complexity,
  ../db/[ledger],
  ../db/[state_db],
  ../rpc/rpc_types,
  ../rpc/rpc_utils,
  ".."/[transaction, vm_state, config, constants],

@@ -47,7 +47,7 @@ type
  AccountNode = ref object of Node
    address: EthAddress
    account: Account
    db: LedgerRef
    db: ReadOnlyStateDB

  TxNode = ref object of Node
    tx: Transaction

@@ -99,7 +99,7 @@ proc headerNode(ctx: GraphqlContextRef, header: common.BlockHeader): Node =
    header: header
  )

proc accountNode(ctx: GraphqlContextRef, acc: Account, address: EthAddress, db: LedgerRef): Node =
proc accountNode(ctx: GraphqlContextRef, acc: Account, address: EthAddress, db: ReadOnlyStateDB): Node =
  AccountNode(
    kind: nkMap,
    typeName: ctx.ids[ethAccount],

@@ -146,10 +146,11 @@ proc wdNode(ctx: GraphqlContextRef, wd: Withdrawal): Node =
    wd: wd
  )

proc getStateDB(com: CommonRef, header: common.BlockHeader): LedgerRef =
proc getStateDB(com: CommonRef, header: common.BlockHeader): ReadOnlyStateDB =
  ## Retrieves the account db from canonical head
  ## we don't use accounst_cache here because it's read only operations
  LedgerRef.init(com.db, header.stateRoot)
  let ac = newAccountStateDB(com.db, header.stateRoot)
  ReadOnlyStateDB(ac)

proc getBlockByNumber(ctx: GraphqlContextRef, number: Node): RespResult =
  try:

@@ -352,8 +353,8 @@ proc accountNode(ctx: GraphqlContextRef, header: common.BlockHeader, address: Et
    # but hive test case demand something
    if not db.accountExists(address):
      return ok(respNull())
    let acc = db.getEthAccount(address)
    ok(accountNode(ctx, acc, address, db))
    let acc = db.getAccount(address)
    ok(accountNode(ctx, acc.to(Account), address, db))
  except RlpError as ex:
    err(ex.msg)

@@ -551,7 +552,7 @@ proc accountStorage(ud: RootRef, params: Args, parent: Node): RespResult {.apiPr
  let acc = AccountNode(parent)
  try:
    let slot = parse(params[0].val.stringVal, UInt256, radix = 16)
    let val = acc.db.getStorage(acc.address, slot)
    let val = acc.db.getStorage(acc.address, slot).valueOr: 0.u256
    byte32Node(val)
  except RlpError as ex:
    err(ex.msg)

@@ -14,6 +14,7 @@ import
  json_rpc/rpcserver, stint, web3/conversions,
  eth/p2p,
  ../[transaction, vm_state, constants, vm_types],
  ../db/state_db,
  rpc_types, rpc_utils,
  ../common/common,
  ../utils/utils,

@@ -26,12 +27,13 @@ import
type
  BlockHeader = eth_types.BlockHeader
  ReadOnlyStateDB = state_db.ReadOnlyStateDB

proc getMultiKeys*(
    com: CommonRef,
    blockHeader: BlockHeader,
    statePostExecution: bool): MultiKeysRef
    {.raises: [RlpError, BlockNotFound, ValueError].} =
    {.raises: [RlpError, BlockNotFound, ValueError, CatchableError].} =

  let
    chainDB = com.db

@@ -58,8 +60,8 @@ proc getMultiKeys*(
  mkeys

proc getBlockProofs*(
    accDB: LedgerRef,
    mkeys: MultiKeysRef): seq[ProofResponse] =
    accDB: ReadOnlyStateDB,
    mkeys: MultiKeysRef): seq[ProofResponse] {.raises: [RlpError].} =

  var blockProofs = newSeq[ProofResponse]()

@@ -79,10 +81,11 @@ proc setupExpRpc*(com: CommonRef, server: RpcServer) =
  let chainDB = com.db

  proc getStateDB(header: BlockHeader): LedgerRef =
  proc getStateDB(header: BlockHeader): ReadOnlyStateDB =
    ## Retrieves the account db from canonical head
    # we don't use accounst_cache here because it's only read operations
    LedgerRef.init(chainDB, header.stateRoot)
    let ac = newAccountStateDB(chainDB, header.stateRoot)
    result = ReadOnlyStateDB(ac)

  server.rpc("exp_getProofsByBlockNumber") do(quantityTag: BlockTag, statePostExecution: bool) -> seq[ProofResponse]:
    ## Returns the block proofs for a block by block number or tag.

@@ -16,7 +16,7 @@ import
  eth/common/eth_types_json_serialization,
  eth/[keys, rlp, p2p],
  ".."/[transaction, vm_state, constants],
  ../db/ledger,
  ../db/state_db,
  ./rpc_types, ./rpc_utils, ./oracle,
  ../transaction/call_evm,
  ../core/tx_pool,

@@ -42,23 +42,23 @@ when not AccountAndStorageProofAvailableAndWorking:
    AccountProof = seq[MptNodeRlpBytes]
    SlotProof = seq[MptNodeRlpBytes]
  func getAccountProof(
      db: LedgerRef;
      db: ReadOnlyStateDB;
      eAddr: EthAddress;
        ): AccountProof =
    discard
  func getStorageProof(
      db: LedgerRef;
      db: ReadOnlyStateDB;
      eAddr: EthAddress;
      slot: seq[UInt256];
        ): seq[SlotProof] =
    discard

proc getProof*(
    accDB: LedgerRef,
    accDB: ReadOnlyStateDB,
    address: EthAddress,
    slots: seq[UInt256]): ProofResponse =
    slots: seq[UInt256]): ProofResponse {.raises: [RlpError].} =
  let
    acc = accDB.getEthAccount(address)
    acc = accDB.getAccount(address)
    accExists = accDB.accountExists(address)
    accountProof = accDB.getAccountProof(address)
    slotProofs = accDB.getStorageProof(address, slots)

@@ -66,7 +66,7 @@ proc getProof*(
  var storage = newSeqOfCap[StorageProof](slots.len)

  for i, slotKey in slots:
    let slotValue = accDB.getStorage(address, slotKey)
    let slotValue = accDB.getStorage(address, slotKey).valueOr: 0.u256
    storage.add(StorageProof(
      key: slotKey,
      value: slotValue,

@@ -79,7 +79,7 @@ proc getProof*(
      balance: acc.balance,
      nonce: w3Qty(acc.nonce),
      codeHash: w3Hash(acc.codeHash),
      storageHash: w3Hash(acc.storageRoot),
      storageHash: w3Hash(acc.to(Account).storageRoot),
      storageProof: storage)
  else:
    ProofResponse(

@@ -92,12 +92,13 @@ proc setupEthRpc*(
    txPool: TxPoolRef, oracle: Oracle, server: RpcServer) =

  let chainDB = com.db
  proc getStateDB(header: BlockHeader): LedgerRef =
  proc getStateDB(header: BlockHeader): ReadOnlyStateDB =
    ## Retrieves the account db from canonical head
    # we don't use accounst_cache here because it's only read operations
    LedgerRef.init(chainDB, header.stateRoot)
    let ac = newAccountStateDB(chainDB, header.stateRoot)
    result = ReadOnlyStateDB(ac)

  proc stateDBFromTag(quantityTag: BlockTag, readOnly = true): LedgerRef
  proc stateDBFromTag(quantityTag: BlockTag, readOnly = true): ReadOnlyStateDB
      {.gcsafe, raises: [CatchableError].} =
    result = getStateDB(chainDB.headerFromTag(quantityTag))

@@ -181,7 +182,7 @@ proc setupEthRpc*(
      let
        accDB = stateDBFromTag(quantityTag)
        address = data.ethAddr
        data = accDB.getStorage(address, slot)
        data = accDB.getStorage(address, slot).valueOr: 0.u256
      result = data.w3FixedBytes

    server.rpc("eth_getTransactionCount") do(data: Web3Address, quantityTag: BlockTag) -> Web3Quantity:

@@ -39,6 +39,7 @@ cliBuilder:
    ./test_multi_keys,
    ./test_misc,
    #./test_graphql, -- fails
    ./test_pow,
    ./test_configuration,
    #./test_txpool, -- fails
    ./test_txpool2,

Binary file not shown.

@@ -16,7 +16,7 @@ import
  ./test_helpers, ./test_allowed_to_fail,
  ../premix/parser, test_config,
  ../nimbus/[vm_state, vm_types, errors, constants],
  ../nimbus/db/ledger,
  ../nimbus/db/[ledger, state_db],
  ../nimbus/utils/[utils, debug],
  ../nimbus/evm/tracer/legacy_tracer,
  ../nimbus/evm/tracer/json_tracer,

@@ -187,8 +187,8 @@ proc testGetMultiKeys(chain: ChainRef, parentHeader, currentHeader: BlockHeader)
  # use the MultiKeysRef to build the block proofs
  let
    ac = LedgerRef.init(chain.com.db, currentHeader.stateRoot)
    blockProofs = getBlockProofs(ac, mkeys)
    ac = newAccountStateDB(chain.com.db, currentHeader.stateRoot)
    blockProofs = getBlockProofs(state_db.ReadOnlyStateDB(ac), mkeys)
  if blockProofs.len() != 0:
    raise newException(ValidationError, "Expected blockProofs.len() == 0")

@@ -12,12 +12,14 @@ import
  web3/eth_api,
  nimcrypto/[keccak, hash],
  eth/[common, rlp, keys, trie/trie_defs, trie/hexary_proof_verification],
  ../nimbus/db/state_db,
  ../nimbus/db/[ledger, core_db],
  ../nimbus/common/chain_config,
  ../nimbus/rpc/p2p

type
  Hash256 = eth_types.Hash256
  ReadOnlyStateDB = state_db.ReadOnlyStateDB

func ethAddr*(x: Address): EthAddress =
  EthAddress x

@@ -77,7 +79,7 @@ proc setupStateDB(genAccounts: GenesisAlloc, stateDB: LedgerRef): Hash256 =
proc checkProofsForExistingLeafs(
    genAccounts: GenesisAlloc,
    accDB: LedgerRef,
    accDB: ReadOnlyStateDB,
    stateRoot: Hash256) =

  for address, account in genAccounts:

@@ -104,7 +106,7 @@ proc checkProofsForExistingLeafs(
proc checkProofsForMissingLeafs(
    genAccounts: GenesisAlloc,
    accDB: LedgerRef,
    accDB: ReadOnlyStateDB,
    stateRoot: Hash256) =

  let

@@ -135,9 +137,10 @@ proc getProofJsonMain*() =
        coreDb = newCoreDbRef(DefaultDbMemory)
        accountsCache = LedgerRef.init(coreDb, emptyRlpHash)
        stateRootHash = setupStateDB(accounts, accountsCache)
        accountDb = LedgerRef.init(coreDb, stateRootHash)
        accountDb = newAccountStateDB(coreDb, stateRootHash)
        readOnlyDb = ReadOnlyStateDB(accountDb)

      checkProofsForExistingLeafs(accounts, accountDb, stateRootHash)
      checkProofsForExistingLeafs(accounts, readOnlyDb, stateRootHash)

    test "Get proofs for missing leafs":
      for file in genesisFiles:

@@ -147,9 +150,10 @@ proc getProofJsonMain*() =
        coreDb = newCoreDbRef(DefaultDbMemory)
        accountsCache = LedgerRef.init(coreDb, emptyRlpHash)
        stateRootHash = setupStateDB(accounts, accountsCache)
        accountDb = LedgerRef.init(coreDb, stateRootHash)
        accountDb = newAccountStateDB(coreDb, stateRootHash)
        readOnlyDb = ReadOnlyStateDB(accountDb)

      checkProofsForMissingLeafs(accounts, accountDb, stateRootHash)
      checkProofsForMissingLeafs(accounts, readOnlyDb, stateRootHash)

when isMainModule:
  getProofJsonMain()

@@ -0,0 +1,109 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

import
  std/[os, sequtils, strformat, strutils, times],
  ./replay/[pp, gunzip],
  ../nimbus/core/[pow, pow/pow_cache],
  eth/common,
  unittest2

const
  baseDir = [".", "tests", ".." / "tests", $DirSep] # path containg repo
  repoDir = ["replay"]                              # alternative repos

  specsDump = "mainspecs2k.txt.gz"

# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------

proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  if noisy:
    if args.len == 0:
      echo "*** ", pfx
    elif 0 < pfx.len and pfx[^1] != ' ':
      echo pfx, " ", args.toSeq.join
    else:
      echo pfx, args.toSeq.join

proc findFilePath(file: string): string =
  result = "?unknown?" / file
  for dir in baseDir:
    for repo in repoDir:
      let path = dir / repo / file
      if path.fileExists:
        return path

# ------------------------------------------------------------------------------
# Test Runners
# ------------------------------------------------------------------------------

proc runPowTests(noisy = true; file = specsDump;
                 nVerify = int.high; nFakeMiner = 0, nRealMiner = 0) =
  let
    filePath = file.findFilePath
    fileInfo = file.splitFile.name.split(".")[0]

    powCache = PowCacheRef.new # so we can inspect the LRU caches
    pow = PowRef.new(powCache)

  var specsList: seq[PowSpecs]

  suite &"PoW: Header test specs from {fileInfo} capture":
    block:
      test "Loading from capture":
        for (lno,line) in gunzipLines(filePath):
          let specs = line.undumpPowSpecs
          if 0 < specs.number:
            specsList.add specs
            check line == specs.dumpPowSpecs
        noisy.say "***", " block range #",
          specsList[0].number, " .. #", specsList[^1].number

    # Adjust number of tests
    let
      startVerify = max(0, specsList.len - nVerify)
      nDoVerify = specsList.len - startVerify

    block:
      test &"Running single getPowDigest() to fill the cache":
        if nVerify <= 0:
          skip()
        else:
          noisy.showElapsed(&"first getPowDigest() instance"):
            let p = specsList[startVerify]
            check pow.getPowDigest(p).mixDigest == p.mixHash

      test &"Running getPowDigest() on {nDoVerify} specs records":
        if nVerify <= 0:
          skip()
        else:
          noisy.showElapsed(&"all {nDoVerify} getPowDigest() instances"):
            for n in startVerify ..< specsList.len:
              let p = specsList[n]
              check pow.getPowDigest(p).mixDigest == p.mixHash

# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------

proc powMain*(noisy = defined(debug)) =
  noisy.runPowTests(nVerify = 100)

when isMainModule:
  # Note:
  #  0 < nFakeMiner: allow ~20 minuntes for building lookup table
  #  0 < nRealMiner: takes days/months/years ...
  true.runPowTests(nVerify = 200, nFakeMiner = 200, nRealMiner = 5)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

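Since the new test module exports `powMain()`, it can also be driven from another module; a sketch, assuming the gzipped capture `mainspecs2k.txt.gz` is available under a `replay` directory as the `baseDir`/`repoDir` constants above expect:

  import ./test_pow    # placeholder path

  # runs the "Loading from capture" suite plus the getPowDigest() verification tests
  powMain(noisy = true)
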
@@ -33,7 +33,7 @@ import
  ../nimbus/db/opts,
  ../nimbus/db/core_db,
  ../nimbus/db/core_db/persistent,
  ../nimbus/db/ledger,
  ../nimbus/db/state_db/base,
  ./rpc/experimental_rpc_client

const

@@ -53,7 +53,7 @@ template toHash256(hash: untyped): Hash256 =
  fromHex(Hash256, hash.toHex())

proc updateStateUsingProofsAndCheckStateRoot(
    stateDB: LedgerRef,
    stateDB: AccountStateDB,
    expectedStateRoot: Hash256,
    proofs: seq[ProofResponse]) =

@@ -125,7 +125,7 @@ proc rpcGetProofsTrackStateChangesMain*() =
    let
      blockHeader = waitFor client.eth_getBlockByNumber(blockId(START_BLOCK), false)
      stateDB = LedgerRef.init(com.db, blockHeader.stateRoot.toHash256())
      stateDB = newAccountStateDB(com.db, blockHeader.stateRoot.toHash256())

    for i in START_BLOCK..END_BLOCK:
      let

@@ -0,0 +1 @@
Subproject commit 953b8ed994d5f14569ca255cfe75bb4507025dcc

@@ -1 +1 @@
Subproject commit a806cbfab5fe8de49c76139f8705fff79daf99ee
Subproject commit d81b37dc2011bf3a2bd93500489877c2ce8e6ac3