# nimbus-eth1/tests/test_txpool/setup.nim
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
std/[algorithm, os, sequtils, strformat, tables, times, json],
../../nimbus/core/[chain, tx_pool], # must be early (compilation annoyance)
../../nimbus/common/common,
../../nimbus/[config, constants],
../../nimbus/utils/ec_recover,
../../nimbus/core/tx_pool/[tx_chain, tx_item],
../../nimbus/transaction,
./helpers,
eth/[keys, p2p],
stew/[keyed_queue, byteutils]
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc setStatus(xp: TxPoolRef; item: TxItemRef; status: TxItemStatus)
    {.gcsafe,raises: [CatchableError].} =
  ## Re-assign `item` to `status` in the pool's database, doing nothing
  ## when the item already carries that status.
  if item.status == status:
    return
  discard xp.txDB.reassign(item, status)
type
  TxEnv = object
    ## State needed to re-sign and replay the canned fixture transactions.
    chainId: ChainID                        ## chain ID used when re-signing
    rng: ref HmacDrbgContext                ## randomness for fresh private keys
    signers: Table[EthAddress, PrivateKey]  ## re-mapped address -> signing key
    map: Table[EthAddress, EthAddress]      ## original sender -> re-mapped address
    txs: seq[Transaction]                   ## transactions loaded from JSON fixture

  Signer = object
    ## A (re-mapped) sender address together with its private key.
    address: EthAddress
    signer: PrivateKey
const
  # Custom network genesis spec used by `setupTxPool()` below.
  genesisFile = "tests/customgenesis/cancun123.json"
proc initTxEnv(chainId: ChainID): TxEnv =
  ## Create a `TxEnv` for `chainId` with a freshly seeded RNG; the tables
  ## and transaction list start out empty.
  TxEnv(chainId: chainId, rng: newRng())
proc getSigner(env: var TxEnv, address: EthAddress): Signer =
  ## Translate `address` into a locally generated account. The first time an
  ## address is seen a fresh key pair is created and cached; later calls
  ## return the cached mapping.
  if address in env.map:
    let mapped = env.map[address]
    result = Signer(address: mapped, signer: env.signers[mapped])
  else:
    let
      key = PrivateKey.random(env.rng[])
      mapped = toCanonicalAddress(key.toPublicKey)
    env.map[address] = mapped
    env.signers[mapped] = key
    result = Signer(address: mapped, signer: key)
proc fillGenesis(env: var TxEnv, param: NetworkParams) =
  ## Read the RLP-encoded transactions from the JSON fixture, collect them
  ## in `env.txs`, and fund each (re-mapped) sender in the genesis alloc.
  const txFile = "tests/test_txpool/transactions.json"
  var balances: Table[EthAddress, UInt256]
  for node in json.parseFile(txFile):
    let
      tx = rlp.decode(hexToSeqByte(node.getStr), Transaction)
      sender = tx.getSender()
      funded = balances.getOrDefault(sender, 0.u256) + tx.value
    if funded > 0:
      balances[sender] = funded
      env.txs.add(tx)
  for sender, needed in balances:
    let acc = env.getSigner(sender)
    # Allocate twice the transferred amount so there is headroom for gas.
    param.genesis.alloc[acc.address] = GenesisAccount(
      balance: needed + needed,
    )
proc setupTxPool*(getStatus: proc(): TxItemStatus): (CommonRef, TxPoolRef, int) =
  ## Build a memory-backed chain from the custom genesis, re-sign and feed
  ## all fixture transactions into a fresh `TxPoolRef`, and return
  ## `(common, pool, number of transactions)`. Each item's info string is
  ## `"<index>/<total> <status>"` with the status picked via `getStatus()`.
  let conf = makeConfig(@["--custom-network:" & genesisFile])

  var txEnv = initTxEnv(conf.networkParams.config.chainId)
  txEnv.fillGenesis(conf.networkParams)

  let com = CommonRef.new(
    newCoreDbRef DefaultDbMemory,
    conf.networkId,
    conf.networkParams)
  com.initializeEmptyDb()

  let txPool = TxPoolRef.new(com)
  for i, tx in txEnv.txs:
    let
      acc = txEnv.getSigner(tx.getSender())
      status = statusInfo[getStatus()]
      info = &"{i}/{txEnv.txs.len} {status}"
      signedTx = signTransaction(tx, acc.signer, txEnv.chainId, eip155 = true)
    txPool.add(PooledTransaction(tx: signedTx), info)
  (com, txPool, txEnv.txs.len)
proc toTxPool*(
    com: CommonRef;               ## to be modified, initialiser for `TxPool`
    itList: seq[TxItemRef];       ## import items into new `TxPool` (read only)
    baseFee = 0.GasPrice;         ## initialise with `baseFee` (unless 0)
    local: seq[EthAddress] = @[]; ## local addresses
    noisy = true): TxPoolRef =
  ## Create a fresh `TxPool` on top of `com` and import all items from
  ## `itList`. When `local` is non-empty, items from those senders are added
  ## via `addLocal()`, all others via `addRemote()`.
  doAssert not com.isNil

  result = TxPoolRef.new(com)
  result.baseFee = baseFee
  result.maxRejects = itList.len

  let remoteOnly = local.len == 0
  var isLocal: Table[EthAddress,bool]
  for acc in local:
    isLocal[acc] = true

  noisy.showElapsed(&"Loading {itList.len} transactions"):
    for item in itList:
      if remoteOnly:
        result.add(item.pooledTx, item.info)
      elif item.sender in isLocal:
        doAssert result.addLocal(item.pooledTx, true).isOk
      else:
        doAssert result.addRemote(item.pooledTx, true).isOk
  doAssert result.nItems.total == itList.len
proc toTxPool*(
    com: CommonRef;               ## to be modified, initialiser for `TxPool`
    timeGap: var Time;            ## to be set, time in the middle of time gap
    nGapItems: var int;           ## to be set, # items before time gap
    itList: var seq[TxItemRef];   ## import items into new `TxPool` (read only)
    baseFee = 0.GasPrice;         ## initialise with `baseFee` (unless 0)
    itemsPC = 30;                 ## % number of items before time gap
    delayMSecs = 200;             ## size of time gap
    local: seq[EthAddress] = @[]; ## local addresses
    noisy = true): TxPoolRef =
  ## Variant of `toTxPoolFromSeq()` with a time gap between consecutive
  ## items on the `remote` queue
  doAssert not com.isNil
  doAssert 0 < itemsPC and itemsPC < 100

  result = TxPoolRef.new(com)
  result.baseFee = baseFee
  result.maxRejects = itList.len

  let remoteOnly = local.len == 0
  var isLocal: Table[EthAddress,bool]
  for acc in local:
    isLocal[acc] = true

  let
    gapIndex = itList.len * itemsPC div 100
    halfGap = initDuration(milliSeconds = delayMSecs div 2)
  const
    timeFmt = "yyyy-MM-dd'T'HH-mm-ss'.'fff"

  noisy.showElapsed(&"Loading {itList.len} transactions"):
    for n in 0 ..< itList.len:
      let item = itList[n]
      if remoteOnly:
        result.add(item.pooledTx, item.info)
      elif item.sender in isLocal:
        doAssert result.addLocal(item.pooledTx, true).isOk
      else:
        doAssert result.addRemote(item.pooledTx, true).isOk
      # Log a few items at the start, around the gap position, and at the end.
      if n < 3 or (gapIndex - 3 <= n and n <= gapIndex + 3) or itList.len - 4 < n:
        let t = result.getItem(item.itemID).value.timeStamp.format(timeFmt, utc())
        noisy.say &"added item {n} time={t}"
      # Insert the wall-clock gap right after item `gapIndex`.
      if n == gapIndex:
        nGapItems = n # pass back value
        let itemID = item.itemID
        doAssert result.nItems.disposed == 0
        timeGap = result.getItem(itemID).value.timeStamp + halfGap
        let t = timeGap.format(timeFmt, utc())
        noisy.say &"{delayMSecs}ms time gap centered around {t}"
        delayMSecs.sleep

  doAssert result.nItems.total == itList.len
  doAssert result.nItems.disposed == 0
proc toItems*(xp: TxPoolRef): seq[TxItemRef] =
  ## Snapshot of all pool items in `itemID` order.
  for item in xp.txDB.byItemID.nextValues:
    result.add item
proc toItems*(xp: TxPoolRef; label: TxItemStatus): seq[TxItemRef] =
  ## Snapshot of all pool items currently carrying status `label`,
  ## ordered by descending account and ascending nonce.
  for (_, nonceList) in xp.txDB.decAccount(label):
    for item in nonceList.incNonce:
      result.add item
proc setItemStatusFromInfo*(xp: TxPoolRef) =
  ## Re-define status from last character of info field. Note that this might
  ## violate boundary conditions regarding nonces.
  for item in xp.toItems:
    let tag = item.info[^1]
    # Pick the first status whose one-letter code matches the info tag.
    for status in TxItemStatus.low .. TxItemStatus.high:
      if statusInfo[status][0] == tag:
        xp.setStatus(item, status)
        break
proc getBackHeader*(xp: TxPoolRef; nTxs, nAccounts: int):
    (BlockHeader, seq[Transaction], seq[EthAddress]) {.inline.} =
  ## back track the block chain for at least `nTxs` transactions and
  ## `nAccounts` sender accounts
  var
    accTab: Table[EthAddress,bool]
    txsLst: seq[Transaction]
    backHash = xp.head.blockHash
    backHeader = xp.head
    backBody = xp.chain.com.db.getBlockBody(backHash)

  while true:
    # count txs and step behind last block
    txsLst.add backBody.transactions
    backHash = backHeader.parentHash
    if not xp.chain.com.db.getBlockHeader(backHash, backHeader) or
       not xp.chain.com.db.getBlockBody(backHash, backBody):
      break

    # collect accounts unless max reached
    if accTab.len < nAccounts:
      for tx in backBody.transactions:
        let rc = tx.ecRecover
        # Only count senders that are known to the pool's sender table.
        if rc.isOk and xp.txDB.bySender.eq(rc.value).isOk:
          accTab[rc.value] = true
          if nAccounts <= accTab.len:
            break

    if nTxs <= txsLst.len and nAccounts <= accTab.len:
      break
    # otherwise get next block

  (backHeader, txsLst.reversed, toSeq(accTab.keys))
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------