# Nimbus
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

import
  std/[algorithm, os, random, sequtils, strformat, strutils, tables, times],
  ../nimbus/core/chain, # must be early (compilation annoyance)
  ../nimbus/common/common,
  ../nimbus/core/clique/clique_sealer,
  ../nimbus/core/[clique, executor, casper, tx_pool, tx_pool/tx_item],
  ../nimbus/[config, vm_state, vm_types],
  ./test_txpool/[helpers, setup, sign_helper],
  ./test_txpool2,
  chronos,
  eth/[keys, p2p],
  stew/[keyed_queue, sorted_set],
  stint,
  unittest2

type
  CaptureSpecs = tuple
    network: NetworkID
    file: string
    numBlocks, minBlockTxs, numTxs: int

const
  prngSeed = 42

  baseDir = [".", "..", ".."/"..", $DirSep]
  repoDir = [".", "tests"/"replay", "nimbus-eth1-blobs"/"replay"]

  goerliCapture: CaptureSpecs = (
    network: GoerliNet,
    file: "goerli68161.txt.gz",
    numBlocks: 22000,   # block chain prequel
    minBlockTxs: 300,   # minimum txs in imported blocks
    numTxs: 840)        # txs following (not in block chain)

  loadSpecs = goerliCapture

  # 75% <= #local/#remote <= 1/75%
  # note: by the law of large numbers, the ratio will exceed any upper or
  #       lower bound on a +1/-1 random walk if running long enough (with
  #       expectation value 0)
  randInitRatioBandPC = 75

  # 95% <= #remote-deleted/#remote-present <= 1/95%
  deletedItemsRatioBandPC = 95

  # With a large enough block size, decreasing it should not decrease the
  # profitability (very much) as the number of blocks available increases
  # (and a better choice might be available?) A good value for the next
  # parameter should be above 100%.
  decreasingBlockProfitRatioPC = 92

  # Make some percentage of the accounts local accounts.
  accountExtractPC = 10

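# The `randInitRatioBandPC` band above is a heuristic rather than a hard
# guarantee: two buckets filled by fair coin flips behave like a +1/-1
# random walk, so the occupancy ratio eventually leaves any fixed band.
# A minimal standalone sketch of that effect (illustration only, not used
# by the tests; the proc name and seed offset are ad hoc):
proc walkBandExits(steps: int): int =
  ## Count how often a fair two-bucket split leaves the
  ## 75% .. (10000 div 75)% band while filling up.
  var rng = initRand(prngSeed + 1)
  var a, b = 0
  for _ in 1 .. steps:
    if rng.rand(1) == 0:
      a.inc
    else:
      b.inc
    if 0 < a and 0 < b:
      let ratioPC = a * 100 div b
      if ratioPC < randInitRatioBandPC or
          (10000 div randInitRatioBandPC) < ratioPC:
        result.inc
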
var
  minGasPrice = GasPrice.high
  maxGasPrice = GasPrice.low

  prng = prngSeed.initRand

  # To be set up in runTxLoader()
  statCount: array[TxItemStatus,int] # per status bucket
  txList: seq[TxItemRef]
  effGasTips: seq[GasPriceEx]

  # Running block chain
  bcCom: CommonRef

  # Accounts to be considered local
  localAccounts: seq[EthAddress]

# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------

proc randStatusRatios: seq[int] =
  for n in 1 .. statCount.len:
    let
      inx = (n mod statCount.len).TxItemStatus
      prv = (n - 1).TxItemStatus
    if statCount[inx] == 0:
      result.add int.high
    else:
      result.add (statCount[prv] * 100 / statCount[inx]).int

proc randStatus: TxItemStatus =
  result = prng.rand(TxItemStatus.high.ord).TxItemStatus
  statCount[result].inc

template wrapException(info: string; action: untyped) =
  try:
    action
  except CatchableError:
    raiseAssert info & " has problems: " & getCurrentExceptionMsg()

proc addOrFlushGroupwise(xp: TxPoolRef;
                         grpLen: int; seen: var seq[TxItemRef]; w: TxItemRef;
                         noisy = true): bool =
  # to be run as callback inside `itemsApply()`
  wrapException("addOrFlushGroupwise()"):
    seen.add w
    if grpLen <= seen.len:
      # clear waste basket
      discard xp.txDB.flushRejects

      # flush group-wise
      let xpLen = xp.nItems.total
      noisy.say "*** updateSeen: deleting ", seen.mapIt($it.itemID).join(" ")
      for item in seen:
        doAssert xp.txDB.dispose(item,txInfoErrUnspecified)
      doAssert xpLen == seen.len + xp.nItems.total
      doAssert seen.len == xp.nItems.disposed
      seen.setLen(0)

      # clear waste basket
      discard xp.txDB.flushRejects

    return true

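# A minimal standalone sketch of the group-wise flush pattern implemented by
# `addOrFlushGroupwise()` above, over a plain seq instead of the pool
# database (illustration only; the proc name is ad hoc):
proc groupwiseDemo(names: seq[string]; grpLen: int): seq[seq[string]] =
  var seen: seq[string]
  for w in names:
    seen.add w
    if grpLen <= seen.len:
      result.add seen # flush a full group
      seen.setLen(0)
  # the remainder left in `seen` is always shorter than `grpLen`, which is
  # what the `seen.len < groupLen` checks in the walk tests below rely on
  if 0 < seen.len:
    result.add seen
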
# ------------------------------------------------------------------------------
# Test Runners
# ------------------------------------------------------------------------------

proc runTxLoader(noisy = true; capture = loadSpecs) =
  let
    elapNoisy = noisy
    veryNoisy = false # noisy
    fileInfo = capture.file.splitFile.name.split(".")[0]
    filePath = capture.file.findFilePath(baseDir,repoDir).value

  # Reset/initialise
  statCount.reset
  txList.reset
  effGasTips.reset
  bcCom = capture.network.blockChainForTesting

  suite &"TxPool: Transactions from {fileInfo} capture":
    var
      xp: TxPoolRef
      nTxs: int

    test &"Import {capture.numBlocks.toKMG} blocks + {capture.minBlockTxs} txs"&
        &" and collect {capture.numTxs} txs for pooling":

      elapNoisy.showElapsed("Total collection time"):
        (xp, nTxs) = bcCom.toTxPool(file = filePath,
                                    getStatus = randStatus,
                                    loadBlocks = capture.numBlocks,
                                    minBlockTxs = capture.minBlockTxs,
                                    loadTxs = capture.numTxs,
                                    noisy = veryNoisy)

      # Extract some of the least profitable accounts and hold them so
      # they could be made local at a later stage
      let
        accr = xp.accountRanks
        nExtract = (accr.remote.len * accountExtractPC + 50) div 100
      localAccounts = accr.remote[accr.remote.len - nExtract .. ^1]

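      # Worked example of the round-to-nearest above (numbers invented):
      # 123 remote accounts with accountExtractPC = 10 give
      # nExtract = (123 * 10 + 50) div 100 = 12.
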
      # Make sure that sample extraction from file was ok
      check capture.minBlockTxs <= nTxs
      check capture.numTxs == xp.nItems.total

      # Set txs to pseudo random status
      check xp.verify.isOK
      xp.setItemStatusFromInfo

      # Boundary conditions regarding nonces might be violated by running
      # setItemStatusFromInfo() => xp.txDB.verify() rather than xp.verify()
      check xp.txDB.verify.isOK

      check txList.len == 0
      check xp.nItems.disposed == 0

      noisy.say "***",
        "Latest item: <", xp.txDB.byItemID.last.value.data.info, ">"

      # make sure that the block chain was initialised
      check capture.numBlocks.u256 <= bcCom.db.getCanonicalHead.blockNumber

      check xp.nItems.total == foldl(@[0] & statCount.toSeq, a+b)
      # ^^^ sum up statCount[] values

      # make sure that the PRNG did not go bonkers: each status ratio must
      # stay within the band 75% .. 133% (133 = 10000 div 75)
      for statusRatio in randStatusRatios():
        check randInitRatioBandPC < statusRatio
        check statusRatio < (10000 div randInitRatioBandPC)

      # Load txList[]
      txList = xp.toItems
      check txList.len == xp.nItems.total

      elapNoisy.showElapsed("Load min/max gas prices"):
        for item in txList:
          if item.tx.gasPrice < minGasPrice and 0 < item.tx.gasPrice:
            minGasPrice = item.tx.gasPrice.GasPrice
          if maxGasPrice < item.tx.gasPrice.GasPrice:
            maxGasPrice = item.tx.gasPrice.GasPrice

      check 0.GasPrice <= minGasPrice
      check minGasPrice <= maxGasPrice

proc runTxPoolTests(noisy = true) =
  let elapNoisy = false

  suite &"TxPool: Play with pool functions and primitives":

    block:
      const groupLen = 13
      let veryNoisy = noisy and false

      test &"Load/forward walk ID queue, " &
          &"deleting in groups of at most {groupLen}":
        var
          xq = bcCom.toTxPool(txList, noisy = noisy)
          seen: seq[TxItemRef]

        # Set txs to pseudo random status
        xq.setItemStatusFromInfo

        check xq.txDB.verify.isOK
        elapNoisy.showElapsed("Forward delete-walk ID queue"):
          for item in xq.txDB.byItemID.nextValues:
            if not xq.addOrFlushGroupwise(groupLen, seen, item, veryNoisy):
              break
        check xq.txDB.verify.isOK
        check seen.len == xq.nItems.total
        check seen.len < groupLen

      test &"Load/reverse walk ID queue, " &
          &"deleting in groups of at most {groupLen}":
        var
          xq = bcCom.toTxPool(txList, noisy = noisy)
          seen: seq[TxItemRef]

        # Set txs to pseudo random status
        xq.setItemStatusFromInfo

        check xq.txDB.verify.isOK
        elapNoisy.showElapsed("Reverse delete-walk ID queue"):
          for item in xq.txDB.byItemID.prevValues:
            if not xq.addOrFlushGroupwise(groupLen, seen, item, veryNoisy):
              break
        check xq.txDB.verify.isOK
        check seen.len == xq.nItems.total
        check seen.len < groupLen

    block:
      var
        xq = TxPoolRef.new(bcCom,testAddress)
        testTxs: array[5,(TxItemRef,Transaction,Transaction)]

      test &"Superseding txs with sender and nonce variants":
        var
          testInx = 0
        let
          testBump = xq.priceBump
          lastBump = testBump - 1 # implies underpriced item

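        # Replacement rule (hedged summary of the usual price-bump
        # convention): a tx with the same sender and nonce only supersedes
        # an existing one if its price is bumped by at least `priceBump`
        # percent. Bumping by `lastBump = priceBump - 1` percent is thus
        # underpriced, so the last triple below must not be replaced.
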
        # load a set of suitable txs into testTxs[]
        for n in 0 ..< txList.len:
          let
            item = txList[n]
            bump = if testInx < testTxs.high: testBump else: lastBump
            rc = item.txModPair(testInx,bump.int)
          if not rc[0].isNil:
            testTxs[testInx] = rc
            testInx.inc
          if testTxs.high < testInx:
            break

        # verify that the test does not degenerate
        check testInx == testTxs.len
        check 0 < lastBump # => 0 < testBump

        # insert some txs
        for triple in testTxs:
          xq.add(triple[1], triple[0].info)

        check xq.nItems.total == testTxs.len
        check xq.nItems.disposed == 0
        let infoLst = testTxs.toSeq.mapIt(it[0].info).sorted
        check infoLst == xq.toItems.toSeq.mapIt(it.info).sorted

        # re-insert modified transactions
        for triple in testTxs:
          xq.add(triple[2], "alt " & triple[0].info)

        check xq.nItems.total == testTxs.len
        check xq.nItems.disposed == testTxs.len

        # the last update item was underpriced, so it must not have been
        # replaced
        var altLst = testTxs.toSeq.mapIt("alt " & it[0].info)
        altLst[^1] = testTxs[^1][0].info
        check altLst.sorted == xq.toItems.toSeq.mapIt(it.info).sorted

      test &"Deleting tx => also delete higher nonces":

        let
          # From the database, get the one-before-last item. This was
          # replaced earlier by the second transaction in the triple, i.e.
          # testTxs[^2][2]. FYI, the last transaction is testTxs[^1][1] as
          # it could not be replaced earlier by testTxs[^1][2].
          item = xq.getItem(testTxs[^2][2].itemID).value

          nWasteBasket = xq.nItems.disposed

        # make sure the test makes sense, nonces were 0 ..< testTxs.len
        check (item.tx.nonce + 2).int == testTxs.len

        xq.disposeItems(item)

        check xq.nItems.total + 2 == testTxs.len
        check nWasteBasket + 2 == xq.nItems.disposed

    # --------------------------

    block:
      var
        gap: Time
        nItems: int
        xq = bcCom.toTxPool(timeGap = gap,
                            nGapItems = nItems,
                            itList = txList,
                            itemsPC = 35, # arbitrary
                            delayMSecs = 100, # large enough to process
                            noisy = noisy)

      # Set txs to pseudo random status. Note that this function will cause
      # a violation of boundary conditions regarding nonces. So the database
      # integrity check needs xq.txDB.verify() rather than xq.verify().
      xq.setItemStatusFromInfo

      test &"Auto delete about {nItems} expired txs out of {xq.nItems.total}":

        # Make sure that the test did not collapse
        check 0 < nItems
        xq.lifeTime = getTime() - gap
        xq.flags = xq.flags + {autoZombifyPacked}

        # Evict and pick items from the waste basket
        let
          disposedBase = xq.nItems.disposed
          evictedBase = evictionMeter.value
          impliedBase = impliedEvictionMeter.value

        # Zombify the items that are older than the artificial time gap. The
        # move to the waste basket takes place with the `xq.add()` directive
        # (which is empty as there are no new txs.)
        xq.add @[]

        let
          disposedItems = xq.nItems.disposed - disposedBase
          evictedItems = (evictionMeter.value - evictedBase).int
          impliedItems = (impliedEvictionMeter.value - impliedBase).int

        check xq.txDB.verify.isOK
        check disposedItems + disposedBase + xq.nItems.total == txList.len
        check 0 < evictedItems
        check evictedItems <= disposedItems
        check disposedItems == evictedItems + impliedItems

        # make sure that the deletion was sort of expected
        let deleteExpectRatio = (evictedItems * 100 / nItems).int
        check deletedItemsRatioBandPC < deleteExpectRatio
        check deleteExpectRatio < (10000 div deletedItemsRatioBandPC)

    # --------------------

    block:
      var
        xq = bcCom.toTxPool(txList, noisy = noisy)
        maxAddr: EthAddress
        nAddrItems = 0

        nAddrPendingItems = 0
        nAddrStagedItems = 0
        nAddrPackedItems = 0

        fromNumItems = nAddrPendingItems
        fromBucketInfo = "pending"
        fromBucket = txItemPending
        toBucketInfo = "staged"
        toBucket = txItemStaged

      # Set txs to pseudo random status
      xq.setItemStatusFromInfo

      # find address with max number of transactions
      for (address,nonceList) in xq.txDB.incAccount:
        if nAddrItems < nonceList.nItems:
          maxAddr = address
          nAddrItems = nonceList.nItems

      # count items
      nAddrPendingItems = xq.txDB.bySender.eq(maxAddr).eq(txItemPending).nItems
      nAddrStagedItems = xq.txDB.bySender.eq(maxAddr).eq(txItemStaged).nItems
      nAddrPackedItems = xq.txDB.bySender.eq(maxAddr).eq(txItemPacked).nItems

      # find the largest from-bucket
      if fromNumItems < nAddrStagedItems:
        fromNumItems = nAddrStagedItems
        fromBucketInfo = "staged"
        fromBucket = txItemStaged
        toBucketInfo = "packed"
        toBucket = txItemPacked
      if fromNumItems < nAddrPackedItems:
        fromNumItems = nAddrPackedItems
        fromBucketInfo = "packed"
        fromBucket = txItemPacked
        toBucketInfo = "pending"
        toBucket = txItemPending

      let moveNumItems = fromNumItems div 2

      test &"Reassign {moveNumItems} of {fromNumItems} items "&
          &"from \"{fromBucketInfo}\" to \"{toBucketInfo}\"":

        # required minimum => there is a status queue with at least 2 entries
        check 3 < nAddrItems

        check nAddrPendingItems +
          nAddrStagedItems +
          nAddrPackedItems == nAddrItems

        check 0 < moveNumItems
        check 1 < fromNumItems

        var count = 0
        let nonceList = xq.txDB.bySender.eq(maxAddr).eq(fromBucket).value.data
        block collect:
          for item in nonceList.incNonce:
            count.inc
            check xq.txDB.reassign(item, toBucket)
            if moveNumItems <= count:
              break collect
        check xq.txDB.verify.isOK

        case fromBucket
        of txItemPending:
          check nAddrPendingItems - moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPending).nItems
          check nAddrStagedItems + moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemStaged).nItems
          check nAddrPackedItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPacked).nItems
        of txItemStaged:
          check nAddrStagedItems - moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemStaged).nItems
          check nAddrPackedItems + moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPacked).nItems
          check nAddrPendingItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPending).nItems
        else:
          check nAddrPackedItems - moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPacked).nItems
          check nAddrPendingItems + moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPending).nItems
          # the staged bucket is not touched in this branch
          check nAddrStagedItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemStaged).nItems

      # --------------------

      let expect = (
        xq.txDB.byStatus.eq(txItemPending).nItems,
        xq.txDB.byStatus.eq(txItemStaged).nItems,
        xq.txDB.byStatus.eq(txItemPacked).nItems)

      test &"Verify #items per bucket ({expect[0]},{expect[1]},{expect[2]})":
        let status = xq.nItems
        check expect == (status.pending,status.staged,status.packed)

test "Recycling from waste basket":
|
|
|
|
|
|
|
|
let
|
|
|
|
basketPrefill = xq.nItems.disposed
|
|
|
|
numDisposed = min(50,txList.len)
|
|
|
|
|
|
|
|
# make sure to work on a copy of the pivot item (to see changes)
|
|
|
|
thisItem = xq.getItem(txList[^numDisposed].itemID).value.dup
|
|
|
|
|
|
|
|
# move to wastebasket
|
|
|
|
xq.maxRejects = txList.len
|
|
|
|
for n in 1 .. numDisposed:
|
|
|
|
# use from top avoiding extra deletes (higer nonces per account)
|
|
|
|
xq.disposeItems(txList[^n])
|
|
|
|
|
|
|
|
# make sure that the pivot item is in the waste basket
|
|
|
|
check xq.getItem(thisItem.itemID).isErr
|
|
|
|
check xq.txDB.byRejects.hasKey(thisItem.itemID)
|
|
|
|
check basketPrefill + numDisposed == xq.nItems.disposed
|
|
|
|
check txList.len == xq.nItems.total + xq.nItems.disposed
|
|
|
|
|
|
|
|
# re-add item
|
2022-04-08 14:05:30 +00:00
|
|
|
xq.add(thisItem.tx)
|
2022-01-18 14:40:02 +00:00
|
|
|
|
|
|
|
# verify that the pivot item was moved out from the waste basket
|
|
|
|
check not xq.txDB.byRejects.hasKey(thisItem.itemID)
|
|
|
|
check basketPrefill + numDisposed == xq.nItems.disposed + 1
|
|
|
|
check txList.len == xq.nItems.total + xq.nItems.disposed
|
|
|
|
|
|
|
|
# verify that a new item was derived from the waste basket pivot item
|
|
|
|
let wbItem = xq.getItem(thisItem.itemID).value
|
|
|
|
check thisItem.info == wbItem.info
|
|
|
|
check thisItem.timestamp < wbItem.timestamp
|
|
|
|
|
|
|
|
|
|
|
|
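# Illustrative sketch only (not used by the tests): the packer suite below
# partitions txs by `effectiveGasTip(baseFee)`. For EIP-1559 style txs the
# effective tip is conventionally min(maxPriorityFee, maxFee - baseFee); the
# proc name and plain-int signature here are ad hoc assumptions, not the
# pool's actual API.
proc demoEffectiveGasTip(maxFee, maxPriorityFee, baseFee: int64): int64 =
  ## Per-gas earnings of the miner once `baseFee` is burned, e.g.
  ## demoEffectiveGasTip(30, 2, 27) == 2 and
  ## demoEffectiveGasTip(30, 5, 27) == 3.
  min(maxPriorityFee, maxFee - baseFee)
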
proc runTxPackerTests(noisy = true) =
  suite &"TxPool: Block packer tests":
    var
      ntBaseFee = 0.GasPrice
      ntNextFee = 0.GasPrice

    test &"Calculate some non-trivial base fee":
      var
        feesList = SortedSet[GasPriceEx,bool].init()

      # provide a sorted list of gas fees
      for item in txList:
        discard feesList.insert(item.tx.effectiveGasTip(0.GasPrice))

      let
        minKey = max(0, feesList.ge(GasPriceEx.low).value.key.int64)
        lowKey = feesList.gt(minKey.GasPriceEx).value.key.uint64
        highKey = feesList.le(GasPriceEx.high).value.key.uint64
        keyRange = highKey - lowKey
        keyStep = max(1u64, keyRange div 500_000)

      # what follows is a rather crude partitioning so that
      # * ntBaseFee partitions non-zero numbers of pending and staged txs
      # * ntNextFee decreases the number of staged txs
      ntBaseFee = (lowKey + keyStep).GasPrice

      # the following might throw an exception if the table is degenerate
      var nextKey = ntBaseFee
      for _ in [1, 2, 3]:
        let rcNextKey = feesList.gt(nextKey.GasPriceEx)
        check rcNextKey.isOK
        nextKey = rcNextKey.value.key.uint64.GasPrice

      ntNextFee = nextKey + keyStep.GasPrice

      # of course ...
      check ntBaseFee < ntNextFee

    block:
      var
        xq = bcCom.toTxPool(txList, ntBaseFee, noisy = noisy)
        xr = bcCom.toTxPool(txList, ntNextFee, noisy = noisy)

      block:
        let
          pending = xq.nItems.pending
          staged = xq.nItems.staged
          packed = xq.nItems.packed

        test &"Load txs with baseFee={ntBaseFee}, "&
            &"buckets={pending}/{staged}/{packed}":

          check 0 < pending
          check 0 < staged
          check xq.nItems.total == txList.len
          check xq.nItems.disposed == 0

      block:
        let
          pending = xr.nItems.pending
          staged = xr.nItems.staged
          packed = xr.nItems.packed

        test &"Re-org txs previous buckets setting baseFee={ntNextFee}, "&
            &"buckets={pending}/{staged}/{packed}":

          check 0 < pending
          check 0 < staged
          check xr.nItems.total == txList.len
          check xr.nItems.disposed == 0

          # having the same set of txs, setting the xq database to the same
          # base fee as the xr one, the bucket fills of both databases must
          # be the same after re-org
          xq.baseFee = ntNextFee
          xq.triggerReorg

          # now, xq should look like xr
          check xq.verify.isOK
          check xq.nItems == xr.nItems

      block:
        # get some value below the middle
        let
          packPrice = ((minGasPrice + maxGasPrice).uint64 div 3).GasPrice
          lowerPrice = minGasPrice + 1.GasPrice

        test &"Packing txs, baseFee=0 minPrice={packPrice} "&
            &"targetBlockSize={xq.trgGasLimit}":

          # verify that the test does not degenerate
          check 0 < minGasPrice
          check minGasPrice < maxGasPrice

          # ignore the base limit so that the `packPrice` below becomes
          # effective
          xq.baseFee = 0.GasPrice
          check xq.nItems.disposed == 0

          # set minimum target price
          xq.minPreLondonGasPrice = packPrice
          check xq.minPreLondonGasPrice == packPrice

          # employ packer
          # xq.jobCommit(forceMaintenance = true)
          xq.packerVmExec
          check xq.verify.isOK

          # verify that the test did not degenerate
          check 0 < xq.gasTotals.packed
          check xq.nItems.disposed == 0

          # assemble block from the `packed` bucket
          let
            items = xq.toItems(txItemPacked)
            total = foldl(@[0.GasInt] & items.mapIt(it.tx.gasLimit), a+b)
          check xq.gasTotals.packed == total

          noisy.say "***", "1st block size=", total, " stats=", xq.nItems.pp

        test &"Clear and re-pack bucket":
          # prepare for the PoS transition in the txpool
          xq.chain.com.pos.timestamp = EthTime.now()

          let
            items0 = xq.toItems(txItemPacked)
            saveState0 = foldl(@[0.GasInt] & items0.mapIt(it.tx.gasLimit), a+b)
          check 0 < xq.nItems.packed

          # re-pack bucket
          # xq.jobCommit(forceMaintenance = true)
          xq.packerVmExec
          check xq.verify.isOK

          let
            items1 = xq.toItems(txItemPacked)
            saveState1 = foldl(@[0.GasInt] & items1.mapIt(it.tx.gasLimit), a+b)
          check items0 == items1
          check saveState0 == saveState1

        test &"Delete item and re-pack bucket/w lower minPrice={lowerPrice}":
          # verify that the test does not degenerate
          check 0 < lowerPrice
          check lowerPrice < packPrice
          check 0 < xq.nItems.packed

          let
            saveStats = xq.nItems
            lastItem = xq.toItems(txItemPacked)[^1]

          # delete the last item from the packed bucket
          xq.disposeItems(lastItem)
          check xq.verify.isOK

          # set a new minimum target price
          xq.minPreLondonGasPrice = lowerPrice
          check xq.minPreLondonGasPrice == lowerPrice

          # re-pack bucket; the packer needs an extra trigger because there is
          # not necessarily a buckets re-org resulting in a change
          # xq.jobCommit(forceMaintenance = true)
          xq.packerVmExec
          check xq.verify.isOK

          let
            items = xq.toItems(txItemPacked)
            newTotal = foldl(@[0.GasInt] & items.mapIt(it.tx.gasLimit), a+b)
            newStats = xq.nItems
            newItem = xq.toItems(txItemPacked)[^1]

          # for sanity, assert the obvious
          check 0 < xq.gasTotals.packed
          check xq.gasTotals.packed == newTotal

          # verify incremental packing
          check lastItem.info != newItem.info
          check saveStats.packed >= newStats.packed

          noisy.say "***", "2nd block size=", newTotal, " stats=", newStats.pp

    # -------------------------------------------------

    block:
      var
        xq = bcCom.toTxPool(txList, ntBaseFee,
                            local = localAccounts,
                            noisy = noisy)
      let
        (nMinTxs, nTrgTxs) = (15, 15)
        (nMinAccounts, nTrgAccounts) = (1, 8)
        canonicalHead = xq.chain.com.db.getCanonicalHead

      test &"Back track block chain head (at least "&
          &"{nMinTxs} txs, {nMinAccounts} known accounts)":

        # get the environment of a state back in the block chain, preferably
        # with at least `nTrgTxs` txs and `nTrgAccounts` known accounts
        let
          (backHeader,backTxs,accLst) = xq.getBackHeader(nTrgTxs,nTrgAccounts)
          nBackBlocks = xq.head.blockNumber - backHeader.blockNumber
          stats = xq.nItems

        # verify that the test would not degenerate
        check nMinAccounts <= accLst.len
        check nMinTxs <= backTxs.len

        noisy.say "***",
          &"back tracked block chain:" &
          &" {backTxs.len} txs, {nBackBlocks} blocks," &
          &" {accLst.len} known accounts"

        check xq.smartHead(backHeader) # move insertion point

        # make sure that all txs have been added to the pool
        check stats.disposed == 0
        check stats.total + backTxs.len == xq.nItems.total

      test &"Run packer, profitability will not increase with block size":

        xq.flags = xq.flags - {packItemsMaxGasLimit}
        xq.packerVmExec
        let
          smallerBlockProfitability = xq.profitability
          smallerBlockSize = xq.gasCumulative

        noisy.say "***", "trg-packing",
          " profitability=", xq.profitability,
          " used=", xq.gasCumulative,
          " trg=", xq.trgGasLimit,
          " slack=", xq.trgGasLimit - xq.gasCumulative

        xq.flags = xq.flags + {packItemsMaxGasLimit}
        xq.packerVmExec

        noisy.say "***", "max-packing",
          " profitability=", xq.profitability,
          " used=", xq.gasCumulative,
          " max=", xq.maxGasLimit,
          " slack=", xq.maxGasLimit - xq.gasCumulative

        check smallerBlockSize <= xq.gasCumulative
        check 0 < xq.profitability

        # Well, this ratio should be above 100 but might be slightly less
        # with small data samples (pathological case.)
        let blockProfitRatio =
          (((smallerBlockProfitability.uint64 * 1000) div
            (max(1u64,xq.profitability.uint64))) + 5) div 10

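        # Worked example of the rounding above (numbers invented): with
        # smallerBlockProfitability = 93 and xq.profitability = 100 this
        # yields ((93 * 1000 div 100) + 5) div 10 = 93, i.e. the percentage
        # with round-to-nearest.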
        check decreasingBlockProfitRatioPC <= blockProfitRatio

        noisy.say "***", "cmp",
          " increase=", xq.gasCumulative - smallerBlockSize,
          " trg/max=", blockProfitRatio, "%"

      # if true: return
      test "Store generated block in block chain database":

        # an authorized signer is needed to produce correct
        # POA difficulty and blockheader fields
        bcCom.poa.authorize(testAddress, signerFunc)

        noisy.say "***", "locality",
          " locals=", xq.accountRanks.local.len,
          " remotes=", xq.accountRanks.remote.len

        # Force maximal block size. Incidentally, the latest tx should have
        # a `gasLimit` exceeding the available space on the block `gasLimit`
        # which will be checked below.
        xq.flags = xq.flags #+ {packItemsMaxGasLimit}

        # Invoke packer
        let blk = xq.ethBlock

        # Make sure that there are at least two txs on the packed block so
        # this test does not degenerate.
        check 1 < xq.chain.receipts.len

        var overlap = -1
        for n in countDown(blk.txs.len - 1, 0):
          let total = xq.chain.receipts[n].cumulativeGasUsed
          if blk.header.gasUsed < total + blk.txs[n].gasLimit:
            overlap = n
            break

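        # i.e. `overlap` is the index of a tx whose declared `gasLimit` does
        # not fit into the space remaining below the block's `gasUsed`; such
        # a tx can only have been packed because executing it burns less gas
        # than its limit permits.
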
        noisy.say "***",
          "overlap=#", overlap,
          " tx=#", blk.txs.len,
          " gasUsed=", blk.header.gasUsed,
          " gasLimit=", blk.header.gasLimit

        if 0 <= overlap:
          let
            n = overlap
            mostlySize = xq.chain.receipts[n].cumulativeGasUsed
          noisy.say "***", "overlap",
            " size=", mostlySize + blk.txs[n].gasLimit - blk.header.gasUsed

        let
          bdy = BlockBody(transactions: blk.txs, withdrawals: blk.withdrawals)
          hdr = block:
            var rc = blk.header
            rc.gasLimit = blk.header.gasLimit
            rc.testKeySign

        # Make certain that some tx was set up so that its gasLimit overlaps
        # with the total block size. Of course, running it in the VM will burn
        # much less than permitted so this block will be accepted.
        check 0 < overlap

        setTraceLevel()

        # Test the low-level function for adding the new block to the database
        #xq.chain.maxMode = (packItemsMaxGasLimit in xq.flags)
        xq.chain.clearAccounts
        check xq.chain.vmState.processBlock(hdr, bdy).isOK

        setErrorLevel()

        # Re-allocate using the VM environment from `persistBlocks()`
        let vmstate2 = BaseVMState.new(hdr, bcCom)
        check vmstate2.processBlock(hdr, bdy).isOK

        # This should not have changed
        check canonicalHead == xq.chain.com.db.getCanonicalHead

        # Using the high-level library function, re-append the block while
        # turning off header verification.
        let c = bcCom.newChain(extraValidation = false)

        check c.persistBlocks(@[hdr], @[bdy]).isOK

        if bcCom.consensus == ConsensusType.POS:
          # PoS consensus will force the new blockheader as head
          # even though the difficulty or the block number is lower than
          # the previous canonical head
          check hdr.blockHash == xq.chain.com.db.getCanonicalHead.blockHash

          # Are the withdrawals persisted and loaded properly?
          var blockBody: BlockBody
          check xq.chain.com.db.getBlockBody(hdr, blockBody)
          check bdy == blockBody
        else:
          # The canonical head will be set to hdr if it scores high enough
          # (see implementation of db_chain.persistHeaderToDb()).
          let
            canonScore = xq.chain.com.db.getScore(canonicalHead.blockHash)
            headerScore = xq.chain.com.db.getScore(hdr.blockHash)

          if canonScore < headerScore:
            # Note that the updated canonical head is equivalent to hdr but not
            # necessarily binary equal.
            check hdr.blockHash == xq.chain.com.db.getCanonicalHead.blockHash
          else:
            check canonicalHead == xq.chain.com.db.getCanonicalHead

# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------

proc txPoolMain*(noisy = defined(debug)) =
  noisy.runTxLoader
  noisy.runTxPoolTests
  noisy.runTxPackerTests
  runTxPoolCliqueTest()
  runTxPoolPosTest()
  runTxPoolBlobhashTest()
  noisy.runTxHeadDelta

when isMainModule:
  const
    noisy = defined(debug)
    capts0: CaptureSpecs = goerliCapture
    capts1: CaptureSpecs = (GoerliNet, "goerli482304.txt.gz", 30000, 500, 1500)
    # Note: mainnet has the leading 45k blocks without any transactions
    capts2: CaptureSpecs = (MainNet, "mainnet332160.txt.gz", 30000, 500, 1500)

  setErrorLevel()

  noisy.runTxLoader(capture = capts1)
  noisy.runTxPoolTests
  noisy.runTxPackerTests

  runTxPoolCliqueTest()
  runTxPoolPosTest()
  runTxPoolBlobhashTest()
  noisy.runTxHeadDelta

  #noisy.runTxLoader(dir = ".")
  #noisy.runTxPoolTests

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------