TxPool implementation

details:
  For documentation, see comments in the file tx_pool.nim.

  For prettified manual pages run 'make docs' in the nimbus directory and
  point your web browser to the newly created 'docs' directory.
This commit is contained in:
Jordan Hrycaj 2022-01-18 14:40:02 +00:00 committed by zah
parent 3f0139c5b6
commit 103656dbb5
27 changed files with 7037 additions and 7 deletions

View File

@ -11,12 +11,15 @@
import import
./executor/[ ./executor/[
calculate_reward,
executor_helpers, executor_helpers,
process_block, process_block,
process_transaction] process_transaction]
export export
calculate_reward,
executor_helpers.createBloom, executor_helpers.createBloom,
executor_helpers.makeReceipt,
process_block, process_block,
process_transaction process_transaction

View File

@ -43,8 +43,8 @@ const
{.push raises: [Defect].} {.push raises: [Defect].}
proc calculateReward*(vmState: BaseVMState; proc calculateReward*(vmState: BaseVMState; account: EthAddress;
header: BlockHeader; body: BlockBody) number: BlockNumber; uncles: openArray[BlockHeader])
{.gcsafe, raises: [Defect,CatchableError].} = {.gcsafe, raises: [Defect,CatchableError].} =
var blockReward: Uint256 var blockReward: Uint256
@ -53,9 +53,9 @@ proc calculateReward*(vmState: BaseVMState;
var mainReward = blockReward var mainReward = blockReward
for uncle in body.uncles: for uncle in uncles:
var uncleReward = uncle.blockNumber.u256 + 8.u256 var uncleReward = uncle.blockNumber.u256 + 8.u256
uncleReward -= header.blockNumber.u256 uncleReward -= number
uncleReward = uncleReward * blockReward uncleReward = uncleReward * blockReward
uncleReward = uncleReward div 8.u256 uncleReward = uncleReward div 8.u256
vmState.mutateStateDB: vmState.mutateStateDB:
@ -63,6 +63,12 @@ proc calculateReward*(vmState: BaseVMState;
mainReward += blockReward div 32.u256 mainReward += blockReward div 32.u256
vmState.mutateStateDB: vmState.mutateStateDB:
db.addBalance(header.coinbase, mainReward) db.addBalance(account, mainReward)
proc calculateReward*(vmState: BaseVMState;
header: BlockHeader; body: BlockBody)
{.gcsafe, raises: [Defect,CatchableError].} =
vmState.calculateReward(header.coinbase, header.blockNumber, body.uncles)
# End # End

875
nimbus/utils/tx_pool.nim Normal file
View File

@ -0,0 +1,875 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## TODO:
## =====
## * Support `local` accounts the txs of which would be prioritised. This is
## currently unsupported. For now, all txs are considered from `remote`
## accounts.
##
## * No uncles are handled by this pool
##
## * Impose a size limit to the bucket database. Which items would be removed?
##
## * There is a conceivable problem with the per-account optimisation. The
## algorithm chooses an account and does not stop packing until all txs
## of the account are packed or the block is full. In the lattter case,
## there might be some txs left unpacked from the account which might be
## the most lucrative ones. Should this be tackled (see also next item)?
##
## * The classifier throws out all txs with negative gas tips. This implies
## that all subsequent txs must also be suspended for this account even
## though these following txs might be extraordinarily profitable so that
## packing the whole account might be worth while. Should this be considered,
## somehow (see also previous item)?
##
##
## Transaction Pool
## ================
##
## The transaction pool collects transactions and holds them in a database.
## This database consists of the three buckets *pending*, *staged*, and
## *packed* and a *waste basket*. These database entities are discussed in
## more detail, below.
##
## At some point, there will be some transactions in the *staged* bucket.
## Upon request, the pool will pack as many of those transactions as possible
## into to *packed* bucket which will subsequently be used to generate a
## new Ethereum block.
##
## When packing transactions from *staged* into *packed* bucket, the staged
## transactions are sorted by *sender account* and *nonce*. The *sender
## account* values are ordered by a *ranking* function (highest ranking first)
## and the *nonce* values by their natural integer order. Then, transactions
## are greedily picked from the ordered set until there are enough
## transactions in the *packed* bucket. Some boundary condition applies which
## roughly says that for a given account, all the transactions packed must
## leave no gaps between nonce values when sorted.
##
## The rank function applied to the *sender account* sorting is chosen as a
## guess for higher profitability which goes with a higher rank account.
##
##
## Rank calculator
## ---------------
## Let *tx()* denote the mapping
## ::
## tx: (account,nonce) -> tx
##
## from an index pair *(account,nonce)* to a transaction *tx*. Also, for some
## external parameter *baseFee*, let
## ::
## maxProfit: (tx,baseFee) -> tx.effectiveGasTip(baseFee) * tx.gasLimit
##
## be the maximal tip a single transation can achieve (where unit of the
## *effectiveGasTip()* is a *price* and *gasLimit* is a *commodity value*.).
## Then the rank function
## ::
## rank(account) = Σ maxProfit(tx(account,ν),baseFee) / Σ tx(account,ν).gasLimit
## ν ν
##
## is a *price* estimate of the maximal average tip per gas unit over all
## transactions for the given account. The nonces `ν` for the summation
## run over all transactions from the *staged* and *packed* bucket.
##
##
##
##
## Pool database:
## --------------
## ::
## <Batch queue> . <Status buckets> . <Terminal state>
## . .
## . . +----------+
## --> txJobAddTxs -------------------------------> | |
## | . +-----------+ . | disposed |
## +------------> | pending | ------> | |
## . +-----------+ . | |
## . | ^ ^ . | waste |
## . v | | . | basket |
## . +----------+ | . | |
## . | staged | | . | |
## . +----------+ | . | |
## . | | ^ | . | |
## . | v | | . | |
## . | +----------+ . | |
## . | | packed | -------> | |
## . | +----------+ . | |
## . +----------------------> | |
## . . +----------+
##
## The three columns *Batch queue*, *State bucket*, and *Terminal state*
## represent three different accounting (or database) systems. The pool
## database is continuously updated while new transactions are added.
## Transactions are bundled with meta data which holds the full database
## state in addition to other cached information like the sender account.
##
##
## Batch Queue
## -----------
## The batch queue holds different types of jobs to be run later in a batch.
## When running at a time, all jobs are executed in *FIFO* mode until the queue
## is empty.
##
## When entering the pool, new transactions are bundled with meta data and
## appended to the batch queue. These bundles are called *item*. When the
## batch commits, items are forwarded to one of the following entities:
##
## * the *staged* bucket if the transaction is valid and match some constraints
## on expected minimum mining fees (or a semblance of that for *non-PoW*
## networks)
## * the *pending* bucket if the transaction is valid but is not subject to be
## held in the *staged* bucket
## * the *waste basket* if the transaction is invalid
##
## If a valid transaction item supersedes an existing one, the existing
## item is moved to the waste basket and the new transaction replaces the
## existing one in the current bucket if the gas price of the transaction is
## at least `priceBump` per cent higher (see adjustable parameters, below.)
##
## Status buckets
## --------------
## The term *bucket* is a nickname for a set of *items* (i.e. transactions
## bundled with meta data as mentioned earlier) all labelled with the same
## `status` symbol and not marked *waste*. In particular, bucket membership
## for an item is encoded as
##
## * the `status` field indicates the particular *bucket* membership
## * the `reject` field is reset/unset and has zero-equivalent value
##
## The following boundary conditions hold for the union of all buckets:
##
## * *Unique index:*
## Let **T** be the union of all buckets and **Q** be the
## set of *(sender,nonce)* pairs derived from the items of **T**. Then
## **T** and **Q** are isomorphic, i.e. for each pair *(sender,nonce)*
## from **Q** there is exactly one item from **T**, and vice versa.
##
## * *Consecutive nonces:*
## For each *(sender0,nonce0)* of **Q**, either
## *(sender0,nonce0-1)* is in **Q** or *nonce0* is the current nonce as
## registered with the *sender account* (implied by the block chain),
##
## The *consecutive nonces* requirement involves the *sender account*
## which depends on the current state of the block chain as represented by the
## internally cached head (i.e. insertion point where a new block is to be
## appended.)
##
## The following notation describes sets of *(sender,nonce)* pairs for
## per-bucket items. It will be used for boundary conditions similar to the
## ones above.
##
## * **Pending** denotes the set of *(sender,nonce)* pairs for the
## *pending* bucket
##
## * **Staged** denotes the set of *(sender,nonce)* pairs for the
## *staged* bucket
##
## * **Packed** denotes the set of *(sender,nonce)* pairs for the
## *packed* bucket
##
## The pending bucket
## ^^^^^^^^^^^^^^^^^^
## Items in this bucket hold valid transactions that are not in any of the
## other buckets. All items might be promoted from here into other buckets if
## the current state of the block chain as represented by the internally cached
## head changes.
##
## The staged bucket
## ^^^^^^^^^^^^^^^^^
## Items in this bucket are ready to be added to a new block. They typically
## imply some expected minimum reward when mined on PoW networks. Some
## boundary condition holds:
##
## * *Consecutive nonces:*
## For any *(sender0,nonce0)* pair from **Staged**, the pair
## *(sender0,nonce0-1)* is not in **Pending**.
##
## Considering the respective boundary condition on the union of buckets
## **T**, this condition here implies that a *staged* per sender nonce has a
## predecessor in the *staged* or *packed* bucket or is a nonce as registered
## with the *sender account*.
##
## The packed bucket
## ^^^^^^^^^^^^^^^^^
## All items from this bucket have been selected from the *staged* bucket, the
## transactions of which (i.e. unwrapped items) can go right away into a new
## Ethereum block. How these items are selected was described at the beginning
## of this chapter. The following boundary conditions holds:
##
## * *Consecutive nonces:*
## For any *(sender0,nonce0)* pair from **Packed**, the pair
## *(sender0,nonce0-1)* is neither in **Pending**, nor in **Staged**.
##
## Considering the respective boundary condition on the union of buckets
## **T**, this condition here implies that a *packed* per-sender nonce has a
## predecessor in the very *packed* bucket or is a nonce as registered with the
## *sender account*.
##
##
## Terminal state
## --------------
## After use, items are disposed into a waste basket *FIFO* queue which has a
## maximal length. If the length is exceeded, the oldest items are deleted.
## The waste basket is used as a cache for discarded transactions that need to
## re-enter the system. Recovering from the waste basket saves the effort of
## recovering the sender account from the signature. An item is identified
## *waste* if
##
## * the `reject` field is explicitly set and has a value different
## from a zero-equivalent.
##
## So a *waste* item is clearly distinguishable from any active one as a
## member of one of the *status buckets*.
##
##
##
## Pool coding
## ===========
## The idea is that there are concurrent *async* instances feeding transactions
## into a batch queue via `jobAddTxs()`. The batch queue is then processed on
## demand not until `jobCommit()` is run. A piece of code using this pool
## architecture could look like as follows:
## ::
## # see also unit test examples, e.g. "Block packer tests"
## var db: BaseChainDB # to be initialised
## var txs: seq[Transaction] # to be initialised
##
## proc mineThatBlock(blk: EthBlock) # external function
##
## ..
##
## var xq = TxPoolRef.new(db) # initialise tx-pool
## ..
##
## xq.jobAddTxs(txs) # add transactions to be held
## .. # .. on the batch queue
##
## xq.jobCommit # run batch queue worker/processor
## let newBlock = xq.ethBlock # fetch current mining block
##
## ..
## mineThatBlock(newBlock) ... # external mining & signing process
## ..
##
## let newTopHeader = db.getCanonicalHead # new head after mining
## xp.jobDeltaTxsHead(newTopHeader) # add transactions update jobs
## xp.head = newTopHeader # adjust block insertion point
## xp.jobCommit # run batch queue worker/processor
##
##
## Discussion of example
## ---------------------
## In the example, transactions are collected via `jobAddTx()` and added to
## a batch of jobs to be processed at a time when considered right. The
## processing is initiated with the `jobCommit()` directive.
##
## The `ethBlock()` directive retrieves a new block for mining derived
## from the current pool state. It invokes the block packer which accumulates
## txs from the `staged` bucket into the `packed` bucket which then go
## into the block.
##
## Then mining and signing takes place ...
##
## After mining and signing, the view of the block chain as seen by the pool
## must be updated to be ready for a new mining process. In the best case, the
## canonical head is just moved to the currently mined block which would imply
## just to discard the contents of the *packed* bucket with some additional
## transactions from the *staged* bucket. A more general block chain state
## head update would be more complex, though.
##
## In the most complex case, the newly mined block was added to some block
## chain branch which has become an uncle to the new canonical head retrieved
## by `getCanonicalHead()`. In order to update the pool to the very state
## one would have arrived if worked on the retrieved canonical head branch
## in the first place, the directive `jobDeltaTxsHead()` calculates the
## actions of what is needed to get just there from the locally cached head
## state of the pool. These actions are added by `jobDeltaTxsHead()` to the
## batch queue to be executed when it is time.
##
## Then the locally cached block chain head is updated by setting a new
## `topHeader`. The *setter* behind this assignment also caches implied
## internal parameters as base fee, fork, etc. Only after the new chain head
## is set, the `jobCommit()` should be started to process the update actions
## (otherwise txs might be thrown out which could be used for packing.)
##
##
## Adjustable Parameters
## ---------------------
##
## flags
## The `flags` parameter holds a set of strategy symbols for how to process
## items and buckets.
##
## *stageItems1559MinFee*
## Stage tx items with `tx.maxFee` at least `minFeePrice`. Other items are
## left or set pending. This symbol affects post-London tx items, only.
##
## *stageItems1559MinTip*
## Stage tx items with `tx.effectiveGasTip(baseFee)` at least
## `minTipPrice`. Other items are considered underpriced and left or set
## pending. This symbol affects post-London tx items, only.
##
## *stageItemsPlMinPrice*
## Stage tx items with `tx.gasPrice` at least `minPreLondonGasPrice`.
## Other items are considered underpriced and left or set pending. This
## symbol affects pre-London tx items, only.
##
## *packItemsMaxGasLimit*
## If set, the *packer* will execute and collect additional items from
## the `staged` bucket while accumulating `gasUsed` as long as
## `maxGasLimit` is not exceeded. If `packItemsTryHarder` flag is also
## set, the *packer* will not stop until at least `hwmGasLimit` is
## reached.
##
## Otherwise the *packer* will accumulate up until `trgGasLimit` is
## not exceeded, and not stop until at least `lwmGasLimit` is reached
## in case `packItemsTryHarder` is also set,
##
## *packItemsTryHarder*
## If set, the *packer* will *not* stop accumulating transactions up until
## the `lwmGasLimit` or `hwmGasLimit` is reached, depending on whether
## the `packItemsMaxGasLimit` is set. Otherwise, accumulating stops
## immediately before the next transaction exceeds `trgGasLimit`, or
## `maxGasLimit` depending on `packItemsMaxGasLimit`.
##
## *autoUpdateBucketsDB*
## Automatically update the state buckets after running batch jobs if the
## `dirtyBuckets` flag is also set.
##
## *autoZombifyUnpacked*
## Automatically dispose *pending* or *staged* tx items that were added to
## the state buckets database at least `lifeTime` ago.
##
## *autoZombifyPacked*
## Automatically dispose *packed* tx items that were added to
## the state buckets database at least `lifeTime` ago.
##
## *..there might be more strategy symbols..*
##
## head
## Cached block chain insertion point. Typically, this should be the
## same header as retrieved by the `getCanonicalHead()`.
##
## hwmMaxPercent
## This parameter implies the size of `hwmGasLimit` which is calculated
## as `max(trgGasLimit, maxGasLimit * hwmMaxPercent / 100)`.
##
## lifeTime
## Txs that stay longer in one of the buckets will be moved to a waste
## basket. From there they will be eventually deleted oldest first when
## the maximum size would be exceeded.
##
## lwmTrgPercent
## This parameter implies the size of `lwmGasLimit` which is calculated
## as `max(minGasLimit, trgGasLimit * lwmTrgPercent / 100)`.
##
## minFeePrice
## Applies no EIP-1559 txs only. Txs are packed if `maxFee` is at least
## that value.
##
## minTipPrice
## For EIP-1559, txs are packed if the expected tip (see `estimatedGasTip()`)
## is at least that value. In compatibility mode for legacy txs, this
## degenerates to `gasPrice - baseFee`.
##
## minPreLondonGasPrice
## For pre-London or legacy txs, this parameter has precedence over
## `minTipPrice`. Txs are packed if the `gasPrice` is at least that value.
##
## priceBump
## There can be only one transaction in the database for the same `sender`
## account and `nonce` value. When adding a transaction with the same
## (`sender`, `nonce`) pair, the new transaction will replace the current one
## if it has a gas price which is at least `priceBump` per cent higher.
##
##
## Read-Only Parameters
## --------------------
##
## baseFee
## This parameter is derived from the internally cached block chain state.
## The base fee parameter modifies/determines the expected gain when packing
## a new block (is set to *zero* for *pre-London* blocks.)
##
## dirtyBuckets
## If `true`, the state buckets database is ready for re-org if the
## `autoUpdateBucketsDB` flag is also set.
##
## gasLimit
## Taken or derived from the current block chain head, incoming txs that
## exceed this gas limit are stored into the *pending* bucket (maybe
## eligible for staging at the next cycle when the internally cached block
## chain state is updated.)
##
## hwmGasLimit
## This parameter is at least `trgGasLimit` and does not exceed
## `maxGasLimit` and can be adjusted by means of setting `hwmMaxPercent`. It
## is used by the packer as a minimum block size if both flags
## `packItemsTryHarder` and `packItemsMaxGasLimit` are set.
##
## lwmGasLimit
## This parameter is at least `minGasLimit` and does not exceed
## `trgGasLimit` and can be adjusted by means of setting `lwmTrgPercent`. It
## is used by the packer as a minimum block size if the flag
## `packItemsTryHarder` is set and `packItemsMaxGasLimit` is unset.
##
## maxGasLimit
## This parameter is at least `hwmGasLimit`. It is calculated considering
## the current state of the block chain as represented by the internally
## cached head. This parameter is used by the *packer* as a size limit if
## `packItemsMaxGasLimit` is set.
##
## minGasLimit
## This parameter is calculated considering the current state of the block
## chain as represented by the internally cached head. It can be used for
## verifying that a generated block does not underflow minimum size.
## Underflow can only be happen if there are not enough transaction available
## in the pool.
##
## trgGasLimit
## This parameter is at least `lwmGasLimit` and does not exceed
## `maxGasLimit`. It is calculated considering the current state of the block
## chain as represented by the internally cached head. This parameter is
## used by the *packer* as a size limit if `packItemsMaxGasLimit` is unset.
##
import
std/[sequtils, tables],
../db/db_chain,
./tx_pool/[tx_chain, tx_desc, tx_info, tx_item, tx_job],
./tx_pool/tx_tabs,
./tx_pool/tx_tasks/[tx_add, tx_bucket, tx_head, tx_dispose, tx_packer],
chronicles,
eth/[common, keys],
stew/[keyed_queue, results],
stint
# hide complexity unless really needed
when JobWaitEnabled:
import chronos
export
TxItemRef,
TxItemStatus,
TxJobDataRef,
TxJobID,
TxJobKind,
TxPoolFlags,
TxPoolRef,
TxTabsGasTotals,
TxTabsItemsCount,
results,
tx_desc.startDate,
tx_info,
tx_item.GasPrice,
tx_item.`<=`,
tx_item.`<`,
tx_item.effectiveGasTip,
tx_item.info,
tx_item.itemID,
tx_item.sender,
tx_item.status,
tx_item.timeStamp,
tx_item.tx
{.push raises: [Defect].}
logScope:
topics = "tx-pool"
# ------------------------------------------------------------------------------
# Private functions: tasks processor
# ------------------------------------------------------------------------------
proc maintenanceProcessing(xp: TxPoolRef)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Housekeeping tasks to be run after the job queue has been processed.

  # Purge expired items: whenever any of the zombify strategies is enabled,
  # move transactions older than `xp.lifeTime` to the waste basket.
  if xp.pFlags * {autoZombifyUnpacked, autoZombifyPacked} != {}:
    xp.disposeExpiredItems

  # Re-assign bucket labels, but only if the bucket database was flagged
  # dirty and automatic updates are enabled.
  if autoUpdateBucketsDB in xp.pFlags and xp.pDirtyBuckets:
    # Re-calculate the item status values (aka bucket labels) for all items.
    discard xp.bucketUpdateAll
    xp.pDirtyBuckets = false
proc processJobs(xp: TxPoolRef): int
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Job queue processor: drain the batch queue in *FIFO* order, executing
  ## each job. Returns the number of jobs executed.
  while true:
    let rc = xp.byJob.fetch
    if not rc.isOK:
      break                          # queue empty, all done
    let task = rc.value
    result.inc

    case task.data.kind
    of txJobNone:
      discard                        # no action

    of txJobAddTxs:
      # Add a batch of txs to the database
      let args = task.data.addTxsArgs
      let (_, topItems) = xp.addTxs(args.txs, args.info)
      xp.pDoubleCheckAdd topItems

    of txJobDelItemIDs:
      # Dispose a batch of items
      let args = task.data.delItemIDsArgs
      for itemID in args.itemIDs:
        let rcItem = xp.txDB.byItemID.eq(itemID)
        if rcItem.isOK:
          discard xp.txDB.dispose(rcItem.value, reason = args.reason)
# ------------------------------------------------------------------------------
# Public constructor/destructor
# ------------------------------------------------------------------------------
proc new*(T: type TxPoolRef; db: BaseChainDB; miner: EthAddress): T
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Constructor, returns a new tx-pool descriptor. The `miner` argument is
  ## the fee beneficiary for informational purposes only.
  # Allocate the ref object, then initialise its sub-systems.
  new result
  result.init(db,miner)
# ------------------------------------------------------------------------------
# Public functions, task manager, pool actions serialiser
# ------------------------------------------------------------------------------
proc job*(xp: TxPoolRef; job: TxJobDataRef): TxJobID
    {.discardable,gcsafe,raises: [Defect,CatchableError].} =
  ## Queue a new generic job (does not run `jobCommit()`.)
  xp.byJob.add(job)

# core/tx_pool.go(848): func (pool *TxPool) AddLocals(txs []..
# core/tx_pool.go(864): func (pool *TxPool) AddRemotes(txs []..
proc jobAddTxs*(xp: TxPoolRef; txs: openArray[Transaction]; info = "")
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Queues a batch of transactions jobs to be processed in due course (does
  ## not run `jobCommit()`.)
  ##
  ## The argument Transactions `txs` may come in any order, they will be
  ## sorted by `<account,nonce>` before adding to the database with the
  ## least nonce first. For this reason, it is suggested to pass transactions
  ## in larger groups. Calling single transaction jobs, they must strictly be
  ## passed smaller nonce before larger nonce.
  discard xp.job(TxJobDataRef(
    kind:       txJobAddTxs,
    addTxsArgs: (
      txs:      toSeq(txs),
      info:     info)))

# core/tx_pool.go(854): func (pool *TxPool) AddLocals(txs []..
# core/tx_pool.go(883): func (pool *TxPool) AddRemotes(txs []..
proc jobAddTx*(xp: TxPoolRef; tx: Transaction; info = "")
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Variant of `jobAddTxs()` for a single transaction.
  xp.jobAddTxs(@[tx], info)
proc jobDeltaTxsHead*(xp: TxPoolRef; newHead: BlockHeader): bool
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## This function calculates the txs to add or delete that need to take place
  ## after the cached block chain head is set to the position implied by the
  ## argument `newHead`. If successful, the txs to add or delete are queued
  ## on the job queue (run `jobCommit()` to execute) and `true` is returned.
  ## Otherwise nothing is done and `false` is returned.
  let rcDiff = xp.headDiff(newHead)
  if not rcDiff.isOk:
    return false
  let delta = rcDiff.value

  # Re-inject transactions, do that via the job queue
  if 0 < delta.addTxs.len:
    discard xp.job(TxJobDataRef(
      kind:       txJobAddTxs,
      addTxsArgs: (
        txs:      toSeq(delta.addTxs.nextValues),
        info:     "")))

  # Delete already *mined* transactions
  if 0 < delta.remTxs.len:
    discard xp.job(TxJobDataRef(
      kind:           txJobDelItemIDs,
      delItemIDsArgs: (
        itemIDs:      toSeq(delta.remTxs.keys),
        reason:       txInfoChainHeadUpdate)))
  true
proc jobCommit*(xp: TxPoolRef; forceMaintenance = false)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## This function processes all jobs currently queued. If the argument
  ## `forceMaintenance` is set `true`, maintenance processing is always run.
  ## Otherwise it is only run if there were active jobs.
  let nJobs = xp.processJobs
  if 0 < nJobs or forceMaintenance:
    xp.maintenanceProcessing
  debug "processed jobs", nJobs

proc nJobs*(xp: TxPoolRef): int
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Return the number of jobs currently unprocessed, waiting.
  xp.byJob.len

# hide complexity unless really needed
when JobWaitEnabled:
  proc jobWait*(xp: TxPoolRef) {.async,raises: [Defect,CatchableError].} =
    ## Asynchronously wait until at least one job is queued and available.
    ## This function might be useful for testing (available only if the
    ## `JobWaitEnabled` compile time constant is set.)
    await xp.byJob.waitAvail

proc triggerReorg*(xp: TxPoolRef) =
  ## This function triggers a bucket re-org action with the next job queue
  ## maintenance-processing (see `jobCommit()`) by setting the `dirtyBuckets`
  ## parameter. This re-org action eventually happens when the
  ## `autoUpdateBucketsDB` flag is also set.
  xp.pDirtyBuckets = true
# ------------------------------------------------------------------------------
# Public functions, getters
# ------------------------------------------------------------------------------
proc baseFee*(xp: TxPoolRef): GasPrice =
  ## Getter, this parameter modifies/determines the expected gain when packing
  xp.chain.baseFee

proc dirtyBuckets*(xp: TxPoolRef): bool =
  ## Getter, bucket database is ready for re-org if the `autoUpdateBucketsDB`
  ## flag is also set.
  xp.pDirtyBuckets

proc ethBlock*(xp: TxPoolRef): EthBlock
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Getter, retrieves a packed block ready for mining and signing depending
  ## on the internally cached block chain head, the txs in the pool and some
  ## tuning parameters. The following block header fields are left
  ## uninitialised:
  ##
  ## * *extraData*: Blob
  ## * *mixDigest*: Hash256
  ## * *nonce*: BlockNonce
  ##
  ## Note that this getter runs *ad hoc* all the txs through the VM in
  ## order to build the block.
  xp.packerVmExec                            # updates vmState
  result.header = xp.chain.getHeader         # uses updated vmState
  # Collect packed txs: accounts in decreasing order, nonces increasing.
  for (_,nonceList) in xp.txDB.decAccount(txItemPacked):
    result.txs.add toSeq(nonceList.incNonce).mapIt(it.tx)

proc gasCumulative*(xp: TxPoolRef): GasInt
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Getter, retrieves the gas that will be burned in the block after
  ## retrieving it via `ethBlock`.
  xp.chain.gasUsed

proc gasTotals*(xp: TxPoolRef): TxTabsGasTotals
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Getter, retrieves the current gas limit totals per bucket.
  xp.txDB.gasTotals

proc lwmTrgPercent*(xp: TxPoolRef): int =
  ## Getter, `trgGasLimit` percentage for `lwmGasLimit` which is
  ## `max(minGasLimit, trgGasLimit * lwmTrgPercent / 100)`
  xp.chain.lhwm.lwmTrg

proc flags*(xp: TxPoolRef): set[TxPoolFlags] =
  ## Getter, retrieves strategy symbols for how to process items and buckets.
  xp.pFlags
proc head*(xp: TxPoolRef): BlockHeader =
  ## Getter, cached block chain insertion point. Typically, this should be
  ## the same header as retrieved by the `getCanonicalHead()` (unless in the
  ## middle of a mining update.)
  xp.chain.head

proc hwmMaxPercent*(xp: TxPoolRef): int =
  ## Getter, `maxGasLimit` percentage for `hwmGasLimit` which is
  ## `max(trgGasLimit, maxGasLimit * hwmMaxPercent / 100)`
  xp.chain.lhwm.hwmMax

proc maxGasLimit*(xp: TxPoolRef): GasInt =
  ## Getter, hard size limit when packing blocks (see also `trgGasLimit`.)
  xp.chain.limits.maxLimit

# core/tx_pool.go(435): func (pool *TxPool) GasPrice() *big.Int {
proc minFeePrice*(xp: TxPoolRef): GasPrice =
  ## Getter, retrieves minimum for the current gas fee enforced by the
  ## transaction pool for txs to be packed. This is an EIP-1559 only
  ## parameter (see `stage1559MinFee` strategy.)
  xp.pMinFeePrice

proc minPreLondonGasPrice*(xp: TxPoolRef): GasPrice =
  ## Getter, retrieves the current gas price enforced by the transaction
  ## pool. This is a pre-London parameter (see `packedPlMinPrice` strategy.)
  xp.pMinPlGasPrice

proc minTipPrice*(xp: TxPoolRef): GasPrice =
  ## Getter, retrieves minimum for the current gas tip (or priority fee)
  ## enforced by the transaction pool. This is an EIP-1559 parameter but it
  ## comes with a fall back interpretation (see `stage1559MinTip` strategy.)
  ## for legacy transactions.
  xp.pMinTipPrice

# core/tx_pool.go(474): func (pool SetGasPrice,*TxPool) Stats() (int, int) {
# core/tx_pool.go(1728): func (t *txLookup) Count() int {
# core/tx_pool.go(1737): func (t *txLookup) LocalCount() int {
# core/tx_pool.go(1745): func (t *txLookup) RemoteCount() int {
proc nItems*(xp: TxPoolRef): TxTabsItemsCount
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Getter, retrieves the current number of items per bucket and
  ## some totals.
  xp.txDB.nItems

proc profitability*(xp: TxPoolRef): GasPrice =
  ## Getter, a calculation of the average *price* per gas to be rewarded after
  ## packing the last block (see `ethBlock`). This *price* is only based on
  ## execution transaction in the VM without *PoW* specific rewards. The net
  ## profit (as opposed to the *PoW/PoA* specific *reward*) can be calculated
  ## as `gasCumulative * profitability`.
  # Guard against division by zero when no gas was used at all.
  if 0 < xp.chain.gasUsed:
    (xp.chain.profit div xp.chain.gasUsed.u256).truncate(uint64).GasPrice
  else:
    0.GasPrice

proc trgGasLimit*(xp: TxPoolRef): GasInt =
  ## Getter, soft size limit when packing blocks (might be extended to
  ## `maxGasLimit`)
  xp.chain.limits.trgLimit
# ------------------------------------------------------------------------------
# Public functions, setters
# ------------------------------------------------------------------------------
proc `baseFee=`*(xp: TxPoolRef; val: GasPrice)
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Setter, sets `baseFee` explicitly without triggering a packer update.
  ## Still a database update might take place when updating account ranks.
  ##
  ## Typically, this function would *not* be called but rather the `head=`
  ## update would be employed to do the job figuring out the proper value
  ## for the `baseFee`.
  # Keep the bucket database and the cached chain state in sync.
  xp.txDB.baseFee = val
  xp.chain.baseFee = val

proc `lwmTrgPercent=`*(xp: TxPoolRef; val: int) =
  ## Setter, `val` arguments outside `0..100` are ignored
  if 0 <= val and val <= 100:
    xp.chain.lhwm = (lwmTrg: val, hwmMax: xp.chain.lhwm.hwmMax)

proc `flags=`*(xp: TxPoolRef; val: set[TxPoolFlags]) =
  ## Setter, strategy symbols for how to process items and buckets.
  xp.pFlags = val

proc `hwmMaxPercent=`*(xp: TxPoolRef; val: int) =
  ## Setter, `val` arguments outside `0..100` are ignored
  if 0 <= val and val <= 100:
    xp.chain.lhwm = (lwmTrg: xp.chain.lhwm.lwmTrg, hwmMax: val)

proc `maxRejects=`*(xp: TxPoolRef; val: int) =
  ## Setter, the size of the waste basket. This setting becomes effective with
  ## the next move of an item into the waste basket.
  xp.txDB.maxRejects = val
# core/tx_pool.go(444): func (pool *TxPool) SetGasPrice(price *big.Int) {
proc `minFeePrice=`*(xp: TxPoolRef; val: GasPrice)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Setter for `minFeePrice`. If there was a value change, this function
  ## implies `triggerReorg()`.
  # Only mark the buckets dirty when the value actually changed.
  if xp.pMinFeePrice != val:
    xp.pMinFeePrice = val
    xp.pDirtyBuckets = true

# core/tx_pool.go(444): func (pool *TxPool) SetGasPrice(price *big.Int) {
proc `minPreLondonGasPrice=`*(xp: TxPoolRef; val: GasPrice) =
  ## Setter for `minPlGasPrice`. If there was a value change, this function
  ## implies `triggerReorg()`.
  if xp.pMinPlGasPrice != val:
    xp.pMinPlGasPrice = val
    xp.pDirtyBuckets = true

# core/tx_pool.go(444): func (pool *TxPool) SetGasPrice(price *big.Int) {
proc `minTipPrice=`*(xp: TxPoolRef; val: GasPrice) =
  ## Setter for `minTipPrice`. If there was a value change, this function
  ## implies `triggerReorg()`.
  if xp.pMinTipPrice != val:
    xp.pMinTipPrice = val
    xp.pDirtyBuckets = true
proc `head=`*(xp: TxPoolRef; val: BlockHeader)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Setter, cached block chain insertion point. This will also update the
  ## internally cached `baseFee` (depends on the block chain state.)
  if xp.chain.head != val:
    xp.chain.head = val        # calculates the new baseFee
    xp.txDB.baseFee = xp.chain.baseFee
    xp.pDirtyBuckets = true
    xp.bucketFlushPacked

# ------------------------------------------------------------------------------
# Public functions, per-tx-item operations
# ------------------------------------------------------------------------------

# core/tx_pool.go(979): func (pool *TxPool) Get(hash common.Hash) ..
# core/tx_pool.go(985): func (pool *TxPool) Has(hash common.Hash) bool {
proc getItem*(xp: TxPoolRef; hash: Hash256): Result[TxItemRef,void]
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Returns a transaction if it is contained in the pool.
  xp.txDB.byItemID.eq(hash)

proc disposeItems*(xp: TxPoolRef; item: TxItemRef;
                   reason = txInfoExplicitDisposal;
                   otherReason = txInfoImpliedDisposal): int
    {.discardable,gcsafe,raises: [Defect,CatchableError].} =
  ## Move item to wastebasket. All items for the same sender with nonces
  ## greater than the current one are deleted, as well. The function returns
  ## the number of items eventually removed.
  xp.disposeItemAndHigherNonces(item, reason, otherReason)
# ------------------------------------------------------------------------------
# Public functions, more immediate actions deemed not so important yet
# ------------------------------------------------------------------------------
#[
# core/tx_pool.go(561): func (pool *TxPool) Locals() []common.Address {
proc getAccounts*(xp: TxPoolRef; local: bool): seq[EthAddress]
{.gcsafe,raises: [Defect,CatchableError].} =
  ## Retrieves the accounts currently considered `local` or `remote` (i.e.
  ## they have txs of that kind), depending on the request arguments.
if local:
result = xp.txDB.locals
else:
result = xp.txDB.remotes
# core/tx_pool.go(1797): func (t *txLookup) RemoteToLocals(locals ..
proc remoteToLocals*(xp: TxPoolRef; signer: EthAddress): int
{.gcsafe,raises: [Defect,CatchableError].} =
## For given account, remote transactions are migrated to local transactions.
## The function returns the number of transactions migrated.
xp.txDB.setLocal(signer)
xp.txDB.bySender.eq(signer).nItems
]#
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,308 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Block Chain Packer Environment
## ===============================================
##
import
std/[sets, times],
../../chain_config,
../../constants,
../../db/[accounts_cache, db_chain],
../../forks,
../../p2p/executor,
../../utils,
../../vm_state,
../../vm_types,
./tx_chain/[tx_basefee, tx_gaslimits],
./tx_item,
eth/[common],
nimcrypto
export
TxChainGasLimits,
TxChainGasLimitsPc
{.push raises: [Defect].}
const
  TRG_THRESHOLD_PER_CENT = ##\
    ## VM executor may stop if this per centage of `trgLimit` has
    ## been reached.
    90

  MAX_THRESHOLD_PER_CENT = ##\
    ## VM executor may stop if this per centage of `maxLimit` has
    ## been reached.
    90

type
  TxChainPackerEnv = tuple
    vmState: BaseVMState     ## current tx/packer environment
    receipts: seq[Receipt]   ## `vmState.receipts` after packing
    reward: Uint256          ## Miner balance difference after packing
    profit: Uint256          ## Net reward (w/o PoW specific block rewards)
    txRoot: Hash256          ## `rootHash` after packing
    stateRoot: Hash256       ## `stateRoot` after packing

  TxChainRef* = ref object ##\
    ## State cache of the transaction environment for creating a new\
    ## block. This state is typically synchronised with the canonical\
    ## block chain head when updated.
    db: BaseChainDB          ## Block chain database
    miner: EthAddress        ## Address of fee beneficiary
    lhwm: TxChainGasLimitsPc ## Hwm/lwm gas limit percentage
    maxMode: bool            ## target or maximal limit for next block header
    roAcc: ReadOnlyStateDB   ## Accounts cache fixed on current sync header
    limits: TxChainGasLimits ## Gas limits for packer and next header
    txEnv: TxChainPackerEnv  ## Assorted parameters, tx packer environment
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc resetTxEnv(dh: TxChainRef; parent: BlockHeader; fee: Option[UInt256])
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Re-create the packer environment on top of `parent`. The gas limit is
  ## chosen from the pre-computed `limits` depending on `maxMode`.
  dh.txEnv.reset
  dh.txEnv.vmState = BaseVMState.new(
    parent    = parent,
    timestamp = getTime().utc.toTime,
    gasLimit  = (if dh.maxMode: dh.limits.maxLimit else: dh.limits.trgLimit),
    fee       = fee,
    miner     = dh.miner,
    chainDB   = dh.db)
  # Start from an empty tx set: blank tx root, state root as of `parent`.
  dh.txEnv.txRoot = BLANK_ROOT_HASH
  dh.txEnv.stateRoot = dh.txEnv.vmState.parent.stateRoot

proc update(dh: TxChainRef; parent: BlockHeader)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Re-position the descriptor to a new insertion point `parent`:
  ## refresh the read-only accounts cache, the gas limits, and the
  ## packer environment (incl. the EIP-1559 `baseFee` if London is active
  ## for the next block).
  let
    acc = AccountsCache.init(dh.db.db, parent.stateRoot, dh.db.pruneTrie)
    fee = if FkLondon <= dh.db.config.toFork(parent.blockNumber + 1):
            some(dh.db.config.baseFeeGet(parent).uint64.u256)
          else:
            UInt256.none()

  # Keep a separate accounts descriptor positioned at the sync point
  dh.roAcc = ReadOnlyStateDB(acc)

  dh.limits = dh.db.gasLimitsGet(parent, dh.lhwm)
  dh.resetTxEnv(parent, fee)
# ------------------------------------------------------------------------------
# Public functions, constructor
# ------------------------------------------------------------------------------

proc new*(T: type TxChainRef; db: BaseChainDB; miner: EthAddress): T
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Constructor. Positions the descriptor at the canonical chain head
  ## with default low/high water mark percentages.
  new result

  result.db = db
  result.miner = miner
  result.lhwm.lwmTrg = TRG_THRESHOLD_PER_CENT
  result.lhwm.hwmMax = MAX_THRESHOLD_PER_CENT
  result.update(db.getCanonicalHead)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc getBalance*(dh: TxChainRef; account: EthAddress): UInt256
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Wrapper around `vmState.readOnlyStateDB.getBalance()` for a `vmState`
  ## descriptor positioned at the `dh.head`. This might differ from the
  ## `dh.vmState.readOnlyStateDB.getBalance()` which returns the current
  ## balance relative to what has been accumulated by the current packing
  ## procedure.
  dh.roAcc.getBalance(account)

proc getNonce*(dh: TxChainRef; account: EthAddress): AccountNonce
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Wrapper around `vmState.readOnlyStateDB.getNonce()` for a `vmState`
  ## descriptor positioned at the `dh.head`. This might differ from the
  ## `dh.vmState.readOnlyStateDB.getNonce()` which returns the current nonce
  ## relative to what has been accumulated by the current packing procedure.
  dh.roAcc.getNonce(account)
proc getHeader*(dh: TxChainRef): BlockHeader
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Generate a new header, a child of the cached `head` (similar to
  ## `utils.generateHeaderFromParentHeader()`.)

  # Cumulative gas of the last receipt, or zero when nothing was packed yet.
  var cumulativeGas = 0.GasInt
  if 0 < dh.txEnv.receipts.len:
    cumulativeGas = dh.txEnv.receipts[^1].cumulativeGasUsed

  result = BlockHeader(
    parentHash:  dh.txEnv.vmState.parent.blockHash,
    ommersHash:  EMPTY_UNCLE_HASH,
    coinbase:    dh.miner,
    stateRoot:   dh.txEnv.stateRoot,
    txRoot:      dh.txEnv.txRoot,
    receiptRoot: dh.txEnv.receipts.calcReceiptRoot,
    bloom:       dh.txEnv.receipts.createBloom,
    difficulty:  dh.txEnv.vmState.difficulty,
    blockNumber: dh.txEnv.vmState.blockNumber,
    gasLimit:    dh.txEnv.vmState.gasLimit,
    gasUsed:     cumulativeGas,
    timestamp:   dh.txEnv.vmState.timestamp,
    # extraData: Blob       # signing data
    # mixDigest: Hash256    # mining hash for given difficulty
    # nonce:     BlockNonce # mining free variable
    fee:         dh.txEnv.vmState.fee)
proc clearAccounts*(dh: TxChainRef)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Reset transaction environment, e.g. before packing a new block
  dh.resetTxEnv(dh.txEnv.vmState.parent, dh.txEnv.vmState.fee)

# ------------------------------------------------------------------------------
# Public functions, getters
# ------------------------------------------------------------------------------

proc db*(dh: TxChainRef): BaseChainDB =
  ## Getter
  dh.db

proc config*(dh: TxChainRef): ChainConfig =
  ## Getter, shortcut for `dh.db.config`
  dh.db.config

proc head*(dh: TxChainRef): BlockHeader =
  ## Getter, the parent header the packer environment is based on
  dh.txEnv.vmState.parent

proc limits*(dh: TxChainRef): TxChainGasLimits =
  ## Getter
  dh.limits

proc lhwm*(dh: TxChainRef): TxChainGasLimitsPc =
  ## Getter
  dh.lhwm

proc maxMode*(dh: TxChainRef): bool =
  ## Getter
  dh.maxMode

proc miner*(dh: TxChainRef): EthAddress =
  ## Getter, shortcut for `dh.vmState.minerAddress`
  dh.miner

proc baseFee*(dh: TxChainRef): GasPrice =
  ## Getter, baseFee for the next block header. This value is auto-generated
  ## when a new insertion point is set via `head=`. Returns zero when no
  ## EIP-1559 fee is set (pre-London).
  if dh.txEnv.vmState.fee.isSome:
    dh.txEnv.vmState.fee.get.truncate(uint64).GasPrice
  else:
    0.GasPrice

proc nextFork*(dh: TxChainRef): Fork =
  ## Getter, fork of next block
  dh.db.config.toFork(dh.txEnv.vmState.blockNumber)

proc gasUsed*(dh: TxChainRef): GasInt =
  ## Getter, accumulated gas burned for collected blocks. Zero when no
  ## receipts have been collected yet.
  if 0 < dh.txEnv.receipts.len:
    return dh.txEnv.receipts[^1].cumulativeGasUsed

proc profit*(dh: TxChainRef): Uint256 =
  ## Getter
  dh.txEnv.profit

proc receipts*(dh: TxChainRef): seq[Receipt] =
  ## Getter, receipts for collected blocks
  dh.txEnv.receipts

proc reward*(dh: TxChainRef): Uint256 =
  ## Getter, reward for collected blocks
  dh.txEnv.reward

proc stateRoot*(dh: TxChainRef): Hash256 =
  ## Getter, accounting DB state root hash for the next block header
  dh.txEnv.stateRoot

proc txRoot*(dh: TxChainRef): Hash256 =
  ## Getter, transaction state root hash for the next block header
  dh.txEnv.txRoot

proc vmState*(dh: TxChainRef): BaseVMState =
  ## Getter, `BaseVmState` descriptor based on the current insertion point.
  dh.txEnv.vmState
# ------------------------------------------------------------------------------
# Public functions, setters
# ------------------------------------------------------------------------------

proc `baseFee=`*(dh: TxChainRef; val: GasPrice) =
  ## Setter, temporarily overwrites parameter until next `head=` update. This
  ## function would be called in exceptional cases only as this parameter is
  ## determined by the `head=` update.
  if 0 < val or FkLondon <= dh.db.config.toFork(dh.txEnv.vmState.blockNumber):
    dh.txEnv.vmState.fee = some(val.uint64.u256)
  else:
    # Pre-London with a zero fee: no EIP-1559 fee field at all.
    dh.txEnv.vmState.fee = UInt256.none()

proc `head=`*(dh: TxChainRef; val: BlockHeader)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Setter, updates descriptor. This setter re-positions the `vmState` and
  ## account caches to a new insertion point on the block chain database.
  dh.update(val)

proc `lhwm=`*(dh: TxChainRef; val: TxChainGasLimitsPc) =
  ## Setter, tuple `(lwmTrg,hwmMax)` will allow the packer to continue
  ## up until the percentage level has been reached of the `trgLimit`, or
  ## `maxLimit` depending on what has been activated.
  if dh.lhwm != val:
    dh.lhwm = val
    # Re-derive the limits and the packer's gas limit from the new marks.
    let parent = dh.txEnv.vmState.parent
    dh.limits = dh.db.gasLimitsGet(parent, dh.limits.gasLimit, dh.lhwm)
    dh.txEnv.vmState.gasLimit = if dh.maxMode: dh.limits.maxLimit
                                else: dh.limits.trgLimit

proc `maxMode=`*(dh: TxChainRef; val: bool) =
  ## Setter, the packing mode (maximal or target limit) for the next block
  ## header
  dh.maxMode = val
  dh.txEnv.vmState.gasLimit = if dh.maxMode: dh.limits.maxLimit
                              else: dh.limits.trgLimit

proc `miner=`*(dh: TxChainRef; val: EthAddress) =
  ## Setter, fee beneficiary (kept in sync with the `vmState`)
  dh.miner = val
  dh.txEnv.vmState.minerAddress = val

proc `profit=`*(dh: TxChainRef; val: Uint256) =
  ## Setter
  dh.txEnv.profit = val

proc `receipts=`*(dh: TxChainRef; val: seq[Receipt]) =
  ## Setter, implies `gasUsed`
  dh.txEnv.receipts = val

proc `reward=`*(dh: TxChainRef; val: Uint256) =
  ## Setter
  dh.txEnv.reward = val

proc `stateRoot=`*(dh: TxChainRef; val: Hash256) =
  ## Setter
  dh.txEnv.stateRoot = val

proc `txRoot=`*(dh: TxChainRef; val: Hash256) =
  ## Setter
  dh.txEnv.txRoot = val
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,92 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Block Chain Helper: Calculate Base Fee
## =======================================
##
import
../../../chain_config,
../../../constants,
../../../forks,
../tx_item,
eth/[common]
{.push raises: [Defect].}
const
  EIP1559_BASE_FEE_CHANGE_DENOMINATOR = ##\
    ## Bounds the amount the base fee can change between blocks.
    8

  EIP1559_ELASTICITY_MULTIPLIER = ##\
    ## Bounds the maximum gas limit an EIP-1559 block may have.
    2

  EIP1559_INITIAL_BASE_FEE = ##\
    ## Initial base fee for Eip1559 blocks (in wei.)
    1_000_000_000
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc baseFeeGet*(config: ChainConfig; parent: BlockHeader): GasPrice =
  ## Calculates the `baseFee` of the head assuming this is the parent of a
  ## new block header to generate. This function is derived from
  ## `p2p/gaslimit.calcEip1599BaseFee()` which in turn has its origins on
  ## `consensus/misc/eip1559.go` of geth.

  # Note that the baseFee is calculated for the next header
  let
    parentGasUsed = parent.gasUsed
    parentGasLimit = parent.gasLimit
    parentBaseFee = parent.baseFee.truncate(uint64)
    parentFork = config.toFork(parent.blockNumber)
    nextFork = config.toFork(parent.blockNumber + 1)

  # Pre-London next block: no base fee at all.
  if nextFork < FkLondon:
    return 0.GasPrice

  # If the new block is the first EIP-1559 block, return initial base fee.
  if parentFork < FkLondon:
    return EIP1559_INITIAL_BASE_FEE.GasPrice

  let
    parGasTrg = parentGasLimit div EIP1559_ELASTICITY_MULTIPLIER
    parGasDenom = (parGasTrg * EIP1559_BASE_FEE_CHANGE_DENOMINATOR).uint64

  # If parent gasUsed is the same as the target, the baseFee remains unchanged.
  if parentGasUsed == parGasTrg:
    return parentBaseFee.GasPrice

  if parGasTrg < parentGasUsed:
    # If the parent block used more gas than its target, the baseFee should
    # increase. The increase is at least one unit (`max(1u64,..)`.)
    let
      gasUsedDelta = (parentGasUsed - parGasTrg).uint64
      baseFeeDelta = (parentBaseFee * gasUsedDelta) div parGasDenom
    return (parentBaseFee + max(1u64, baseFeeDelta)).GasPrice

  # Otherwise if the parent block used less gas than its target, the
  # baseFee should decrease.
  let
    gasUsedDelta = (parGasTrg - parentGasUsed).uint64
    baseFeeDelta = (parentBaseFee * gasUsedDelta) div parGasDenom

  # Guard against underflow: never return a negative fee, clamp at zero.
  if baseFeeDelta < parentBaseFee:
    return (parentBaseFee - baseFeeDelta).GasPrice
  0.GasPrice
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,117 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Block Chain Helper: Gas Limits
## ==============================
##
import
std/[math],
../../../chain_config,
../../../db/db_chain,
../../../constants,
../../../forks,
eth/[common]
{.push raises: [Defect].}
type
  TxChainGasLimitsPc* = tuple
    lwmTrg: int ##\
      ## VM executor may stop if this per centage of `trgLimit` has
      ## been reached.
    hwmMax: int ##\
      ## VM executor may stop if this per centage of `maxLimit` has
      ## been reached.

  TxChainGasLimits* = tuple
    gasLimit: GasInt ## Parent gas limit, used as a base for others
    minLimit: GasInt ## Minimum `gasLimit` for the packer
    lwmLimit: GasInt ## Low water mark for VM/exec packer
    trgLimit: GasInt ## The `gasLimit` for the packer, soft limit
    hwmLimit: GasInt ## High water mark for VM/exec packer
    maxLimit: GasInt ## May increase the `gasLimit` a bit, hard limit

const
  PRE_LONDON_GAS_LIMIT_TRG = ##\
    ## https://ethereum.org/en/developers/docs/blocks/#block-size
    15_000_000.GasInt

  PRE_LONDON_GAS_LIMIT_MAX = ##\
    ## https://ethereum.org/en/developers/docs/blocks/#block-size
    30_000_000.GasInt
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc setPostLondonLimits(gl: var TxChainGasLimits) =
  ## EIP-1559 conformant gas limit update. Derives the packer's soft target
  ## `trgLimit` from the parent `gasLimit` and brackets it with `minLimit`
  ## and `maxLimit` one adjustment quantum below/above.
  gl.trgLimit = max(gl.gasLimit, GAS_LIMIT_MINIMUM)

  # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md
  # find in box: block.gas_used
  let delta = gl.trgLimit.floorDiv(GAS_LIMIT_ADJUSTMENT_FACTOR)

  # Bug fix: the lower bound must *subtract* `delta` and the upper bound
  # *add* it. The previous code had the two assignments swapped, which
  # produced an inverted interval with `minLimit > maxLimit`.
  gl.minLimit = gl.trgLimit - delta
  gl.maxLimit = gl.trgLimit + delta

  # Fringe case: use the middle between min/max
  if gl.minLimit <= GAS_LIMIT_MINIMUM:
    gl.minLimit = GAS_LIMIT_MINIMUM
    gl.trgLimit = (gl.minLimit + gl.maxLimit) div 2
proc setPreLondonLimits(gl: var TxChainGasLimits) =
  ## Pre-EIP-1559 conformant gas limit update
  gl.maxLimit = PRE_LONDON_GAS_LIMIT_MAX

  const delta = (PRE_LONDON_GAS_LIMIT_TRG - GAS_LIMIT_MINIMUM) div 2

  # Just made up to be convenient for the packer
  if gl.gasLimit <= GAS_LIMIT_MINIMUM + delta:
    gl.minLimit = max(gl.gasLimit, GAS_LIMIT_MINIMUM)
    gl.trgLimit = PRE_LONDON_GAS_LIMIT_TRG
  else:
    # This setting preserves the setting from the parent block
    gl.minLimit = gl.gasLimit - delta
    gl.trgLimit = gl.gasLimit
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc gasLimitsGet*(db: BaseChainDB; parent: BlockHeader; parentLimit: GasInt;
                   pc: TxChainGasLimitsPc): TxChainGasLimits =
  ## Calculate gas limits for the next block header. The EIP-1559 variant is
  ## used when the next block is at or after the London fork.
  result.gasLimit = parentLimit

  let nextFork = db.config.toFork(parent.blockNumber + 1)
  if FkLondon <= nextFork:
    result.setPostLondonLimits
  else:
    result.setPreLondonLimits

  # VM/exec low/high water marks, optionally provided for packer.
  # The `+ 50` rounds the percentage scaling to the nearest integer.
  result.lwmLimit = max(
    result.minLimit, (result.trgLimit * pc.lwmTrg + 50) div 100)

  result.hwmLimit = max(
    result.trgLimit, (result.maxLimit * pc.hwmMax + 50) div 100)

proc gasLimitsGet*(db: BaseChainDB; parent: BlockHeader;
                   pc: TxChainGasLimitsPc): TxChainGasLimits =
  ## Variant of `gasLimitsGet()` using the parent header's own gas limit
  ## as the base.
  db.gasLimitsGet(parent, parent.gasLimit, pc)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,279 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Descriptor
## ===========================
##
import
std/[times],
../../db/db_chain,
./tx_chain,
./tx_info,
./tx_item,
./tx_job,
./tx_tabs,
./tx_tabs/tx_sender, # for verify()
eth/[common, keys]
{.push raises: [Defect].}
type
  TxPoolCallBackRecursion* = object of Defect
    ## Attempt to recurse a call back function

  TxPoolFlags* = enum ##\
    ## Processing strategy selector symbols

    stageItems1559MinFee ##\
      ## Stage tx items with `tx.maxFee` at least `minFeePrice`. Other items
      ## are left or set pending. This symbol affects post-London tx items,
      ## only.

    stageItems1559MinTip ##\
      ## Stage tx items with `tx.effectiveGasTip(baseFee)` at least
      ## `minTipPrice`. Other items are considered underpriced and left
      ## or set pending. This symbol affects post-London tx items, only.

    stageItemsPlMinPrice ##\
      ## Stage tx items with `tx.gasPrice` at least `minPreLondonGasPrice`.
      ## Other items are considered underpriced and left or set pending.
      ## This symbol affects pre-London tx items, only.

    # -----------

    packItemsMaxGasLimit ##\
      ## It set, the *packer* will execute and collect additional items from
      ## the `staged` bucket while accumulating `gasUsed` as long as
      ## `maxGasLimit` is not exceeded. If `packItemsTryHarder` flag is also
      ## set, the *packer* will not stop until at least `hwmGasLimit` is
      ## reached.
      ##
      ## Otherwise the *packer* will accumulate up until `trgGasLimit` is
      ## not exceeded, and not stop until at least `lwmGasLimit` is reached
      ## in case `packItemsTryHarder` is also set,

    packItemsTryHarder ##\
      ## It set, the *packer* will *not* stop accumulaing transactions up until
      ## the `lwmGasLimit` or `hwmGasLimit` is reached, depending on whether
      ## the `packItemsMaxGasLimit` is set. Otherwise, accumulating stops
      ## immediately before the next transaction exceeds `trgGasLimit`, or
      ## `maxGasLimit` depending on `packItemsMaxGasLimit`.

    # -----------

    autoUpdateBucketsDB ##\
      ## Automatically update the state buckets after running batch jobs if
      ## the `dirtyBuckets` flag is also set.

    autoZombifyUnpacked ##\
      ## Automatically dispose *pending* or *staged* txs that were queued
      ## at least `lifeTime` ago.

    autoZombifyPacked ##\
      ## Automatically dispose *packed* txs that were queued
      ## at least `lifeTime` ago.

  TxPoolParam* = tuple ## Getter/setter accessible parameters
    minFeePrice: GasPrice       ## Gas price enforced by the pool, `gasFeeCap`
    minTipPrice: GasPrice       ## Desired tip-per-tx target, `effectiveGasTip`
    minPlGasPrice: GasPrice     ## Desired pre-London min `gasPrice`
    dirtyBuckets: bool          ## Buckets need to be updated
    doubleCheck: seq[TxItemRef] ## Check items after moving block chain head
    flags: set[TxPoolFlags]     ## Processing strategy symbols

  TxPoolRef* = ref object of RootObj ##\
    ## Transaction pool descriptor
    startDate: Time             ## Start date (read-only)
    chain: TxChainRef           ## block chain state
    byJob: TxJobRef             ## Job batch list
    txDB: TxTabsRef             ## Transaction lists & tables
    lifeTime*: times.Duration   ## Maximum life time of a tx in the system
    priceBump*: uint            ## Min percentage price when superseding
    param: TxPoolParam          ## Getter/Setter parameters

const
  txItemLifeTime = ##\
    ## Maximum amount of time transactions can be held in the database\
    ## unless they are packed already for a block. This default is chosen\
    ## as found in core/tx_pool.go(184) of the geth implementation.
    initDuration(hours = 3)

  txPriceBump = ##\
    ## Minimum price bump percentage to replace an already existing\
    ## transaction (nonce). This default is chosen as found in\
    ## core/tx_pool.go(177) of the geth implementation.
    10u

  txMinFeePrice = 1.GasPrice
  txMinTipPrice = 1.GasPrice
  txPoolFlags = {stageItems1559MinTip,
                 stageItems1559MinFee,
                 stageItemsPlMinPrice,
                 packItemsTryHarder,
                 autoUpdateBucketsDB,
                 autoZombifyUnpacked}
# ------------------------------------------------------------------------------
# Public functions, constructor
# ------------------------------------------------------------------------------

proc init*(xp: TxPoolRef; db: BaseChainDB; miner: EthAddress)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Constructor, returns new tx-pool descriptor. The `miner` argument is
  ## the fee beneficiary for informational purposes only.
  xp.startDate = getTime().utc.toTime

  xp.chain = TxChainRef.new(db, miner)
  xp.txDB = TxTabsRef.new
  xp.byJob = TxJobRef.new

  # Defaults as found in the geth implementation (see const section above.)
  xp.lifeTime = txItemLifeTime
  xp.priceBump = txPriceBump

  xp.param.reset
  xp.param.minFeePrice = txMinFeePrice
  xp.param.minTipPrice = txMinTipPrice
  xp.param.flags = txPoolFlags
# ------------------------------------------------------------------------------
# Public functions, getters
# ------------------------------------------------------------------------------

proc byJob*(xp: TxPoolRef): TxJobRef =
  ## Getter, job queue
  xp.byJob

proc chain*(xp: TxPoolRef): TxChainRef =
  ## Getter, block chain DB
  xp.chain

proc pFlags*(xp: TxPoolRef): set[TxPoolFlags] =
  ## Returns the set of algorithm strategy symbols for labelling items
  ## as `packed`
  xp.param.flags

proc pDirtyBuckets*(xp: TxPoolRef): bool =
  ## Getter, buckets need update
  xp.param.dirtyBuckets

proc pDoubleCheck*(xp: TxPoolRef): seq[TxItemRef] =
  ## Getter, cached block chain head was moved back
  xp.param.doubleCheck

proc pMinFeePrice*(xp: TxPoolRef): GasPrice =
  ## Getter
  xp.param.minFeePrice

proc pMinTipPrice*(xp: TxPoolRef): GasPrice =
  ## Getter
  xp.param.minTipPrice

proc pMinPlGasPrice*(xp: TxPoolRef): GasPrice =
  ## Getter
  xp.param.minPlGasPrice

proc startDate*(xp: TxPoolRef): Time =
  ## Getter
  xp.startDate

proc txDB*(xp: TxPoolRef): TxTabsRef =
  ## Getter, pool database
  xp.txDB

# ------------------------------------------------------------------------------
# Public functions, setters
# ------------------------------------------------------------------------------

proc `pDirtyBuckets=`*(xp: TxPoolRef; val: bool) =
  ## Setter
  xp.param.dirtyBuckets = val

proc pDoubleCheckAdd*(xp: TxPoolRef; val: seq[TxItemRef]) =
  ## Pseudo setter, appends to the double-check list
  xp.param.doubleCheck.add val

proc pDoubleCheckFlush*(xp: TxPoolRef) =
  ## Pseudo setter, clears the double-check list
  xp.param.doubleCheck.setLen(0)

proc `pFlags=`*(xp: TxPoolRef; val: set[TxPoolFlags]) =
  ## Install a set of algorithm strategy symbols for labelling items
  ## as `packed`
  xp.param.flags = val

proc `pMinFeePrice=`*(xp: TxPoolRef; val: GasPrice) =
  ## Setter
  xp.param.minFeePrice = val

proc `pMinTipPrice=`*(xp: TxPoolRef; val: GasPrice) =
  ## Setter
  xp.param.minTipPrice = val

proc `pMinPlGasPrice=`*(xp: TxPoolRef; val: GasPrice) =
  ## Setter
  xp.param.minPlGasPrice = val
# ------------------------------------------------------------------------------
# Public functions, helpers (debugging only)
# ------------------------------------------------------------------------------

proc verify*(xp: TxPoolRef): Result[void,TxInfo]
    {.gcsafe, raises: [Defect,CatchableError].} =
  ## Verify descriptor and subsequent data structures.

  block:
    let rc = xp.byJob.verify
    if rc.isErr:
      return rc
  block:
    let rc = xp.txDB.verify
    if rc.isErr:
      return rc

  # verify consecutive nonces per sender
  var
    initOk = false
    lastSender: EthAddress
    lastNonce: AccountNonce
    lastSublist: TxSenderSchedRef

  for (_,nonceList) in xp.txDB.incAccount:
    for item in nonceList.incNonce:
      if not initOk or lastSender != item.sender:
        # New sender encountered (or first item): restart the nonce chain.
        initOk = true
        lastSender = item.sender
        lastNonce = item.tx.nonce
        lastSublist = xp.txDB.bySender.eq(item.sender).value.data
      elif lastNonce + 1 == item.tx.nonce:
        lastNonce = item.tx.nonce
      else:
        # Nonces of one sender must be strictly consecutive.
        return err(txInfoVfyNonceChain)

      # verify bucket boundary conditions: a `staged` or `packed` item must
      # not have a predecessor nonce sitting in a lower-ranked bucket.
      case item.status:
      of txItemPending:
        discard
      of txItemStaged:
        if lastSublist.eq(txItemPending).eq(item.tx.nonce - 1).isOk:
          return err(txInfoVfyNonceChain)
      of txItemPacked:
        if lastSublist.eq(txItemPending).eq(item.tx.nonce - 1).isOk:
          return err(txInfoVfyNonceChain)
        if lastSublist.eq(txItemStaged).eq(item.tx.nonce - 1).isOk:
          return err(txInfoVfyNonceChain)

  ok()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,142 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Meters
## =======================
##
import
metrics
{.push raises: [Defect].}
const
  # Provide some fall-back counters available for unit tests
  FallBackMetrics4Debugging = not defined(metrics)

when FallBackMetrics4Debugging:
  {.warning: "Debugging fall back mode for some gauges".}
  type
    DummyCounter* = ref object
      value: float64  ## accumulated counter value (fall-back mode only)
# ------------------------------------------------------------------------------
# Private settings
# ------------------------------------------------------------------------------

# Metrics for the pending pool

# core/tx_pool.go(97): pendingDiscardMeter = metrics.NewRegisteredMeter(..
declareGauge pendingDiscard, "n/a"
declareGauge pendingReplace, "n/a"
declareGauge pendingRateLimit, "n/a" # Dropped due to rate limiting
declareGauge pendingNofunds, "n/a"   # Dropped due to out-of-funds

# Metrics for the queued pool

# core/tx_pool.go(103): queuedDiscardMeter = metrics.NewRegisteredMeter(..
declareGauge queuedDiscard, "n/a"
declareGauge queuedReplace, "n/a"
declareGauge queuedRateLimit, "n/a" # Dropped due to rate limiting
declareGauge queuedNofunds, "n/a"   # Dropped due to out-of-funds

declareGauge evictionGauge,
  "A transaction has been on the system for too long so it was removed"

declareGauge impliedEvictionGauge,
  "Implied disposal for greater nonces (same sender) when base tx was removed"

# General tx metrics

# core/tx_pool.go(110): knownTxMeter = metrics.NewRegisteredMeter(..
declareGauge knownTransactions, "n/a"
declareGauge validTransactions, "n/a"
declareGauge invalidTransactions, "n/a"
declareGauge underpricedTransactions, "n/a"
declareGauge overflowedTransactions, "n/a"

# core/tx_pool.go(117): throttleTxMeter = metrics.NewRegisteredMeter(..
declareGauge throttleTransactions,
  "Rejected transactions due to too-many-changes between txpool reorgs"

# core/tx_pool.go(119): reorgDurationTimer = metrics.NewRegisteredTimer(..
declareGauge reorgDurationTimer, "Measures how long time a txpool reorg takes"

# core/tx_pool.go(122): dropBetweenReorgHistogram = metrics..
declareGauge dropBetweenReorgHistogram,
  "Number of expected drops between two reorg runs. It is expected that "&
    "this number is pretty low, since txpool reorgs happen very frequently"

# core/tx_pool.go(124): pendingGauge = metrics.NewRegisteredGauge(..
declareGauge pendingGauge, "n/a"
declareGauge queuedGauge, "n/a"
declareGauge localGauge, "n/a"
declareGauge slotsGauge, "n/a"

# core/tx_pool.go(129): reheapTimer = metrics.NewRegisteredTimer(..
declareGauge reheapTimer, "n/a"

# ----------------------

declareGauge unspecifiedError,
  "Some error occured but was not specified in any way. This counter should "&
    "stay zero."
# ------------------------------------------------------------------------------
# Exports
# ------------------------------------------------------------------------------

when FallBackMetrics4Debugging:
  let
    evictionMeter* = DummyCounter()
    impliedEvictionMeter* = DummyCounter()

  proc inc(w: DummyCounter; val: int64|float64 = 1,) =
    ## Fall-back `inc()` mimicking the gauge interface of the
    ## `metrics` package.
    w.value = w.value + val.float64
else:
  let
    evictionMeter* = evictionGauge
    impliedEvictionMeter* = impliedEvictionGauge

# ------------------------------------------------------------------------------
# Global functions -- deprecated
# ------------------------------------------------------------------------------

# Thin wrappers mapping the geth-style meter names onto the gauges
# declared above.
proc pendingDiscardMeter*(n = 1i64) = pendingDiscard.inc(n)
proc pendingReplaceMeter*(n = 1i64) = pendingReplace.inc(n)
proc pendingRateLimitMeter*(n = 1i64) = pendingRateLimit.inc(n)
proc pendingNofundsMeter*(n = 1i64) = pendingNofunds.inc(n)

proc queuedDiscardMeter*(n = 1i64) = queuedDiscard.inc(n)
proc queuedReplaceMeter*(n = 1i64) = queuedReplace.inc(n)
proc queuedRateLimitMeter*(n = 1i64) = queuedRateLimit.inc(n)
proc queuedNofundsMeter*(n = 1i64) = queuedNofunds.inc(n)

proc knownTxMeter*(n = 1i64) = knownTransactions.inc(n)
proc invalidTxMeter*(n = 1i64) = invalidTransactions.inc(n)
proc validTxMeter*(n = 1i64) = validTransactions.inc(n)
proc underpricedTxMeter*(n = 1i64) = underpricedTransactions.inc(n)
proc overflowedTxMeter*(n = 1i64) = overflowedTransactions.inc(n)
proc throttleTxMeter*(n = 1i64) = throttleTransactions.inc(n)
proc unspecifiedErrorMeter*(n = 1i64) = unspecifiedError.inc(n)

proc reorgDurationTimerMeter*(n = 1i64) = reorgDurationTimer.inc(n)
proc dropBetweenReorgHistogramMeter*(n = 1i64) =
  dropBetweenReorgHistogram.inc(n)

proc pendingGaugeMeter*(n = 1i64) = pendingGauge.inc(n)
proc queuedGaugeMeter*(n = 1i64) = queuedGauge.inc(n)
proc localGaugeMeter*(n = 1i64) = localGauge.inc(n)
proc slotsGaugeMeter*(n = 1i64) = slotsGauge.inc(n)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,208 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Info Symbols & Error Codes
## ===========================================
{.push raises: [Defect].}
type
  TxInfo* = enum
    ## Info and error codes for tx-pool items; most members carry a human
    ## readable string value used in logs and the waste basket.
    txInfoOk =
      (0, "no error")

    txInfoPackedBlockIncluded = ##\
      ## The transaction was disposed after packing into block
      "not needed anymore"

    txInfoSenderNonceSuperseded = ##\
      ## Tx superseded by another one with same <sender,nonce> index
      "Sender/nonce index superseded"

    txInfoErrNonceGap = ##\
      ## Non consecutive nonces detected after moving back the block chain
      ## head. This should not happen and indicates an inconsistency between
      ## cached transactions and the ones on the block chain.
      "nonce gap"

    txInfoErrImpliedNonceGap = ##\
      ## Implied disposal, applies to transactions with higher nonces after
      ## a `txInfoErrNonceGap` error.
      "implied nonce gap"

    txInfoExplicitDisposal = ##\
      ## Unspecified disposal reason (fallback value)
      "on-demand disposal"

    txInfoImpliedDisposal = ##\
      ## Implied disposal, typically implied by greater nonces (fallback value)
      "implied disposal"

    # ------ Miscellaneous errors ----------------------------------------------

    txInfoErrUnspecified = ##\
      ## Some unspecified error occurred
      "generic error"

    txInfoErrVoidDisposal = ##\
      ## Cannot dispose non-existing item
      "void disposal"

    txInfoErrAlreadyKnown = ##\
      ## The transaction is already contained within the pool
      "already known"

    txInfoErrSenderNonceIndex = ##\
      ## <sender,nonce> index for transaction exists, already.
      "Sender/nonce index error"

    txInfoErrTxPoolOverflow = ##\
      ## The transaction pool is full and can't accept another remote
      ## transaction.
      "txpool is full"

    # ------ Transaction format/parsing problems -------------------------------

    txInfoErrOversizedData = ##\
      ## The input data of a transaction is greater than some meaningful
      ## limit a user might use. This is not a consensus error making the
      ## transaction invalid, rather a DOS protection.
      "Oversized tx data"

    txInfoErrNegativeValue = ##\
      ## A sanity error to ensure no one is able to specify a transaction
      ## with a negative value.
      "Negative value in tx"

    txInfoErrUnexpectedProtection = ##\
      ## Transaction type does not support EIP-1559 protected signature
      "Unsupported EIP-1559 signature protection"

    txInfoErrInvalidTxType = ##\
      ## Transaction type not valid in this context
      "Unsupported tx type"

    txInfoErrTxTypeNotSupported = ##\
      ## Transaction type not supported
      "Unsupported transaction type"

    txInfoErrEmptyTypedTx = ##\
      ## Typed transaction, missing data
      "Empty typed transaction bytes"

    txInfoErrBasicValidatorFailed = ##\
      ## Running basic validator failed on current transaction
      "Tx rejected by basic validator"

    # ------ Signature problems ------------------------------------------------

    txInfoErrInvalidSender = ##\
      ## The transaction contains an invalid signature.
      "invalid sender"

    txInfoErrInvalidSig = ##\
      ## invalid transaction v, r, s values
      "Invalid transaction signature"

    # ------ Gas fee and selection problems ------------------------------------

    txInfoErrUnderpriced = ##\
      ## A transaction's gas price is below the minimum configured for the
      ## transaction pool.
      "Tx underpriced"

    txInfoErrReplaceUnderpriced = ##\
      ## A transaction is attempted to be replaced with a different one
      ## without the required price bump.
      "Replacement tx underpriced"

    txInfoErrGasLimit = ##\
      ## A transaction's requested gas limit exceeds the maximum allowance
      ## of the current block.
      "Tx exceeds block gasLimit"

    txInfoErrGasFeeCapTooLow = ##\
      ## Gas fee cap less than base fee
      "Tx has feeCap < baseFee"

    # ------- operational events related to transactions -----------------------

    txInfoErrTxExpired = ##\
      ## A transaction has been on the system for too long so it was removed.
      "Tx expired"

    txInfoErrTxExpiredImplied = ##\
      ## Implied disposal for greater nonces for the same sender when the base
      ## tx was removed.
      "Tx expired implied"

    # ------- update/move block chain head -------------------------------------

    txInfoErrAncestorMissing = ##\
      ## Cannot forward current head as it is detached from the block chain
      "Lost header ancestor"

    txInfoErrChainHeadMissing = ##\
      ## Must not back move current head as it is detached from the block chain
      "Lost header position"

    txInfoErrForwardHeadMissing = ##\
      ## Cannot move forward current head to non-existing target position
      "Non-existing forward header"

    txInfoErrUnrootedCurChain = ##\
      ## Some orphan block found in current branch of the block chain
      "Orphan block in current branch"

    txInfoErrUnrootedNewChain = ##\
      ## Some orphan block found in new branch of the block chain
      "Orphan block in new branch"

    txInfoChainHeadUpdate = ##\
      ## Tx becomes obsolete as it is in a mined block, already
      "Tx obsoleted"

    # ---------- debugging error codes as used in verifier functions -----------

    # failed verifier codes
    txInfoVfyLeafQueue             ## Corrupted leaf item queue
    txInfoVfyItemIdList            ## Corrupted ID queue/fifo structure
    txInfoVfyRejectsList           ## Corrupted waste basket structure
    txInfoVfyNonceChain            ## Non-consecutive nonces
    txInfoVfySenderRbTree          ## Corrupted sender list structure
    txInfoVfySenderLeafEmpty       ## Empty sender list leaf record
    txInfoVfySenderLeafQueue       ## Corrupted sender leaf queue
    txInfoVfySenderTotal           ## Wrong number of leaves
    txInfoVfySenderGasLimits       ## Wrong gas accu values
    txInfoVfySenderProfits         ## Profits calculation error
    txInfoVfyStatusRbTree          ## Corrupted status list structure
    txInfoVfyStatusTotal           ## Wrong number of leaves
    txInfoVfyStatusGasLimits       ## Wrong gas accu values
    txInfoVfyStatusSenderList      ## Corrupted status-sender sub-list
    txInfoVfyStatusNonceList       ## Corrupted status-nonce sub-list
    txInfoVfyStatusSenderTotal     ## Sender vs status table mismatch
    txInfoVfyStatusSenderGasLimits ## Wrong gas accu values
    txInfoVfyRankAddrMismatch      ## Different ranks in address set
    txInfoVfyReverseZombies        ## Zombie addresses in reverse lookup
    txInfoVfyRankReverseLookup     ## Sender missing in reverse lookup
    txInfoVfyRankReverseMismatch   ## Ranks differ with reverse lookup
    txInfoVfyRankDuplicateAddr     ## Same address with different ranks
    txInfoVfyRankTotal             ## Wrong number of leaves (i.e. addresses)

    # codes provided for other modules
    txInfoVfyJobQueue              ## Corrupted jobs queue/fifo structure
    txInfoVfyJobEvent              ## Event table sync error

View File

@ -0,0 +1,231 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Item Container & Wrapper
## =========================================
##
import
std/[hashes, sequtils, strutils, times],
../ec_recover,
../utils_defs,
./tx_info,
eth/[common, keys],
stew/results
{.push raises: [Defect].}
type
  GasPrice* = ##\
    ## Handy definition distinct from `GasInt` which is a commodity unit while
    ## the `GasPrice` is the commodity valuation per unit of gas, similar to a
    ## kind of currency.
    distinct uint64

  GasPriceEx* = ##\
    ## Similar to `GasPrice` but is allowed to be negative.
    distinct int64

  TxItemStatus* = enum ##\
    ## Current status of a transaction as seen by the pool.
    txItemPending = 0
    txItemStaged
    txItemPacked

  TxItemRef* = ref object of RootObj ##\
    ## Data container with transaction and meta data. Entries are *read-only*\
    ## by default, for some there is a setter available.
    tx:        Transaction  ## Transaction data
    itemID:    Hash256      ## Transaction hash, used as database key
    timeStamp: Time         ## Time when added to the pool
    sender:    EthAddress   ## Sender account address (from signature recovery)
    info:      string       ## Free-form info string attached by the client
    status:    TxItemStatus ## Transaction status (setter available)
    reject:    TxInfo       ## Reason for moving to waste basket
# ------------------------------------------------------------------------------
# Private, helpers for debugging and pretty printing
# ------------------------------------------------------------------------------
proc utcTime: Time =
  ## Current wall-clock time, normalised via a round trip through a
  ## UTC `DateTime`.
  let now = getTime()
  now.utc.toTime
# ------------------------------------------------------------------------------
# Public helpers supporting distinct types
# ------------------------------------------------------------------------------
# `GasPrice` lifts stringify, ordering, equality and additive operators
# from the underlying `uint64`.
proc `$`*(a: GasPrice): string {.borrow.}
proc `<`*(a, b: GasPrice): bool {.borrow.}
proc `<=`*(a, b: GasPrice): bool {.borrow.}
proc `==`*(a, b: GasPrice): bool {.borrow.}
proc `+`*(a, b: GasPrice): GasPrice {.borrow.}
proc `-`*(a, b: GasPrice): GasPrice {.borrow.}

# `GasPriceEx` (signed variant) additionally supports in-place update.
proc `$`*(a: GasPriceEx): string {.borrow.}
proc `<`*(a, b: GasPriceEx): bool {.borrow.}
proc `<=`*(a, b: GasPriceEx): bool {.borrow.}
proc `==`*(a, b: GasPriceEx): bool {.borrow.}
proc `+`*(a, b: GasPriceEx): GasPriceEx {.borrow.}
proc `-`*(a, b: GasPriceEx): GasPriceEx {.borrow.}
proc `+=`*(a: var GasPriceEx; b: GasPriceEx) {.borrow.}
proc `-=`*(a: var GasPriceEx; b: GasPriceEx) {.borrow.}

# Multiplication/division of *price* and *commodity unit*
proc `*`*(a: GasPrice; b: SomeUnsignedInt): GasPrice {.borrow.}
proc `*`*(a: SomeUnsignedInt; b: GasPrice): GasPrice {.borrow.}

proc `div`*(a: GasPrice; b: SomeUnsignedInt): GasPrice =
  (a.uint64 div b).GasPrice # beware of zero denominator

proc `*`*(a: SomeInteger; b: GasPriceEx): GasPriceEx =
  (a * b.int64).GasPriceEx # beware of under/overflow
# Mixed stuff, convenience ops
proc `-`*(a: GasPrice; b: SomeUnsignedInt): GasPrice {.borrow.}

proc `<`*(a: GasPriceEx; b: SomeSignedInt): bool =
  ## Strict comparison of a (possibly negative) price against a plain integer.
  a.int64 < b

proc `<`*(a: GasPriceEx|SomeSignedInt; b: GasPrice): bool =
  ## Compare against an unsigned price; any negative left operand is
  ## trivially smaller.
  if a.int64 < 0: true else: a.GasPrice < b

proc `<=`*(a: SomeSignedInt; b: GasPriceEx): bool =
  ## Non-strict comparison.
  # Fix: this was implemented as strict `<`, so `a == b` incorrectly
  # reported `false` for the `<=` operator.
  a <= b.int64
# ------------------------------------------------------------------------------
# Public functions, Constructor
# ------------------------------------------------------------------------------
proc init*(item: TxItemRef; status: TxItemStatus; info: string) =
  ## Re-initialise the meta data of an existing item descriptor: stamps it
  ## with the current time and clears any previous reject reason.
  item.status = status
  item.info = info
  item.reject = txInfoOk
  item.timeStamp = utcTime()
proc new*(T: type TxItemRef; tx: Transaction; itemID: Hash256;
          status: TxItemStatus; info: string): Result[T,void] =
  ## Create a fully populated item descriptor. Fails (with an empty error)
  ## when the sender address cannot be recovered from the tx signature.
  let recovered = tx.ecRecover
  if recovered.isErr:
    return err()
  ok(T(tx:        tx,
       itemID:    itemID,
       sender:    recovered.value,
       timeStamp: utcTime(),
       status:    status,
       info:      info))
proc new*(T: type TxItemRef; tx: Transaction;
          reject: TxInfo; status: TxItemStatus; info: string): T =
  ## Create incomplete item descriptor, so meta-data can be stored (e.g.
  ## for holding in the waste basket to be investigated later.)
  ##
  ## No sender recovery is attempted, so the `sender` field is left
  ## zero-initialised.
  # Fix: the `reject` argument was silently dropped before, losing the
  # disposal reason for waste basket entries created via this constructor.
  # NOTE(review): `itemID` is also left zero-initialised here, so basket
  # entries created this way share the same key -- verify whether the tx
  # hash should be stored as well.
  T(tx:        tx,
    reject:    reject,
    timeStamp: utcTime(),
    info:      info,
    status:    status)
# ------------------------------------------------------------------------------
# Public functions, Table ID helper
# ------------------------------------------------------------------------------
proc hash*(item: TxItemRef): Hash =
  ## Needed if `TxItemRef` is used as hash-`Table` index. Note: hashes the
  ## reference (object identity), not the transaction payload.
  cast[pointer](item).hash

# ------------------------------------------------------------------------------
# Public functions, transaction getters
# ------------------------------------------------------------------------------

proc itemID*(tx: Transaction): Hash256 =
  ## Getter, transaction ID (the RLP hash of the transaction)
  tx.rlpHash

# core/types/transaction.go(297): func (tx *Transaction) Cost() *big.Int {
proc cost*(tx: Transaction): UInt256 =
  ## Getter (go/ref compat): gas * gasPrice + value.
  (tx.gasPrice * tx.gasLimit).u256 + tx.value

# core/types/transaction.go(332): .. *Transaction) EffectiveGasTip(baseFee ..
# core/types/transaction.go(346): .. EffectiveGasTipValue(baseFee ..
proc effectiveGasTip*(tx: Transaction; baseFee: GasPrice): GasPriceEx =
  ## The effective miner gas tip for the globally argument `baseFee`. The
  ## result (which is a price per gas) might well be negative.
  if tx.txType != TxEip1559:
    # pre-London tx types: the tip is simply gasPrice above the base fee
    (tx.gasPrice - baseFee.int64).GasPriceEx
  else:
    # London, EIP1559
    min(tx.maxPriorityFee, tx.maxFee - baseFee.int64).GasPriceEx

proc effectiveGasTip*(tx: Transaction; baseFee: UInt256): GasPriceEx =
  ## Variant of `effectiveGasTip()`. The `baseFee` argument is truncated
  ## to 64 bits (assumes real-world base fees fit into `uint64` -- TODO
  ## confirm this guarantee at the call sites.)
  tx.effectiveGasTip(baseFee.truncate(uint64).GasPrice)
# ------------------------------------------------------------------------------
# Public functions, item getters
# ------------------------------------------------------------------------------
proc dup*(item: TxItemRef): TxItemRef =
  ## Getter, provide a deep copy of the item (the result shares no state
  ## with the argument.)
  item.deepCopy

proc info*(item: TxItemRef): string =
  ## Getter, free-form info string as passed at construction time
  item.info

proc itemID*(item: TxItemRef): Hash256 =
  ## Getter, transaction hash used as database key
  item.itemID

proc reject*(item: TxItemRef): TxInfo =
  ## Getter, reason for moving the item to the waste basket
  item.reject

proc sender*(item: TxItemRef): EthAddress =
  ## Getter, sender account address recovered from the tx signature
  item.sender

proc status*(item: TxItemRef): TxItemStatus =
  ## Getter, current pool status (pending/staged/packed)
  item.status

proc timeStamp*(item: TxItemRef): Time =
  ## Getter, time when the item was created/added
  item.timeStamp

proc tx*(item: TxItemRef): Transaction =
  ## Getter, the wrapped transaction data
  item.tx
# ------------------------------------------------------------------------------
# Public functions, setters
# ------------------------------------------------------------------------------
proc `status=`*(item: TxItemRef; val: TxItemStatus) =
  ## Setter, update the pool status
  item.status = val

proc `reject=`*(item: TxItemRef; val: TxInfo) =
  ## Setter, record the waste basket disposal reason
  item.reject = val

# ------------------------------------------------------------------------------
# Public functions, pretty printing and debugging
# ------------------------------------------------------------------------------

proc `$`*(w: TxItemRef): string =
  ## Visualise item ID (use for debugging.) Shows hex characters 24..31 of
  ## the 64 character hex ID, i.e. ID bytes 12..15, lower-cased.
  "<" & w.itemID.data.mapIt(it.toHex(2)).join[24 .. 31].toLowerAscii & ">"
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,263 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Jobs Queue For Transaction Pool
## ===============================
##
import
std/[hashes, tables],
./tx_info,
./tx_item,
./tx_tabs,
eth/[common, keys],
stew/[keyed_queue, keyed_queue/kq_debug, results]
{.push raises: [Defect].}
# hide complexity unless really needed
const
  jobWaitCompilerFlag = defined(job_wait_enabled) or defined(debug)

  JobWaitEnabled* = ##\
    ## Compiler flag: fire *chronos* event if job queue becomes populated
    jobWaitCompilerFlag

when JobWaitEnabled:
  import chronos

type
  TxJobID* = ##\
    ## Valid interval: *1 .. TxJobIdMax*, the value `0` corresponds to\
    ## `TxJobIdMax` and is internally accepted only right after initialisation.
    distinct uint

  TxJobKind* = enum ##\
    ## Types of batch job data. See `txJobPriorityKind` for the list of\
    ## *out-of-band* jobs.
    txJobNone = 0 ##\
      ## no action

    txJobAddTxs ##\
      ## Enqueues a batch of transactions

    txJobDelItemIDs ##\
      ## Enqueues a batch of itemIDs the items of which to be disposed

const
  txJobPriorityKind*: set[TxJobKind] = ##\
    ## Prioritised jobs, either small or important ones (currently empty,
    ## i.e. all jobs are queued in arrival order.)
    {}

type
  TxJobDataRef* = ref object
    ## Variant record holding the per-kind job arguments.
    case kind*: TxJobKind
    of txJobNone:
      discard

    of txJobAddTxs:
      addTxsArgs*: tuple[
        txs:  seq[Transaction],
        info: string]

    of txJobDelItemIDs:
      delItemIDsArgs*: tuple[
        itemIDs: seq[Hash256],
        reason:  TxInfo]

  TxJobPair* = object   ## Responding to a job queue query
    id*: TxJobID        ## Job ID, queue database key
    data*: TxJobDataRef ## Data record

  TxJobRef* = ref object ##\
    ## Job queue with increasing job *ID* numbers (wrapping around at\
    ## `TxJobIdMax`.)
    topID: TxJobID                         ## Next job will have `topID+1`
    jobs: KeyedQueue[TxJobID,TxJobDataRef] ## Job queue

    # hide complexity unless really needed
    when JobWaitEnabled:
      jobsAvail: AsyncEvent ## Fired if there is a job available

const
  txJobIdMax* = ##\
    ## Wraps around to `1` after last ID
    999999.TxJobID
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc hash(id: TxJobID): Hash =
  ## Needed if `TxJobID` is used as hash-`Table` index.
  id.uint.hash

# arithmetic lifted from the underlying `uint`; wrap-around at `txJobIdMax`
# is handled by the callers, see `jobAppend()`/`jobUnshift()`.
proc `+`(a, b: TxJobID): TxJobID {.borrow.}
proc `-`(a, b: TxJobID): TxJobID {.borrow.}

proc `+`(a: TxJobID; b: int): TxJobID = a + b.TxJobID
proc `-`(a: TxJobID; b: int): TxJobID = a - b.TxJobID

# ------------------------------------------------------------------------------
# Public helpers (operators needed in jobAppend() and jobUnshift() functions)
# ------------------------------------------------------------------------------

proc `<=`*(a, b: TxJobID): bool {.borrow.}
proc `==`*(a, b: TxJobID): bool {.borrow.}
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc jobAppend(jq: TxJobRef; data: TxJobDataRef): TxJobID
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Appends a job to the *FIFO*. This function returns a non-zero *ID* if
  ## successful, otherwise the zero *ID*.
  ##
  ## :Note:
  ##   An error can only occur if the *ID* of the first job follows the *ID*
  ##   of the last job (*modulo* `TxJobIdMax`). This occurs when
  ##   * there are `TxJobIdMax` jobs already on the queue
  ##   * some jobs were deleted in the middle of the queue and the *ID*
  ##     gap was not shifted out yet.
  let nextID =
    if txJobIdMax <= jq.topID: 1.TxJobID # wrap around after the last ID
    else: jq.topID + 1
  if jq.jobs.append(nextID, data):
    jq.topID = nextID
    return nextID
proc jobUnshift(jq: TxJobRef; data: TxJobDataRef): TxJobID
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Stores *back* a job to the *FIFO* front end to be re-fetched next. This
  ## function returns a non-zero *ID* if successful.
  ##
  ## See also the **Note** at the comment for `jobAppend()`.
  var id: TxJobID
  if jq.jobs.len == 0:
    # empty queue: re-use the current top ID slot
    if jq.topID == 0.TxJobID:
      jq.topID = txJobIdMax # must be non-zero after first use
    id = jq.topID
  else:
    # pick the ID just below the current first entry, wrapping at zero
    id = jq.jobs.firstKey.value - 1
    if id == 0.TxJobID:
      id = txJobIdMax
  if jq.jobs.unshift(id, data):
    return id
# ------------------------------------------------------------------------------
# Public functions, constructor
# ------------------------------------------------------------------------------
proc new*(T: type TxJobRef; initSize = 10): T =
  ## Constructor variant, returns a job queue with initial capacity
  ## `initSize`.
  new result
  result.jobs.init(initSize)

  # hide complexity unless really needed
  when JobWaitEnabled:
    result.jobsAvail = newAsyncEvent()

proc clear*(jq: TxJobRef) =
  ## Re-initialise variant, drops all queued jobs.
  jq.jobs.clear

  # hide complexity unless really needed
  when JobWaitEnabled:
    jq.jobsAvail.clear
# ------------------------------------------------------------------------------
# Public functions, add/remove entry
# ------------------------------------------------------------------------------
proc add*(jq: TxJobRef; data: TxJobDataRef): TxJobID
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Add a new job to the *FIFO*. Jobs of a kind listed in
  ## `txJobPriorityKind` are pushed to the front, all others are appended
  ## at the end.
  result =
    if data.kind in txJobPriorityKind: jq.jobUnshift(data)
    else: jq.jobAppend(data)

  # hide complexity unless really needed
  when JobWaitEnabled:
    # update event
    jq.jobsAvail.fire
proc fetch*(jq: TxJobRef): Result[TxJobPair,void]
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Fetches (and deletes) the next job from the *FIFO*. Fails with an
  ## empty error if the queue is empty.

  # first item from queue
  let rc = jq.jobs.shift
  if rc.isErr:
    return err()

  # hide complexity unless really needed
  when JobWaitEnabled:
    # update event
    jq.jobsAvail.clear

  # result
  ok(TxJobPair(id: rc.value.key, data: rc.value.data))

# hide complexity unless really needed
when JobWaitEnabled:
  proc waitAvail*(jq: TxJobRef) {.async,raises: [Defect,CatchableError].} =
    ## Asynchronously wait until at least one job is available (available
    ## only if the `JobWaitEnabled` compile time constant is set.)
    if jq.jobs.len == 0:
      await jq.jobsAvail.wait
else:
  proc waitAvail*(jq: TxJobRef)
      {.deprecated: "will raise exception unless JobWaitEnabled is set",
        raises: [Defect,CatchableError].} =
    ## Stub that raises an assertion defect; compile with `job_wait_enabled`
    ## (or `debug`) to get the async implementation above.
    raiseAssert "Must not be called unless JobWaitEnabled is set"
# ------------------------------------------------------------------------------
# Public queue/table ops
# ------------------------------------------------------------------------------
proc`[]`*(jq: TxJobRef; id: TxJobID): TxJobDataRef
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Table-like accessor; raises `KeyError` for an unknown job `id`.
  jq.jobs[id]

proc hasKey*(jq: TxJobRef; id: TxJobID): bool
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Table-like membership test for job `id`.
  jq.jobs.hasKey(id)

proc len*(jq: TxJobRef): int
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Number of jobs currently on the queue.
  jq.jobs.len
# ------------------------------------------------------------------------------
# Public functions, debugging
# ------------------------------------------------------------------------------
proc verify*(jq: TxJobRef): Result[void,TxInfo]
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Check the internal consistency of the underlying keyed queue,
  ## mapping any failure to `txInfoVfyJobQueue`.
  if jq.jobs.verify.isErr:
    return err(txInfoVfyJobQueue)
  ok()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,518 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Database For Buckets And Waste Basket
## ======================================================
##
import
std/[sequtils, tables],
./tx_info,
./tx_item,
./tx_tabs/[tx_sender, tx_rank, tx_status],
eth/[common, keys],
stew/[keyed_queue, keyed_queue/kq_debug, results, sorted_set]
{.push raises: [Defect].}
export
# bySender/byStatus index operations
any, eq, ge, gt, le, len, lt, nItems, gasLimits
type
  TxTabsItemsCount* = tuple
    pending, staged, packed: int ## sum => total
    total: int                   ## excluding rejects
    disposed: int                ## waste basket

  TxTabsGasTotals* = tuple
    pending, staged, packed: GasInt ## sum => total

  TxTabsRef* = ref object ##\
    ## Base descriptor: primary item table plus derived index tables and
    ## the waste basket.
    maxRejects: int ##\
      ## Maximal number of items in waste basket

    # ----- primary tables ------

    byLocal*: Table[EthAddress,bool] ##\
      ## List of local accounts (currently idle/unused)

    byRejects*: KeyedQueue[Hash256,TxItemRef] ##\
      ## Rejects queue and waste basket, queued by disposal event

    byItemID*: KeyedQueue[Hash256,TxItemRef] ##\
      ## Primary table containing all tx items, queued by arrival event

    # ----- index tables for byItemID ------

    bySender*: TxSenderTab ##\
      ## Index for byItemID: `sender` > `status` > `nonce` > item

    byStatus*: TxStatusTab ##\
      ## Index for byItemID: `status` > `nonce` > item

    byRank*: TxRankTab ##\
      ## Ranked address table, used for sender address traversal

const
  txTabMaxRejects = ##\
    ## Default size of rejects queue (aka waste basket.) Older waste items will
    ## be automatically removed so that there are no more than this many items
    ## in the rejects queue.
    500
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc deleteImpl(xp: TxTabsRef; item: TxItemRef): bool
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Delete transaction (and wrapping container) from the database. Returns
  ## `true` if the item was found and removed from the primary table and
  ## all index tables (not the container itself, as an older comment said.)
  if xp.byItemID.delete(item.itemID).isOK:
    discard xp.bySender.delete(item)
    discard xp.byStatus.delete(item)

    # Update address rank
    let rc = xp.bySender.rank(item.sender)
    if rc.isOK:
      discard xp.byRank.insert(rc.value.TxRank, item.sender) # update
    else:
      # no more items for this sender => drop from the rank table
      discard xp.byRank.delete(item.sender)

    return true

proc insertImpl(xp: TxTabsRef; item: TxItemRef): Result[void,TxInfo]
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Add the argument `item` to the primary table and all index tables;
  ## fails when the <sender,nonce> slot is occupied already.
  if not xp.bySender.insert(item):
    return err(txInfoErrSenderNonceIndex)

  # Insert item
  discard xp.byItemID.append(item.itemID,item)
  discard xp.byStatus.insert(item)

  # Update address rank
  let rank = xp.bySender.rank(item.sender).value.TxRank
  discard xp.byRank.insert(rank, item.sender)

  return ok()
# ------------------------------------------------------------------------------
# Public functions, constructor
# ------------------------------------------------------------------------------
proc new*(T: type TxTabsRef): T =
  ## Constructor, returns new tx-pool descriptor.
  result = T(maxRejects: txTabMaxRejects)
  # byLocal (Table), byItemID and byRejects (KeyedQueue) need no explicit
  # initialisation; only the index tables do.
  result.bySender.init
  result.byStatus.init
  result.byRank.init
# ------------------------------------------------------------------------------
# Public functions, add/remove entry
# ------------------------------------------------------------------------------
proc insert*(
    xp: TxTabsRef;
    tx: var Transaction;
    status = txItemPending;
    info = ""): Result[void,TxInfo]
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Add new transaction argument `tx` to the database. The transaction is
  ## wrapped into a new item container keyed by the transaction hash
  ## `tx.itemID`, which can be used afterwards to retrieve the item.
  ##
  ## Fails with `txInfoErrAlreadyKnown` if the transaction hash exists in
  ## the database already, and with `txInfoErrInvalidSender` if the sender
  ## address cannot be recovered from the signature. Other error codes may
  ## be relayed from the index insertion.
  ##
  ## CAVEAT:
  ##   The transaction hash remains a valid lookup key only while the
  ##   transaction is not modified.
  let itemID = tx.itemID
  if xp.byItemID.hasKey(itemID):
    return err(txInfoErrAlreadyKnown)
  var item: TxItemRef
  block:
    let rc = TxItemRef.new(tx, itemID, status, info)
    if rc.isErr:
      return err(txInfoErrInvalidSender)
    item = rc.value
  block:
    let rc = xp.insertImpl(item)
    if rc.isErr:
      return rc
  ok()
proc insert*(xp: TxTabsRef; item: TxItemRef): Result[void,TxInfo]
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Variant of `insert()` with fully qualified `item` argument. A deep
  ## copy of the item is stored, the argument itself remains untouched.
  if xp.byItemID.hasKey(item.itemID):
    return err(txInfoErrAlreadyKnown)
  xp.insertImpl(item.dup)
proc reassign*(xp: TxTabsRef; item: TxItemRef; status: TxItemStatus): bool
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Variant of `reassign()` for the `TxItemStatus` flag. Moves the item
  ## between status buckets. Returns `true` if the item exists in the
  ## database, whether or not the status actually changed.
  # make sure that the argument `item` is not some copy
  let rc = xp.byItemID.eq(item.itemID)
  if rc.isOK:
    var realItem = rc.value
    if realItem.status != status:
      discard xp.bySender.delete(realItem) # delete original
      discard xp.byStatus.delete(realItem)
      realItem.status = status
      discard xp.bySender.insert(realItem) # re-insert changed
      discard xp.byStatus.insert(realItem)
    return true

proc flushRejects*(xp: TxTabsRef; maxItems = int.high): (int,int)
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Flush/delete at most `maxItems` oldest items from the waste basket and
  ## return the numbers of deleted and remaining items (a waste basket item
  ## is considered older if it was moved there earlier.)
  if xp.byRejects.len <= maxItems:
    # fast path: everything goes, remaining count is zero
    result[0] = xp.byRejects.len
    xp.byRejects.clear
    return # result
  while result[0] < maxItems:
    # drop the oldest entries one by one
    if xp.byRejects.shift.isErr:
      break
    result[0].inc
  result[1] = xp.byRejects.len
proc dispose*(xp: TxTabsRef; item: TxItemRef; reason: TxInfo): bool
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Move argument `item` to rejects queue (aka waste basket.) Returns
  ## `true` if the item was found in (and removed from) the database.
  if xp.deleteImpl(item):
    if xp.maxRejects <= xp.byRejects.len:
      # make room so the basket never exceeds `maxRejects` entries
      discard xp.flushRejects(1 + xp.byRejects.len - xp.maxRejects)
    item.reject = reason
    xp.byRejects[item.itemID] = item
    return true

proc reject*(xp: TxTabsRef; tx: var Transaction;
             reason: TxInfo; status = txItemPending; info = "")
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Similar to dispose but for a tx without the item wrapper, the function
  ## imports the tx into the waste basket (e.g. after it could not
  ## be inserted.)
  if xp.maxRejects <= xp.byRejects.len:
    discard xp.flushRejects(1 + xp.byRejects.len - xp.maxRejects)
  # wrap the raw tx into an incomplete item descriptor for the basket
  let item = TxItemRef.new(tx, reason, status, info)
  xp.byRejects[item.itemID] = item

proc reject*(xp: TxTabsRef; item: TxItemRef; reason: TxInfo)
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Variant of `reject()` with `item` rather than `tx` (assuming
  ## `item` is not in the database.)
  if xp.maxRejects <= xp.byRejects.len:
    discard xp.flushRejects(1 + xp.byRejects.len - xp.maxRejects)
  item.reject = reason
  xp.byRejects[item.itemID] = item
proc reject*(xp: TxTabsRef; tx: Transaction;
             reason: TxInfo; status = txItemPending; info = "")
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Variant of `reject()` accepting an immutable transaction; forwards to
  ## the `var Transaction` version.
  var mutableTx = tx
  # Fix: the `info` argument was previously dropped when forwarding, so the
  # waste basket entry lost its client info string.
  xp.reject(mutableTx, reason, status, info)
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc baseFee*(xp: TxTabsRef): GasPrice =
  ## Getter, base fee currently used by the sender index
  xp.bySender.baseFee

proc maxRejects*(xp: TxTabsRef): int =
  ## Getter, maximal number of items in the waste basket
  xp.maxRejects

# ------------------------------------------------------------------------------
# Public functions, setters
# ------------------------------------------------------------------------------

proc `baseFee=`*(xp: TxTabsRef; val: GasPrice)
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Setter, update may cause database re-org
  if xp.bySender.baseFee != val:
    xp.bySender.baseFee = val

    # Build new rank table (sender ranks depend on the base fee)
    xp.byRank.clear
    for (address,rank) in xp.bySender.accounts:
      discard xp.byRank.insert(rank.TxRank, address)

proc `maxRejects=`*(xp: TxTabsRef; val: int) =
  ## Setter, applicable with next `reject()` invocation.
  xp.maxRejects = val
# ------------------------------------------------------------------------------
# Public functions, miscellaneous
# ------------------------------------------------------------------------------
proc hasTx*(xp: TxTabsRef; tx: Transaction): bool =
  ## Returns `true` if the argument transaction (keyed by its hash
  ## `tx.itemID`) exists in the database.
  ##
  ## If this function returns `true`, then it is safe to use the
  ## `xp.byItemID[tx.itemID]` paradigm for accessing the item container.
  xp.byItemID.hasKey(tx.itemID)

proc nItems*(xp: TxTabsRef): TxTabsItemsCount
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Item counts per status bucket, plus the grand total and the number
  ## of disposed (waste basket) items.
  result.pending = xp.byStatus.eq(txItemPending).nItems
  result.staged = xp.byStatus.eq(txItemStaged).nItems
  result.packed = xp.byStatus.eq(txItemPacked).nItems
  result.total = xp.byItemID.len
  result.disposed = xp.byRejects.len

proc gasTotals*(xp: TxTabsRef): TxTabsGasTotals
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Accumulated gas limits per status bucket.
  result.pending = xp.byStatus.eq(txItemPending).gasLimits
  result.staged = xp.byStatus.eq(txItemStaged).gasLimits
  result.packed = xp.byStatus.eq(txItemPacked).gasLimits
# ------------------------------------------------------------------------------
# Public functions: local/remote sender accounts
# ------------------------------------------------------------------------------
proc isLocal*(xp: TxTabsRef; sender: EthAddress): bool =
  ## Returns `true` if account address is tagged local
  xp.byLocal.hasKey(sender)

proc locals*(xp: TxTabsRef): seq[EthAddress] =
  ## Returns an unsorted list of addresses tagged *local*
  toSeq(xp.byLocal.keys)

proc remotes*(xp: TxTabsRef): seq[EthAddress] =
  ## Returns a sorted list of untagged addresses, highest address rank first
  var rcRank = xp.byRank.le(TxRank.high)
  while rcRank.isOK:
    let (rank, addrList) = (rcRank.value.key, rcRank.value.data)
    for account in addrList.keys:
      # skip addresses tagged *local*
      if not xp.byLocal.hasKey(account):
        result.add account
    rcRank = xp.byRank.lt(rank) # next lower rank

proc setLocal*(xp: TxTabsRef; sender: EthAddress) =
  ## Tag `sender` address argument *local*
  xp.byLocal[sender] = true

proc resLocal*(xp: TxTabsRef; sender: EthAddress) =
  ## Untag *local* `sender` address argument.
  xp.byLocal.del(sender)
# ------------------------------------------------------------------------------
# Public iterators, `TxRank` > `(EthAddress,TxStatusNonceRef)`
# ------------------------------------------------------------------------------
iterator incAccount*(xp: TxTabsRef; bucket: TxItemStatus;
                     fromRank = TxRank.low): (EthAddress,TxStatusNonceRef)
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Walk accounts with increasing ranks and return a nonce-ordered item list.
  let rcBucket = xp.byStatus.eq(bucket)
  if rcBucket.isOK:
    let bucketList = xp.byStatus.eq(bucket).value.data

    var rcRank = xp.byRank.ge(fromRank)
    while rcRank.isOK:
      let (rank, addrList) = (rcRank.value.key, rcRank.value.data)

      # Use addresses for this rank which are also found in the bucket
      for account in addrList.keys:
        let rcAccount = bucketList.eq(account)
        if rcAccount.isOK:
          yield (account, rcAccount.value.data)

      # Get next ranked address list (bottom up index walk)
      rcRank = xp.byRank.gt(rank) # potentially modified database
iterator decAccount*(xp: TxTabsRef; bucket: TxItemStatus;
                     fromRank = TxRank.high): (EthAddress,TxStatusNonceRef)
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Walk accounts with decreasing ranks and return the nonce-ordered item list.
  let rcBucket = xp.byStatus.eq(bucket)
  if rcBucket.isOK:
    let bucketList = xp.byStatus.eq(bucket).value.data

    var rcRank = xp.byRank.le(fromRank)
    while rcRank.isOK:
      let (rank, addrList) = (rcRank.value.key, rcRank.value.data)

      # Use addresses for this rank which are also found in the bucket
      for account in addrList.keys:
        let rcAccount = bucketList.eq(account)
        if rcAccount.isOK:
          yield (account, rcAccount.value.data)

      # Get next ranked address list (top down index walk)
      rcRank = xp.byRank.lt(rank) # potentially modified database
# ------------------------------------------------------------------------------
# Public iterators, `TxRank` > `(EthAddress,TxSenderNonceRef)`
# ------------------------------------------------------------------------------
iterator incAccount*(xp: TxTabsRef;
                     fromRank = TxRank.low): (EthAddress,TxSenderNonceRef)
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Variant of `incAccount()` without bucket restriction.
  var rcRank = xp.byRank.ge(fromRank)
  while rcRank.isOK:
    let (rank, addrList) = (rcRank.value.key, rcRank.value.data)

    # Try all sender addresses found
    for account in addrList.keys:
      yield (account, xp.bySender.eq(account).any.value.data)

    # Get next ranked address list (bottom up index walk)
    rcRank = xp.byRank.gt(rank) # potentially modified database
iterator decAccount*(xp: TxTabsRef;
                     fromRank = TxRank.high): (EthAddress,TxSenderNonceRef)
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Variant of `decAccount()` without bucket restriction.
  var rcRank = xp.byRank.le(fromRank)
  while rcRank.isOK:
    let (rank, addrList) = (rcRank.value.key, rcRank.value.data)

    # Try all sender addresses found
    for account in addrList.keys:
      yield (account, xp.bySender.eq(account).any.value.data)

    # Get next ranked address list (top down index walk)
    rcRank = xp.byRank.lt(rank) # potentially modified database
# -----------------------------------------------------------------------------
# Public second stage iterators: nonce-ordered item lists.
# -----------------------------------------------------------------------------
iterator incNonce*(nonceList: TxSenderNonceRef;
                   nonceFrom = AccountNonce.low): TxItemRef
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Second stage iterator inside `incAccount()` or `decAccount()`. The
  ## items visited are always sorted by least-nonce first.
  var rc = nonceList.ge(nonceFrom)
  while rc.isOk:
    let (nonce, item) = (rc.value.key, rc.value.data)
    yield item
    rc = nonceList.gt(nonce) # potentially modified database
iterator incNonce*(nonceList: TxStatusNonceRef;
                   nonceFrom = AccountNonce.low): TxItemRef =
  ## Variant of `incNonce()` for the `TxStatusNonceRef` list.
  var rc = nonceList.ge(nonceFrom)
  while rc.isOK:
    let (nonce, item) = (rc.value.key, rc.value.data)
    yield item
    rc = nonceList.gt(nonce) # potentially modified database
#[
# There is currently no use for nonce count down traversal
iterator decNonce*(nonceList: TxSenderNonceRef;
nonceFrom = AccountNonce.high): TxItemRef
{.gcsafe,raises: [Defect,KeyError].} =
## Similar to `incNonce()` but visiting items in reverse order.
var rc = nonceList.le(nonceFrom)
while rc.isOk:
let (nonce, item) = (rc.value.key, rc.value.data)
yield item
rc = nonceList.lt(nonce) # potenially modified database
iterator decNonce*(nonceList: TxStatusNonceRef;
nonceFrom = AccountNonce.high): TxItemRef =
## Variant of `decNonce()` for the `TxStatusNonceRef` list.
var rc = nonceList.le(nonceFrom)
while rc.isOK:
let (nonce, item) = (rc.value.key, rc.value.data)
yield item
rc = nonceList.lt(nonce) # potenially modified database
]#
# ------------------------------------------------------------------------------
# Public functions, debugging
# ------------------------------------------------------------------------------
proc verify*(xp: TxTabsRef): Result[void,TxInfo]
    {.gcsafe, raises: [Defect,CatchableError].} =
  ## Verify descriptor and subsequent data structures.

  # Verify each sub-table/index in isolation first.
  block:
    let rc = xp.bySender.verify
    if rc.isErr:
      return rc
  block:
    let rc = xp.byItemID.verify
    if rc.isErr:
      return err(txInfoVfyItemIdList)
  block:
    let rc = xp.byRejects.verify
    if rc.isErr:
      return err(txInfoVfyRejectsList)
  block:
    let rc = xp.byStatus.verify
    if rc.isErr:
      return rc
  block:
    let rc = xp.byRank.verify
    if rc.isErr:
      return rc

  # Cross-check: per-status counts and gas totals must agree between the
  # `bySender` and `byStatus` views of the same items.
  for status in TxItemStatus:
    var
      statusCount = 0
      statusAllGas = 0.GasInt
    for (account,nonceList) in xp.incAccount(status):
      let bySenderStatusList = xp.bySender.eq(account).eq(status)
      statusAllGas += bySenderStatusList.gasLimits
      statusCount += bySenderStatusList.nItems
      if bySenderStatusList.nItems != nonceList.nItems:
        return err(txInfoVfyStatusSenderTotal)
    if xp.byStatus.eq(status).nItems != statusCount:
      return err(txInfoVfyStatusSenderTotal)
    if xp.byStatus.eq(status).gasLimits != statusAllGas:
      return err(txInfoVfyStatusSenderGasLimits)

  # Grand totals must agree across all indexes.
  if xp.byItemID.len != xp.bySender.nItems:
    return err(txInfoVfySenderTotal)
  if xp.byItemID.len != xp.byStatus.nItems:
    return err(txInfoVfyStatusTotal)
  if xp.bySender.len != xp.byRank.nItems:
    return err(txInfoVfyRankTotal)
  ok()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,187 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Table: `rank` ~ `sender`
## =========================================
##
import
std/[tables],
../tx_info,
eth/[common],
stew/[results, sorted_set]
{.push raises: [Defect].}
type
  TxRank* = ##\
    ## Order relation, determines how the `EthAddresses` are ranked
    distinct int64

  TxRankAddrRef* = ##\
    ## Set of addresses having the same rank.
    TableRef[EthAddress,TxRank]

  TxRankTab* = object ##\
    ## Descriptor for `TxRank` <-> `EthAddress` mapping.
    rankList: SortedSet[TxRank,TxRankAddrRef]  ## rank -> address set
    addrTab: Table[EthAddress,TxRank]          ## reverse lookup: address -> rank
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc cmp(a,b: TxRank): int {.borrow.}
  ## mixin for SortedSet, borrowed from the underlying `int64`

proc `==`(a,b: TxRank): bool {.borrow.}
  ## equality borrowed from the underlying `int64`
# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------
proc init*(rt: var TxRankTab) =
  ## Constructor
  # `addrTab` needs no explicit setup, the default `Table` value is usable.
  rt.rankList.init

proc clear*(rt: var TxRankTab) =
  ## Flush tables
  rt.rankList.clear
  rt.addrTab.clear
# ------------------------------------------------------------------------------
# Public functions, base management operations
# ------------------------------------------------------------------------------
proc insert*(rt: var TxRankTab; rank: TxRank; sender: EthAddress): bool
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Add or update a ranked address. The function returns `false` (with no
  ## further action) if the address is already registered with the argument
  ## rank, and `true` otherwise.

  # Does this address exist already?
  if rt.addrTab.hasKey(sender):
    let oldRank = rt.addrTab[sender]
    if oldRank == rank:
      return false

    # Delete address from oldRank address set
    let oldRankSet = rt.rankList.eq(oldRank).value.data
    if 1 < oldRankSet.len:
      oldRankSet.del(sender)
    else:
      # last entry for that rank => drop the whole rank record
      discard rt.rankList.delete(oldRank)

  # Add new ranked address
  var newRankSet: TxRankAddrRef
  let rc = rt.rankList.insert(rank)
  if rc.isOK:
    # rank was not in the list yet, create a fresh address set
    newRankSet = newTable[EthAddress,TxRank](1)
    rc.value.data = newRankSet
  else:
    newRankSet = rt.rankList.eq(rank).value.data

  newRankSet[sender] = rank
  rt.addrTab[sender] = rank
  true
proc delete*(rt: var TxRankTab; sender: EthAddress): bool
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Delete argument address `sender` from rank table. Returns `false` if
  ## the address was not registered (no effect in that case.)
  if rt.addrTab.hasKey(sender):
    let
      rankNum = rt.addrTab[sender]
      rankSet = rt.rankList.eq(rankNum).value.data

    # Delete address from oldRank address set
    if 1 < rankSet.len:
      rankSet.del(sender)
    else:
      # last entry for that rank => drop the whole rank record
      discard rt.rankList.delete(rankNum)

    rt.addrTab.del(sender)
    return true
proc verify*(rt: var TxRankTab): Result[void,TxInfo]
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Consistency check: walk the `rankList` and verify that every entry
  ## agrees with the reverse lookup table `addrTab`, and vice versa.
  var
    seen: Table[EthAddress,TxRank]
    rc = rt.rankList.ge(TxRank.low)

  while rc.isOK:
    let (key, addrTab) = (rc.value.key, rc.value.data)
    rc = rt.rankList.gt(key)

    for (sender,rank) in addrTab.pairs:
      # every address in a rank bucket must carry that same rank ...
      if key != rank:
        return err(txInfoVfyRankAddrMismatch)
      # ... and the reverse table must agree
      if not rt.addrTab.hasKey(sender):
        return err(txInfoVfyRankReverseLookup)
      if rank != rt.addrTab[sender]:
        return err(txInfoVfyRankReverseMismatch)
      # an address may appear in at most one rank bucket
      if seen.hasKey(sender):
        return err(txInfoVfyRankDuplicateAddr)
      seen[sender] = rank

  # the reverse table must not contain unreferenced (zombie) entries
  if seen.len != rt.addrTab.len:
    return err(txInfoVfyReverseZombies)

  ok()
# ------------------------------------------------------------------------------
# Public functions: `TxRank` > `EthAddress`
# ------------------------------------------------------------------------------
proc len*(rt: var TxRankTab): int =
  ## Number of ranks available
  rt.rankList.len

proc eq*(rt: var TxRankTab; rank: TxRank):
       SortedSetResult[TxRank,TxRankAddrRef] =
  ## Address set with exactly the argument rank.
  rt.rankList.eq(rank)

proc ge*(rt: var TxRankTab; rank: TxRank):
       SortedSetResult[TxRank,TxRankAddrRef] =
  ## Smallest ranked address set with rank *greater or equal* the argument.
  rt.rankList.ge(rank)

proc gt*(rt: var TxRankTab; rank: TxRank):
       SortedSetResult[TxRank,TxRankAddrRef] =
  ## Smallest ranked address set with rank *greater than* the argument.
  rt.rankList.gt(rank)

proc le*(rt: var TxRankTab; rank: TxRank):
       SortedSetResult[TxRank,TxRankAddrRef] =
  ## Largest ranked address set with rank *less or equal* the argument.
  rt.rankList.le(rank)

proc lt*(rt: var TxRankTab; rank: TxRank):
       SortedSetResult[TxRank,TxRankAddrRef] =
  ## Largest ranked address set with rank *less than* the argument.
  rt.rankList.lt(rank)
# ------------------------------------------------------------------------------
# Public functions: `EthAddress` > `TxRank`
# ------------------------------------------------------------------------------
proc nItems*(rt: var TxRankTab): int =
  ## Total number of address items registered
  rt.addrTab.len

proc eq*(rt: var TxRankTab; sender: EthAddress):
       SortedSetResult[EthAddress,TxRank]
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Reverse lookup: rank for the argument `sender` address, or
  ## `err(rbNotFound)` if the address is not registered.
  if rt.addrTab.hasKey(sender):
    return toSortedSetResult(key = sender, data = rt.addrTab[sender])
  err(rbNotFound)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,619 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Table: `Sender` > `status` | all > `nonce`
## ===========================================================
##
import
std/[math],
../tx_info,
../tx_item,
eth/[common],
stew/[results, keyed_queue, keyed_queue/kq_debug, sorted_set]
{.push raises: [Defect].}
type
  TxSenderNonceRef* = ref object ##\
    ## Sub-list ordered by `AccountNonce` values containing transaction\
    ## item lists.
    gasLimits: GasInt  ## Accumulated gas limits
    profit: float64    ## Aggregated `effectiveGasTip*gasLimit` values
    nonceList: SortedSet[AccountNonce,TxItemRef]

  TxSenderSchedRef* = ref object ##\
    ## For a sender, items can be accessed by *nonce*, or *status,nonce*.
    size: int          ## Total number of items
    statusList: array[TxItemStatus,TxSenderNonceRef]  ## items by bucket
    allList: TxSenderNonceRef                         ## all items

  TxSenderTab* = object ##\
    ## Per address table. This table is provided as a keyed queue so\
    ## deletion while traversing is supported and predictable.
    size: int          ## Total number of items
    baseFee: GasPrice  ## For aggregating `effectiveGasTip` => `gasTipSum`
    addrList: KeyedQueue[EthAddress,TxSenderSchedRef]

  TxSenderSchedule* = enum ##\
    ## Generalised key for sub-list to be used in `TxSenderNoncePair`
    txSenderAny = 0    ## All entries status (aka bucket name) ...
    txSenderPending
    txSenderStaged
    txSenderPacked

  TxSenderInx = object ##\
    ## Internal access data
    schedData: TxSenderSchedRef
    statusNonce: TxSenderNonceRef  ## status items sub-list
    allNonce: TxSenderNonceRef     ## all items sub-list
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc `$`(rq: TxSenderSchedRef): string =
  ## Needed by `rq.verify()` for printing error messages
  # renders the number of non-nil status sub-lists
  var n = 0
  for status in TxItemStatus:
    if not rq.statusList[status].isNil:
      n.inc
  $n

proc nActive(rq: TxSenderSchedRef): int =
  ## Number of non-nil items
  for status in TxItemStatus:
    if not rq.statusList[status].isNil:
      result.inc
func differs(a, b: float64): bool =
  ## Syntactic sugar, crude comparator for large integer values a and b coded
  ## as `float64`. This function is mainly provided for the `verify()`
  ## function. (Note that later NIM compilers also provide `almostEqual()`.)
  ##
  ## NOTE(review): `epsilon` is compared against the *relative* difference
  ## `(hi - lo) / hi`, so with `1.0e+15` this only fires for wildly diverging
  ## magnitudes (e.g. opposite signs) -- confirm this threshold is intended.
  const
    epsilon = 1.0e+15'f64 # just arbitrary, something small
  let
    hi = max(a, b)
    lo = min(a, b)
    # 1f64 covers the case hi == lo == 0.0 (avoids dividing by zero)
    scale = if hi == 0: 1'f64 else: hi
  epsilon < (hi - lo) / scale
func toSenderSchedule(status: TxItemStatus): TxSenderSchedule =
  ## Map a bucket status onto the corresponding unified schedule key.
  case status
  of txItemPending:
    txSenderPending
  of txItemStaged:
    txSenderStaged
  of txItemPacked:
    txSenderPacked
proc getRank(schedData: TxSenderSchedRef): int64 =
  ## Rank calculator: aggregated profit per gas unit over all items of the
  ## sender, not counting the `pending` bucket.
  let pendingData = schedData.statusList[txItemPending]

  # start from the all-items aggregates and subtract the pending share
  var
    maxProfit = schedData.allList.profit
    gasLimits = schedData.allList.gasLimits
  if not pendingData.isNil:
    maxProfit -= pendingData.profit
    gasLimits -= pendingData.gasLimits

  if gasLimits <= 0:
    return int64.low
  let profit = maxProfit / gasLimits.float64

  # Beware of under/overflow
  if profit < int64.low.float64:
    return int64.low
  if int64.high.float64 < profit:
    return int64.high

  profit.int64
proc maxProfit(item: TxItemRef; baseFee: GasPrice): float64 =
  ## Profit calculator for a single item: `gasLimit * effectiveGasTip`,
  ## carried as `float64` to avoid overflow.
  item.tx.gasLimit.float64 * item.tx.effectiveGasTip(baseFee).float64

proc recalcProfit(nonceData: TxSenderNonceRef; baseFee: GasPrice) =
  ## Re-calculate profit value depending on `baseFee`
  nonceData.profit = 0.0
  var rc = nonceData.nonceList.ge(AccountNonce.low)
  while rc.isOk:
    let item = rc.value.data
    nonceData.profit += item.maxProfit(baseFee)
    rc = nonceData.nonceList.gt(item.tx.nonce)
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc mkInxImpl(gt: var TxSenderTab; item: TxItemRef): Result[TxSenderInx,void]
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Fetch access data for the argument `item`, creating any missing
  ## sub-lists on the fly. Fails if the item's nonce is already registered
  ## for this sender.
  var inxData: TxSenderInx

  if gt.addrList.hasKey(item.sender):
    inxData.schedData = gt.addrList[item.sender]
  else:
    new inxData.schedData
    gt.addrList[item.sender] = inxData.schedData

  # all items sub-list
  if inxData.schedData.allList.isNil:
    new inxData.allNonce
    inxData.allNonce.nonceList.init
    inxData.schedData.allList = inxData.allNonce
  else:
    inxData.allNonce = inxData.schedData.allList
  let rc = inxData.allNonce.nonceList.insert(item.tx.nonce)
  if rc.isErr:
    # nonce exists already for this sender => reject as duplicate
    return err()
  rc.value.data = item

  # by status items sub-list
  if inxData.schedData.statusList[item.status].isNil:
    new inxData.statusNonce
    inxData.statusNonce.nonceList.init
    inxData.schedData.statusList[item.status] = inxData.statusNonce
  else:
    inxData.statusNonce = inxData.schedData.statusList[item.status]
  # this is a new item, checked at `all items sub-list` above
  inxData.statusNonce.nonceList.insert(item.tx.nonce).value.data = item

  return ok(inxData)
proc getInxImpl(gt: var TxSenderTab; item: TxItemRef): Result[TxSenderInx,void]
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Fetch access data for the argument `item`, failing if the sender is
  ## not registered.
  var inxData: TxSenderInx

  if not gt.addrList.hasKey(item.sender):
    return err()

  # Sub-lists are non-nil as `TxSenderSchedRef` cannot be empty
  inxData.schedData = gt.addrList[item.sender]

  # by status items sub-list
  inxData.statusNonce = inxData.schedData.statusList[item.status]

  # all items sub-list
  inxData.allNonce = inxData.schedData.allList

  ok(inxData)
# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------
proc init*(gt: var TxSenderTab) =
  ## Constructor
  gt.size = 0
  gt.addrList.init
# ------------------------------------------------------------------------------
# Public functions, base management operations
# ------------------------------------------------------------------------------
proc insert*(gt: var TxSenderTab; item: TxItemRef): bool
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Add transaction `item` to the list. The function has no effect if the
  ## transaction exists, already.
  let rc = gt.mkInxImpl(item)
  if rc.isOK:
    let
      inx = rc.value
      tip = item.maxProfit(gt.baseFee)
    gt.size.inc
    inx.schedData.size.inc
    # update aggregates on both the per-status and the all-items sub-list
    inx.statusNonce.gasLimits += item.tx.gasLimit
    inx.statusNonce.profit += tip
    inx.allNonce.gasLimits += item.tx.gasLimit
    inx.allNonce.profit += tip
    return true
proc delete*(gt: var TxSenderTab; item: TxItemRef): bool
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Remove transaction `item` from the list. Returns `false` (with no
  ## effect) if the item was not found.
  let rc = gt.getInxImpl(item)
  if rc.isOK:
    let
      inx = rc.value
      tip = item.maxProfit(gt.baseFee)
    gt.size.dec
    inx.schedData.size.dec

    discard inx.allNonce.nonceList.delete(item.tx.nonce)
    if inx.allNonce.nonceList.len == 0:
      # this was the last nonce for that sender account
      discard gt.addrList.delete(item.sender)
      return true

    inx.allNonce.gasLimits -= item.tx.gasLimit
    inx.allNonce.profit -= tip

    discard inx.statusNonce.nonceList.delete(item.tx.nonce)
    if inx.statusNonce.nonceList.len == 0:
      # last item of this status bucket => drop the sub-list
      inx.schedData.statusList[item.status] = nil
      return true

    inx.statusNonce.gasLimits -= item.tx.gasLimit
    inx.statusNonce.profit -= tip

    return true
proc verify*(gt: var TxSenderTab): Result[void,TxInfo]
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Walk `EthAddress` > `TxSenderLocus` > `AccountNonce` > items
  block:
    let rc = gt.addrList.verify
    if rc.isErr:
      return err(txInfoVfySenderRbTree)

  var totalCount = 0
  for p in gt.addrList.nextPairs:
    let schedData = p.data

    var addrCount = 0
    # at least one of status lists must be available
    if schedData.nActive == 0:
      return err(txInfoVfySenderLeafEmpty)
    if schedData.allList.isNil:
      return err(txInfoVfySenderLeafEmpty)

    # status list
    # ----------------------------------------------------------------
    # Re-compute count/gas/profit per status bucket from scratch and
    # compare against the cached aggregates.
    var
      statusCount = 0
      statusGas = 0.GasInt
      statusProfit = 0.0
    for status in TxItemStatus:
      let statusData = schedData.statusList[status]

      if not statusData.isNil:
        block:
          let rc = statusData.nonceList.verify
          if rc.isErr:
            return err(txInfoVfySenderRbTree)

        var
          rcNonce = statusData.nonceList.ge(AccountNonce.low)
          bucketProfit = 0.0
        while rcNonce.isOK:
          let (nonceKey, item) = (rcNonce.value.key, rcNonce.value.data)
          rcNonce = statusData.nonceList.gt(nonceKey)

          statusGas += item.tx.gasLimit
          statusCount.inc

          bucketProfit += item.maxProfit(gt.baseFee)

        statusProfit += bucketProfit

        # cached profit must match the freshly computed one (modulo
        # float64 rounding, see `differs()`)
        if differs(statusData.profit, bucketProfit):
          echo "*** verify (1) ", statusData.profit," != ", bucketProfit
          return err(txInfoVfySenderProfits)

        # verify that `recalcProfit()` works
        statusData.recalcProfit(gt.baseFee)
        if differs(statusData.profit, bucketProfit):
          echo "*** verify (2) ", statusData.profit," != ", bucketProfit
          return err(txInfoVfySenderProfits)

    # allList
    # ----------------------------------------------------------------
    # Same re-computation for the all-items sub-list; it must agree with
    # the sum over the status buckets.
    var
      allCount = 0
      allGas = 0.GasInt
      allProfit = 0.0
    block:
      var allData = schedData.allList

      block:
        let rc = allData.nonceList.verify
        if rc.isErr:
          return err(txInfoVfySenderRbTree)

      var rcNonce = allData.nonceList.ge(AccountNonce.low)
      while rcNonce.isOK:
        let (nonceKey, item) = (rcNonce.value.key, rcNonce.value.data)
        rcNonce = allData.nonceList.gt(nonceKey)

        allProfit += item.maxProfit(gt.baseFee)
        allGas += item.tx.gasLimit
        allCount.inc

      if differs(allData.profit, allProfit):
        echo "*** verify (3) ", allData.profit," != ", allProfit
        return err(txInfoVfySenderProfits)

      # verify that `recalcProfit()` works
      allData.recalcProfit(gt.baseFee)
      if differs(allData.profit, allProfit):
        echo "*** verify (4) ", allData.profit," != ", allProfit
        return err(txInfoVfySenderProfits)

    if differs(allProfit, statusProfit):
      echo "*** verify (5) ", allProfit," != ", statusProfit
      return err(txInfoVfySenderProfits)
    if allGas != statusGas:
      return err(txInfoVfySenderTotal)
    if statusCount != schedData.size:
      return err(txInfoVfySenderTotal)
    if allCount != schedData.size:
      return err(txInfoVfySenderTotal)

    totalCount += allCount

  # end while
  if totalCount != gt.size:
    return err(txInfoVfySenderTotal)

  ok()
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc baseFee*(gt: var TxSenderTab): GasPrice =
  ## Getter
  gt.baseFee

proc `baseFee=`*(gt: var TxSenderTab; val: GasPrice)
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Setter. When invoked, there is *always* a re-calculation of the profit
  ## values stored with the sender address.
  gt.baseFee = val

  for p in gt.addrList.nextPairs:
    let schedData = p.data

    # statusList[]
    for status in TxItemStatus:
      let statusData = schedData.statusList[status]
      if not statusData.isNil:
        statusData.recalcProfit(val)

    # allList
    schedData.allList.recalcProfit(val)
# ------------------------------------------------------------------------------
# Public SortedSet ops -- `EthAddress` (level 0)
# ------------------------------------------------------------------------------
proc len*(gt: var TxSenderTab): int =
  ## Number of sender addresses registered
  gt.addrList.len

proc nItems*(gt: var TxSenderTab): int =
  ## Getter, total number of items in the list
  gt.size

proc rank*(gt: var TxSenderTab; sender: EthAddress): Result[int64,void]
    {.gcsafe,raises: [Defect,KeyError].} =
  ## The *rank* of the `sender` argument address is the
  ## ::
  ##    maxProfit() / gasLimits()
  ##
  ## calculated over all items of the `staged` and `packed` buckets.
  ##
  if gt.addrList.hasKey(sender):
    return ok(gt.addrList[sender].getRank)
  err()

proc eq*(gt: var TxSenderTab; sender: EthAddress):
       SortedSetResult[EthAddress,TxSenderSchedRef]
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Sub-list for the argument `sender` address, or `err(rbNotFound)`.
  if gt.addrList.hasKey(sender):
    return toSortedSetResult(key = sender, data = gt.addrList[sender])
  err(rbNotFound)
# ------------------------------------------------------------------------------
# Public array ops -- `TxSenderSchedule` (level 1)
# ------------------------------------------------------------------------------
proc len*(schedData: TxSenderSchedRef): int =
  ## Number of non-nil status sub-lists
  schedData.nActive

proc nItems*(schedData: TxSenderSchedRef): int =
  ## Getter, total number of items in the sub-list
  schedData.size

proc nItems*(rc: SortedSetResult[EthAddress,TxSenderSchedRef]): int =
  ## Variant of `nItems()`, returns `0` if `rc.isErr` evaluates `true`.
  if rc.isOK:
    return rc.value.data.nItems
  0

proc eq*(schedData: TxSenderSchedRef; status: TxItemStatus):
       SortedSetResult[TxSenderSchedule,TxSenderNonceRef] =
  ## Return by status sub-list
  let nonceData = schedData.statusList[status]
  if nonceData.isNil:
    return err(rbNotFound)
  toSortedSetResult(key = status.toSenderSchedule, data = nonceData)

proc eq*(rc: SortedSetResult[EthAddress,TxSenderSchedRef];
         status: TxItemStatus):
       SortedSetResult[TxSenderSchedule,TxSenderNonceRef] =
  ## Return by status sub-list
  if rc.isOK:
    return rc.value.data.eq(status)
  err(rc.error)

proc any*(schedData: TxSenderSchedRef):
       SortedSetResult[TxSenderSchedule,TxSenderNonceRef] =
  ## Return all-entries sub-list
  let nonceData = schedData.allList
  if nonceData.isNil:
    return err(rbNotFound)
  toSortedSetResult(key = txSenderAny, data = nonceData)

proc any*(rc: SortedSetResult[EthAddress,TxSenderSchedRef]):
       SortedSetResult[TxSenderSchedule,TxSenderNonceRef] =
  ## Return all-entries sub-list
  if rc.isOK:
    return rc.value.data.any
  err(rc.error)

proc eq*(schedData: TxSenderSchedRef;
         key: TxSenderSchedule):
       SortedSetResult[TxSenderSchedule,TxSenderNonceRef] =
  ## Variant of `eq()` using unified key schedule
  case key
  of txSenderAny:
    return schedData.any
  of txSenderPending:
    return schedData.eq(txItemPending)
  of txSenderStaged:
    return schedData.eq(txItemStaged)
  of txSenderPacked:
    return schedData.eq(txItemPacked)

proc eq*(rc: SortedSetResult[EthAddress,TxSenderSchedRef];
         key: TxSenderSchedule):
       SortedSetResult[TxSenderSchedule,TxSenderNonceRef] =
  ## Variant of `eq()` using unified key schedule
  if rc.isOK:
    return rc.value.data.eq(key)
  err(rc.error)
# ------------------------------------------------------------------------------
# Public SortedSet ops -- `AccountNonce` (level 2)
# ------------------------------------------------------------------------------
proc len*(nonceData: TxSenderNonceRef): int =
  ## Number of nonce entries in the sub-list.
  # Bug fix: the previous body read `let rc = nonceData.nonceList.len`,
  # leaving the implicit `result` at its zero value so the proc always
  # returned 0. Return the actual list length instead.
  nonceData.nonceList.len
proc nItems*(nonceData: TxSenderNonceRef): int =
  ## Getter, total number of items in the sub-list
  nonceData.nonceList.len

proc nItems*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef]): int =
  ## Variant of `nItems()`, returns `0` if `rc.isErr` evaluates `true`.
  if rc.isOK:
    return rc.value.data.nItems
  0

proc gasLimits*(nonceData: TxSenderNonceRef): GasInt =
  ## Getter, aggregated valued of `gasLimit` for all items in the
  ## argument list.
  nonceData.gasLimits

proc gasLimits*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef]):
       GasInt =
  ## Getter variant of `gasLimits()`, returns `0` if `rc.isErr`
  ## evaluates `true`.
  if rc.isOK:
    return rc.value.data.gasLimits
  0

proc maxProfit*(nonceData: TxSenderNonceRef): float64 =
  ## Getter, maximum profit value for the current item list. This is the
  ## aggregated value of `item.effectiveGasTip(baseFee) * item.gasLimit`
  ## over all items in the argument list `nonceData`. Note that this value
  ## is typically pretty large and sort of rounded due to the resolution
  ## of the `float64` data type.
  nonceData.profit

proc maxProfit*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef]):
       float64 =
  ## Variant of `maxProfit()`, returns `float64.low` if `rc.isErr`
  ## evaluates `true`.
  if rc.isOK:
    return rc.value.data.profit
  float64.low
proc eq*(nonceData: TxSenderNonceRef; nonce: AccountNonce):
       SortedSetResult[AccountNonce,TxItemRef] =
  ## Item with exactly the argument `nonce`.
  nonceData.nonceList.eq(nonce)

proc eq*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef];
         nonce: AccountNonce):
       SortedSetResult[AccountNonce,TxItemRef] =
  ## Variant of `eq()` for a result-wrapped sub-list.
  if rc.isOK:
    return rc.value.data.eq(nonce)
  err(rc.error)

proc ge*(nonceData: TxSenderNonceRef; nonce: AccountNonce):
       SortedSetResult[AccountNonce,TxItemRef] =
  ## Item with least nonce *greater or equal* the argument.
  nonceData.nonceList.ge(nonce)

proc ge*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef];
         nonce: AccountNonce):
       SortedSetResult[AccountNonce,TxItemRef] =
  ## Variant of `ge()` for a result-wrapped sub-list.
  if rc.isOK:
    return rc.value.data.ge(nonce)
  err(rc.error)

proc gt*(nonceData: TxSenderNonceRef; nonce: AccountNonce):
       SortedSetResult[AccountNonce,TxItemRef] =
  ## Item with least nonce *greater than* the argument.
  nonceData.nonceList.gt(nonce)

proc gt*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef];
         nonce: AccountNonce):
       SortedSetResult[AccountNonce,TxItemRef] =
  ## Variant of `gt()` for a result-wrapped sub-list.
  if rc.isOK:
    return rc.value.data.gt(nonce)
  err(rc.error)

proc le*(nonceData: TxSenderNonceRef; nonce: AccountNonce):
       SortedSetResult[AccountNonce,TxItemRef] =
  ## Item with largest nonce *less or equal* the argument.
  nonceData.nonceList.le(nonce)

proc le*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef];
         nonce: AccountNonce):
       SortedSetResult[AccountNonce,TxItemRef] =
  ## Variant of `le()` for a result-wrapped sub-list.
  if rc.isOK:
    return rc.value.data.le(nonce)
  err(rc.error)

proc lt*(nonceData: TxSenderNonceRef; nonce: AccountNonce):
       SortedSetResult[AccountNonce,TxItemRef] =
  ## Item with largest nonce *less than* the argument.
  nonceData.nonceList.lt(nonce)

proc lt*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef];
         nonce: AccountNonce):
       SortedSetResult[AccountNonce,TxItemRef] =
  ## Variant of `lt()` for a result-wrapped sub-list.
  if rc.isOK:
    return rc.value.data.lt(nonce)
  err(rc.error)
# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------
iterator accounts*(gt: var TxSenderTab): (EthAddress,int64)
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Sender account traversal, returns the account address and the rank
  ## for that account.
  for p in gt.addrList.nextPairs:
    yield (p.key, p.data.getRank)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,322 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Table: `status` > `nonce`
## ==========================================
##
import
../tx_info,
../tx_item,
eth/[common],
stew/[results, keyed_queue, keyed_queue/kq_debug, sorted_set]
{.push raises: [Defect].}
type
  TxStatusNonceRef* = ref object ##\
    ## Sub-list ordered by `AccountNonce` or `TxItemRef` insertion order.
    nonceList: SortedSet[AccountNonce,TxItemRef]

  TxStatusSenderRef* = ref object ##\
    ## Per address table. This table is provided as a keyed queue so\
    ## deletion while traversing is supported and predictable.
    size: int          ## Total number of items
    gasLimits: GasInt  ## Accumulated gas limits
    addrList: KeyedQueue[EthAddress,TxStatusNonceRef]

  TxStatusTab* = object ##\
    ## Per status table
    size: int          ## Total number of items
    statusList: array[TxItemStatus,TxStatusSenderRef]

  TxStatusInx = object ##\
    ## Internal access data
    addrData: TxStatusSenderRef
    nonceData: TxStatusNonceRef
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc `$`(rq: TxStatusNonceRef): string =
  ## Needed by `rq.verify()` for printing error messages
  $rq.nonceList.len

proc nActive(sq: TxStatusTab): int =
  ## Number of non-nil items
  for status in TxItemStatus:
    if not sq.statusList[status].isNil:
      result.inc
proc mkInxImpl(sq: var TxStatusTab; item: TxItemRef): Result[TxStatusInx,void]
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Fails if item exists, already
  var inx: TxStatusInx

  # array of buckets (aka status) => senders
  inx.addrData = sq.statusList[item.status]
  if inx.addrData.isNil:
    new inx.addrData
    inx.addrData.addrList.init
    sq.statusList[item.status] = inx.addrData

  # sender address sub-list => nonces
  if inx.addrData.addrList.hasKey(item.sender):
    inx.nonceData = inx.addrData.addrList[item.sender]
  else:
    new inx.nonceData
    inx.nonceData.nonceList.init
    inx.addrData.addrList[item.sender] = inx.nonceData

  # nonce sublist
  let rc = inx.nonceData.nonceList.insert(item.tx.nonce)
  if rc.isErr:
    # nonce exists already for this sender/status => reject as duplicate
    return err()
  rc.value.data = item

  return ok(inx)
proc getInxImpl(sq: var TxStatusTab; item: TxItemRef): Result[TxStatusInx,void]
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Fetch the access data for an already registered `item`. Fails if any
  ## level of the table hierarchy does not know about the item.
  let addrData = sq.statusList[item.status]
  if not addrData.isNil and addrData.addrList.hasKey(item.sender):
    return ok(TxStatusInx(
      addrData: addrData,
      nonceData: addrData.addrList[item.sender]))
  err()
# ------------------------------------------------------------------------------
# Public all-queue helpers
# ------------------------------------------------------------------------------
proc init*(sq: var TxStatusTab; size = 10) =
  ## Optional constructor, clears the table.
  ## NOTE(review): the `size` argument is currently ignored — presumably a
  ## sizing hint kept for interface compatibility; confirm before removing.
  sq.size = 0
  sq.statusList.reset
proc insert*(sq: var TxStatusTab; item: TxItemRef): bool
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Add transaction `item` to the list. The function has no effect if the
  ## transaction exists, already (apart from returning `false`.)
  let rc = sq.mkInxImpl(item)
  if rc.isOK:
    let inx = rc.value
    # book-keeping: global and per-sender counters
    sq.size.inc
    inx.addrData.size.inc
    inx.addrData.gasLimits += item.tx.gasLimit
    return true
proc delete*(sq: var TxStatusTab; item: TxItemRef): bool
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Remove transaction `item` from the list. Returns `false` (with no
  ## effect) unless the item is registered.
  let rc = sq.getInxImpl(item)
  if rc.isOK:
    let inx = rc.value

    # book-keeping, reverse of `insert()`
    sq.size.dec
    inx.addrData.size.dec
    inx.addrData.gasLimits -= item.tx.gasLimit

    # remove the nonce entry, then garbage collect empty table levels
    discard inx.nonceData.nonceList.delete(item.tx.nonce)
    if inx.nonceData.nonceList.len == 0:
      discard inx.addrData.addrList.delete(item.sender)
      if inx.addrData.addrList.len == 0:
        sq.statusList[item.status] = nil

    return true
proc verify*(sq: var TxStatusTab): Result[void,TxInfo]
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Consistency checker: walk `TxItemStatus` > `EthAddress` > `AccountNonce`
  ## and verify the internal structures and the cached counters against the
  ## items actually stored.
  var totalCount = 0
  for status in TxItemStatus:
    let addrData = sq.statusList[status]
    if not addrData.isNil:
      block:
        let rc = addrData.addrList.verify
        if rc.isErr:
          return err(txInfoVfyStatusSenderList)
      var
        addrCount = 0
        gasLimits = 0.GasInt
      for p in addrData.addrList.nextPairs:
        let (addrKey, nonceData) = (p.key, p.data)
        block:
          let rc = nonceData.nonceList.verify
          if rc.isErr:
            return err(txInfoVfyStatusNonceList)
        # accumulate per-sender totals by walking the nonce list
        var rcNonce = nonceData.nonceList.ge(AccountNonce.low)
        while rcNonce.isOK:
          let (nonceKey, item) = (rcNonce.value.key, rcNonce.value.data)
          rcNonce = nonceData.nonceList.gt(nonceKey)
          gasLimits += item.tx.gasLimit
          addrCount.inc
      # cached counters must match the accumulated values
      if addrCount != addrData.size:
        return err(txInfoVfyStatusTotal)
      if gasLimits != addrData.gasLimits:
        return err(txInfoVfyStatusGasLimits)
      totalCount += addrCount
  # end while

  if totalCount != sq.size:
    return err(txInfoVfyStatusTotal)
  ok()
# ------------------------------------------------------------------------------
# Public array ops -- `TxItemStatus` (level 0)
# ------------------------------------------------------------------------------
proc len*(sq: var TxStatusTab): int =
  ## Getter, number of initialised (non-nil) status buckets
  sq.nActive

proc nItems*(sq: var TxStatusTab): int =
  ## Getter, total number of items in the list
  sq.size

proc eq*(sq: var TxStatusTab; status: TxItemStatus):
    SortedSetResult[TxItemStatus,TxStatusSenderRef] =
  ## Getter, sender table of the argument `status` bucket (if any)
  let addrData = sq.statusList[status]
  if addrData.isNil:
    return err(rbNotFound)
  toSortedSetResult(key = status, data = addrData)
# ------------------------------------------------------------------------------
# Public array ops -- `EthAddress` (level 1)
# ------------------------------------------------------------------------------
proc nItems*(addrData: TxStatusSenderRef): int =
  ## Getter, total number of items in the sub-list
  addrData.size

proc nItems*(rc: SortedSetResult[TxItemStatus,TxStatusSenderRef]): int =
  ## Variant of `nItems()` for a `Result` argument (returns `0` on error)
  if rc.isOK:
    return rc.value.data.nItems
  0

proc gasLimits*(addrData: TxStatusSenderRef): GasInt =
  ## Getter, accumulated `gasLimit` values
  addrData.gasLimits

proc gasLimits*(rc: SortedSetResult[TxItemStatus,TxStatusSenderRef]): GasInt =
  ## Variant of `gasLimits()` for a `Result` argument (returns `0` on error)
  if rc.isOK:
    return rc.value.data.gasLimits
  0

proc eq*(addrData: TxStatusSenderRef; sender: EthAddress):
    SortedSetResult[EthAddress,TxStatusNonceRef]
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Getter, nonce list of the argument `sender` (if any)
  if addrData.addrList.haskey(sender):
    return toSortedSetResult(key = sender, data = addrData.addrList[sender])
  err(rbNotFound)

proc eq*(rc: SortedSetResult[TxItemStatus,TxStatusSenderRef];
         sender: EthAddress): SortedSetResult[EthAddress,TxStatusNonceRef]
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Variant of `eq()` for a `Result` argument (passes the error through)
  if rc.isOK:
    return rc.value.data.eq(sender)
  err(rc.error)
# ------------------------------------------------------------------------------
# Public array ops -- `AccountNonce` (level 2)
# ------------------------------------------------------------------------------
proc len*(nonceData: TxStatusNonceRef): int =
  ## Getter, same as `nItems` (for last level list)
  nonceData.nonceList.len

proc nItems*(nonceData: TxStatusNonceRef): int =
  ## Getter, total number of items in the sub-list
  nonceData.nonceList.len

proc nItems*(rc: SortedSetResult[EthAddress,TxStatusNonceRef]): int =
  ## Variant of `nItems()` for a `Result` argument (returns `0` on error)
  if rc.isOK:
    return rc.value.data.nItems
  0

proc eq*(nonceData: TxStatusNonceRef; nonce: AccountNonce):
    SortedSetResult[AccountNonce,TxItemRef] =
  ## Getter, item with the argument `nonce` (if any)
  nonceData.nonceList.eq(nonce)

proc eq*(rc: SortedSetResult[EthAddress,TxStatusNonceRef]; nonce: AccountNonce):
    SortedSetResult[AccountNonce,TxItemRef] =
  ## Variant of `eq()` for a `Result` argument (passes the error through)
  if rc.isOK:
    return rc.value.data.eq(nonce)
  err(rc.error)

proc ge*(nonceData: TxStatusNonceRef; nonce: AccountNonce):
    SortedSetResult[AccountNonce,TxItemRef] =
  ## Getter, first item with a nonce not less than the argument `nonce`
  nonceData.nonceList.ge(nonce)

proc ge*(rc: SortedSetResult[EthAddress,TxStatusNonceRef]; nonce: AccountNonce):
    SortedSetResult[AccountNonce,TxItemRef] =
  ## Variant of `ge()` for a `Result` argument (passes the error through)
  if rc.isOK:
    return rc.value.data.ge(nonce)
  err(rc.error)

proc gt*(nonceData: TxStatusNonceRef; nonce: AccountNonce):
    SortedSetResult[AccountNonce,TxItemRef] =
  ## Getter, first item with a nonce greater than the argument `nonce`
  nonceData.nonceList.gt(nonce)

proc gt*(rc: SortedSetResult[EthAddress,TxStatusNonceRef]; nonce: AccountNonce):
    SortedSetResult[AccountNonce,TxItemRef] =
  ## Variant of `gt()` for a `Result` argument (passes the error through)
  if rc.isOK:
    return rc.value.data.gt(nonce)
  err(rc.error)

proc le*(nonceData: TxStatusNonceRef; nonce: AccountNonce):
    SortedSetResult[AccountNonce,TxItemRef] =
  ## Getter, last item with a nonce not greater than the argument `nonce`
  nonceData.nonceList.le(nonce)

proc le*(rc: SortedSetResult[EthAddress,TxStatusNonceRef]; nonce: AccountNonce):
    SortedSetResult[AccountNonce,TxItemRef] =
  ## Variant of `le()` for a `Result` argument (passes the error through)
  if rc.isOK:
    return rc.value.data.le(nonce)
  err(rc.error)

proc lt*(nonceData: TxStatusNonceRef; nonce: AccountNonce):
    SortedSetResult[AccountNonce,TxItemRef] =
  ## Getter, last item with a nonce less than the argument `nonce`
  nonceData.nonceList.lt(nonce)

proc lt*(rc: SortedSetResult[EthAddress,TxStatusNonceRef]; nonce: AccountNonce):
    SortedSetResult[AccountNonce,TxItemRef] =
  ## Variant of `lt()` for a `Result` argument (passes the error through)
  if rc.isOK:
    return rc.value.data.lt(nonce)
  err(rc.error)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,220 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Tasklet: Add Transaction
## =========================================
##
import
std/[tables],
../tx_desc,
../tx_gauge,
../tx_info,
../tx_item,
../tx_tabs,
./tx_classify,
./tx_recover,
chronicles,
eth/[common, keys],
stew/[keyed_queue, sorted_set]
{.push raises: [Defect].}
type
  NonceList = ##\
    ## Temporary sorter list
    SortedSet[AccountNonce,TxItemRef]
  AccouuntNonceTab = ##\
    ## Temporary sorter table
    ## NOTE(review): identifier has a typo (`Accouunt`) — kept as-is here
    ## since it is referenced elsewhere in this module.
    Table[EthAddress,NonceList]
logScope:
topics = "tx-pool add transaction"
# ------------------------------------------------------------------------------
# Private helper
# ------------------------------------------------------------------------------
proc getItemList(tab: var AccouuntNonceTab; key: EthAddress): var NonceList
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Return the nonce list registered for `key`, creating an empty list on
  ## first use.
  # `mgetOrPut()` avoids the double table lookup of `hasKey()` + `[]`;
  # `NonceList.init` is a cheap, side-effect free default value.
  tab.mgetOrPut(key, NonceList.init)
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc supersede(xp: TxPoolRef; item: TxItemRef): Result[void,TxInfo]
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Replace the tx registered with the same <sender,nonce> by `item`,
  ## provided the new gas price is bumped by at least `priceBump` percent.
  var current: TxItemRef

  block:
    let rc = xp.txDB.bySender.eq(item.sender).any.eq(item.tx.nonce)
    if rc.isErr:
      return err(txInfoErrUnspecified)
    current = rc.value.data

  # verify whether replacing is allowed, at all
  # (the `+ 99` rounds the percentage value up)
  let bumpPrice = (current.tx.gasPrice * xp.priceBump.GasInt + 99) div 100
  if item.tx.gasPrice < current.tx.gasPrice + bumpPrice:
    return err(txInfoErrReplaceUnderpriced)

  # make space, delete item
  if not xp.txDB.dispose(current, txInfoSenderNonceSuperseded):
    return err(txInfoErrVoidDisposal)

  # try again
  block:
    let rc = xp.txDB.insert(item)
    if rc.isErr:
      return err(rc.error)

  return ok()
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc addTx*(xp: TxPoolRef; item: TxItemRef): bool
    {.discardable,gcsafe,raises: [Defect,CatchableError].} =
  ## Add a transaction item. It is tested and stored in either of the `pending`
  ## or `staged` buckets, or disposed into the waste basket. The function
  ## returns `true` if the item was added to the `staged` bucket.
  # (the former local `stagedItemAdded` was never used and has been removed)
  var vetted = txInfoOk

  # Leave this frame with `return`, or proceed with error
  block txErrorFrame:
    # Create tx ID and check for dups
    if xp.txDB.byItemID.hasKey(item.itemID):
      vetted = txInfoErrAlreadyKnown
      break txErrorFrame

    # Verify transaction
    if not xp.classifyValid(item):
      vetted = txInfoErrBasicValidatorFailed
      break txErrorFrame

    # Update initial state bucket
    item.status =
      if xp.classifyActive(item): txItemStaged
      else: txItemPending

    # Insert into database
    block:
      let rc = xp.txDB.insert(item)
      if rc.isOK:
        validTxMeter(1)
        return item.status == txItemStaged
      vetted = rc.error

    # need to replace tx with same <sender/nonce> as the new item
    if vetted == txInfoErrSenderNonceIndex:
      let rc = xp.supersede(item)
      if rc.isOK:
        validTxMeter(1)
        # NOTE(review): this returns `false` even if the superseding item
        # went to the `staged` bucket — confirm whether that is intended.
        return
      vetted = rc.error

  # Error processing => store in waste basket
  xp.txDB.reject(item, vetted)

  # update gauge
  case vetted:
  of txInfoErrAlreadyKnown:
    knownTxMeter(1)
  of txInfoErrInvalidSender:
    invalidTxMeter(1)
  else:
    unspecifiedErrorMeter(1)
# core/tx_pool.go(848): func (pool *TxPool) AddLocals(txs []..
# core/tx_pool.go(854): func (pool *TxPool) AddLocals(txs []..
# core/tx_pool.go(864): func (pool *TxPool) AddRemotes(txs []..
# core/tx_pool.go(883): func (pool *TxPool) AddRemotes(txs []..
# core/tx_pool.go(889): func (pool *TxPool) addTxs(txs []*types.Transaction, ..
proc addTxs*(xp: TxPoolRef;
             txs: var openArray[Transaction]; info = ""): (bool,seq[TxItemRef])
    {.discardable,gcsafe,raises: [Defect,CatchableError].} =
  ## Add a list of transactions. The list is sorted after nonces and txs are
  ## tested and stored into either of the `pending` or `staged` buckets, or
  ## disposed of into the waste basket. The function returns the tuple
  ## `(staged-indicator,top-items)` as explained below.
  ##
  ## *staged-indicator*
  ##   If `true`, this value indicates that at least one item was added to
  ##   the `staged` bucket (which suggests a re-run of the packer.)
  ##
  ## *top-items*
  ##   For each sender where txs were added to the bucket database or waste
  ##   basket, this list keeps the items with the highest nonce (handy for
  ##   chasing nonce gaps after a back-move of the block chain head.)
  ##
  var accTab: AccouuntNonceTab

  # Phase 1: sort txs per sender into nonce-ordered lists
  for tx in txs.mitems:
    var reason: TxInfo

    # Create tx item wrapper, preferably recovered from waste basket
    let rcTx = xp.recoverItem(tx, txItemPending, info)
    if rcTx.isErr:
      reason = rcTx.error
    else:
      let
        item = rcTx.value
        rcInsert = accTab.getItemList(item.sender).insert(item.tx.nonce)
      if rcInsert.isErr:
        # duplicate <sender,nonce> within the argument list itself
        reason = txInfoErrSenderNonceIndex
      else:
        rcInsert.value.data = item # link that item
        continue

    # move item to waste basket
    xp.txDB.reject(tx, reason, txItemPending, info)

    # update gauge
    case reason:
    of txInfoErrAlreadyKnown:
      knownTxMeter(1)
    of txInfoErrInvalidSender:
      invalidTxMeter(1)
    else:
      unspecifiedErrorMeter(1)

  # Phase 2: add the sorted transaction items, nonce-ascending per sender
  for itemList in accTab.mvalues:
    var
      rc = itemList.ge(AccountNonce.low)
      lastItem: TxItemRef # => nil

    while rc.isOK:
      let (nonce,item) = (rc.value.key,rc.value.data)
      if xp.addTx(item):
        result[0] = true

      # Make sure that there is at least one item per sender, preferably
      # a non-error item.
      if item.reject == txInfoOk or lastItem.isNil:
        lastItem = item
      rc = itemList.gt(nonce)

    # return the last one in the series
    if not lastItem.isNil:
      result[1].add lastItem
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,173 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Tasklets: Update by Bucket
## ===========================================
##
import
std/[tables],
../../../constants,
../tx_chain,
../tx_desc,
../tx_info,
../tx_item,
../tx_tabs,
../tx_tabs/tx_status,
./tx_classify,
./tx_dispose,
chronicles,
eth/[common, keys],
stew/[sorted_set]
{.push raises: [Defect].}
const
minNonce = AccountNonce.low
logScope:
topics = "tx-pool buckets"
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc bucketItemsReassignPending*(xp: TxPoolRef; labelFrom: TxItemStatus;
                                 account: EthAddress; nonceFrom = minNonce)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Move all items in bucket `labelFrom` with nonces not less than
  ## `nonceFrom` to the `pending` bucket
  let rc = xp.txDB.byStatus.eq(labelFrom).eq(account)
  if rc.isOK:
    for item in rc.value.data.incNonce(nonceFrom):
      discard xp.txDB.reassign(item, txItemPending)
proc bucketItemsReassignPending*(xp: TxPoolRef; item: TxItemRef)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Variant of `bucketItemsReassignPending()`: move `item` and all of its
  ## sender's items with larger nonces from `item`'s bucket to `pending`.
  xp.bucketItemsReassignPending(item.status, item.sender, item.tx.nonce)
proc bucketUpdateAll*(xp: TxPoolRef): bool
    {.discardable,gcsafe,raises: [Defect,CatchableError].} =
  ## Update all buckets. The function returns `true` if some items were added
  ## to the `staged` bucket.

  # Sort order: `EthAddress` > `AccountNonce` > item.
  var
    stagedItemsAdded = false
    stashed: Table[EthAddress,seq[TxItemRef]]

  # Prepare
  if 0 < xp.pDoubleCheck.len:
    for item in xp.pDoubleCheck:
      if item.reject == txInfoOk:
        # Check whether there was a gap when the head was moved backwards.
        let rc = xp.txDB.bySender.eq(item.sender).any.gt(item.tx.nonce)
        if rc.isOK:
          let nextItem = rc.value.data
          if item.tx.nonce + 1 < nextItem.tx.nonce:
            discard xp.disposeItemAndHigherNonces(
              item, txInfoErrNonceGap, txInfoErrImpliedNonceGap)
      else:
        # For failed txs, make sure that the account state has not
        # changed. Assuming that this list is complete, then there are
        # no other account affected.
        let rc = xp.txDB.bySender.eq(item.sender).any.ge(minNonce)
        if rc.isOK:
          let firstItem = rc.value.data
          if not xp.classifyValid(firstItem):
            discard xp.disposeItemAndHigherNonces(
              firstItem, txInfoErrNonceGap, txInfoErrImpliedNonceGap)

    # Clean up that queue
    xp.pDoubleCheckFlush

  # PENDING
  #
  # Stash the items from the `pending` bucket. The nonces in this list are
  # greater than the ones from other lists. When processing the `staged`
  # list, all that can happen is that lower nonces (than the stashed ones)
  # are added.
  for (sender,nonceList) in xp.txDB.incAccount(txItemPending):
    # New per-sender-account sub-sequence
    stashed[sender] = newSeq[TxItemRef]()
    for item in nonceList.incNonce:
      # Add to sub-sequence
      stashed[sender].add item

  # STAGED
  #
  # Update/edit `staged` bucket.
  for (_,nonceList) in xp.txDB.incAccount(txItemStaged):
    for item in nonceList.incNonce:
      if not xp.classifyActive(item):
        # Larger nonces cannot be held in the `staged` bucket anymore for this
        # sender account. So they are moved back to the `pending` bucket.
        xp.bucketItemsReassignPending(item)

        # The nonces in the `staged` bucket are always smaller than the one in
        # the `pending` bucket. So, if the lower nonce items must go to the
        # `pending` bucket, then the stashed `pending` bucket items can only
        # stay there.
        stashed.del(item.sender)
        break # inner `incItemList()` loop

  # PACKED
  #
  # Update `packed` bucket. The items are a subset of all possibly staged
  # (aka active) items. So they follow a similar logic as for the `staged`
  # items above.
  for (_,nonceList) in xp.txDB.incAccount(txItemPacked):
    for item in nonceList.incNonce:
      if not xp.classifyActive(item):
        xp.bucketItemsReassignPending(item)

        # For the `sender` all staged items have smaller nonces, so they have
        # to go to the `pending` bucket, as well.
        xp.bucketItemsReassignPending(txItemStaged, item.sender)
        stagedItemsAdded = true

        stashed.del(item.sender)
        break # inner `incItemList()` loop

  # PENDING re-visited
  #
  # Post-process `pending` and `staged` buckets. Re-insert the
  # list of stashed `pending` items.
  # NOTE(review): re-staging stashed items below does not set
  # `stagedItemsAdded` — confirm whether the return value should reflect it.
  for itemList in stashed.values:
    for item in itemList:
      if not xp.classifyActive(item):
        # Ignore higher nonces
        break # inner loop for `itemList` sequence
      # Move to staged bucket
      discard xp.txDB.reassign(item, txItemStaged)

  stagedItemsAdded
# ---------------------------
proc bucketFlushPacked*(xp: TxPoolRef)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Move all items from the `packed` bucket to the `staged` bucket
  ## (the code reassigns `txItemStaged`, not `pending` as a previous
  ## comment claimed.)
  for (_,nonceList) in xp.txDB.decAccount(txItemPacked):
    for item in nonceList.incNonce:
      discard xp.txDB.reassign(item,txItemStaged)

  # Reset bucket status info
  xp.chain.clearAccounts
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,264 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Tasklet: Classify Transactions
## ===============================================
##
import
../../../forks,
../../../p2p/validate,
../../../transaction,
../../../vm_state,
../../../vm_types,
../tx_chain,
../tx_desc,
../tx_item,
../tx_tabs,
chronicles,
eth/[common, keys]
{.push raises: [Defect].}
logScope:
topics = "tx-pool classify"
# ------------------------------------------------------------------------------
# Private function: tx validity check helpers
# ------------------------------------------------------------------------------
proc checkTxBasic(xp: TxPoolRef; item: TxItemRef): bool =
  ## Inspired by `p2p/validate.validateTransaction()`
  ##
  ## Stateless sanity checks against the fork of the next block:
  ## * the tx type must be known at that fork
  ## * `gasLimit` must cover the intrinsic gas
  ## * for EIP-1559 txs, `maxFee` must not be below `maxPriorityFee`
  if item.tx.txType == TxEip2930 and xp.chain.nextFork < FkBerlin:
    debug "invalid tx: Eip2930 Tx type detected before Berlin"
    return false

  if item.tx.txType == TxEip1559 and xp.chain.nextFork < FkLondon:
    debug "invalid tx: Eip1559 Tx type detected before London"
    return false

  # Compute the intrinsic gas once: the original recomputed it for the log
  # message using `xp.chain.fork` while the check used `xp.chain.nextFork`,
  # which could print a value different from the one actually tested.
  let intrinsicGas = item.tx.intrinsicGas(xp.chain.nextFork)
  if item.tx.gasLimit < intrinsicGas:
    debug "invalid tx: not enough gas to perform calculation",
      available = item.tx.gasLimit,
      require = intrinsicGas
    return false

  if item.tx.txType == TxEip1559:
    # The total must be the larger of the two
    if item.tx.maxFee < item.tx.maxPriorityFee:
      debug "invalid tx: maxFee is smaller than maxPriorityFee",
        maxFee = item.tx.maxFee,
        maxPriorityFee = item.tx.maxPriorityFee
      return false

  true
proc checkTxNonce(xp: TxPoolRef; item: TxItemRef): bool
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Make sure that there is only one continuous sequence of nonces (per
  ## sender) starting at the account nonce.

  # get the next applicable nonce as registered on the account database
  let accountNonce = xp.chain.getNonce(item.sender)

  if item.tx.nonce < accountNonce:
    debug "invalid tx: account nonce too small",
      txNonce = item.tx.nonce,
      accountNonce
    return false

  elif accountNonce < item.tx.nonce:
    # for an existing account, nonces must come in increasing consecutive order
    let rc = xp.txDB.bySender.eq(item.sender)
    if rc.isOK:
      # the predecessor nonce must already be registered for this sender
      if rc.value.data.any.eq(item.tx.nonce - 1).isErr:
        debug "invalid tx: account nonces gap",
          txNonce = item.tx.nonce,
          accountNonce
        return false

  true
# ------------------------------------------------------------------------------
# Private function: active tx classifier check helpers
# ------------------------------------------------------------------------------
proc txNonceActive(xp: TxPoolRef; item: TxItemRef): bool
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Make sure that nonces appear as a continuous sequence in the `staged`
  ## bucket, probably preceded in the `packed` bucket.
  let rc = xp.txDB.bySender.eq(item.sender)
  if rc.isErr:
    return true
  # The predecessor nonce must not be in the `pending` bucket.
  if rc.value.data.eq(txItemPending).eq(item.tx.nonce - 1).isOk:
    return false
  true
proc txGasCovered(xp: TxPoolRef; item: TxItemRef): bool =
  ## Check whether the max gas consumption is within the gas limit (aka block
  ## size).
  let blockSize = xp.chain.limits.trgLimit
  if item.tx.gasLimit <= blockSize:
    return true
  debug "invalid tx: gasLimit exceeded",
    maxLimit = blockSize,
    gasLimit = item.tx.gasLimit
  false
proc txFeesCovered(xp: TxPoolRef; item: TxItemRef): bool =
  ## Ensure that the user was willing to at least pay the base fee (only
  ## meaningful for EIP-1559 txs; other tx types always pass.)
  if item.tx.txType != TxEip1559:
    return true
  if xp.chain.baseFee <= item.tx.maxFee.GasPriceEx:
    return true
  debug "invalid tx: maxFee is smaller than baseFee",
    maxFee = item.tx.maxFee,
    baseFee = xp.chain.baseFee
  false
proc txCostInBudget(xp: TxPoolRef; item: TxItemRef): bool
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Check whether the worst case expense is covered by the price budget,
  ## i.e. the sender's balance pays for the maximum gas cost plus the
  ## transferred value.
  let
    balance = xp.chain.getBalance(item.sender)
    gasCost = item.tx.gasLimit.u256 * item.tx.gasPrice.u256
  if balance < gasCost:
    debug "invalid tx: not enough cash for gas",
      available = balance,
      require = gasCost
    return false
  let balanceOffGasCost = balance - gasCost
  if balanceOffGasCost < item.tx.value:
    debug "invalid tx: not enough cash to send",
      available = balance,
      availableMinusGas = balanceOffGasCost,
      require = item.tx.value
    return false
  true
proc txPreLondonAcceptableGasPrice(xp: TxPoolRef; item: TxItemRef): bool =
  ## For legacy transactions check whether minimum gas price and tip are
  ## high enough. These checks are optional.
  if item.tx.txType != TxEip1559:

    if stageItemsPlMinPrice in xp.pFlags:
      if item.tx.gasPrice.GasPriceEx < xp.pMinPlGasPrice:
        return false

    elif stageItems1559MinTip in xp.pFlags:
      # Fall back transaction selector scheme. Note that the `elif` makes
      # the plain min-price check take precedence when both flags are set.
      if item.tx.effectiveGasTip(xp.chain.baseFee) < xp.pMinTipPrice:
        return false

  true
proc txPostLondonAcceptableTipAndFees(xp: TxPoolRef; item: TxItemRef): bool =
  ## Helper for `classifyTxPacked()` — optional EIP-1559 min-tip and
  ## min-fee checks; other tx types always pass.
  if item.tx.txType != TxEip1559:
    return true
  if stageItems1559MinTip in xp.pFlags and
     item.tx.effectiveGasTip(xp.chain.baseFee) < xp.pMinTipPrice:
    return false
  if stageItems1559MinFee in xp.pFlags and
     item.tx.maxFee.GasPriceEx < xp.pMinFeePrice:
    return false
  true
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc classifyValid*(xp: TxPoolRef; item: TxItemRef): bool
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Check a (typically new) transaction whether it should be accepted at all
  ## or rejected right away.
  # `and` short-circuits, so the nonce check still runs first as before.
  xp.checkTxNonce(item) and xp.checkTxBasic(item)
proc classifyActive*(xp: TxPoolRef; item: TxItemRef): bool
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Check whether a valid transaction is ready to be held in the
  ## `staged` bucket in which case the function returns `true`.
  # Chained with short-circuiting `and` so the checks run in the same
  # order as the original early-return cascade.
  xp.txNonceActive(item) and
    0.GasPriceEx < item.tx.effectiveGasTip(xp.chain.baseFee) and
    xp.txGasCovered(item) and
    xp.txFeesCovered(item) and
    xp.txCostInBudget(item) and
    xp.txPreLondonAcceptableGasPrice(item) and
    xp.txPostLondonAcceptableTipAndFees(item)
proc classifyValidatePacked*(xp: TxPoolRef;
                             vmState: BaseVMState; item: TxItemRef): bool =
  ## Verify the argument `item` against the accounts database. This function
  ## is a wrapper around the `validateTransaction()` call to be used in a
  ## similar fashion as in `processTransactionImpl()`.
  let
    roDB = vmState.readOnlyStateDB
    baseFee = xp.chain.baseFee.uint64.u256
    fork = xp.chain.nextFork
    # the hard block limit applies when the packer may fill blocks to the
    # maximum, otherwise the target limit is used
    gasLimit = if packItemsMaxGasLimit in xp.pFlags:
                 xp.chain.limits.maxLimit
               else:
                 xp.chain.limits.trgLimit

  roDB.validateTransaction(item.tx, item.sender, gasLimit, baseFee, fork)
proc classifyPacked*(xp: TxPoolRef; gasBurned, moreBurned: GasInt): bool =
  ## Classifier for *packing* (i.e. adding up `gasUsed` values after executing
  ## in the VM.) This function checks whether the sum of the arguments
  ## `gasBurned` and `moreBurned` is within acceptable constraints.
  let
    totalGasUsed = gasBurned + moreBurned
    ceiling = if packItemsMaxGasLimit in xp.pFlags:
                xp.chain.limits.maxLimit
              else:
                xp.chain.limits.trgLimit
  totalGasUsed < ceiling
proc classifyPackedNext*(xp: TxPoolRef; gasBurned, moreBurned: GasInt): bool =
  ## Classifier for *packing* (i.e. adding up `gasUsed` values after executing
  ## in the VM.) This function returns `true` if the packing level is still
  ## low enough to proceed trying to accumulate more items.
  ##
  ## This function is typically called as a follow up after a `false` return
  ## of `classifyPacked()`.
  if packItemsTryHarder notin xp.pFlags:
    xp.classifyPacked(gasBurned, moreBurned)
  elif packItemsMaxGasLimit in xp.pFlags:
    # "try harder" mode: keep going while below the high water mark
    gasBurned < xp.chain.limits.hwmLimit
  else:
    gasBurned < xp.chain.limits.lwmLimit
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,114 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Tasklet: Dispose expired items
## ===============================================
##
import
std/[times],
../tx_desc,
../tx_gauge,
../tx_info,
../tx_item,
../tx_tabs,
chronicles,
eth/[common, keys],
stew/keyed_queue
{.push raises: [Defect].}
logScope:
topics = "tx-pool dispose expired"
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc utcNow: Time =
  ## Current time. `getTime()` already returns an absolute point in time,
  ## so the former `getTime().utc.toTime` round trip was redundant.
  getTime()
#proc pp(t: Time): string =
# t.format("yyyy-MM-dd'T'HH:mm:ss'.'fff", utc())
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc deleteOtherNonces(xp: TxPoolRef; item: TxItemRef; newerThan: Time): bool
    {.gcsafe,raises: [Defect,KeyError].} =
  ## Dispose all of `item`'s sender's txs with nonces not less than
  ## `item.tx.nonce` that are younger than `newerThan`. Returns `true` if
  ## at least one item was disposed.
  let rc = xp.txDB.bySender.eq(item.sender).any
  if rc.isOK:
    for other in rc.value.data.incNonce(item.tx.nonce):
      # only delete non-expired items
      if newerThan < other.timeStamp:
        discard xp.txDB.dispose(other, txInfoErrTxExpiredImplied)
        impliedEvictionMeter.inc
        result = true
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
# core/tx_pool.go(384): for addr := range pool.queue {
proc disposeExpiredItems*(xp: TxPoolRef) {.gcsafe,raises: [Defect,KeyError].} =
  ## Any non-local transaction old enough will be removed. This will not
  ## apply to items in the packed queue.
  let
    deadLine = utcNow() - xp.lifeTime
    dspPacked = autoZombifyPacked in xp.pFlags
    dspUnpacked = autoZombifyUnpacked in xp.pFlags

  var rc = xp.txDB.byItemID.first
  while rc.isOK:
    let (key, item) = (rc.value.key, rc.value.data)
    if deadLine < item.timeStamp:
      # NOTE(review): this break assumes `byItemID` visits oldest items
      # first, so all remaining items are younger — confirm queue ordering.
      break
    rc = xp.txDB.byItemID.next(key)

    # skip buckets for which the corresponding zombify flag is not set
    if item.status == txItemPacked:
      if not dspPacked:
        continue
    else:
      if not dspUnpacked:
        continue

    # Note: it is ok to delete the current item
    discard xp.txDB.dispose(item, txInfoErrTxExpired)
    evictionMeter.inc

    # Also delete all non-expired items with higher nonces.
    if xp.deleteOtherNonces(item, deadLine):
      if rc.isOK:
        # If one of the "other" just deleted items was the "next(key)", the
        # loop would have stopped anyway at the "if deadLine < item.timeStamp:"
        # clause at the while() loop header.
        if not xp.txDB.byItemID.hasKey(rc.value.key):
          break
proc disposeItemAndHigherNonces*(xp: TxPoolRef; item: TxItemRef;
                                 reason, otherReason: TxInfo): int
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Move item and higher nonces per sender to wastebasket. The `item` itself
  ## is tagged with `reason`, the higher-nonce items with `otherReason`.
  ## Returns the number of items actually disposed.
  if xp.txDB.dispose(item, reason):
    result = 1
    # For the current sender, delete all items with higher nonces
    let rc = xp.txDB.bySender.eq(item.sender).any
    if rc.isOK:
      let nonceList = rc.value.data

      for otherItem in nonceList.incNonce(item.tx.nonce):
        if xp.txDB.dispose(otherItem, otherReason):
          result.inc
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,245 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Tasklet: Move Head of Block Chain
## ==================================================
##
import
std/[tables],
../../../db/db_chain,
../tx_chain,
../tx_desc,
../tx_info,
../tx_item,
chronicles,
eth/[common, keys],
stew/keyed_queue
{.push raises: [Defect].}
type
  TxHeadDiffRef* = ref object ##\
    ## Diff data, txs changes that apply after changing the head\
    ## insertion point of the block chain
    addTxs*: KeyedQueue[Hash256,Transaction] ##\
      ## txs to add; using a queue makes it more intuitive to delete
      ## items while traversing the queue in a loop.
    remTxs*: Table[Hash256,bool] ##\
      ## txs to remove
logScope:
topics = "tx-pool head adjust"
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
# use it as a stack/lifo as the ordering is reversed
proc insert(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Register all txs of the block `blockHash` as to-be-added.
  for tx in xp.chain.db.getBlockBody(blockHash).transactions:
    kq.addTxs[tx.itemID] = tx
proc remove(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Register all txs of the block `blockHash` as to-be-removed.
  for tx in xp.chain.db.getBlockBody(blockHash).transactions:
    kq.remTxs[tx.itemID] = true
proc new(T: type TxHeadDiffRef): T =
  ## Constructor, returns a diff record with zero-initialised fields.
  T()
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
# core/tx_pool.go(218): func (pool *TxPool) reset(oldHead, newHead ...
proc headDiff*(xp: TxPoolRef;
               newHead: BlockHeader): Result[TxHeadDiffRef,TxInfo]
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## This function calculates the txs differences between the cached block
  ## chain head to a new head implied by the argument `newHead`. Differences
  ## are returned as two tables for adding and removing txs. The table
  ## for adding transactions (is a queue and) preserves the order of the txs
  ## from the block chain.
  ##
  ## Now, considering add/delete tx actions needed when replacing the cached
  ## *current head* position by the *new head* position (as derived from the
  ## function argument `newHead`), the most complex case of a block chain
  ## might look as follows:
  ## ::
  ##  .                      o---o-- .. ---o---o
  ##  .                     /                  ^
  ##  .  block chain .. ---o---o---o .. --o    |
  ##  .                    ^              ^    |
  ##  .                    |              |    |
  ##  .            common ancestor        |    |
  ##  .                                   |    |
  ##  .                               new head |
  ##  .                                        |
  ##  .                                  current head
  ##
  ## Legend
  ## * the bullet *o* stands for a block
  ## * a link *---* between two blocks indicates that the block number to
  ##   the right increases by *1*
  ## * the *common ancestor* is chosen with the largest possible block number
  ##   not exceeding the block numbers of both, the *current head* and the
  ##   *new head*
  ## * the branches to the right of the *common ancestor* may collapse to
  ##   a single branch (in which case at least one of *old head* or
  ##   *new head* collapses with the *common ancestor*)
  ## * there is no assumption on the block numbers of *new head* and
  ##   *current head* as of which one is greater, they might also be equal
  ##
  ## Consider the two sets *ADD* and *DEL* where
  ##
  ## *ADD*
  ##   is the set of txs on the branch between the *common ancestor* and
  ##   the *current head*, and
  ## *DEL*
  ##   is the set of txs on the branch between the *common ancestor* and
  ##   the *new head*
  ##
  ## Then, the set of txs to be added to the pool is *ADD - DEL* and the set
  ## of txs to be removed is *DEL - ADD*.
  ##
  let
    curHead = xp.chain.head
    curHash = curHead.blockHash
    newHash = newHead.blockHash

  var ignHeader: BlockHeader
  if not xp.chain.db.getBlockHeader(newHash, ignHeader):
    # sanity check
    warn "Tx-pool head forward for non-existing header",
      newHead = newHash,
      newNumber = newHead.blockNumber
    return err(txInfoErrForwardHeadMissing)

  if not xp.chain.db.getBlockHeader(curHash, ignHeader):
    # This can happen if a `setHead()` is performed, where we have discarded
    # the old head from the chain.
    if curHead.blockNumber <= newHead.blockNumber:
      warn "Tx-pool head forward from detached current header",
        curHead = curHash,
        curNumber = curHead.blockNumber
      return err(txInfoErrAncestorMissing)
    debug "Tx-pool reset with detached current head",
      curHeader = curHash,
      curNumber = curHead.blockNumber, # fix: was `curHeader.blockNumber` (undefined)
      newHeader = newHash,
      newNumber = newHead.blockNumber  # fix: was `newHeader.blockNumber` (undefined)
    return err(txInfoErrChainHeadMissing)

  # Equalise block numbers between branches (typically, these branches
  # collapse and there is a single strain only)
  var
    txDiffs = TxHeadDiffRef.new

    curBranchHead = curHead
    curBranchHash = curHash
    newBranchHead = newHead
    newBranchHash = newHash

  if newHead.blockNumber < curHead.blockNumber:
    #
    # new head block number smaller than the current head one
    #
    #              ,o---o-- ..--o
    #             /             ^
    #            /              |
    #       ----o---o---o       |
    #               ^           |
    #               |           |
    #              new        << current (step back this one)
    #
    # preserve transactions on the upper branch block numbers
    # between #new..#current to be re-inserted into the pool
    #
    while newHead.blockNumber < curBranchHead.blockNumber:
      xp.insert(txDiffs, curBranchHash)
      let
        tmpHead = curBranchHead # cache value for error logging
        tmpHash = curBranchHash
      curBranchHash = curBranchHead.parentHash # decrement block number
      if not xp.chain.db.getBlockHeader(curBranchHash, curBranchHead):
        error "Unrooted old chain seen by tx-pool",
          curBranchHead = tmpHash,
          curBranchNumber = tmpHead.blockNumber
        return err(txInfoErrUnrootedCurChain)
  else:
    #
    # current head block number smaller (or equal) than the new head one
    #
    #              ,o---o-- ..--o
    #             /             ^
    #            /              |
    #       ----o---o---o       |
    #               ^           |
    #               |           |
    #            current      << new (step back this one)
    #
    # preserve transactions on the upper branch block numbers
    # between #current..#new to be deleted from the pool
    #
    while curHead.blockNumber < newBranchHead.blockNumber:
      # fix: was `xp.remove(txDiffs, curBranchHash)` -- this loop steps back
      # the NEW branch, so the new branch block must be marked for removal
      xp.remove(txDiffs, newBranchHash)
      let
        tmpHead = newBranchHead # cache value for error logging
        tmpHash = newBranchHash
      newBranchHash = newBranchHead.parentHash # decrement block number
      if not xp.chain.db.getBlockHeader(newBranchHash, newBranchHead):
        error "Unrooted new chain seen by tx-pool",
          newBranchHead = tmpHash,
          newBranchNumber = tmpHead.blockNumber
        return err(txInfoErrUnrootedNewChain)

  # simultaneously step back until junction-head (aka common ancestor) while
  # preserving txs between block numbers #ancestor..#current unless
  # between #ancestor..#new
  while curBranchHash != newBranchHash:
    block:
      xp.insert(txDiffs, curBranchHash)
      let
        tmpHead = curBranchHead # cache value for error logging
        tmpHash = curBranchHash
      curBranchHash = curBranchHead.parentHash
      if not xp.chain.db.getBlockHeader(curBranchHash, curBranchHead):
        error "Unrooted old chain seen by tx-pool",
          curBranchHead = tmpHash,
          curBranchNumber = tmpHead.blockNumber
        return err(txInfoErrUnrootedCurChain)
    block:
      xp.remove(txDiffs, newBranchHash)
      let
        tmpHead = newBranchHead # cache value for error logging
        tmpHash = newBranchHash
      newBranchHash = newBranchHead.parentHash
      if not xp.chain.db.getBlockHeader(newBranchHash, newBranchHead):
        error "Unrooted new chain seen by tx-pool",
          newBranchHead = tmpHash,
          newBranchNumber = tmpHead.blockNumber
        return err(txInfoErrUnrootedNewChain)

  # figure out difference sets
  for itemID in txDiffs.addTxs.nextKeys:
    if txDiffs.remTxs.hasKey(itemID):
      txDiffs.addTxs.del(itemID) # ok to delete the current one on a KeyedQueue
      txDiffs.remTxs.del(itemID)

  ok(txDiffs)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,282 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Tasklets: Packer, VM execute and compact txs
## =============================================================
##
import
std/[sets, tables],
../../../db/[accounts_cache, db_chain],
../../../forks,
../../../p2p/[dao, executor, validate],
../../../transaction/call_evm,
../../../vm_state,
../../../vm_types,
../tx_chain,
../tx_desc,
../tx_item,
../tx_tabs,
../tx_tabs/tx_status,
./tx_bucket,
./tx_classify,
chronicles,
eth/[common, keys, rlp, trie, trie/db],
stew/[sorted_set]
{.push raises: [Defect].}
type
  TxPackerError* = object of CatchableError
    ## Catch and relay exception error

  TxPackerStateRef = ref object
    ## Local state while packing a pseudo block from the `staged` bucket
    xp: TxPoolRef        ## tx pool descriptor being packed
    tr: HexaryTrie       ## accumulates the txs trie (becomes `txRoot`)
    cleanState: bool     ## set after `persist()`, cleared after running a tx
    balance: UInt256     ## miner balance snapshot taken in `vmExecInit()`

const
  receiptsExtensionSize = ##\
    ## Number of slots to extend the `receipts[]` at the same time.
    20

logScope:
  topics = "tx-pool packer"
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
template safeExecutor(info: string; code: untyped) =
  ## Run `code`, re-raising `CatchableError` and `Defect` as-is (message
  ## preserved) and wrapping any other exception into a `TxPackerError`
  ## tagged with the `info` label.
  try:
    code
  except CatchableError as e:
    raise (ref CatchableError)(msg: e.msg)
  except Defect as e:
    raise (ref Defect)(msg: e.msg)
  except:
    # deliberate wildcard: the EVM may raise a plain `Exception`
    let e = getCurrentException()
    raise newException(TxPackerError, info & "(): " & $e.name & " -- " & e.msg)
proc eip1559TxNormalization(tx: Transaction): Transaction =
  ## Return a copy of `tx` where, for pre-EIP-1559 transaction types, the
  ## legacy `gasPrice` is mirrored into the `maxPriorityFee` and `maxFee`
  ## fields so that later fee arithmetic can treat all txs uniformly.
  result = tx
  if TxEip1559 <= tx.txType:
    return                        # already in EIP-1559 form, nothing to do
  result.maxPriorityFee = tx.gasPrice
  result.maxFee = tx.gasPrice
proc persist(pst: TxPackerStateRef)
    {.gcsafe,raises: [Defect,RlpError].} =
  ## Smart wrapper: save the accounts cache unless it was saved already
  ## (tracked via the `cleanState` flag.)
  if pst.cleanState:
    return
  pst.xp.chain.vmState.stateDB.persist(clearCache = false)
  pst.cleanState = true
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc runTx(pst: TxPackerStateRef; item: TxItemRef): GasInt
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Execute item transaction and update `vmState` book keeping. Returns the
  ## `gasUsed` after executing the transaction.
  var
    tx = item.tx.eip1559TxNormalization
  let
    fork = pst.xp.chain.nextFork
    baseFee = pst.xp.chain.head.baseFee
  if FkLondon <= fork:
    # effective gas price: tip on top of the base fee, capped by `maxFee`
    tx.gasPrice = min(tx.maxPriorityFee + baseFee.truncate(int64), tx.maxFee)

  safeExecutor "tx_packer.runTx":
    # Execute transaction, may return a wildcard `Exception`
    result = tx.txCallEvm(item.sender, pst.xp.chain.vmState, fork)

  # accounts cache is dirty now, see `persist()`
  pst.cleanState = false
  doAssert 0 <= result
proc runTxCommit(pst: TxPackerStateRef; item: TxItemRef; gasBurned: GasInt)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Book keeping after executing argument `item` transaction in the VM. The
  ## function returns the next number of items `nItems+1`.
  ##
  ## Credits the miner with the tip, applies self-destructs and EIP-158/161
  ## state clearing, stores the receipt, extends the txs trie and finally
  ## moves `item` into the `packed` bucket.
  let
    xp = pst.xp
    vmState = xp.chain.vmState
    inx = xp.txDB.byStatus.eq(txItemPacked).nItems  # receipts/trie index
    gasTip = item.tx.effectiveGasTip(xp.chain.head.baseFee)

  # The gas tip cannot get negative as all items in the `staged` bucket
  # are vetted for profitability before entering that bucket.
  assert 0 <= gasTip
  let reward = gasBurned.u256 * gasTip.uint64.u256
  vmState.stateDB.addBalance(xp.chain.miner, reward)

  # Update account database
  vmState.mutateStateDB:
    for deletedAccount in vmState.selfDestructs:
      db.deleteAccount deletedAccount

    if FkSpurious <= xp.chain.nextFork:
      vmState.touchedAccounts.incl(xp.chain.miner)

      # EIP158/161 state clearing
      for account in vmState.touchedAccounts:
        if db.accountExists(account) and db.isEmptyAccount(account):
          debug "state clearing", account
          db.deleteAccount account

  if vmState.generateWitness:
    vmState.stateDB.collectWitnessData()

  # Save accounts via persist() is not needed unless the fork is smaller
  # than `FkByzantium` in which case, the `rootHash()` function is called
  # by `makeReceipt()`. As the `rootHash()` function asserts unconditionally
  # that the account cache has been saved, the `persist()` call is
  # obligatory here.
  if xp.chain.nextFork < FkByzantium:
    pst.persist

  # Update receipts sequence
  if vmState.receipts.len <= inx:
    vmState.receipts.setLen(inx + receiptsExtensionSize)
  vmState.cumulativeGasUsed += gasBurned
  vmState.receipts[inx] = vmState.makeReceipt(item.tx.txType)

  # Update txRoot
  pst.tr.put(rlp.encode(inx), rlp.encode(item.tx))

  # Add the item to the `packed` bucket. This implicitely increases the
  # receipts index `inx` at the next visit of this function.
  discard xp.txDB.reassign(item,txItemPacked)
# ------------------------------------------------------------------------------
# Private functions: packer packerVmExec() helpers
# ------------------------------------------------------------------------------
proc vmExecInit(xp: TxPoolRef): TxPackerStateRef
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Set up a fresh packer state: empties the `packed` bucket, applies the
  ## DAO fork balance transfer if the block being packed is the fork block,
  ## and snapshots the miner balance for later profit calculation.
  # Flush `packed` bucket
  xp.bucketFlushPacked

  xp.chain.maxMode = (packItemsMaxGasLimit in xp.pFlags)

  if xp.chain.config.daoForkSupport and
     xp.chain.config.daoForkBlock == xp.chain.head.blockNumber + 1:
    # head+1 is the block currently being packed
    xp.chain.vmState.mutateStateDB:
      db.applyDAOHardFork()

  TxPackerStateRef( # return value
    xp: xp,
    tr: newMemoryDB().initHexaryTrie,
    balance: xp.chain.vmState.readOnlyStateDB.getBalance(xp.chain.miner))
proc vmExecGrabItem(pst: TxPackerStateRef; item: TxItemRef): Result[bool,void]
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Greedily collect & compact items as long as the accumulated `gasLimit`
  ## values are below the maximum block size.
  ##
  ## Returns `ok(true)` to continue with the next nonce, `ok(false)` to skip
  ## to the next account, and `err()` to stop collecting altogether.
  let
    xp = pst.xp
    vmState = xp.chain.vmState

  # Validate transaction relative to the current vmState
  if not xp.classifyValidatePacked(vmState, item):
    return ok(false) # continue with next account

  let
    accTx = vmState.stateDB.beginSavepoint
    gasUsed = pst.runTx(item) # this is the crucial part, running the tx

  # Find out what to do next: accepting this tx or trying the next account
  if not xp.classifyPacked(vmState.cumulativeGasUsed, gasUsed):
    vmState.stateDB.rollback(accTx)
    if xp.classifyPackedNext(vmState.cumulativeGasUsed, gasUsed):
      return ok(false) # continue with next account
    return err() # otherwise stop collecting

  # Commit account state DB
  vmState.stateDB.commit(accTx)

  vmState.stateDB.persist(clearCache = false)
  # NOTE(review): `midRoot` is never read -- looks like a leftover from a
  # sanity check; consider removing or re-instating the check
  let midRoot = vmState.stateDB.rootHash

  # Finish book-keeping and move item to `packed` bucket
  pst.runTxCommit(item, gasUsed)

  ok(true) # fetch the very next item
proc vmExecCommit(pst: TxPackerStateRef)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Finalise the packed pseudo block: pay the mining reward (PoW engines
  ## only), persist the accounts cache, and publish receipts, `txRoot`,
  ## `stateRoot` and the profit/reward figures on the chain descriptor.
  let
    xp = pst.xp
    vmState = xp.chain.vmState

  if not vmState.chainDB.config.poaEngine:
    let
      number = xp.chain.head.blockNumber + 1 # number of the block being packed
      uncles: seq[BlockHeader] = @[]         # no uncles yet
    # fix: was `number + 1` which over-states the mined block number; this is
    # harmless while `uncles` is empty but wrong once uncles are supported
    vmState.calculateReward(xp.chain.miner, number, uncles)

  # Reward beneficiary
  vmState.mutateStateDB:
    if vmState.generateWitness:
      db.collectWitnessData()
    # Finish up, then vmState.stateDB.rootHash may be accessed
    db.persist(ClearCache in vmState.flags)

  # Update flexi-array, set proper length
  let nItems = xp.txDB.byStatus.eq(txItemPacked).nItems
  vmState.receipts.setLen(nItems)

  xp.chain.receipts = vmState.receipts
  xp.chain.txRoot = pst.tr.rootHash
  xp.chain.stateRoot = vmState.stateDB.rootHash

  proc balanceDelta: Uint256 =
    ## Miner balance increase since `vmExecInit()` (zero if it decreased)
    let postBalance = vmState.readOnlyStateDB.getBalance(xp.chain.miner)
    if pst.balance < postBalance:
      return postBalance - pst.balance

  xp.chain.profit = balanceDelta()
  xp.chain.reward = balanceDelta()
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc packerVmExec*(xp: TxPoolRef) {.gcsafe,raises: [Defect,CatchableError].} =
  ## Rebuild `packed` bucket by selection items from the `staged` bucket
  ## after executing them in the VM.
  ##
  ## All VM execution happens inside a throw-away DB transaction, so the
  ## block chain state is left unchanged when this function returns.
  let dbTx = xp.chain.db.db.beginTransaction
  defer: dbTx.dispose()

  var pst = xp.vmExecInit

  block loop:
    for (_,nonceList) in pst.xp.txDB.decAccount(txItemStaged):

      block account:
        for item in nonceList.incNonce:

          let rc = pst.vmExecGrabItem(item)
          if rc.isErr:
            break loop # stop
          if not rc.value:
            break account # continue with next account

  pst.vmExecCommit
  # Block chain will roll back automatically
# Block chain will roll back automatically
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,70 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Transaction Pool Tasklet: Recover From Waste Basket or Create
## =============================================================
##
import
../tx_desc,
../tx_info,
../tx_item,
../tx_tabs,
chronicles,
eth/[common, keys],
stew/keyed_queue
{.push raises: [Defect].}
logScope:
topics = "tx-pool recover item"
let
  # All-zero (default initialised) address: marks a waste-basket item that
  # carries no sender meta data, see `recoverItem()` below.
  nullSender = block:
    var rc: EthAddress
    rc
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc recoverItem*(xp: TxPoolRef; tx: var Transaction;
                  status = txItemPending; info = ""): Result[TxItemRef,TxInfo]
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Recover item from waste basket or create new. It is an error if the item
  ## is in the buckets database, already.
  let itemID = tx.itemID

  # An item already held in one of the buckets cannot be recovered
  if xp.txDB.byItemID.hasKey(itemID):
    return err(txInfoErrAlreadyKnown)

  # First choice: recycle the tx from the waste basket
  let wasteRc = xp.txDB.byRejects.delete(itemID)
  if wasteRc.isOK:
    let recycled = wasteRc.value.data
    # a waste tx without meta-data cannot be re-used
    if recycled.sender != nullSender:
      let itemInfo = if info != "": info else: recycled.info
      recycled.init(status, itemInfo)
      return ok(recycled)

  # Otherwise create a fresh item, e.g. when recycling was impossible
  let freshRc = TxItemRef.new(tx, itemID, status, info)
  if freshRc.isOk:
    return ok(freshRc.value)
  err(txInfoErrInvalidSender)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -41,4 +41,5 @@ cliBuilder:
./test_clique, ./test_clique,
./test_pow, ./test_pow,
./test_configuration, ./test_configuration,
./test_keyed_queue_rlp ./test_keyed_queue_rlp,
./test_txpool

910
tests/test_txpool.nim Normal file
View File

@ -0,0 +1,910 @@
# Nimbus
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
std/[algorithm, os, random, sequtils, strformat, strutils, tables, times],
../nimbus/[chain_config, config, db/db_chain, vm_state, vm_types],
../nimbus/p2p/[chain, clique, executor],
../nimbus/utils/[tx_pool, tx_pool/tx_item],
./test_txpool/[helpers, setup, sign_helper],
chronos,
eth/[common, keys, p2p],
stew/[keyed_queue, sorted_set],
stint,
unittest2
type
  CaptureSpecs = tuple
    ## Describes a gzipped capture file and how much of it to use
    network: NetworkID
    file: string
    numBlocks, minBlockTxs, numTxs: int

const
  prngSeed = 42           # fixed seed => deterministic test runs

  baseDir = [".", "tests", ".." / "tests", $DirSep] # path containg repo
  repoDir = ["replay", "status"] # alternative repos

  goerliCapture: CaptureSpecs = (
    network: GoerliNet,
    file: "goerli68161.txt.gz",
    numBlocks: 22000, # block chain prequel
    minBlockTxs: 300, # minimum txs in imported blocks
    numTxs: 840) # txs following (not in block chain)

  loadSpecs = goerliCapture

  # 75% <= #local/#remote <= 1/75%
  # note: by law of big numbers, the ratio will exceed any upper or lower
  #       on a +1/-1 random walk if running long enough (with expectation
  #       value 0)
  randInitRatioBandPC = 75

  # 95% <= #remote-deleted/#remote-present <= 1/95%
  deletedItemsRatioBandPC = 95

  # 70% <= #addr-local/#addr-remote <= 1/70%
  # note: this ratio might vary due to timing race conditions
  addrGroupLocalRemotePC = 70

  # With a large enough block size, decreasing it should not decrease the
  # profitability (very much) as the the number of blocks availabe increases
  # (and a better choice might be available?) A good value for the next
  # parameter should be above 100%.
  decreasingBlockProfitRatioPC = 92

  # test block chain
  networkId = GoerliNet # MainNet

var
  minGasPrice = GasPrice.high
  maxGasPrice = GasPrice.low

  prng = prngSeed.initRand

  # to be set up in runTxLoader()
  statCount: array[TxItemStatus,int] # per status bucket
  txList: seq[TxItemRef]
  effGasTips: seq[GasPriceEx]

  # running block chain
  bcDB: BaseChainDB
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
proc randStatusRatios: seq[int] =
  ## For each status bucket, return the percentage ratio of the previous
  ## bucket's fill count relative to this one (or `int.high` when this
  ## bucket is empty.) Used to verify that `randStatus()` spread the item
  ## statuses evenly enough.
  for n in 1 .. statCount.len:
    let
      inx = (n mod statCount.len).TxItemStatus # wraps to bucket 0 last turn
      prv = (n - 1).TxItemStatus
    if statCount[inx] == 0:
      result.add int.high
    else:
      result.add (statCount[prv] * 100 / statCount[inx]).int
proc randStatus: TxItemStatus =
  ## Pseudo-random item status; also records the pick in the global
  ## `statCount[]` tally for later distribution checks.
  result = prng.rand(TxItemStatus.high.ord).TxItemStatus
  statCount[result].inc
template wrapException(info: string; action: untyped) =
  ## Run `action`, turning any `CatchableError` into a test failure
  ## labelled with `info`.
  try:
    action
  except CatchableError:
    raiseAssert info & " has problems: " & getCurrentExceptionMsg()
proc addOrFlushGroupwise(xp: TxPoolRef;
                         grpLen: int; seen: var seq[TxItemRef]; w: TxItemRef;
                         noisy = true): bool =
  ## Collect `w` into `seen[]`; once `grpLen` items are collected, dispose
  ## the whole group from the pool and verify the pool's accounting.
  # to be run as call back inside `itemsApply()`
  wrapException("addOrFlushGroupwise()"):
    seen.add w
    if grpLen <= seen.len:
      # clear waste basket
      discard xp.txDB.flushRejects

      # flush group-wise
      let xpLen = xp.nItems.total
      noisy.say "*** updateSeen: deleting ", seen.mapIt($it.itemID).join(" ")
      for item in seen:
        doAssert xp.txDB.dispose(item,txInfoErrUnspecified)
      doAssert xpLen == seen.len + xp.nItems.total
      doAssert seen.len == xp.nItems.disposed
      seen.setLen(0)

      # clear waste basket
      discard xp.txDB.flushRejects

    return true
proc findFilePath(file: string): string =
  ## Search the known base/repo directory combinations for `file` and
  ## return the first existing path, or a `"?unknown?"` prefixed fallback.
  for base in baseDir:
    for repo in repoDir:
      let candidate = base / repo / file
      if candidate.fileExists:
        return candidate
  "?unknown?" / file
# ------------------------------------------------------------------------------
# Test Runners
# ------------------------------------------------------------------------------
proc runTxLoader(noisy = true; capture = loadSpecs) =
  ## Import a captured block chain and collect transactions into a fresh
  ## `TxPoolRef`. Fills the module globals `bcDB`, `txList[]`, `statCount[]`
  ## and the min/max gas price markers used by the other test suites.
  let
    elapNoisy = noisy
    veryNoisy = false # noisy
    fileInfo = capture.file.splitFile.name.split(".")[0]
    filePath = capture.file.findFilePath

  # Reset/initialise
  statCount.reset
  txList.reset
  effGasTips.reset
  bcDB = capture.network.blockChainForTesting

  suite &"TxPool: Transactions from {fileInfo} capture":
    var
      xp: TxPoolRef
      nTxs: int

    test &"Import {capture.numBlocks.toKMG} blocks + {capture.minBlockTxs} txs"&
        &" and collect {capture.numTxs} txs for pooling":

      elapNoisy.showElapsed("Total collection time"):
        (xp, nTxs) = bcDB.toTxPool(file = filePath,
                                   getStatus = randStatus,
                                   loadBlocks = capture.numBlocks,
                                   minBlockTxs = capture.minBlockTxs,
                                   loadTxs = capture.numTxs,
                                   noisy = veryNoisy)

      # Make sure that sample extraction from file was ok
      check capture.minBlockTxs <= nTxs
      check capture.numTxs == xp.nItems.total

      # Set txs to pseudo random status
      check xp.verify.isOK
      xp.setItemStatusFromInfo

      # Boundary conditions regarding nonces might be violated by running
      # setItemStatusFromInfo() => xp.txDB.verify() rather than xp.verify()
      check xp.txDB.verify.isOK

      check txList.len == 0
      check xp.nItems.disposed == 0

      noisy.say "***",
        "Latest item: <", xp.txDB.byItemID.last.value.data.info, ">"

      # make sure that the block chain was initialised
      check capture.numBlocks.u256 <= bcDB.getCanonicalHead.blockNumber

      check xp.nItems.total == foldl(@[0]&statCount.toSeq, a+b)
      # ^^^ sum up statCount[] values

      # make sure that PRNG did not go bonkers
      for statusRatio in randStatusRatios():
        check randInitRatioBandPC < statusRatio
        check statusRatio < (10000 div randInitRatioBandPC)

      # Load txList[]
      txList = xp.toItems
      check txList.len == xp.nItems.total

      elapNoisy.showElapsed("Load min/max gas prices"):
        for item in txList:
          if item.tx.gasPrice < minGasPrice and 0 < item.tx.gasPrice:
            minGasPrice = item.tx.gasPrice.GasPrice
          if maxGasPrice < item.tx.gasPrice.GasPrice:
            maxGasPrice = item.tx.gasPrice.GasPrice

      check 0.GasPrice <= minGasPrice
      check minGasPrice <= maxGasPrice

    test &"Concurrent job processing example":
      var log = ""

      # This test does not verify anything but rather shows how the pool
      # primitives could be used in an async context.

      proc delayJob(xp: TxPoolRef; waitMs: int) {.async.} =
        ## Queue three no-op jobs, wait, then commit them
        let n = xp.nJobs
        xp.job(TxJobDataRef(kind: txJobNone))
        xp.job(TxJobDataRef(kind: txJobNone))
        xp.job(TxJobDataRef(kind: txJobNone))
        log &= " wait-" & $waitMs & "-" & $(xp.nJobs - n)
        await chronos.milliseconds(waitMs).sleepAsync
        xp.jobCommit
        log &= " done-" & $waitMs

      # run async jobs, completion should be sorted by timeout argument
      proc runJobs(xp: TxPoolRef) {.async.} =
        let
          p1 = xp.delayJob(900)
          p2 = xp.delayJob(1)
          p3 = xp.delayJob(700)
        await p3
        await p2
        await p1

      waitFor xp.runJobs
      check xp.nJobs == 0
      check log == " wait-900-3 wait-1-3 wait-700-3 done-1 done-700 done-900"

      # Cannot rely on boundary conditions regarding nonces. So xp.verify()
      # will not work here => xp.txDB.verify()
      check xp.txDB.verify.isOK
proc runTxPoolTests(noisy = true) =
  ## Exercise pool primitives on the txs loaded by `runTxLoader()`:
  ## groupwise delete-walks, superseding txs by nonce, expiry eviction,
  ## bucket reassignment and waste-basket recycling.
  let elapNoisy = false

  suite &"TxPool: Play with pool functions and primitives":

    block:
      const groupLen = 13
      let veryNoisy = noisy and false

      test &"Load/forward walk ID queue, " &
          &"deleting groups of at most {groupLen}":
        var
          xq = bcDB.toTxPool(txList, noisy = noisy)
          seen: seq[TxItemRef]

        # Set txs to pseudo random status
        xq.setItemStatusFromInfo

        check xq.txDB.verify.isOK
        elapNoisy.showElapsed("Forward delete-walk ID queue"):
          for item in xq.txDB.byItemID.nextValues:
            if not xq.addOrFlushGroupwise(groupLen, seen, item, veryNoisy):
              break
        check xq.txDB.verify.isOK
        check seen.len == xq.nItems.total
        check seen.len < groupLen

      test &"Load/reverse walk ID queue, " &
          &"deleting in groups of at most {groupLen}":
        var
          xq = bcDB.toTxPool(txList, noisy = noisy)
          seen: seq[TxItemRef]

        # Set txs to pseudo random status
        xq.setItemStatusFromInfo

        check xq.txDB.verify.isOK
        elapNoisy.showElapsed("Revese delete-walk ID queue"):
          for item in xq.txDB.byItemID.nextValues:
            if not xq.addOrFlushGroupwise(groupLen, seen, item, veryNoisy):
              break
        check xq.txDB.verify.isOK
        check seen.len == xq.nItems.total
        check seen.len < groupLen

    block:
      var
        xq = TxPoolRef.new(bcDB,testAddress)
        testTxs: array[5,(TxItemRef,Transaction,Transaction)]

      test &"Superseding txs with sender and nonce variants":
        var
          testInx = 0
        let
          testBump = xq.priceBump
          lastBump = testBump - 1 # implies underpriced item

        # load a set of suitable txs into testTxs[]
        for n in 0 ..< txList.len:
          let
            item = txList[n]
            bump = if testInx < testTxs.high: testBump else: lastBump
            rc = item.txModPair(testInx,bump.int)
          if not rc[0].isNil:
            testTxs[testInx] = rc
            testInx.inc
            if testTxs.high < testInx:
              break

        # verify that test does not degenerate
        check testInx == testTxs.len
        check 0 < lastBump # => 0 < testBump

        # insert some txs
        for triple in testTxs:
          xq.jobAddTx(triple[1], triple[0].info)
        xq.jobCommit

        check xq.nItems.total == testTxs.len
        check xq.nItems.disposed == 0

        let infoLst = testTxs.toSeq.mapIt(it[0].info).sorted
        check infoLst == xq.toItems.toSeq.mapIt(it.info).sorted

        # re-insert modified transactions
        for triple in testTxs:
          xq.jobAddTx(triple[2], "alt " & triple[0].info)
        xq.jobCommit

        check xq.nItems.total == testTxs.len
        check xq.nItems.disposed == testTxs.len

        # last update item was underpriced, so it must not have been
        # replaced
        var altLst = testTxs.toSeq.mapIt("alt " & it[0].info)
        altLst[^1] = testTxs[^1][0].info
        check altLst.sorted == xq.toItems.toSeq.mapIt(it.info).sorted

      test &"Deleting tx => also delete higher nonces":
        let
          # From the data base, get the one before last item. This was
          # replaced earlier by the second transaction in the triple, i.e.
          # testTxs[^2][2]. FYI, the last transaction is testTxs[^1][1] as
          # it could not be replaced earlier by testTxs[^1][2].
          item = xq.getItem(testTxs[^2][2].itemID).value
          nWasteBasket = xq.nItems.disposed

        # make sure the test makes sense, nonces were 0 ..< testTxs.len
        check (item.tx.nonce + 2).int == testTxs.len

        xq.disposeItems(item)
        check xq.nItems.total + 2 == testTxs.len
        check nWasteBasket + 2 == xq.nItems.disposed

    # --------------------------

    block:
      var
        gap: Time
        nItems: int
        xq = bcDB.toTxPool(timeGap = gap,
                           nGapItems = nItems,
                           itList = txList,
                           itemsPC = 35, # arbitrary
                           delayMSecs = 100, # large enough to process
                           noisy = noisy)

      # Set txs to pseudo random status. Note that this functon will cause
      # a violation of boundary conditions regarding nonces. So database
      # integrily check needs xq.txDB.verify() rather than xq.verify().
      xq.setItemStatusFromInfo

      test &"Auto delete about {nItems} expired txs out of {xq.nItems.total}":

        check 0 < nItems
        xq.lifeTime = getTime() - gap
        xq.flags = xq.flags + {autoZombifyPacked}

        # evict and pick items from the wastbasket
        let
          disposedBase = xq.nItems.disposed
          evictedBase = evictionMeter.value
          impliedBase = impliedEvictionMeter.value
        xq.jobCommit(true)
        let
          disposedItems = xq.nItems.disposed - disposedBase
          evictedItems = (evictionMeter.value - evictedBase).int
          impliedItems = (impliedEvictionMeter.value - impliedBase).int

        check xq.txDB.verify.isOK
        check disposedItems + disposedBase + xq.nItems.total == txList.len
        check 0 < evictedItems
        check evictedItems <= disposedItems
        check disposedItems == evictedItems + impliedItems

        # make sure that deletion was sort of expected
        let deleteExpextRatio = (evictedItems * 100 / nItems).int
        check deletedItemsRatioBandPC < deleteExpextRatio
        check deleteExpextRatio < (10000 div deletedItemsRatioBandPC)

    # --------------------

    block:
      var
        xq = bcDB.toTxPool(txList, noisy = noisy)
        maxAddr: EthAddress
        nAddrItems = 0

        nAddrPendingItems = 0
        nAddrStagedItems = 0
        nAddrPackedItems = 0

        fromNumItems = nAddrPendingItems
        fromBucketInfo = "pending"
        fromBucket = txItemPending
        toBucketInfo = "staged"
        toBucket = txItemStaged

      # Set txs to pseudo random status
      xq.setItemStatusFromInfo

      # find address with max number of transactions
      for (address,nonceList) in xq.txDB.incAccount:
        if nAddrItems < nonceList.nItems:
          maxAddr = address
          nAddrItems = nonceList.nItems

      # count items
      nAddrPendingItems = xq.txDB.bySender.eq(maxAddr).eq(txItemPending).nItems
      nAddrStagedItems = xq.txDB.bySender.eq(maxAddr).eq(txItemStaged).nItems
      nAddrPackedItems = xq.txDB.bySender.eq(maxAddr).eq(txItemPacked).nItems

      # find the largest from-bucket
      if fromNumItems < nAddrStagedItems:
        fromNumItems = nAddrStagedItems
        fromBucketInfo = "staged"
        fromBucket = txItemStaged
        toBucketInfo = "packed"
        toBucket = txItemPacked
      if fromNumItems < nAddrPackedItems:
        fromNumItems = nAddrPackedItems
        fromBucketInfo = "packed"
        fromBucket = txItemPacked
        toBucketInfo = "pending"
        toBucket = txItemPending

      let moveNumItems = fromNumItems div 2

      test &"Reassign {moveNumItems} of {fromNumItems} items "&
          &"from \"{fromBucketInfo}\" to \"{toBucketInfo}\"":

        # requite mimimum => there is a status queue with at least 2 entries
        check 3 < nAddrItems

        check nAddrPendingItems +
              nAddrStagedItems +
              nAddrPackedItems == nAddrItems
        check 0 < moveNumItems
        check 1 < fromNumItems

        var count = 0
        let nonceList = xq.txDB.bySender.eq(maxAddr).eq(fromBucket).value.data
        block collect:
          for item in nonceList.incNonce:
            count.inc
            check xq.txDB.reassign(item, toBucket)
            if moveNumItems <= count:
              break collect
        check xq.txDB.verify.isOK

        case fromBucket
        of txItemPending:
          check nAddrPendingItems - moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPending).nItems
          check nAddrStagedItems + moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemStaged).nItems
          check nAddrPackedItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPacked).nItems
        of txItemStaged:
          check nAddrStagedItems - moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemStaged).nItems
          check nAddrPackedItems + moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPacked).nItems
          check nAddrPendingItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPending).nItems
        else:
          check nAddrPackedItems - moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPacked).nItems
          check nAddrPendingItems + moveNumItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPending).nItems
          check nAddrPackedItems ==
            xq.txDB.bySender.eq(maxAddr).eq(txItemPacked).nItems

      # --------------------

      let expect = (
        xq.txDB.byStatus.eq(txItemPending).nItems,
        xq.txDB.byStatus.eq(txItemStaged).nItems,
        xq.txDB.byStatus.eq(txItemPacked).nItems)

      test &"Verify #items per bucket ({expect[0]},{expect[1]},{expect[2]})":
        let status = xq.nItems
        check expect == (status.pending,status.staged,status.packed)

      test "Recycling from waste basket":
        let
          basketPrefill = xq.nItems.disposed
          numDisposed = min(50,txList.len)
          # make sure to work on a copy of the pivot item (to see changes)
          thisItem = xq.getItem(txList[^numDisposed].itemID).value.dup

        # move to wastebasket
        xq.maxRejects = txList.len
        for n in 1 .. numDisposed:
          # use from top avoiding extra deletes (higer nonces per account)
          xq.disposeItems(txList[^n])

        # make sure that the pivot item is in the waste basket
        check xq.getItem(thisItem.itemID).isErr
        check xq.txDB.byRejects.hasKey(thisItem.itemID)
        check basketPrefill + numDisposed == xq.nItems.disposed
        check txList.len == xq.nItems.total + xq.nItems.disposed

        # re-add item
        xq.jobAddTx(thisItem.tx)
        xq.jobCommit

        # verify that the pivot item was moved out from the waste basket
        check not xq.txDB.byRejects.hasKey(thisItem.itemID)
        check basketPrefill + numDisposed == xq.nItems.disposed + 1
        check txList.len == xq.nItems.total + xq.nItems.disposed

        # verify that a new item was derived from the waste basket pivot item
        let wbItem = xq.getItem(thisItem.itemID).value
        check thisItem.info == wbItem.info
        check thisItem.timestamp < wbItem.timestamp
proc runTxPackerTests(noisy = true) =
  ## Test suite for the tx-pool block packer: derives a non-trivial base
  ## fee from the loaded `txList`, exercises bucket re-orgs, block packing,
  ## and finally persists a packed block into the chain database.
  let
    elapNoisy = true # noisy

  suite &"TxPool: Block packer tests":
    var
      ntBaseFee = 0.GasPrice   # non-trivial base fee, set by the first test
      ntNextFee = 0.GasPrice   # slightly larger fee, set by the first test

    test &"Calculate some non-trivial base fee":
      var
        xq = bcDB.toTxPool(txList, noisy = noisy)
        feesList = SortedSet[GasPriceEx,bool].init()

      # provide a sorted list of gas fees
      for item in txList:
        discard feesList.insert(item.tx.effectiveGasTip(0.GasPrice))

      let
        minKey = max(0, feesList.ge(GasPriceEx.low).value.key.int64)
        lowKey = feesList.gt(minKey.GasPriceEx).value.key.uint64
        highKey = feesList.le(GasPriceEx.high).value.key.uint64
        keyRange = highKey - lowKey
        keyStep = max(1u64, keyRange div 500_000)

      # what follows is a rather crude partitioning so that
      # * ntBaseFee partitions non-zero numbers of pending and staged txs
      # * ntNextFee decreases the number of staged txs
      ntBaseFee = (lowKey + keyStep).GasPrice

      # the following might throw an exception if the table is de-generated
      var nextKey = ntBaseFee
      for _ in [1, 2, 3]:
        let rcNextKey = feesList.gt(nextKey.GasPriceEx)
        check rcNextKey.isOK
        nextKey = rcNextKey.value.key.uint64.GasPrice

      ntNextFee = nextKey + keyStep.GasPrice

      # of course ...
      check ntBaseFee < ntNextFee

    block:
      var
        # two pools over the same txs, differing only in base fee
        xq = bcDB.toTxPool(txList, ntBaseFee, noisy)
        xr = bcDB.toTxPool(txList, ntNextFee, noisy)

      block:
        let
          pending = xq.nItems.pending
          staged = xq.nItems.staged
          packed = xq.nItems.packed

        test &"Load txs with baseFee={ntBaseFee}, "&
            &"buckets={pending}/{staged}/{packed}":
          check 0 < pending
          check 0 < staged
          check xq.nItems.total == txList.len
          check xq.nItems.disposed == 0

      block:
        let
          pending = xr.nItems.pending
          staged = xr.nItems.staged
          packed = xr.nItems.packed

        test &"Re-org txs previous buckets setting baseFee={ntNextFee}, "&
            &"buckets={pending}/{staged}/{packed}":
          check 0 < pending
          check 0 < staged
          check xr.nItems.total == txList.len
          check xr.nItems.disposed == 0

          # having the same set of txs, setting the xq database to the same
          # base fee as the xr one, the bucket fills of both database must
          # be the same after re-org
          xq.baseFee = ntNextFee
          xq.triggerReorg
          xq.jobCommit(forceMaintenance = true)

          # now, xq should look like xr
          check xq.verify.isOK
          check xq.nItems == xr.nItems

      block:
        # get some value below the middle
        let
          packPrice = ((minGasPrice + maxGasPrice).uint64 div 3).GasPrice
          lowerPrice = minGasPrice + 1.GasPrice

        test &"Packing txs, baseFee=0 minPrice={packPrice} "&
            &"targetBlockSize={xq.trgGasLimit}":
          # verify that the test does not degenerate
          check 0 < minGasPrice
          check minGasPrice < maxGasPrice

          # ignore base limit so that the `packPrice` below becomes effective
          xq.baseFee = 0.GasPrice
          check xq.nItems.disposed == 0

          # set minimum target price
          xq.minPreLondonGasPrice = packPrice
          check xq.minPreLondonGasPrice == packPrice

          # employ packer
          xq.jobCommit(forceMaintenance = true)
          xq.packerVmExec
          check xq.verify.isOK

          # verify that the test did not degenerate
          check 0 < xq.gasTotals.packed
          check xq.nItems.disposed == 0

          # assemble block from `packed` bucket
          let
            items = xq.toItems(txItemPacked)
            total = foldl(@[0.GasInt] & items.mapIt(it.tx.gasLimit), a+b)
          check xq.gasTotals.packed == total
          noisy.say "***", "1st bLock size=", total, " stats=", xq.nItems.pp

        test &"Clear and re-pack bucket":
          let
            items0 = xq.toItems(txItemPacked)
            saveState0 = foldl(@[0.GasInt] & items0.mapIt(it.tx.gasLimit), a+b)
          check 0 < xq.nItems.packed

          # re-pack bucket; should be stable (idempotent) on unchanged input
          xq.jobCommit(forceMaintenance = true)
          xq.packerVmExec
          check xq.verify.isOK

          let
            items1 = xq.toItems(txItemPacked)
            saveState1 = foldl(@[0.GasInt] & items1.mapIt(it.tx.gasLimit), a+b)
          check items0 == items1
          check saveState0 == saveState1

        test &"Delete item and re-pack bucket/w lower minPrice={lowerPrice}":
          # verify that the test does not degenerate
          check 0 < lowerPrice
          check lowerPrice < packPrice
          check 0 < xq.nItems.packed

          let
            saveStats = xq.nItems
            lastItem = xq.toItems(txItemPacked)[^1]

          # delete last item from packed bucket
          xq.disposeItems(lastItem)
          check xq.verify.isOK

          # set new minimum target price
          xq.minPreLondonGasPrice = lowerPrice
          check xq.minPreLondonGasPrice == lowerPrice

          # re-pack bucket, packer needs extra trigger because there is
          # not necessarily a buckets re-org resulting in a change
          xq.jobCommit(forceMaintenance = true)
          xq.packerVmExec
          check xq.verify.isOK

          let
            items = xq.toItems(txItemPacked)
            newTotal = foldl(@[0.GasInt] & items.mapIt(it.tx.gasLimit), a+b)
            newStats = xq.nItems
            newItem = xq.toItems(txItemPacked)[^1]

          # for sanity assert the obvious
          check 0 < xq.gasTotals.packed
          check xq.gasTotals.packed == newTotal

          # verify incremental packing
          check lastItem.info != newItem.info
          check saveStats.packed <= newStats.packed

          noisy.say "***", "2st bLock size=", newTotal, " stats=", newStats.pp

    # -------------------------------------------------

    block:
      var
        xq = bcDB.toTxPool(txList, ntBaseFee, noisy)
      let
        (nMinTxs, nTrgTxs) = (15, 15)
        (nMinAccounts, nTrgAccounts) = (1, 8)
        canonicalHead = xq.chain.db.getCanonicalHead

      test &"Back track block chain head (at least "&
          &"{nMinTxs} txs, {nMinAccounts} known accounts)":
        # get the environment of a state back in the block chain, preferably
        # at least `nTrgTxs` txs and `nTrgAccounts` known accounts
        let
          (backHeader,backTxs,accLst) = xq.getBackHeader(nTrgTxs,nTrgAccounts)
          nBackBlocks = xq.head.blockNumber - backHeader.blockNumber
          stats = xq.nItems

        # verify that the test would not degenerate
        check nMinAccounts <= accLst.len
        check nMinTxs <= backTxs.len
        noisy.say "***",
          &"back tracked block chain:" &
          &" {backTxs.len} txs, {nBackBlocks} blocks," &
          &" {accLst.len} known accounts"

        check xq.nJobs == 0                  # want cleared job queue
        check xq.jobDeltaTxsHead(backHeader) # set up tx diff jobs
        xq.head = backHeader                 # move insertion point
        xq.jobCommit                         # apply job diffs

        # make sure that all txs have been added to the pool
        # (nFailed kept for debugging; currently unused)
        let nFailed = xq.nItems.disposed - stats.disposed
        check stats.disposed == 0
        check stats.total + backTxs.len == xq.nItems.total

      test &"Run packer, profitability will not increase with block size":
        # pack against the target gas limit first ...
        xq.flags = xq.flags - {packItemsMaxGasLimit}
        xq.packerVmExec

        let
          smallerBlockProfitability = xq.profitability
          smallerBlockSize = xq.gasCumulative

        noisy.say "***", "trg-packing",
          " profitability=", xq.profitability,
          " used=", xq.gasCumulative,
          " trg=", xq.trgGasLimit,
          " slack=", xq.trgGasLimit - xq.gasCumulative

        # ... then against the maximal gas limit
        xq.flags = xq.flags + {packItemsMaxGasLimit}
        xq.packerVmExec

        noisy.say "***", "max-packing",
          " profitability=", xq.profitability,
          " used=", xq.gasCumulative,
          " max=", xq.maxGasLimit,
          " slack=", xq.maxGasLimit - xq.gasCumulative

        check smallerBlockSize < xq.gasCumulative
        check 0 < xq.profitability

        # Well, this ratio should be above 100 but might be slightly less
        # with small data samples (pathological case.)
        let blockProfitRatio =
          (((smallerBlockProfitability.uint64 * 1000) div
             (max(1u64,xq.profitability.uint64))) + 5) div 10
        check decreasingBlockProfitRatioPC <= blockProfitRatio

        noisy.say "***", "cmp",
          " increase=", xq.gasCumulative - smallerBlockSize,
          " trg/max=", blockProfitRatio, "%"

      # if true: return
      test "Store generated block in block chain database":
        # Force maximal block size. Accidentally, the latest tx should have
        # a `gasLimit` exceeding the available space on the block `gasLimit`
        # which will be checked below.
        xq.flags = xq.flags + {packItemsMaxGasLimit}

        # Invoke packer
        let blk = xq.ethBlock

        # Make sure that there are at least two txs on the packed block so
        # this test does not degenerate.
        check 1 < xq.chain.receipts.len

        # find the last tx whose gasLimit overlaps the block's gasUsed
        var overlap = -1
        for n in countDown(blk.txs.len - 1, 0):
          let total = xq.chain.receipts[n].cumulativeGasUsed
          if blk.header.gasUsed < total + blk.txs[n].gasLimit:
            overlap = n
            break

        noisy.say "***",
          "overlap=#", overlap,
          " tx=#", blk.txs.len,
          " gasUsed=", blk.header.gasUsed,
          " gasLimit=", blk.header.gasLimit

        if 0 <= overlap:
          let
            n = overlap
            mostlySize = xq.chain.receipts[n].cumulativeGasUsed
          noisy.say "***", "overlap",
            " size=", mostlySize + blk.txs[n].gasLimit - blk.header.gasUsed

        let
          poa = bcDB.newClique
          bdy = BlockBody(transactions: blk.txs)
          hdr = block:
            var rc = blk.header
            rc.gasLimit = blk.header.gasUsed
            rc.testKeySign

        # Make certain that some tx was set up so that its gasLimit overlaps
        # with the total block size. Of course, running it in the VM will burn
        # much less than permitted so this block will be accepted.
        check 0 < overlap

        # Test low-level function for adding the new block to the database
        xq.chain.maxMode = (packItemsMaxGasLimit in xq.flags)
        xq.chain.clearAccounts
        check xq.chain.vmState.processBlock(poa, hdr, bdy).isOK

        # Re-allocate using VM environment from `persistBlocks()`
        check BaseVMState.new(hdr, bcDB).processBlock(poa, hdr, bdy).isOK

        # This should not have changed
        check canonicalHead == xq.chain.db.getCanonicalHead

        # Using the high-level library function, re-append the block while
        # turning off header verification.
        let c = bcDB.newChain(extraValidation = false)
        check c.persistBlocks(@[hdr], @[bdy]).isOK

        # The canonical head will be set to hdr if it scores high enough
        # (see implementation of db_chain.persistHeaderToDb()).
        let
          canonScore = xq.chain.db.getScore(canonicalHead.blockHash)
          headerScore = xq.chain.db.getScore(hdr.blockHash)

        if canonScore < headerScore:
          # Note that the updated canonical head is equivalent to hdr but not
          # necessarily binary equal.
          check hdr.blockHash == xq.chain.db.getCanonicalHead.blockHash
        else:
          check canonicalHead == xq.chain.db.getCanonicalHead
# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------
proc txPoolMain*(noisy = defined(debug)) =
  ## Entry point: run the loader first (it fills the shared tx list),
  ## then the pool and packer test suites.
  runTxLoader(noisy)
  runTxPoolTests(noisy)
  runTxPackerTests(noisy)
when isMainModule:
  const
    noisy = defined(debug)
    # alternative capture specs, kept for manual selection below
    capts0: CaptureSpecs = goerliCapture
    capts1: CaptureSpecs = (GoerliNet, "goerli504192.txt.gz", 30000, 500, 1500)
    # Note: mainnet has the leading 45k blocks without any transactions
    capts2: CaptureSpecs = (MainNet, "mainnet843841.txt.gz", 30000, 500, 1500)

  # run the full sequence against the mainnet capture
  noisy.runTxLoader(capture = capts2)
  noisy.runTxPoolTests
  true.runTxPackerTests

  #noisy.runTxLoader(dir = ".")
  #noisy.runTxPoolTests
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,251 @@
# Nimbus
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
std/[strformat, sequtils, strutils, times],
../../nimbus/utils/tx_pool/[tx_chain, tx_desc, tx_gauge, tx_item, tx_tabs],
../../nimbus/utils/tx_pool/tx_tasks/[tx_packer, tx_recover],
../replay/undump,
eth/[common, keys],
stew/[keyed_queue, sorted_set],
stint
# Make sure that the runner can stay on public view without the need
# to import `tx_pool/*` sub-modules
export
tx_chain.TxChainGasLimits,
tx_chain.`maxMode=`,
tx_chain.clearAccounts,
tx_chain.db,
tx_chain.limits,
tx_chain.nextFork,
tx_chain.profit,
tx_chain.receipts,
tx_chain.reward,
tx_chain.vmState,
tx_desc.chain,
tx_desc.txDB,
tx_desc.verify,
tx_gauge,
tx_packer.packerVmExec,
tx_recover.recoverItem,
tx_tabs.TxTabsRef,
tx_tabs.any,
tx_tabs.decAccount,
tx_tabs.dispose,
tx_tabs.eq,
tx_tabs.flushRejects,
tx_tabs.gasLimits,
tx_tabs.ge,
tx_tabs.gt,
tx_tabs.incAccount,
tx_tabs.incNonce,
tx_tabs.le,
tx_tabs.len,
tx_tabs.lt,
tx_tabs.nItems,
tx_tabs.reassign,
tx_tabs.reject,
tx_tabs.verify,
undumpNextGroup
const
  # pretty printing: one-character tags used by the dump helpers
  localInfo* = block:
    var tab: array[bool,string]
    tab[false] = "R"
    tab[true] = "L"
    tab

  statusInfo* = block:
    var tab: array[TxItemStatus,string]
    tab[txItemPending] = "*"
    tab[txItemStaged] = "S"
    tab[txItemPacked] = "P"
    tab
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
proc joinXX(s: string): string =
  ## Abbreviate a hex-digit string longer than 30 characters: keep an
  ## 8-digit head (left-padded with '0' for odd lengths), the rounded-up
  ## byte count in "..(n).." form, and the 16-digit tail.
  if s.len <= 30:
    return s
  let head =
    if (s.len and 1) == 0: s[0 ..< 8]
    else: "0" & s[0 ..< 7]
  let nBytes = (s.len + 1) div 2
  head & "..(" & $nBytes & ").." & s[s.len - 16 ..< s.len]
proc joinXX(q: seq[string]): string =
  ## Concatenate the fragments, then abbreviate via the string overload.
  joinXX(q.join(""))
proc toXX[T](s: T): string =
  ## Lower-case hex rendering with leading zero digits removed.
  ##
  ## Fix: the previous `strip(leading=true, chars={'0'})` left `trailing`
  ## at its default `true`, so trailing zeros were stripped as well
  ## (e.g. 16 rendered as "1" instead of "10").
  s.toHex.strip(leading = true, trailing = false, chars = {'0'}).toLowerAscii
proc toXX(q: Blob): string =
  ## Render a byte blob as colon-separated two-digit hex pairs.
  var parts: seq[string]
  for b in q:
    parts.add b.toHex(2)
  parts.join(":")
proc toXX(a: EthAddress): string =
  ## Address bytes as an abbreviated hex string (see `joinXX`).
  var digits: seq[string]
  for b in a:
    digits.add b.toHex(2)
  digits.joinXX
proc toXX(h: Hash256): string =
  ## Hash digest as an abbreviated hex string (see `joinXX`).
  var digits: seq[string]
  for b in h.data:
    digits.add b.toHex(2)
  digits.joinXX
proc toXX(v: int64; r,s: UInt256): string =
  ## Compact "V:R:S" signature rendering; R and S are abbreviated from
  ## their decimal string form.
  [v.toXX, ($r).joinXX, ($s).joinXX].join(":")
# ------------------------------------------------------------------------------
# Public functions, units pretty printer
# ------------------------------------------------------------------------------
proc ppMs*(elapsed: Duration): string =
  ## Render a duration in milliseconds, appending a rounded two-digit
  ## fraction (units of 10 microseconds) when the remainder is non-zero.
  let
    wholeMs = elapsed.inMilliseconds
    restNs = elapsed.inNanoseconds mod 1_000_000
  result = $wholeMs
  if restNs != 0:
    let frac = (restNs + 5_000i64) div 10_000i64
    result.add &".{frac:02}"
  result.add "ms"
proc ppSecs*(elapsed: Duration): string =
  ## Render a duration in seconds, appending a rounded two-digit
  ## centi-second fraction when the sub-second remainder is non-zero.
  let
    wholeSecs = elapsed.inSeconds
    restNs = elapsed.inNanoseconds mod 1_000_000_000
  result = $wholeSecs
  if restNs != 0:
    let frac = (restNs + 5_000_000i64) div 10_000_000i64
    result.add &".{frac:02}"
  result.add "s"
proc toKMG*[T](s: T): string =
  ## Collapse trailing "000" groups of a number's string form into the
  ## suffix ladder K, M, G, T, P, E, Z, Y; stops at the first group that
  ## does not divide cleanly (e.g. 1500 stays "1500", 3000000 -> "3M").
  proc replaceTail(txt: var string; tail, suffix: string): bool =
    ## Replace `tail` at the end of `txt` by `suffix`; false when absent
    ## (the tail must be a proper suffix, not the whole string).
    if tail.len < txt.len and txt.endsWith(tail):
      txt.setLen(txt.len - tail.len)
      txt.add suffix
      result = true

  result = $s
  for (tail, suffix) in [("000", "K"), ("000K", "M"), ("000M", "G"),
                         ("000G", "T"), ("000T", "P"), ("000P", "E"),
                         ("000E", "Z"), ("000Z", "Y")]:
    if not result.replaceTail(tail, suffix):
      break
# ------------------------------------------------------------------------------
# Public functions, pretty printer
# ------------------------------------------------------------------------------
proc pp*(a: BlockNonce): string =
  ## Nonce bytes as one lower-case hex string.
  var s = ""
  for b in a:
    s.add b.toHex(2)
  s.toLowerAscii
proc pp*(a: EthAddress): string =
  ## Short address form: hex digits 32..39 of the 40-digit rendering
  ## (i.e. bytes 16..19), lower-cased.
  var s = ""
  for b in a:
    s.add b.toHex(2)
  s[32 .. 39].toLowerAscii
proc pp*(a: Hash256): string =
  ## Short hash form: the last eight hex digits, lower-cased.
  var s = ""
  for b in a.data:
    s.add b.toHex(2)
  s[56 .. 63].toLowerAscii
proc pp*(q: seq[(EthAddress,int)]): string =
  ## Bracketed, comma-separated list of short-address:count pairs.
  var parts: seq[string]
  for w in q:
    parts.add &"{w[0].pp}:{w[1]:03d}"
  "[" & parts.join(",") & "]"
proc pp*(w: TxItemStatus): string =
  ## Status enum name with the "txItem" prefix removed.
  ($w).replace("txItem", "")
proc pp*(tx: Transaction): string =
  ## Pretty print transaction (use for debugging).
  ## Emits a parenthesised, comma-separated field list; fields with
  ## zero/empty values are omitted (except txType, nonce and VRS).
  result = "(txType=" & $tx.txType

  if tx.chainId.uint64 != 0:
    result &= ",chainId=" & $tx.chainId.uint64

  result &= ",nonce=" & tx.nonce.toXX
  if tx.gasPrice != 0:
    result &= ",gasPrice=" & tx.gasPrice.toKMG
  if tx.maxPriorityFee != 0:
    result &= ",maxPrioFee=" & tx.maxPriorityFee.toKMG
  if tx.maxFee != 0:
    result &= ",maxFee=" & tx.maxFee.toKMG
  if tx.gasLimit != 0:
    result &= ",gasLimit=" & tx.gasLimit.toKMG
  if tx.to.isSome:
    result &= ",to=" & tx.to.get.toXX
  if tx.value != 0:
    result &= ",value=" & tx.value.toKMG
  if 0 < tx.payload.len:
    result &= ",payload=" & tx.payload.toXX
  if 0 < tx.accessList.len:
    result &= ",accessList=" & $tx.accessList

  result &= ",VRS=" & tx.V.toXX(tx.R,tx.S)
  result &= ")"
proc pp*(w: TxItemRef): string =
  ## Pretty print a pool item (use for debugging); prepends timestamp,
  ## hash and status to the fields of the embedded transaction.
  let txText = w.tx.pp
  result = "(timeStamp="
  result.add ($w.timeStamp).replace(' ', '_')
  result.add ",hash="
  result.add w.itemID.toXX
  result.add ",status="
  result.add w.status.pp
  result.add ","
  result.add txText[1 ..< txText.len]
proc pp*(txs: openArray[Transaction]; pfx = ""): string =
  ## Pretty print a transaction list, breaking the field lists onto
  ## separate lines indented by `pfx`.
  var joined = ""
  if 0 < txs.len:
    joined = "[" & txs[0].pp
    for n in 1 ..< txs.len:
      joined.add ";"
      joined.add txs[n].pp
    joined.add "]"
  joined.multiReplace([
    (",", &",\n {pfx}"),
    (";", &",\n {pfx}")])
proc pp*(txs: openArray[Transaction]; pfxLen: int): string =
  ## Variant taking a numeric indent width instead of a prefix string.
  pp(txs, repeat(' ', pfxLen))
proc pp*(w: TxTabsItemsCount): string =
  ## Bucket fill counts as "pending/staged/packed:total/disposed".
  $w.pending & "/" & $w.staged & "/" & $w.packed &
    ":" & $w.total & "/" & $w.disposed
proc pp*(w: TxTabsGasTotals): string =
  ## Gas totals per bucket as "pending/staged/packed".
  $w.pending & "/" & $w.staged & "/" & $w.packed
proc pp*(w: TxChainGasLimits): string =
  ## Gas limit ladder: min, low-water/target, high-water/max.
  &"min={w.minLimit} trg={w.lwmLimit}:{w.trgLimit} max={w.hwmLimit}:{w.maxLimit}"
# ------------------------------------------------------------------------------
# Public functions, other
# ------------------------------------------------------------------------------
proc isOK*(rc: ValidationResult): bool =
  ## Shorthand test against `ValidationResult.OK`.
  ValidationResult.OK == rc
proc toHex*(acc: EthAddress): string =
  ## Address as a plain 40-digit hex string (no separators; digit case
  ## follows `strutils.toHex`, i.e. upper-case).
  result = ""
  for b in acc:
    result.add b.toHex(2)
template showElapsed*(noisy: bool; info: string; code: untyped) =
  ## Run `code` and, when `noisy` is set, echo the wall-clock time it
  ## took, rendered in seconds if at least 1s, otherwise milliseconds.
  ## The measured `Duration` is injected into the caller's scope as `elpd`.
  let start = getTime()
  code
  if noisy:
    let elpd {.inject.} = getTime() - start
    if 0 < elpd.inSeconds:
      echo "*** ", info, &": {elpd.ppSecs:>4}"
    else:
      echo "*** ", info, &": {elpd.ppMs:>4}"
proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  ## Conditional console logger. With no args the prefix itself is the
  ## message (behind a literal "*** " marker); otherwise a separating
  ## space is inserted unless `pfx` already ends in one.
  if not noisy:
    return
  if args.len == 0:
    echo "*** ", pfx
    return
  let msg = args.toSeq.join
  if 0 < pfx.len and pfx[^1] != ' ':
    echo pfx, " ", msg
  else:
    echo pfx, msg
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

238
tests/test_txpool/setup.nim Normal file
View File

@ -0,0 +1,238 @@
# Nimbus
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
std/[algorithm, os, sequtils, strformat, tables, times],
../../nimbus/[config, chain_config, constants, genesis],
../../nimbus/db/db_chain,
../../nimbus/p2p/chain,
../../nimbus/utils/[ec_recover, tx_pool],
../../nimbus/utils/tx_pool/[tx_chain, tx_item],
./helpers,
./sign_helper,
eth/[common, keys, p2p, trie/db],
stew/[keyed_queue],
stint
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc setStatus(xp: TxPoolRef; item: TxItemRef; status: TxItemStatus)
    {.gcsafe,raises: [Defect,CatchableError].} =
  ## Move `item` into the `status` bucket; no-op when already there.
  if item.status == status:
    return
  discard xp.txDB.reassign(item, status)
proc importBlocks(c: Chain; h: seq[BlockHeader]; b: seq[BlockBody]): int =
  ## Persist the header/body lists into the chain and return the number
  ## of transactions contained in the imported bodies. Aborts via
  ## `raiseAssert` when persisting fails.
  if c.persistBlocks(h,b) != ValidationResult.OK:
    raiseAssert "persistBlocks() failed at block #" & $h[0].blockNumber
  b.mapIt(it.transactions.len).foldl(a + b, 0)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc blockChainForTesting*(network: NetworkID): BaseChainDB =
  ## Create a memory-backed chain database initialised with the given
  ## network's parameters and an empty (genesis-only) chain.
  let db = newBaseChainDB(
    newMemoryDb(),
    id = network,
    params = network.networkParams)
  db.populateProgress
  db.initializeEmptyDB
  db
proc toTxPool*(
    db: BaseChainDB;                  ## to be modified
    file: string;                     ## input, file and transactions
    getStatus: proc(): TxItemStatus;  ## input, random function
    loadBlocks: int;                  ## load at most this many blocks
    minBlockTxs: int;                 ## load at least this many txs in blocks
    loadTxs: int;                     ## load at most this many transactions
    baseFee = 0.GasPrice;             ## initialise with `baseFee` (unless 0)
    noisy: bool): (TxPoolRef, int) =
  ## Replay a dumped block chain from `file`: leading blocks are imported
  ## into `db` (until `loadBlocks`/`minBlockTxs` are satisfied), after
  ## which transactions are fed one-by-one into a freshly created pool.
  ## Returns the pool and the number of txs imported via blocks.
  var
    txCount = 0
    chainNo = 0
    chainDB = db.newChain
    nTxs = 0

  doAssert not db.isNil

  result[0] = TxPoolRef.new(db,testAddress)
  result[0].baseFee = baseFee

  for chain in file.undumpNextGroup:
    let leadBlkNum = chain[0][0].blockNumber
    chainNo.inc

    if loadTxs <= txCount:
      break

    # Verify Genesis
    if leadBlkNum == 0.u256:
      doAssert chain[0][0] == db.getBlockHeader(0.u256)
      continue

    # keep importing whole blocks until both thresholds are reached
    if leadBlkNum < loadBlocks.u256 or nTxs < minBlockTxs:
      nTxs += chainDB.importBlocks(chain[0],chain[1])
      continue

    # Import transactions
    for inx in 0 ..< chain[0].len:
      let
        num = chain[0][inx].blockNumber
        txs = chain[1][inx].transactions

      # Continue importing up until first non-trivial block
      if txCount == 0 and txs.len == 0:
        nTxs += chainDB.importBlocks(@[chain[0][inx]],@[chain[1][inx]])
        continue

      # Load transactions, one-by-one
      for n in 0 ..< min(txs.len, loadTxs - txCount):
        txCount.inc
        let
          status = statusInfo[getStatus()]
          info = &"{txCount} #{num}({chainNo}) {n}/{txs.len} {status}"
        noisy.showElapsed(&"insert: {info}"):
          result[0].jobAddTx(txs[n], info)

      if loadTxs <= txCount:
        break

  result[0].jobCommit
  result[1] = nTxs
proc toTxPool*(
    db: BaseChainDB;              ## to be modified, initialiser for `TxPool`
    itList: var seq[TxItemRef];   ## import items into new `TxPool` (read only)
    baseFee = 0.GasPrice;         ## initialise with `baseFee` (unless 0)
    noisy = true): TxPoolRef =
  ## Build a pool on top of `db` and replay each item's tx/info into it;
  ## every item must be accepted (checked via `doAssert`).
  doAssert not db.isNil

  let xp = TxPoolRef.new(db,testAddress)
  xp.baseFee = baseFee
  xp.maxRejects = itList.len

  noisy.showElapsed(&"Loading {itList.len} transactions"):
    for n in 0 ..< itList.len:
      xp.jobAddTx(itList[n].tx, itList[n].info)
    xp.jobCommit
  doAssert xp.nItems.total == itList.len

  xp
proc toTxPool*(
    db: BaseChainDB;
    itList: seq[TxItemRef];
    baseFee = 0.GasPrice;
    noisy = true): TxPoolRef =
  ## Immutable-argument convenience wrapper: copies `itList` and
  ## delegates to the `var seq` variant.
  var itemsCopy = itList
  db.toTxPool(itemsCopy, baseFee, noisy)
proc toTxPool*(
    db: BaseChainDB;              ## to be modified, initialiser for `TxPool`
    timeGap: var Time;            ## to be set, time in the middle of time gap
    nGapItems: var int;           ## to be set, # items before time gap
    itList: var seq[TxItemRef];   ## import items into new `TxPool` (read only)
    baseFee = 0.GasPrice;         ## initialise with `baseFee` (unless 0)
    itemsPC = 30;                 ## % number of items before time gap
    delayMSecs = 200;             ## size of time gap
    noisy = true): TxPoolRef =
  ## Variant of `toTxPoolFromSeq()` with a time gap between consecutive
  ## items on the `remote` queue. After `itemsPC` percent of the items
  ## have been added, the pool is committed and the loader sleeps for
  ## `delayMSecs`; `timeGap` is set to the middle of that gap.
  doAssert not db.isNil
  doAssert 0 < itemsPC and itemsPC < 100

  result = TxPoolRef.new(db,testAddress)
  result.baseFee = baseFee
  result.maxRejects = itList.len

  let
    delayAt = itList.len * itemsPC div 100
    middleOfTimeGap = initDuration(milliSeconds = delayMSecs div 2)

  noisy.showElapsed(&"Loading {itList.len} transactions"):
    for n in 0 ..< itList.len:
      let item = itList[n]
      result.jobAddTx(item.tx, item.info)
      if delayAt == n:
        nGapItems = n # pass back value
        noisy.say &"time gap after transactions"
        let itemID = item.itemID
        # commit now so the gap item's timestamp is fixed before sleeping
        result.jobCommit
        doAssert result.nItems.disposed == 0
        timeGap = result.getItem(itemID).value.timeStamp + middleOfTimeGap
        delayMSecs.sleep

  result.jobCommit
  doAssert result.nItems.total == itList.len
  doAssert result.nItems.disposed == 0
proc toItems*(xp: TxPoolRef): seq[TxItemRef] =
  ## Snapshot of all pool items, in `byItemID` queue order.
  toSeq(xp.txDB.byItemID.nextValues)
proc toItems*(xp: TxPoolRef; label: TxItemStatus): seq[TxItemRef] =
  ## Snapshot of the items in status bucket `label`, iterating accounts
  ## via `decAccount` and nonces via `incNonce` within each account.
  for (_,nonceList) in xp.txDB.decAccount(label):
    result.add toSeq(nonceList.incNonce)
proc setItemStatusFromInfo*(xp: TxPoolRef) =
  ## Re-define status from last character of info field. Note that this might
  ## violate boundary conditions regarding nonces.
  for item in xp.toItems:
    # the info string's last char is a status tag (see `statusInfo`);
    # indexing [0] raises if no status matches that character
    let w = TxItemStatus.toSeq.filterIt(statusInfo[it][0] == item.info[^1])[0]
    xp.setStatus(item, w)
proc getBackHeader*(xp: TxPoolRef; nTxs, nAccounts: int):
    (BlockHeader, seq[Transaction], seq[EthAddress]) {.inline.} =
  ## back track the block chain for at least `nTxs` transactions and
  ## `nAccounts` sender accounts.
  ## Returns the header reached, the collected transactions (reversed,
  ## i.e. oldest block first) and the sender accounts found. Stops early
  ## when the chain runs out of ancestor blocks.
  var
    accTab: Table[EthAddress,bool]   # set of distinct sender accounts
    txsLst: seq[Transaction]         # txs collected while walking back
    backHash = xp.head.blockHash
    backHeader = xp.head
    backBody = xp.chain.db.getBlockBody(backHash)

  while true:
    # count txs and step behind last block
    txsLst.add backBody.transactions
    backHash = backHeader.parentHash
    if not xp.chain.db.getBlockHeader(backHash, backHeader) or
       not xp.chain.db.getBlockBody(backHash, backBody):
      break

    # collect accounts unless max reached
    if accTab.len < nAccounts:
      for tx in backBody.transactions:
        let rc = tx.ecRecover
        if rc.isOK:
          # only count senders already known to the pool database
          if xp.txDB.bySender.eq(rc.value).isOk:
            accTab[rc.value] = true
            if nAccounts <= accTab.len:
              break

    if nTxs <= txsLst.len and nAccounts <= accTab.len:
      break
    # otherwise get next block

  (backHeader, txsLst.reversed, toSeq(accTab.keys))
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,92 @@
# Nimbus
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
../../nimbus/constants,
../../nimbus/utils/ec_recover,
../../nimbus/utils/tx_pool/tx_item,
eth/[common, common/transaction, keys],
stew/results,
stint
const
# example from clique, signer: 658bdf435d810c91414ec09147daa6db62406379
prvKey = "9c647b8b7c4e7c3490668fb6c11473619db80c93704c70893d3813af4090c39c"
proc signature(tx: Transaction; key: PrivateKey): (int64,UInt256,UInt256) =
  ## Compute a (V,R,S) triple for `tx` signed with `key`.
  ## For legacy txs, V is either EIP-155 encoded (reconstructed from the
  ## tx's existing V — a heuristic, see below) or the classic 27/28.
  ## For any other tx type V is set to -1 so the caller can skip the tx.
  let
    hashData = tx.txHashNoSignature.data
    signature = key.sign(SkMessage(hashData)).toRaw
    v = signature[64].int64    # recovery id (0 or 1)

  result[1] = UInt256.fromBytesBE(signature[0..31])
  result[2] = UInt256.fromBytesBE(signature[32..63])

  if tx.txType == TxLegacy:
    if tx.V >= EIP155_CHAIN_ID_OFFSET:
      # just a guess which does not always work .. see `txModPair()`
      # see https://eips.ethereum.org/EIPS/eip-155
      result[0] = (tx.V and not 1'i64) or (not v and 1)
    else:
      result[0] = 27 + v
  else:
    # currently unsupported, will skip this one .. see `txModPair()`
    result[0] = -1
proc sign(tx: Transaction; key: PrivateKey): Transaction =
  ## Return a copy of `tx` with its V, R, S fields filled in from `key`.
  let (v, r, s) = tx.signature(key)
  var signed = tx
  signed.V = v
  signed.R = r
  signed.S = s
  signed
proc sign(header: BlockHeader; key: PrivateKey): BlockHeader =
  ## Return a copy of `header` with the raw signature of its block hash
  ## appended to `extraData` (clique-style sealing).
  let sigBytes = key.sign(SkMessage(header.blockHash.data)).toRaw
  result = header
  result.extraData.add sigBytes
# ------------
let
  # secret key used for all test signing (hex constant `prvKey` above)
  prvTestKey* = PrivateKey.fromHex(prvKey).value
  # public key matching `prvTestKey`
  pubTestKey* = prvTestKey.toPublicKey
  # sender/signer address derived from the public key
  testAddress* = pubTestKey.toCanonicalAddress
proc txModPair*(item: TxItemRef; nonce: int; priceBump: int):
    (TxItemRef,Transaction,Transaction) =
  ## Produce pair of modified txs, might fail => so try another one.
  ## The first returned tx is `item.tx` with `nonce` replaced, the second
  ## additionally has its gas price bumped by `priceBump` percent (rounded
  ## up). If either signed tx does not recover to `testAddress` (the V
  ## reconstruction in `signature()` is a guess), the default-initialised
  ## tuple is returned and the caller should retry with another item.
  var tx0 = item.tx
  tx0.nonce = nonce.AccountNonce

  var tx1 = tx0
  # bump the gas price by priceBump percent, rounding up
  tx1.gasPrice = (tx0.gasPrice * (100 + priceBump) + 99) div 100

  let
    tx0Signed = tx0.sign(prvTestKey)
    tx1Signed = tx1.sign(prvTestKey)
  block:
    let rc = tx0Signed.ecRecover
    if rc.isErr or rc.value != testAddress:
      return
  block:
    let rc = tx1Signed.ecRecover
    if rc.isErr or rc.value != testAddress:
      return
  (item,tx0Signed,tx1Signed)
proc testKeySign*(header: BlockHeader): BlockHeader =
  ## Sign the header and embed the signature in extra data
  ## (uses the module-wide `prvTestKey`).
  header.sign(prvTestKey)
# End