2022-01-18 14:40:02 +00:00
|
|
|
|
# Nimbus
|
2024-02-20 03:07:38 +00:00
|
|
|
|
# Copyright (c) 2018-2024 Status Research & Development GmbH
|
2022-01-18 14:40:02 +00:00
|
|
|
|
# Licensed under either of
|
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except
|
|
|
|
|
# according to those terms.
|
|
|
|
|
|
|
|
|
|
## TODO:
|
|
|
|
|
## =====
|
|
|
|
|
## * Impose a size limit to the bucket database. Which items would be removed?
|
|
|
|
|
##
|
|
|
|
|
## * There is a conceivable problem with the per-account optimisation. The
|
|
|
|
|
## algorithm chooses an account and does not stop packing until all txs
|
2022-04-08 08:38:47 +00:00
|
|
|
|
## of the account are packed or the block is full. In the latter case,
|
2022-01-18 14:40:02 +00:00
|
|
|
|
## there might be some txs left unpacked from the account which might be
|
|
|
|
|
## the most lucrative ones. Should this be tackled (see also next item)?
|
|
|
|
|
##
|
|
|
|
|
## * The classifier throws out all txs with negative gas tips. This implies
|
|
|
|
|
## that all subsequent txs must also be suspended for this account even
|
|
|
|
|
## though these following txs might be extraordinarily profitable so that
|
|
|
|
|
## packing the whole account might be woth wile. Should this be considered,
|
|
|
|
|
## somehow (see also previous item)?
|
|
|
|
|
##
|
|
|
|
|
##
|
|
|
|
|
## Transaction Pool
|
|
|
|
|
## ================
|
|
|
|
|
##
|
|
|
|
|
## The transaction pool collects transactions and holds them in a database.
|
|
|
|
|
## This database consists of the three buckets *pending*, *staged*, and
|
|
|
|
|
## *packed* and a *waste basket*. These database entities are discussed in
|
|
|
|
|
## more detail, below.
|
|
|
|
|
##
|
|
|
|
|
## At some point, there will be some transactions in the *staged* bucket.
|
|
|
|
|
## Upon request, the pool will pack as many of those transactions as possible
|
|
|
|
|
## into to *packed* bucket which will subsequently be used to generate a
|
|
|
|
|
## new Ethereum block.
|
|
|
|
|
##
|
|
|
|
|
## When packing transactions from *staged* into *packed* bucked, the staged
|
|
|
|
|
## transactions are sorted by *sender account* and *nonce*. The *sender
|
|
|
|
|
## account* values are ordered by a *ranking* function (highest ranking first)
|
|
|
|
|
## and the *nonce* values by their natural integer order. Then, transactions
|
|
|
|
|
## are greedily picked from the ordered set until there are enough
|
|
|
|
|
## transactions in the *packed* bucket. Some boundary condition applies which
|
|
|
|
|
## roughly says that for a given account, all the transactions packed must
|
|
|
|
|
## leave no gaps between nonce values when sorted.
|
|
|
|
|
##
|
|
|
|
|
## The rank function applied to the *sender account* sorting is chosen as a
|
|
|
|
|
## guess for higher profitability which goes with a higher rank account.
|
|
|
|
|
##
|
|
|
|
|
##
|
|
|
|
|
## Rank calculator
|
|
|
|
|
## ---------------
|
|
|
|
|
## Let *tx()* denote the mapping
|
|
|
|
|
## ::
|
|
|
|
|
## tx: (account,nonce) -> tx
|
|
|
|
|
##
|
|
|
|
|
## from an index pair *(account,nonce)* to a transaction *tx*. Also, for some
|
|
|
|
|
## external parameter *baseFee*, let
|
|
|
|
|
## ::
|
|
|
|
|
## maxProfit: (tx,baseFee) -> tx.effectiveGasTip(baseFee) * tx.gasLimit
|
|
|
|
|
##
|
|
|
|
|
## be the maximal tip a single transaction can achieve (where unit of the
|
|
|
|
|
## *effectiveGasTip()* is a *price* and *gasLimit* is a *commodity value*.).
|
|
|
|
|
## Then the rank function
|
|
|
|
|
## ::
|
|
|
|
|
## rank(account) = Σ maxProfit(tx(account,ν),baseFee) / Σ tx(account,ν).gasLimit
|
|
|
|
|
## ν ν
|
|
|
|
|
##
|
|
|
|
|
## is a *price* estimate of the maximal average tip per gas unit over all
|
|
|
|
|
## transactions for the given account. The nonces `ν` for the summation
|
|
|
|
|
## run over all transactions from the *staged* and *packed* bucket.
|
|
|
|
|
##
|
|
|
|
|
##
|
|
|
|
|
##
|
|
|
|
|
##
|
|
|
|
|
## Pool database:
|
|
|
|
|
## --------------
|
|
|
|
|
## ::
|
2022-04-08 08:38:47 +00:00
|
|
|
|
## <Transactions> . <Status buckets> . <Terminal state>
|
|
|
|
|
## . .
|
|
|
|
|
## . . +----------+
|
|
|
|
|
## add() ----+---------------------------------> | |
|
|
|
|
|
## | . +-----------+ . | disposed |
|
|
|
|
|
## +-----------> | pending | ------> | |
|
|
|
|
|
## . +-----------+ . | |
|
|
|
|
|
## . | ^ ^ . | waste |
|
|
|
|
|
## . v | | . | basket |
|
|
|
|
|
## . +----------+ | . | |
|
|
|
|
|
## . | staged | | . | |
|
|
|
|
|
## . +----------+ | . | |
|
|
|
|
|
## . | | ^ | . | |
|
|
|
|
|
## . | v | | . | |
|
|
|
|
|
## . | +----------+ . | |
|
|
|
|
|
## . | | packed | -------> | |
|
|
|
|
|
## . | +----------+ . | |
|
|
|
|
|
## . +----------------------> | |
|
|
|
|
|
## . . +----------+
|
2022-01-18 14:40:02 +00:00
|
|
|
|
##
|
|
|
|
|
## The three columns *Transactions*, *Status buckets*, and *Terminal state*
|
|
|
|
|
## represent three different accounting (or database) systems. The pool
|
|
|
|
|
## database is continuously updated while new transactions are added.
|
|
|
|
|
## Transactions are bundled with meta data which holds the full database
|
|
|
|
|
## state in addition to other cached information like the sender account.
|
|
|
|
|
##
|
|
|
|
|
##
|
2022-04-08 14:05:30 +00:00
|
|
|
|
## New transactions
|
|
|
|
|
## ----------------
|
2022-01-18 14:40:02 +00:00
|
|
|
|
## When entering the pool, new transactions are bundled with meta data and
|
2022-04-08 14:05:30 +00:00
|
|
|
|
## appended to the batch queue. These bundles are called *items* which are
|
|
|
|
|
## forwarded to one of the following entities:
|
2022-01-18 14:40:02 +00:00
|
|
|
|
##
|
|
|
|
|
## * the *staged* bucket if the transaction is valid and match some constraints
|
|
|
|
|
## on expected minimum mining fees (or a semblance of that for *non-PoW*
|
|
|
|
|
## networks)
|
|
|
|
|
## * the *pending* bucket if the transaction is valid but is not subject to be
|
|
|
|
|
## held in the *staged* bucket
|
|
|
|
|
## * the *waste basket* if the transaction is invalid
|
|
|
|
|
##
|
|
|
|
|
## If a valid transaction item supersedes an existing one, the existing
|
|
|
|
|
## item is moved to the waste basket and the new transaction replaces the
|
|
|
|
|
## existing one in the current bucket if the gas price of the transaction is
|
|
|
|
|
## at least `priceBump` per cent higher (see adjustable parameters, below.)
|
|
|
|
|
##
|
|
|
|
|
## Status buckets
|
|
|
|
|
## --------------
|
|
|
|
|
## The term *bucket* is a nickname for a set of *items* (i.e. transactions
|
|
|
|
|
## bundled with meta data as mentioned earlier) all labelled with the same
|
|
|
|
|
## `status` symbol and not marked *waste*. In particular, bucket membership
|
|
|
|
|
## for an item is encoded as
|
|
|
|
|
##
|
|
|
|
|
## * the `status` field indicates the particular *bucket* membership
|
|
|
|
|
## * the `reject` field is reset/unset and has zero-equivalent value
|
|
|
|
|
##
|
|
|
|
|
## The following boundary conditions hold for the union of all buckets:
|
|
|
|
|
##
|
|
|
|
|
## * *Unique index:*
|
|
|
|
|
## Let **T** be the union of all buckets and **Q** be the
|
|
|
|
|
## set of *(sender,nonce)* pairs derived from the items of **T**. Then
|
|
|
|
|
## **T** and **Q** are isomorphic, i.e. for each pair *(sender,nonce)*
|
|
|
|
|
## from **Q** there is exactly one item from **T**, and vice versa.
|
|
|
|
|
##
|
|
|
|
|
## * *Consecutive nonces:*
|
|
|
|
|
## For each *(sender0,nonce0)* of **Q**, either
|
|
|
|
|
## *(sender0,nonce0-1)* is in **Q** or *nonce0* is the current nonce as
|
|
|
|
|
## registered with the *sender account* (implied by the block chain),
|
|
|
|
|
##
|
|
|
|
|
## The *consecutive nonces* requirement involves the *sender account*
|
|
|
|
|
## which depends on the current state of the block chain as represented by the
|
|
|
|
|
## internally cached head (i.e. insertion point where a new block is to be
|
|
|
|
|
## appended.)
|
|
|
|
|
##
|
|
|
|
|
## The following notation describes sets of *(sender,nonce)* pairs for
|
|
|
|
|
## per-bucket items. It will be used for boundary conditions similar to the
|
|
|
|
|
## ones above.
|
|
|
|
|
##
|
|
|
|
|
## * **Pending** denotes the set of *(sender,nonce)* pairs for the
|
|
|
|
|
## *pending* bucket
|
|
|
|
|
##
|
|
|
|
|
## * **Staged** denotes the set of *(sender,nonce)* pairs for the
|
|
|
|
|
## *staged* bucket
|
|
|
|
|
##
|
|
|
|
|
## * **Packed** denotes the set of *(sender,nonce)* pairs for the
|
|
|
|
|
## *packed* bucket
|
|
|
|
|
##
|
|
|
|
|
## The pending bucket
|
|
|
|
|
## ^^^^^^^^^^^^^^^^^^
|
|
|
|
|
## Items in this bucket hold valid transactions that are not in any of the
|
|
|
|
|
## other buckets. All items might be promoted from here into other buckets if
|
|
|
|
|
## the current state of the block chain as represented by the internally cached
|
|
|
|
|
## head changes.
|
|
|
|
|
##
|
|
|
|
|
## The staged bucket
|
|
|
|
|
## ^^^^^^^^^^^^^^^^^
|
|
|
|
|
## Items in this bucket are ready to be added to a new block. They typically
|
|
|
|
|
## imply some expected minimum reward when mined on PoW networks. Some
|
|
|
|
|
## boundary condition holds:
|
|
|
|
|
##
|
|
|
|
|
## * *Consecutive nonces:*
|
|
|
|
|
## For any *(sender0,nonce0)* pair from **Staged**, the pair
|
|
|
|
|
## *(sender0,nonce0-1)* is not in **Pending**.
|
|
|
|
|
##
|
|
|
|
|
## Considering the respective boundary condition on the union of buckets
|
|
|
|
|
## **T**, this condition here implies that a *staged* per sender nonce has a
|
|
|
|
|
## predecessor in the *staged* or *packed* bucket or is a nonce as registered
|
|
|
|
|
## with the *sender account*.
|
|
|
|
|
##
|
|
|
|
|
## The packed bucket
|
|
|
|
|
## ^^^^^^^^^^^^^^^^^
|
|
|
|
|
## All items from this bucket have been selected from the *staged* bucket, the
|
|
|
|
|
## transactions of which (i.e. unwrapped items) can go right away into a new
|
|
|
|
|
## Ethereum block. How these items are selected was described at the beginning
|
|
|
|
|
## of this chapter. The following boundary conditions holds:
|
|
|
|
|
##
|
|
|
|
|
## * *Consecutive nonces:*
|
|
|
|
|
## For any *(sender0,nonce0)* pair from **Packed**, the pair
|
|
|
|
|
## *(sender0,nonce0-1)* is neither in **Pending**, nor in **Staged**.
|
|
|
|
|
##
|
|
|
|
|
## Considering the respective boundary condition on the union of buckets
|
|
|
|
|
## **T**, this condition here implies that a *packed* per-sender nonce has a
|
|
|
|
|
## predecessor in the very *packed* bucket or is a nonce as registered with the
|
|
|
|
|
## *sender account*.
|
|
|
|
|
##
|
|
|
|
|
##
|
|
|
|
|
## Terminal state
|
|
|
|
|
## --------------
|
|
|
|
|
## After use, items are disposed into a waste basket *FIFO* queue which has a
|
|
|
|
|
## maximal length. If the length is exceeded, the oldest items are deleted.
|
|
|
|
|
## The waste basket is used as a cache for discarded transactions that need to
|
|
|
|
|
## re-enter the system. Recovering from the waste basket saves the effort of
|
|
|
|
|
## recovering the sender account from the signature. An item is identified
|
|
|
|
|
## *waste* if
|
|
|
|
|
##
|
|
|
|
|
## * the `reject` field is explicitly set and has a value different
|
|
|
|
|
## from a zero-equivalent.
|
|
|
|
|
##
|
|
|
|
|
## So a *waste* item is clearly distinguishable from any active one as a
|
|
|
|
|
## member of one of the *status buckets*.
|
|
|
|
|
##
|
|
|
|
|
##
|
|
|
|
|
##
|
|
|
|
|
## Pool coding
|
|
|
|
|
## ===========
|
2022-04-08 14:05:30 +00:00
|
|
|
|
## A piece of code using this pool architecture could look like as follows:
|
2022-01-18 14:40:02 +00:00
|
|
|
|
## ::
|
|
|
|
|
## # see also unit test examples, e.g. "Block packer tests"
|
2024-12-13 06:21:20 +00:00
|
|
|
|
## var chain: ForkedChainRef # to be initialised
|
2022-01-18 14:40:02 +00:00
|
|
|
|
## var txs: seq[Transaction] # to be initialised
|
|
|
|
|
##
|
|
|
|
|
##
|
2024-12-13 06:21:20 +00:00
|
|
|
|
## var xq = TxPoolRef.new(chain) # initialise tx-pool
|
2022-01-18 14:40:02 +00:00
|
|
|
|
## ..
|
|
|
|
|
##
|
2022-04-08 08:38:47 +00:00
|
|
|
|
## xq.add(txs) # add transactions ..
|
|
|
|
|
## .. # .. into the buckets
|
2022-01-18 14:40:02 +00:00
|
|
|
|
##
|
2023-11-01 02:24:32 +00:00
|
|
|
|
## let newBlock = xq.assembleBlock # fetch current mining block
|
2022-01-18 14:40:02 +00:00
|
|
|
|
##
|
2022-04-08 08:38:47 +00:00
|
|
|
|
##      xq.smartHead(newBlock.header)   # update pool, new insertion point
|
2022-01-18 14:40:02 +00:00
|
|
|
|
##
|
|
|
|
|
##
|
|
|
|
|
## Discussion of example
|
|
|
|
|
## ---------------------
|
2022-04-08 14:05:30 +00:00
|
|
|
|
## In the example, transactions are processed into buckets via `add()`.
|
2022-01-18 14:40:02 +00:00
|
|
|
|
##
|
2024-12-13 06:21:20 +00:00
|
|
|
|
## The `assembleBlock()` directive assembles and retrieves a new block for mining
|
2022-04-08 14:05:30 +00:00
|
|
|
|
## derived from the current pool state. It invokes the block packer which
|
|
|
|
|
## accumulates txs from the `pending` bucket into the `packed` bucket which
|
|
|
|
|
## then go into the block.
|
2022-01-18 14:40:02 +00:00
|
|
|
|
##
|
|
|
|
|
## Then mining and signing takes place ...
|
|
|
|
|
##
|
|
|
|
|
## After mining and signing, the view of the block chain as seen by the pool
|
|
|
|
|
## must be updated to be ready for a new mining process. In the best case, the
|
|
|
|
|
## canonical head is just moved to the currently mined block which would imply
|
|
|
|
|
## just to discard the contents of the *packed* bucket with some additional
|
|
|
|
|
## transactions from the *staged* bucket. A more general block chain state
|
|
|
|
|
## head update would be more complex, though.
|
|
|
|
|
##
|
|
|
|
|
## In the most complex case, the newly mined block was added to some block
|
|
|
|
|
## chain branch which has become an uncle to the new canonical head retrieved
|
2024-12-18 04:04:23 +00:00
|
|
|
|
## by `latestHeader()`. In order to update the pool to the very state
|
2022-01-18 14:40:02 +00:00
|
|
|
|
## one would have arrived if worked on the retrieved canonical head branch
|
2022-04-08 14:05:30 +00:00
|
|
|
|
## in the first place, the directive `smartHead()` calculates the actions of
|
|
|
|
|
## what is needed to get just there from the locally cached head state of the
|
|
|
|
|
## pool. These actions are applied by `smartHead()` after the internal head
|
|
|
|
|
## position was moved.
|
2022-01-18 14:40:02 +00:00
|
|
|
|
##
|
2022-04-08 14:05:30 +00:00
|
|
|
|
## The *setter* behind the internal head position adjustment also caches
|
|
|
|
|
## updated internal parameters as base fee, state, fork, etc.
|
2022-01-18 14:40:02 +00:00
|
|
|
|
##
|
|
|
|
|
##
|
|
|
|
|
## Adjustable Parameters
|
|
|
|
|
## ---------------------
|
|
|
|
|
##
|
|
|
|
|
## flags
|
|
|
|
|
## The `flags` parameter holds a set of strategy symbols for how to process
|
|
|
|
|
## items and buckets.
|
|
|
|
|
##
|
|
|
|
|
## *autoUpdateBucketsDB*
|
|
|
|
|
## Automatically update the state buckets after running batch jobs if the
|
|
|
|
|
## `dirtyBuckets` flag is also set.
|
|
|
|
|
##
|
|
|
|
|
## *autoZombifyUnpacked*
|
|
|
|
|
## Automatically dispose *pending* or *staged* tx items that were added to
|
|
|
|
|
## the state buckets database at least `lifeTime` ago.
|
|
|
|
|
##
|
|
|
|
|
## lifeTime
|
|
|
|
|
## Txs that stay longer in one of the buckets will be moved to a waste
|
|
|
|
|
## basket. From there they will be eventually deleted oldest first when
|
|
|
|
|
## the maximum size would be exceeded.
|
|
|
|
|
##
|
|
|
|
|
## priceBump
|
|
|
|
|
## There can be only one transaction in the database for the same `sender`
|
|
|
|
|
## account and `nonce` value. When adding a transaction with the same
|
|
|
|
|
## (`sender`, `nonce`) pair, the new transaction will replace the current one
|
|
|
|
|
## if it has a gas price which is at least `priceBump` per cent higher.
|
|
|
|
|
##
|
|
|
|
|
##
|
|
|
|
|
## Read-Only Parameters
|
|
|
|
|
## --------------------
|
2022-04-08 08:38:47 +00:00
|
|
|
|
## head
|
|
|
|
|
## Cached block chain insertion point, not necessarily the same header as
|
2024-12-18 04:04:23 +00:00
|
|
|
|
## retrieved by the `latestHeader()`. This insertion point can be
|
2022-04-08 08:38:47 +00:00
|
|
|
|
## adjusted with the `smartHead()` function.
|
2024-08-07 15:35:17 +00:00
|
|
|
|
|
2022-01-18 14:40:02 +00:00
|
|
|
|
|
|
|
|
|
import
|
2024-06-14 07:31:08 +00:00
|
|
|
|
std/[sequtils, tables],
|
2024-08-07 15:35:17 +00:00
|
|
|
|
./tx_pool/[tx_packer, tx_desc, tx_info, tx_item],
|
2022-01-18 14:40:02 +00:00
|
|
|
|
./tx_pool/tx_tabs,
|
2022-03-31 08:21:36 +00:00
|
|
|
|
./tx_pool/tx_tasks/[
|
|
|
|
|
tx_add,
|
|
|
|
|
tx_bucket,
|
|
|
|
|
tx_head,
|
2024-08-07 15:35:17 +00:00
|
|
|
|
tx_dispose],
|
2022-01-18 14:40:02 +00:00
|
|
|
|
chronicles,
|
2024-05-30 12:54:03 +00:00
|
|
|
|
stew/keyed_queue,
|
|
|
|
|
results,
|
2023-10-19 00:50:07 +00:00
|
|
|
|
../common/common,
|
2024-09-04 09:54:54 +00:00
|
|
|
|
./chain/forked_chain,
|
2023-10-19 00:50:07 +00:00
|
|
|
|
./casper
|
2022-01-18 14:40:02 +00:00
|
|
|
|
|
|
|
|
|
export
|
|
|
|
|
TxItemRef,
|
|
|
|
|
TxItemStatus,
|
|
|
|
|
TxPoolFlags,
|
|
|
|
|
TxPoolRef,
|
|
|
|
|
TxTabsItemsCount,
|
|
|
|
|
results,
|
|
|
|
|
tx_desc.startDate,
|
|
|
|
|
tx_info,
|
|
|
|
|
tx_item.effectiveGasTip,
|
|
|
|
|
tx_item.info,
|
|
|
|
|
tx_item.itemID,
|
|
|
|
|
tx_item.sender,
|
|
|
|
|
tx_item.status,
|
|
|
|
|
tx_item.timeStamp,
|
2024-08-07 15:35:17 +00:00
|
|
|
|
tx_item.tx,
|
|
|
|
|
tx_desc.head
|
2022-01-18 14:40:02 +00:00
|
|
|
|
|
2023-01-30 22:10:23 +00:00
|
|
|
|
{.push raises: [].}
|
2022-01-18 14:40:02 +00:00
|
|
|
|
|
|
|
|
|
logScope:
|
|
|
|
|
topics = "tx-pool"
|
|
|
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
# Private functions: tasks processor
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
proc maintenanceProcessing(xp: TxPoolRef)
    {.gcsafe,raises: [CatchableError].} =
  ## Tasks to be done after add/del txs processing.
  ##
  ## Housekeeping pass driven by the strategy symbols in `xp.pFlags`:
  ## first expired items are zombified, then bucket labels are refreshed
  ## when flagged dirty.

  # Purge expired items
  if autoZombifyUnpacked in xp.pFlags:
    # Move transactions older than `xp.lifeTime` to the waste basket.
    xp.disposeExpiredItems

  # Update buckets
  if autoUpdateBucketsDB in xp.pFlags:
    if xp.pDirtyBuckets:
      # For all items, re-calculate item status values (aka bucket labels).
      # If the `force` flag is set, re-calculation is done even though the
      # change flag has remained unset.
      discard xp.bucketUpdateAll
      # Re-labelling done; clear the change marker until the next head move
      # or batch update flags the buckets dirty again.
      xp.pDirtyBuckets = false
|
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
|
proc setHead(xp: TxPoolRef; val: Header)
    {.gcsafe,raises: [CatchableError].} =
  ## Update cached block chain insertion point. This will also update the
  ## internally cached `baseFee` (depends on the block chain state.)
  if xp.head != val:
    xp.head = val # calculates the new baseFee
    # Propagate the freshly derived base fee into the tx database so
    # effective-tip calculations use the new head state.
    xp.txDB.baseFee = xp.baseFee
    # Bucket labels were computed against the old head; mark for re-check.
    xp.pDirtyBuckets = true
    # Items packed against the previous head are stale; push them back.
    xp.bucketFlushPacked
|
2022-01-18 14:40:02 +00:00
|
|
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
# Public constructor/destructor
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
2024-12-13 06:21:20 +00:00
|
|
|
|
proc new*(T: type TxPoolRef; chain: ForkedChainRef): T
    {.gcsafe,raises: [].} =
  ## Constructor. Allocates a fresh tx-pool descriptor and initialises it
  ## against the given `chain`.
  new(result)
  result.init(chain)
|
2022-01-18 14:40:02 +00:00
|
|
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
# Public functions, task manager, pool actions serialiser
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
# core/tx_pool.go(848): func (pool *TxPool) AddLocals(txs []..
|
|
|
|
|
# core/tx_pool.go(864): func (pool *TxPool) AddRemotes(txs []..
|
2024-05-15 03:07:59 +00:00
|
|
|
|
proc add*(xp: TxPoolRef; txs: openArray[PooledTransaction]; info = "")
    {.gcsafe,raises: [CatchableError].} =
  ## Queue a list of transactions to be processed and entered into the
  ## buckets database. It is OK to pass an empty list, in which case a
  ## maintenance round can be forced.
  ##
  ## The transactions in `txs` may come in any order; they will be sorted
  ## by `<account,nonce>` (least nonce first) before being added to the
  ## database. For this reason it is suggested to pass transactions in
  ## larger groups. When submitting single transactions one at a time, they
  ## must strictly be passed *smaller nonce* before *larger nonce*.
  let addedItems = xp.addTxs(txs, info).topItems
  xp.pDoubleCheckAdd addedItems
  xp.maintenanceProcessing
|
2022-01-18 14:40:02 +00:00
|
|
|
|
|
|
|
|
|
# core/tx_pool.go(854): func (pool *TxPool) AddLocals(txs []..
|
|
|
|
|
# core/tx_pool.go(883): func (pool *TxPool) AddRemotes(txs []..
|
2024-05-15 03:07:59 +00:00
|
|
|
|
proc add*(xp: TxPoolRef; tx: PooledTransaction; info = "")
    {.gcsafe,raises: [CatchableError].} =
  ## Single-transaction convenience wrapper around the list variant of
  ## `add()`.
  let batch = @[tx]
  xp.add(batch, info)
|
2022-01-18 14:40:02 +00:00
|
|
|
|
|
2024-12-13 06:21:20 +00:00
|
|
|
|
proc smartHead*(xp: TxPoolRef; pos: Header): bool
    {.gcsafe,raises: [CatchableError].} =
  ## This function moves the internal head cache (i.e. tx insertion point,
  ## vmState) and points it to a new block on the chain.
  ##
  ## It calculates the
  ## txs that need to be added or deleted after moving the insertion point
  ## head so that the tx-pool will not fail to re-insert queued txs that are
  ## on the chain, already. Neither will it lose any txs. After updating
  ## the internal head cache, the previously calculated actions will be
  ## applied.
  ##
  ## Returns `true` when the head difference could be calculated and the
  ## update was applied, `false` otherwise.
  let rcDiff = xp.headDiff(pos, xp.chain)
  if rcDiff.isOk:
    let changes = rcDiff.value

    # Need to move head before adding txs which may rightly be rejected in
    # `addTxs()` otherwise.
    xp.setHead(pos)

    # Delete already *mined* transactions
    if 0 < changes.remTxs.len:
      debug "queuing delta txs",
        mode = "remove",
        num = changes.remTxs.len
      xp.disposeById(toSeq(changes.remTxs.keys), txInfoChainHeadUpdate)

    # Run housekeeping (expiry, bucket re-labelling) against the new head.
    xp.maintenanceProcessing
    return true
|
2022-01-18 14:40:02 +00:00
|
|
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
# Public functions, getters
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
2024-05-28 18:26:51 +00:00
|
|
|
|
func com*(xp: TxPoolRef): CommonRef =
  ## Getter, the `CommonRef` held by the pool's VM state.
  result = xp.vmState.com
|
2022-01-18 14:40:02 +00:00
|
|
|
|
|
2024-10-26 11:10:54 +00:00
|
|
|
|
type AssembledBlock* = object
  ## Result of `assembleBlock()`: a block ready for sealing, plus
  ## auxiliary payload data.
  blk*: EthBlock                 # assembled block (mixHash/nonce left unset)
  blobsBundle*: Opt[BlobsBundle] # KZG commitments/proofs/blobs; set post-Cancun only
  blockValue*: UInt256           # block value as reported by the packer
  executionRequests*: Opt[array[3, seq[byte]]] # set post-Prague only
|
2024-05-15 03:07:59 +00:00
|
|
|
|
|
|
|
|
|
proc assembleBlock*(
    xp: TxPoolRef,
    someBaseFee: bool = false
): Result[AssembledBlock, string] {.gcsafe,raises: [CatchableError].} =
  ## Getter, retrieves a packed block ready for mining and signing depending
  ## on the internally cached block chain head, the txs in the pool and some
  ## tuning parameters. The following block header fields are left
  ## uninitialised:
  ##
  ## * *mixHash*: Hash32
  ## * *nonce*: BlockNonce
  ##
  ## Note that this getter runs *ad hoc* all the txs through the VM in
  ## order to build the block.

  # Run the block packer; this fills the `packed` bucket and yields the
  # packer state used below for header/value/requests.
  var pst = xp.packerVmExec().valueOr: # updates vmState
    return err(error)

  var blk = EthBlock(
    header: pst.assembleHeader # uses updated vmState
  )
  var blobsBundle: BlobsBundle

  # Collect the packed txs in packing order; side-car blob data (if any)
  # is accumulated into `blobsBundle`.
  for _, nonceList in xp.txDB.packingOrderAccounts(txItemPacked):
    for item in nonceList.incNonce:
      let tx = item.pooledTx
      blk.txs.add tx.tx
      if tx.networkPayload != nil:
        for k in tx.networkPayload.commitments:
          blobsBundle.commitments.add k
        for p in tx.networkPayload.proofs:
          blobsBundle.proofs.add p
        for blob in tx.networkPayload.blobs:
          blobsBundle.blobs.add blob
  # Header must reflect the final tx list assembled above.
  blk.header.transactionsRoot = calcTxRoot(blk.txs)

  let com = xp.vmState.com
  # Fork-dependent payload parts, gated by the header timestamp.
  if com.isShanghaiOrLater(blk.header.timestamp):
    blk.withdrawals = Opt.some(com.pos.withdrawals)

  if not com.isCancunOrLater(blk.header.timestamp) and blobsBundle.commitments.len > 0:
    return err("PooledTransaction contains blobs prior to Cancun")
  let blobsBundleOpt =
    if com.isCancunOrLater(blk.header.timestamp):
      # Commitments, proofs and blobs must come in matched triples.
      doAssert blobsBundle.commitments.len == blobsBundle.blobs.len
      doAssert blobsBundle.proofs.len == blobsBundle.blobs.len
      Opt.some blobsBundle
    else:
      Opt.none BlobsBundle

  if someBaseFee:
    # make sure baseFee always has something
    blk.header.baseFeePerGas = Opt.some(blk.header.baseFeePerGas.get(0.u256))

  let executionRequestsOpt =
    if com.isPragueOrLater(blk.header.timestamp):
      Opt.some(pst.executionRequests)
    else:
      Opt.none(array[3, seq[byte]])

  ok AssembledBlock(
    blk: blk,
    blobsBundle: blobsBundleOpt,
    blockValue: pst.blockValue,
    executionRequests: executionRequestsOpt)
|
2023-10-19 00:50:07 +00:00
|
|
|
|
|
2022-01-18 14:40:02 +00:00
|
|
|
|
# core/tx_pool.go(474): func (pool SetGasPrice,*TxPool) Stats() (int, int) {
|
|
|
|
|
# core/tx_pool.go(1728): func (t *txLookup) Count() int {
|
|
|
|
|
# core/tx_pool.go(1737): func (t *txLookup) LocalCount() int {
|
|
|
|
|
# core/tx_pool.go(1745): func (t *txLookup) RemoteCount() int {
|
2024-05-28 18:26:51 +00:00
|
|
|
|
func nItems*(xp: TxPoolRef): TxTabsItemsCount =
  ## Getter, the current number of items per bucket together with
  ## some totals.
  result = xp.txDB.nItems
|
|
|
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
# Public functions, per-tx-item operations
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
# core/tx_pool.go(979): func (pool *TxPool) Get(hash common.Hash) ..
|
|
|
|
|
# core/tx_pool.go(985): func (pool *TxPool) Has(hash common.Hash) bool {
|
2024-10-16 01:34:12 +00:00
|
|
|
|
func getItem*(xp: TxPoolRef; hash: Hash32): Result[TxItemRef,void] =
  ## Look up a transaction item by hash; `err()` when not in the pool.
  result = xp.txDB.byItemID.eq(hash)
|
|
|
|
|
|
2024-05-28 18:26:51 +00:00
|
|
|
|
func disposeItems*(xp: TxPoolRef; item: TxItemRef;
                   reason = txInfoExplicitDisposal;
                   otherReason = txInfoImpliedDisposal): int
    {.discardable,gcsafe,raises: [CatchableError].} =
  ## Move `item` to the wastebasket tagged with `reason`. All items for the
  ## same sender with nonces greater than the current one are deleted as
  ## well, tagged with `otherReason`. Returns the number of items
  ## eventually removed.
  result = xp.disposeItemAndHigherNonces(item, reason, otherReason)
|
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
|
iterator txHashes*(xp: TxPoolRef): Hash32 =
  ## Yields the hash of every transaction currently tracked by the pool.
  for hash in nextKeys(xp.txDB.byItemID):
    yield hash
|
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
|
iterator okPairs*(xp: TxPoolRef): (Hash32, TxItemRef) =
  ## Yields `(hash, item)` pairs for every pool item not marked rejected.
  for entry in nextPairs(xp.txDB.byItemID):
    if entry.data.reject == txInfoOk:
      yield (entry.key, entry.data)
|
|
|
|
|
|
2024-05-28 18:26:51 +00:00
|
|
|
|
func numTxs*(xp: TxPoolRef): int =
  ## Total number of transactions currently held by the pool.
  result = xp.txDB.byItemID.len
|
|
|
|
|
|
2024-05-28 18:26:51 +00:00
|
|
|
|
func disposeAll*(xp: TxPoolRef) {.raises: [CatchableError].} =
  ## Empty the pool by moving every item to the wastebasket.
  # Snapshot the items first: disposing while iterating would mutate the
  # queue underneath the iterator.
  let total = xp.numTxs
  var doomed = newSeqOfCap[TxItemRef](total)
  for entry in nextPairs(xp.txDB.byItemID):
    doomed.add entry.data
  for item in doomed:
    xp.disposeItems(item)
|
|
|
|
|
|
2022-01-18 14:40:02 +00:00
|
|
|
|
# ------------------------------------------------------------------------------
|
2022-03-31 08:21:36 +00:00
|
|
|
|
# Public functions, local/remote accounts
|
2022-01-18 14:40:02 +00:00
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
|
func inPoolAndOk*(xp: TxPoolRef; txHash: Hash32): bool =
  ## `true` iff the transaction is in the pool and not marked rejected.
  let rc = xp.getItem(txHash)
  # `and` short-circuits, so `get()` is only evaluated on success.
  rc.isOk and rc.get().reject == txInfoOk
|
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
|
func inPoolAndReason*(xp: TxPoolRef; txHash: Hash32): Result[void, string] =
  ## `ok()` when the transaction is an active pool item; otherwise `err()`
  ## carrying the rejection reason (the waste basket is consulted for
  ## items no longer active).
  let rc = xp.getItem(txHash)
  if rc.isOk:
    let item = rc.get()
    if item.reject == txInfoOk:
      return ok()
    return err(item.rejectInfo)

  # Not an active item -- the waste basket may still know why it was dropped.
  let rejected = xp.txDB.byRejects.eq(txHash)
  if rejected.isOk:
    return err(rejected.get().rejectInfo)
  err("cannot find tx in txpool")
|
|
|
|
|
|
2022-01-18 14:40:02 +00:00
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
# End
|
|
|
|
|
# ------------------------------------------------------------------------------
|