Silence compiler gossip after nim upgrade (#1454)

* Silence some compiler gossip -- part 1, tx_pool

details:
  Mostly removing redundant imports and `Defect` tracer after switch
  to nim 1.6

* Silence some compiler gossip -- part 2, clique

details:
  Mostly removing redundant imports and `Defect` tracer after switch
  to nim 1.6

* Silence some compiler gossip -- part 3, misc core

details:
  Mostly removing redundant imports and `Defect` tracer after switch
  to nim 1.6

* Silence some compiler gossip -- part 4, sync

details:
  Mostly removing redundant imports and `Defect` tracer after switch
  to nim 1.6

* Clique update

why:
  Missing exception annotation
This commit is contained in:
Jordan Hrycaj 2023-01-30 22:10:23 +00:00 committed by GitHub
parent 197d2b16dd
commit 89ae9621c4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
97 changed files with 611 additions and 655 deletions

View File

@ -13,9 +13,7 @@ import
../../utils/utils,
../pow,
../clique,
../validate,
chronicles,
stew/endians2
../validate
export
common
@ -38,14 +36,13 @@ type
## First block from which `extraValidation` will be applied (only
## effective if `extraValidation` is true.)
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private constructor helper
# ------------------------------------------------------------------------------
proc initChain(c: ChainRef; com: CommonRef; extraValidation: bool)
{.gcsafe, raises: [Defect,CatchableError].} =
proc initChain(c: ChainRef; com: CommonRef; extraValidation: bool) =
## Constructor for the `Chain` descriptor object.
c.com = com
@ -56,16 +53,14 @@ proc initChain(c: ChainRef; com: CommonRef; extraValidation: bool)
# Public constructors
# ------------------------------------------------------------------------------
proc newChain*(com: CommonRef, extraValidation: bool): ChainRef
{.gcsafe, raises: [Defect,CatchableError].} =
proc newChain*(com: CommonRef, extraValidation: bool): ChainRef =
## Constructor for the `Chain` descriptor object.
## The argument `extraValidation` enables extra block
## chain validation if set `true`.
new result
result.initChain(com, extraValidation)
proc newChain*(com: CommonRef): ChainRef
{.gcsafe, raises: [Defect,CatchableError].} =
proc newChain*(com: CommonRef): ChainRef =
## Constructor for the `Chain` descriptor object. All sub-object descriptors
## are initialised with defaults. So is extra block chain validation
## * `enabled` for PoA networks (such as Goerli)
@ -105,7 +100,7 @@ proc verifyFrom*(c: ChainRef): BlockNumber =
c.verifyFrom
proc currentBlock*(c: ChainRef): BlockHeader
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## currentBlock retrieves the current head block of the canonical chain.
## Ideally the block should be retrieved from the blockchain's internal cache,
## but for now it's enough to retrieve it from the database.

View File

@ -18,7 +18,6 @@ import
./chain_desc,
./chain_helpers,
chronicles,
stew/endians2,
stint
when not defined(release):
@ -34,7 +33,7 @@ type
PersistBlockFlags = set[PersistBlockFlag]
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private
@ -139,7 +138,7 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
proc insertBlockWithoutSetHead*(c: ChainRef, header: BlockHeader,
body: BlockBody): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
safeP2PChain("persistBlocks"):
result = c.persistBlocksImpl([header], [body], {NoPersistHeader, NoSaveReceipts})
@ -147,7 +146,7 @@ proc insertBlockWithoutSetHead*(c: ChainRef, header: BlockHeader,
c.db.persistHeaderToDbWithoutSetHead(header)
proc setCanonical*(c: ChainRef, header: BlockHeader): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
if header.parentHash == Hash256():
discard c.db.setHead(header.blockHash)
@ -165,7 +164,7 @@ proc setCanonical*(c: ChainRef, header: BlockHeader): ValidationResult
discard c.db.setHead(header.blockHash)
proc setCanonical*(c: ChainRef, blockHash: Hash256): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
var header: BlockHeader
if not c.db.getBlockHeader(blockHash, header):
debug "Failed to get BlockHeader",
@ -176,7 +175,7 @@ proc setCanonical*(c: ChainRef, blockHash: Hash256): ValidationResult
proc persistBlocks*(c: ChainRef; headers: openArray[BlockHeader];
bodies: openArray[BlockBody]): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
# Run the VM here
if headers.len != bodies.len:
debug "Number of headers not matching number of bodies"

View File

@ -24,7 +24,7 @@ import
./clique/snapshot/[ballot, snapshot_desc],
stew/results
{.push raises: [Defect].}
{.push raises: [].}
# Note that mining is unsupported. Unused code ported from the Go
# implementation is stashed into the `clique_unused` module.
@ -82,7 +82,7 @@ proc cliqueDispose*(c: Clique; state: var CliqueState) =
## `cliqueRestore()` was wrapped in a `defer:` statement.
state = err(CliqueState)
proc cliqueSigners*(c: Clique): seq[EthAddress] {.inline.} =
proc cliqueSigners*(c: Clique): seq[EthAddress] =
## Retrieve the sorted list of authorized signers for the current state
## of the `Clique` descriptor.
##
@ -90,7 +90,7 @@ proc cliqueSigners*(c: Clique): seq[EthAddress] {.inline.} =
## function is invoked.
c.snapshot.ballot.authSigners
proc cliqueSignersLen*(c: Clique): int {.inline.} =
proc cliqueSignersLen*(c: Clique): int =
## Get the number of authorized signers for the current state of the
## `Clique` descriptor. The result is equivalent to `c.cliqueSigners.len`.
c.snapshot.ballot.authSignersLen

View File

@ -21,7 +21,6 @@
import
std/[random, times],
ethash,
stew/results,
../../db/db_chain,
../../utils/ec_recover,
./clique_defs
@ -32,6 +31,8 @@ export
const
prngSeed = 42
{.push raises: [].}
type
CliqueCfg* = ref object of RootRef
db*: ChainDBRef ##\
@ -73,8 +74,6 @@ type
## Time interval after which the `snapshotApply()` function main loop
## produces logging entries.
{.push raises: [Defect].}
# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------
@ -95,8 +94,11 @@ proc newCliqueCfg*(db: ChainDBRef): CliqueCfg =
# ------------------------------------------------------------------------------
# clique/clique.go(145): func ecrecover(header [..]
proc ecRecover*(cfg: CliqueCfg; header: BlockHeader): auto
{.gcsafe, raises: [Defect,CatchableError].} =
proc ecRecover*(
cfg: CliqueCfg;
header: BlockHeader;
): auto
{.gcsafe, raises: [CatchableError].} =
cfg.signatures.ecRecover(header)
# ------------------------------------------------------------------------------
@ -132,7 +134,7 @@ proc `logInterval=`*(cfg: CliqueCfg; duration: Duration) =
# Public PRNG, may be overloaded
# ------------------------------------------------------------------------------
method rand*(cfg: CliqueCfg; max: Natural): int {.gcsafe,base.} =
method rand*(cfg: CliqueCfg; max: Natural): int {.gcsafe, base, raises: [].} =
## The method returns a random number based on an internal PRNG providing a
## reproducible stream of random data. This function is supposed to be used
## exactly when repeatability comes in handy. Never to be used for crypto key

View File

@ -24,7 +24,7 @@ import
./clique_defs,
./snapshot/snapshot_desc,
chronicles,
eth/[keys, rlp],
eth/keys,
stew/[keyed_queue, results]
type
@ -88,7 +88,7 @@ type
## before have been vetted already regardless of the current branch. So
## the nearest `epoch` header is used.
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "clique PoA constructor"
@ -119,7 +119,10 @@ proc `$`*(e: CliqueError): string =
# Public getters
# ------------------------------------------------------------------------------
proc recents*(c: Clique): var KeyedQueue[CliqueSnapKey,Snapshot] =
proc recents*(
c: Clique;
): var KeyedQueue[CliqueSnapKey,Snapshot]
=
## Getter
c.recents

View File

@ -28,7 +28,7 @@ import
./clique_desc,
./clique_helpers
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private functions
@ -162,7 +162,7 @@ proc cliqueGenvote*(
voteInOk = false;
outOfTurn = false;
checkPoint: seq[EthAddress] = @[]): BlockHeader
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Variant of `clique_genvote()` where the `parent` is the canonical head
## on the block chain database.
##

View File

@ -32,7 +32,7 @@ type
EthDescending = SortOrder.Descending.ord
EthAscending = SortOrder.Ascending.ord
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private helpers
@ -56,7 +56,7 @@ proc sorted*(e: openArray[EthAddress]; order = EthAscending): seq[EthAddress] =
return -1
elif y[n] < x[n]:
return 1
e.sorted(cmp = eCmp, order = order.SortOrder)
e.sorted(cmp = eCmp, order = order.ord.SortOrder)
proc cliqueResultErr*(w: CliqueError): CliqueOkResult =

View File

@ -22,7 +22,7 @@ import
std/[sequtils, times],
chronicles,
chronos,
eth/[keys, rlp],
eth/keys,
"../.."/[constants, utils/ec_recover],
../../common/common,
./clique_cfg,
@ -42,15 +42,15 @@ logScope:
# Private Helpers
# ------------------------------------------------------------------------------
proc isValidVote(s: Snapshot; a: EthAddress; authorize: bool): bool =
proc isValidVote(s: Snapshot; a: EthAddress; authorize: bool): bool {.gcsafe, raises: [].} =
s.ballot.isValidVote(a, authorize)
proc isSigner*(s: Snapshot; address: EthAddress): bool =
proc isSigner*(s: Snapshot; address: EthAddress): bool {.gcsafe, raises: [].} =
## See `clique_verify.isSigner()`
s.ballot.isAuthSigner(address)
# clique/snapshot.go(319): func (s *Snapshot) inturn(number [..]
proc inTurn*(s: Snapshot; number: BlockNumber, signer: EthAddress): bool =
proc inTurn*(s: Snapshot; number: BlockNumber, signer: EthAddress): bool {.gcsafe, raises: [].} =
## See `clique_verify.inTurn()`
let ascSignersList = s.ballot.authSigners
for offset in 0 ..< ascSignersList.len:
@ -62,14 +62,14 @@ proc inTurn*(s: Snapshot; number: BlockNumber, signer: EthAddress): bool =
# ------------------------------------------------------------------------------
# clique/clique.go(681): func calcDifficulty(snap [..]
proc calcDifficulty(s: Snapshot; signer: EthAddress): DifficultyInt =
proc calcDifficulty(s: Snapshot; signer: EthAddress): DifficultyInt {.gcsafe, raises: [].} =
if s.inTurn(s.blockNumber + 1, signer):
DIFF_INTURN
else:
DIFF_NOTURN
proc recentBlockNumber*(s: Snapshot;
a: EthAddress): Result[BlockNumber,void] =
a: EthAddress): Result[BlockNumber,void] {.gcsafe, raises: [].} =
## Return `BlockNumber` for `address` argument (if any)
for (number,recent) in s.recents.pairs:
if recent == a:
@ -82,7 +82,7 @@ proc recentBlockNumber*(s: Snapshot;
# clique/clique.go(506): func (c *Clique) Prepare(chain [..]
proc prepare*(c: Clique; parent: BlockHeader, header: var BlockHeader): CliqueOkResult
{.gcsafe, raises: [Defect, CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## For the Consensus Engine, `prepare()` initializes the consensus fields
## of a block header according to the rules of a particular engine.
##
@ -134,20 +134,20 @@ proc prepare*(c: Clique; parent: BlockHeader, header: var BlockHeader): CliqueOk
ok()
proc prepareForSeal*(c: Clique; prepHeader: BlockHeader; header: var BlockHeader) =
proc prepareForSeal*(c: Clique; prepHeader: BlockHeader; header: var BlockHeader) {.gcsafe, raises: [].} =
# TODO: use system.move?
header.nonce = prepHeader.nonce
header.extraData = prepHeader.extraData
header.mixDigest = prepHeader.mixDigest
# clique/clique.go(589): func (c *Clique) Authorize(signer [..]
proc authorize*(c: Clique; signer: EthAddress; signFn: CliqueSignerFn) =
proc authorize*(c: Clique; signer: EthAddress; signFn: CliqueSignerFn) {.gcsafe, raises: [].} =
## Injects private key into the consensus engine to mint new blocks with.
c.signer = signer
c.signFn = signFn
# clique/clique.go(724): func CliqueRLP(header [..]
proc cliqueRlp*(header: BlockHeader): seq[byte] =
proc cliqueRlp*(header: BlockHeader): seq[byte] {.gcsafe, raises: [].} =
## Returns the rlp bytes which need to be signed for the proof-of-authority
## sealing. The RLP to sign consists of the entire header apart from the 65
## byte signature contained at the end of the extra data.
@ -159,7 +159,7 @@ proc cliqueRlp*(header: BlockHeader): seq[byte] =
header.encodeSealHeader
# clique/clique.go(688): func SealHash(header *types.Header) common.Hash {
proc sealHash*(header: BlockHeader): Hash256 =
proc sealHash*(header: BlockHeader): Hash256 {.gcsafe, raises: [].} =
## For the Consensus Engine, `sealHash()` returns the hash of a block prior
## to it being sealed.
##
@ -170,7 +170,7 @@ proc sealHash*(header: BlockHeader): Hash256 =
# clique/clique.go(599): func (c *Clique) Seal(chain [..]
proc seal*(c: Clique; ethBlock: var EthBlock):
Result[void,CliqueError] {.gcsafe,
raises: [Defect,CatchableError].} =
raises: [CatchableError].} =
## This implementation attempts to create a sealed block using the local
## signing credentials.
@ -242,7 +242,7 @@ proc seal*(c: Clique; ethBlock: var EthBlock):
# clique/clique.go(673): func (c *Clique) CalcDifficulty(chain [..]
proc calcDifficulty*(c: Clique;
parent: BlockHeader): Result[DifficultyInt,CliqueError]
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## For the Consensus Engine, `calcDifficulty()` is the difficulty adjustment
## algorithm. It returns the difficulty that a new block should have.
##

View File

@ -23,7 +23,7 @@ import
chronicles,
eth/[keys],
stew/[keyed_queue, results],
"../.."/[utils/prettify],
../../utils/prettify,
"."/[clique_cfg, clique_defs, clique_desc],
./snapshot/[snapshot_apply, snapshot_desc]
@ -52,11 +52,14 @@ type
parents: seq[BlockHeader] ## explicit parents
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "clique PoA snapshot"
static:
const stopCompilerGossip {.used.} = 42.toSI
# ------------------------------------------------------------------------------
# Private debugging functions, pretty printing
# ------------------------------------------------------------------------------
@ -66,51 +69,51 @@ template say(d: var LocalSnaps; v: varargs[untyped]): untyped =
# uncomment body to enable, note that say() prints on <stderr>
# d.c.cfg.say v
proc pp(a: Hash256): string =
if a == EMPTY_ROOT_HASH:
"*blank-root*"
elif a == EMPTY_SHA3:
"*empty-sha3*"
else:
a.data.mapIt(it.toHex(2)).join[56 .. 63].toLowerAscii
#proc pp(a: Hash256): string =
# if a == EMPTY_ROOT_HASH:
# "*blank-root*"
# elif a == EMPTY_SHA3:
# "*empty-sha3*"
# else:
# a.data.mapIt(it.toHex(2)).join[56 .. 63].toLowerAscii
proc pp(q: openArray[BlockHeader]; n: int): string =
result = "["
if 5 < n:
result &= toSeq(q[0 .. 2]).mapIt("#" & $it.blockNumber).join(", ")
result &= " .." & $n & ".. #" & $q[n-1].blockNumber
else:
result &= toSeq(q[0 ..< n]).mapIt("#" & $it.blockNumber).join(", ")
result &= "]"
#proc pp(q: openArray[BlockHeader]; n: int): string =
# result = "["
# if 5 < n:
# result &= toSeq(q[0 .. 2]).mapIt("#" & $it.blockNumber).join(", ")
# result &= " .." & $n & ".. #" & $q[n-1].blockNumber
# else:
# result &= toSeq(q[0 ..< n]).mapIt("#" & $it.blockNumber).join(", ")
# result &= "]"
proc pp(b: BlockNumber, q: openArray[BlockHeader]; n: int): string =
"#" & $b & " + " & q.pp(n)
#proc pp(b: BlockNumber, q: openArray[BlockHeader]; n: int): string =
# "#" & $b & " + " & q.pp(n)
proc pp(q: openArray[BlockHeader]): string =
q.pp(q.len)
#proc pp(q: openArray[BlockHeader]): string =
# q.pp(q.len)
proc pp(b: BlockNumber, q: openArray[BlockHeader]): string =
b.pp(q, q.len)
#proc pp(b: BlockNumber, q: openArray[BlockHeader]): string =
# b.pp(q, q.len)
proc pp(h: BlockHeader, q: openArray[BlockHeader]; n: int): string =
"headers=(" & h.blockNumber.pp(q,n) & ")"
#proc pp(h: BlockHeader, q: openArray[BlockHeader]; n: int): string =
# "headers=(" & h.blockNumber.pp(q,n) & ")"
proc pp(h: BlockHeader, q: openArray[BlockHeader]): string =
h.pp(q,q.len)
#proc pp(h: BlockHeader, q: openArray[BlockHeader]): string =
# h.pp(q,q.len)
proc pp(t: var LocalPath; w: var LocalSubChain): string =
var (a, b) = (w.first, w.top)
if a == 0 and b == 0: b = t.chain.len
"trail=(#" & $t.snaps.blockNumber & " + " & t.chain[a ..< b].pp & ")"
#proc pp(t: var LocalPath; w: var LocalSubChain): string =
# var (a, b) = (w.first, w.top)
# if a == 0 and b == 0: b = t.chain.len
# "trail=(#" & $t.snaps.blockNumber & " + " & t.chain[a ..< b].pp & ")"
proc pp(t: var LocalPath): string =
var w = LocalSubChain()
t.pp(w)
#proc pp(t: var LocalPath): string =
# var w = LocalSubChain()
# t.pp(w)
proc pp(err: CliqueError): string =
"(" & $err[0] & "," & err[1] & ")"
#proc pp(err: CliqueError): string =
# "(" & $err[0] & "," & err[1] & ")"
# ------------------------------------------------------------------------------
# Private helpers
@ -147,8 +150,7 @@ proc isSnapshotPosition(d: var LocalSnaps; number: BlockNumber): bool =
# Private functions
# ------------------------------------------------------------------------------
proc findSnapshot(d: var LocalSnaps): bool
{.gcsafe, raises: [Defect,CatchableError].} =
proc findSnapshot(d: var LocalSnaps): bool =
## Search for a snapshot starting at current header starting at the pivot
## value `d.start`. The snapshot returned in `trail` is a clone of the
## cached snapshot and can be modified later.
@ -232,7 +234,7 @@ proc findSnapshot(d: var LocalSnaps): bool
proc applyTrail(d: var LocalSnaps): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Apply any `trail` headers on top of the snapshot `snap`
if d.subChn.first < d.subChn.top:
block:
@ -263,7 +265,7 @@ proc applyTrail(d: var LocalSnaps): CliqueOkResult
proc updateSnapshot(d: var LocalSnaps): SnapshotResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Find snapshot for header `d.start.header` and assign it to the LRU cache.
## This function expects that the LRU cache already has a slot allocated
## for the snapshot having run `getLruSnaps()`.
@ -323,7 +325,7 @@ proc updateSnapshot(d: var LocalSnaps): SnapshotResult
proc cliqueSnapshotSeq*(c: Clique; header: BlockHeader;
parents: var seq[BlockHeader]): SnapshotResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Create authorisation state snapshot of a given point in the block chain
## and store it in the `Clique` descriptor to be retrievable as `c.snapshot`
## if successful.
@ -359,7 +361,7 @@ proc cliqueSnapshotSeq*(c: Clique; header: BlockHeader;
proc cliqueSnapshotSeq*(c: Clique; hash: Hash256;
parents: var seq[BlockHeader]): SnapshotResult
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Create authorisation state snapshot of a given point in the block chain
## and store it in the `Clique` descriptor to be retrievable as `c.snapshot`
## if successful.
@ -400,24 +402,24 @@ proc cliqueSnapshotSeq*(c: Clique; hash: Hash256;
# clique/clique.go(369): func (c *Clique) snapshot(chain [..]
proc cliqueSnapshot*(c: Clique; header: BlockHeader;
parents: var seq[BlockHeader]): SnapshotResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
var list = toSeq(parents)
c.cliqueSnapshotSeq(header,list)
proc cliqueSnapshot*(c: Clique;hash: Hash256;
parents: openArray[BlockHeader]): SnapshotResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
var list = toSeq(parents)
c.cliqueSnapshotSeq(hash,list)
proc cliqueSnapshot*(c: Clique; header: BlockHeader): SnapshotResult
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Short for `cliqueSnapshot(c,header,@[])`
var blind: seq[BlockHeader]
c.cliqueSnapshotSeq(header, blind)
proc cliqueSnapshot*(c: Clique; hash: Hash256): SnapshotResult
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Short for `cliqueSnapshot(c,hash,@[])`
var blind: seq[BlockHeader]
c.cliqueSnapshot(hash, blind)

View File

@ -34,7 +34,7 @@ import
chronicles,
stew/results
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "clique PoA verify header"
@ -45,7 +45,7 @@ logScope:
# consensus/misc/forks.go(30): func VerifyForkHashes(config [..]
proc verifyForkHashes(com: CommonRef; header: BlockHeader): CliqueOkResult
{.gcsafe, raises: [Defect,ValueError].} =
{.gcsafe, raises: [ValueError].} =
## Verify that blocks conforming to network hard-forks do have the correct
## hashes, to avoid clients going off on different chains.
@ -95,7 +95,7 @@ proc inTurn*(s: Snapshot; number: BlockNumber, signer: EthAddress): bool =
# clique/clique.go(463): func (c *Clique) verifySeal(chain [..]
proc verifySeal(c: Clique; header: BlockHeader): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Check whether the signature contained in the header satisfies the
## consensus protocol requirements. The method accepts an optional list of
## parent headers that aren't yet part of the local blockchain to generate
@ -141,7 +141,7 @@ proc verifySeal(c: Clique; header: BlockHeader): CliqueOkResult
# clique/clique.go(314): func (c *Clique) verifyCascadingFields(chain [..]
proc verifyCascadingFields(c: Clique; com: CommonRef; header: BlockHeader;
parents: var seq[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Verify all the header fields that are not standalone, rather depend on a
## batch of previous headers. The caller may optionally pass in a batch of
## parents (ascending order) to avoid looking those up from the database.
@ -201,8 +201,7 @@ proc verifyCascadingFields(c: Clique; com: CommonRef; header: BlockHeader;
return c.verifySeal(header)
proc verifyHeaderFields(c: Clique; header: BlockHeader): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
proc verifyHeaderFields(c: Clique; header: BlockHeader): CliqueOkResult =
## Check header fields, the ones that do not depend on a parent block.
# clique/clique.go(250): number := header.Number.Uint64()
@ -267,7 +266,7 @@ proc verifyHeaderFields(c: Clique; header: BlockHeader): CliqueOkResult
# clique/clique.go(246): func (c *Clique) verifyHeader(chain [..]
proc cliqueVerifyImpl(c: Clique; com: CommonRef; header: BlockHeader;
parents: var seq[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Check whether a header conforms to the consensus rules. The caller may
## optionally pass in a batch of parents (ascending order) to avoid looking
## those up from the database. This is useful for concurrently verifying
@ -295,7 +294,7 @@ proc cliqueVerifyImpl(c: Clique; com: CommonRef; header: BlockHeader;
proc cliqueVerifySeq*(c: Clique; com: CommonRef; header: BlockHeader;
parents: var seq[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Check whether a header conforms to the consensus rules. The caller may
## optionally pass in a batch of parents (ascending order) to avoid looking
## those up from the database. This is useful for concurrently verifying
@ -324,7 +323,7 @@ proc cliqueVerifySeq*(c: Clique; com: CommonRef; header: BlockHeader;
proc cliqueVerifySeq(c: Clique; com: CommonRef;
headers: var seq[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## This function verifies a batch of headers checking each header for
## consensus rules conformance. The `headers` list is supposed to
## contain a chain of headers, i.e. `headers[i]` is parent to `headers[i+1]`.
@ -369,7 +368,7 @@ proc cliqueVerifySeq(c: Clique; com: CommonRef;
proc cliqueVerify*(c: Clique; com: CommonRef; header: BlockHeader;
parents: openArray[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Check whether a header conforms to the consensus rules. The caller may
## optionally pass on a batch of parents (ascending order) to avoid looking
## those up from the database. This function updates the list of authorised
@ -391,14 +390,14 @@ proc cliqueVerify*(c: Clique; com: CommonRef; header: BlockHeader;
# clique/clique.go(217): func (c *Clique) VerifyHeader(chain [..]
proc cliqueVerify*(c: Clique; com: CommonRef; header: BlockHeader): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Consensus rules verifier without optional parents list.
var blind: seq[BlockHeader]
c.cliqueVerifySeq(com, header, blind)
proc cliqueVerify*(c: Clique; com: CommonRef;
headers: openArray[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## This function verifies a batch of headers checking each header for
## consensus rules conformance (see also the other `cliqueVerify()` function
## instance.) The `headers` list is supposed to contain a chain of headers,

View File

@ -45,7 +45,7 @@ type
authRemoved: bool ## last `addVote()` action was removing an
## authorised signer from the `authSig` list
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Public debugging/pretty-printer support
@ -115,7 +115,7 @@ proc isAuthSigner*(t: var Ballot; address: EthAddress): bool =
address in t.authSig
proc delVote*(t: var Ballot; signer, address: EthAddress) {.
gcsafe, raises: [Defect,KeyError].} =
gcsafe, raises: [KeyError].} =
## Remove a particular previously added vote.
if address in t.votes:
if signer in t.votes[address].signers:
@ -137,7 +137,7 @@ proc isValidVote*(t: var Ballot; address: EthAddress; authorize: bool): bool =
proc addVote*(t: var Ballot; vote: Vote) {.
gcsafe, raises: [Defect,KeyError].} =
gcsafe, raises: [KeyError].} =
## Add a new vote collecting the signers for the particular voting address.
##
## Unless it is the first vote for this address, the authorisation type

View File

@ -19,14 +19,14 @@
##
import
std/[strutils, times],
std/times,
chronicles,
eth/[common, rlp],
eth/common,
stew/results,
".."/[clique_cfg, clique_defs],
"."/[ballot, snapshot_desc]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "clique PoA snapshot-apply"
@ -66,7 +66,7 @@ template doWalkIt(first, last: int; code: untyped) =
# clique/snapshot.go(185): func (s *Snapshot) apply(headers [..]
proc snapshotApplySeq*(s: Snapshot; headers: var seq[BlockHeader],
first, last: int): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Initialises an authorization snapshot `snap` by applying the `headers`
## to the argument snapshot descriptor `s`.
@ -172,7 +172,7 @@ proc snapshotApplySeq*(s: Snapshot; headers: var seq[BlockHeader],
proc snapshotApply*(s: Snapshot; headers: var seq[BlockHeader]): CliqueOkResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
if headers.len == 0:
return ok()
s.snapshotApplySeq(headers, 0, headers.len - 1)

View File

@ -52,7 +52,7 @@ type
cfg: CliqueCfg ## parameters to fine tune behavior
data*: SnapshotData ## real snapshot
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "clique PoA snapshot"
@ -67,7 +67,7 @@ proc append[K,V](rw: var RlpWriter; tab: Table[K,V]) =
rw.append((key,value))
proc read[K,V](rlp: var Rlp;
Q: type Table[K,V]): Q {.raises: [Defect,CatchableError].} =
Q: type Table[K,V]): Q {.gcsafe, raises: [CatchableError].} =
for w in rlp.items:
let (key,value) = w.read((K,V))
result[key] = value

View File

@ -12,10 +12,12 @@ import
stew/results,
../common/common
{.push raises: [].}
# https://eips.ethereum.org/EIPS/eip-4844
func validateEip4844Header*(
com: CommonRef, header: BlockHeader
): Result[void, string] {.raises: [Defect].} =
): Result[void, string] =
if header.excessDataGas.isSome:
return err("EIP-4844 not yet implemented")
return ok()

View File

@ -14,12 +14,10 @@ import
../../vm_state,
../../vm_types
{.push raises: [Defect].}
{.push raises: [].}
proc calculateReward*(vmState: BaseVMState; account: EthAddress;
number: BlockNumber; uncles: openArray[BlockHeader])
{.gcsafe, raises: [Defect,CatchableError].} =
number: BlockNumber; uncles: openArray[BlockHeader]) =
let blockReward = vmState.com.blockReward()
var mainReward = blockReward
@ -37,8 +35,7 @@ proc calculateReward*(vmState: BaseVMState; account: EthAddress;
proc calculateReward*(vmState: BaseVMState;
header: BlockHeader; body: BlockBody)
{.gcsafe, raises: [Defect,CatchableError].} =
header: BlockHeader; body: BlockBody) =
vmState.calculateReward(header.coinbase, header.blockNumber, body.uncles)
# End

View File

@ -25,7 +25,7 @@ type
Bloom = common.BloomFilter
LogsBloom = bloom.BloomFilter
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private functions
@ -58,8 +58,7 @@ func createBloom*(receipts: openArray[Receipt]): Bloom =
bloom.value = bloom.value or logsBloom(rec.logs).value
result = bloom.value.toByteArrayBE
proc makeReceipt*(vmState: BaseVMState; txType: TxType): Receipt
{.gcsafe, raises: [Defect,CatchableError].} =
proc makeReceipt*(vmState: BaseVMState; txType: TxType): Receipt =
var rec: Receipt
if vmState.com.forkGTE(Byzantium):

View File

@ -24,7 +24,7 @@ import
chronicles,
stew/results
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private functions
@ -32,7 +32,7 @@ import
proc procBlkPreamble(vmState: BaseVMState;
header: BlockHeader; body: BlockBody): bool
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
if vmState.com.daoForkSupport and
vmState.com.daoForkBlock.get == header.blockNumber:
@ -82,7 +82,7 @@ proc procBlkPreamble(vmState: BaseVMState;
proc procBlkEpilogue(vmState: BaseVMState;
header: BlockHeader; body: BlockBody): bool
{.gcsafe, raises: [Defect,RlpError].} =
{.gcsafe, raises: [RlpError].} =
# Reward beneficiary
vmState.mutateStateDB:
if vmState.generateWitness:
@ -122,7 +122,7 @@ proc processBlockNotPoA*(
vmState: BaseVMState; ## Parent environment of header/body block
header: BlockHeader; ## Header/body block to add to the blockchain
body: BlockBody): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Processes `(header,body)` pair for a non-PoA network, only. This function
## will fail when applied to a PoA network like `Goerli`.
if vmState.com.consensus == ConsensusType.POA:
@ -159,7 +159,7 @@ proc processBlock*(
poa: Clique; ## PoA descriptor (if needed, at all)
header: BlockHeader; ## Header/body block to add to the blockchain
body: BlockBody): ValidationResult
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Generalised function to processes `(header,body)` pair for any network,
## regardless of PoA or not. Currently there is no mining support so this
## function is mostly the same as `processBlockNotPoA()`.

View File

@ -21,7 +21,7 @@ import
chronicles,
stew/results
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private functions
@ -115,7 +115,7 @@ proc processTransaction*(
sender: EthAddress; ## tx.getSender or tx.ecRecover
header: BlockHeader; ## Header for the block containing the current tx
fork: EVMFork): Result[GasInt,void]
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Process the transaction, write the results to accounts db. The function
## returns the amount of gas burned if executed.
safeExecutor("processTransaction"):
@ -126,7 +126,7 @@ proc processTransaction*(
tx: Transaction; ## Transaction to validate
sender: EthAddress; ## tx.getSender or tx.ecRecover
header: BlockHeader): Result[GasInt,void]
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Variant of `processTransaction()` with `*fork* derived
## from the `vmState` argument.
let fork = vmState.com.toEVMFork(header.blockNumber)

View File

@ -17,12 +17,13 @@ import
export
eip1559
{.push raises: [].}
# ------------------------------------------------------------------------------
# Pre Eip 1559 gas limit validation
# ------------------------------------------------------------------------------
proc validateGasLimit(header: BlockHeader; limit: GasInt): Result[void, string]
{.raises: [Defect].} =
proc validateGasLimit(header: BlockHeader; limit: GasInt): Result[void,string] =
let diff = if limit > header.gasLimit:
limit - header.gasLimit
else:
@ -40,8 +41,7 @@ proc validateGasLimit(header: BlockHeader; limit: GasInt): Result[void, string]
return err("invalid gas limit below 5000")
ok()
proc validateGasLimit(com: CommonRef; header: BlockHeader): Result[void, string]
{.raises: [Defect].} =
proc validateGasLimit(com: CommonRef; header: BlockHeader): Result[void, string] =
let parent = try:
com.db.getBlockHeader(header.parentHash)
except CatchableError:
@ -66,7 +66,7 @@ proc calcEip1599BaseFee*(com: CommonRef; parent: BlockHeader): UInt256 =
# consensus/misc/eip1559.go(32): func VerifyEip1559Header(config [..]
proc verifyEip1559Header(com: CommonRef;
parent, header: BlockHeader): Result[void, string]
{.raises: [Defect].} =
{.raises: [].} =
## Verify that the gas limit remains within allowed bounds
let limit = if com.isLondon(parent.blockNumber):
parent.gasLimit
@ -96,8 +96,7 @@ proc verifyEip1559Header(com: CommonRef;
return ok()
proc validateGasLimitOrBaseFee*(com: CommonRef;
header, parent: BlockHeader): Result[void, string]
{.gcsafe, raises: [Defect].} =
header, parent: BlockHeader): Result[void, string] =
if not com.isLondon(header.blockNumber):
# Verify BaseFee not present before EIP-1559 fork.

View File

@ -20,7 +20,7 @@ import
ethash,
stint
{.push raises: [Defect].}
{.push raises: [].}
type
PowDigest = tuple ##\
@ -77,7 +77,7 @@ proc append(w: var RlpWriter; specs: PowSpecs) =
w.append(specs.difficulty)
proc read(rlp: var Rlp; Q: type PowSpecs): Q
{.raises: [Defect,RlpError].} =
{.raises: [RlpError].} =
## RLP support
rlp.tryEnterList()
result.blockNumber = rlp.read(HashOrNum).number
@ -90,7 +90,7 @@ proc rlpTextEncode(specs: PowSpecs): string =
"specs #" & $specs.blockNumber & " " & rlp.encode(specs).toHex
proc decodeRlpText(data: string): PowSpecs
{.raises: [Defect,CatchableError].} =
{.raises: [CatchableError].} =
if 180 < data.len and data[0 .. 6] == "specs #":
let hexData = data.split
if hexData.len == 3:
@ -132,8 +132,7 @@ proc tryNonceFull(nonce: uint64;
return value
proc mineFull(tm: PowRef; blockNumber: BlockNumber; powHeaderDigest: Hash256,
difficulty: DifficultyInt; startNonce: BlockNonce): uint64
{.gcsafe,raises: [Defect,CatchableError].} =
difficulty: DifficultyInt; startNonce: BlockNonce): uint64 =
## Returns a valid nonce. This function was inspired by the function
## python function `mine()` from
## `ethash <https://eth.wiki/en/concepts/ethash/ethash>`_.
@ -226,7 +225,7 @@ proc getPowSpecs*(header: BlockHeader): PowSpecs =
proc getPowCacheLookup*(tm: PowRef;
blockNumber: BlockNumber): (uint64, Hash256)
{.gcsafe, raises: [KeyError, Defect, CatchableError].} =
{.gcsafe, raises: [KeyError].} =
## Returns the pair `(size,digest)` derived from the lookup cache for the
## `hashimotoLight()` function for the given block number. The `size` is the
## full size of the dataset (the cache represents) as passed on to the
@ -247,8 +246,7 @@ proc getPowCacheLookup*(tm: PowRef;
# ------------------------
proc getPowDigest*(tm: PowRef; blockNumber: BlockNumber;
powHeaderDigest: Hash256; nonce: BlockNonce): PowDigest
{.gcsafe,raises: [Defect,CatchableError].} =
powHeaderDigest: Hash256; nonce: BlockNonce): PowDigest =
## Calculate the expected value of `header.mixDigest` using the
## `hashimotoLight()` library method.
let
@ -256,21 +254,18 @@ proc getPowDigest*(tm: PowRef; blockNumber: BlockNumber;
u64Nonce = uint64.fromBytesBE(nonce)
hashimotoLight(ds.size, ds.data, powHeaderDigest, u64Nonce)
proc getPowDigest*(tm: PowRef; header: BlockHeader): PowDigest
{.gcsafe,raises: [Defect,CatchableError].} =
proc getPowDigest*(tm: PowRef; header: BlockHeader): PowDigest =
## Variant of `getPowDigest()`
tm.getPowDigest(header.blockNumber, header.miningHash, header.nonce)
proc getPowDigest*(tm: PowRef; specs: PowSpecs): PowDigest
{.gcsafe,raises: [Defect,CatchableError].} =
proc getPowDigest*(tm: PowRef; specs: PowSpecs): PowDigest =
## Variant of `getPowDigest()`
tm.getPowDigest(specs.blockNumber, specs.miningHash, specs.nonce)
# ------------------
proc getNonce*(tm: PowRef; number: BlockNumber; powHeaderDigest: Hash256;
difficulty: DifficultyInt; startNonce: BlockNonce): BlockNonce
{.gcsafe,raises: [Defect,CatchableError].} =
difficulty: DifficultyInt; startNonce: BlockNonce): BlockNonce =
## Mining function that calculates the value of a `nonce` satisfying the
## difficulty challenge. This is the most basic function of the
## `getNonce()` series with explicit argument `startNonce`. If this is
@ -286,20 +281,17 @@ proc getNonce*(tm: PowRef; number: BlockNumber; powHeaderDigest: Hash256;
tm.mineFull(number, powHeaderDigest, difficulty, startNonce).toBytesBE
proc getNonce*(tm: PowRef; number: BlockNumber; powHeaderDigest: Hash256;
difficulty: DifficultyInt): BlockNonce
{.gcsafe,raises: [Defect,CatchableError].} =
difficulty: DifficultyInt): BlockNonce =
## Variant of `getNonce()`
var startNonce: array[8,byte]
tm.rng[].generate(startNonce)
tm.getNonce(number, powHeaderDigest, difficulty, startNonce)
proc getNonce*(tm: PowRef; header: BlockHeader): BlockNonce
{.gcsafe,raises: [Defect,CatchableError].} =
proc getNonce*(tm: PowRef; header: BlockHeader): BlockNonce =
## Variant of `getNonce()`
tm.getNonce(header.blockNumber, header.miningHash, header.difficulty)
proc getNonce*(tm: PowRef; specs: PowSpecs): BlockNonce
{.gcsafe,raises: [Defect,CatchableError].} =
proc getNonce*(tm: PowRef; specs: PowSpecs): BlockNonce =
## Variant of `getNonce()`
tm.getNonce(specs.blockNumber, specs.miningHash, specs.difficulty)
@ -310,8 +302,7 @@ proc nGetNonce*(tm: PowRef): uint64 =
# ------------------
proc generatePowDataset*(tm: PowRef; number: BlockNumber)
{.gcsafe,raises: [Defect,CatchableError].} =
proc generatePowDataset*(tm: PowRef; number: BlockNumber) =
## Prepare dataset for the `getNonce()` mining function. This dataset
## changes with the epoch of the argument `number` so it is applicable for
## the full epoch. If not generated explicitely, it will be done so by the
@ -337,7 +328,7 @@ proc dumpPowSpecs*(header: BlockHeader): string =
header.getPowSpecs.dumpPowSpecs
proc undumpPowSpecs*(data: string): PowSpecs
{.raises: [Defect,CatchableError].} =
{.raises: [CatchableError].} =
## Recover `PowSpecs` object from text representation
data.decodeRlpText

View File

@ -8,7 +8,6 @@
import
std/[strformat, times],
eth/[rlp],
./difficulty
export BlockHeader

View File

@ -19,7 +19,7 @@ import
ethash,
stew/keyed_queue
{.push raises: [Defect].}
{.push raises: [].}
type
PowCacheItemRef* = ref object
@ -69,8 +69,7 @@ proc new*(T: type PowCacheRef; maxItems = nItemsMax): T =
# Public functions, constructor
# ------------------------------------------------------------------------------
proc get*(pc: var PowCache; bn: BlockNumber): PowCacheItemRef
{.gcsafe, raises: [Defect, CatchableError].} =
proc get*(pc: var PowCache; bn: BlockNumber): PowCacheItemRef =
## Return a cache derived from argument `blockNumber` ready to be used
## for the `hashimotoLight()` method.
let
@ -90,18 +89,15 @@ proc get*(pc: var PowCache; bn: BlockNumber): PowCacheItemRef
pc.cache.lruAppend(key, pair, pc.cacheMax)
proc get*(pcr: PowCacheRef; bn: BlockNumber): PowCacheItemRef
{.gcsafe, raises: [Defect, CatchableError].} =
proc get*(pcr: PowCacheRef; bn: BlockNumber): PowCacheItemRef =
## Variant of `getCache()`
pcr[].get(bn)
proc hasItem*(pc: var PowCache; bn: BlockNumber): bool
{.gcsafe,raises: [Defect,CatchableError].} =
proc hasItem*(pc: var PowCache; bn: BlockNumber): bool =
## Returns true if there is a cache entry for argument `bn`.
pc.cache.hasKey(bn.toKey)
proc hasItem*(pcr: PowCacheRef; bn: BlockNumber): bool
{.gcsafe,raises: [Defect,CatchableError].} =
proc hasItem*(pcr: PowCacheRef; bn: BlockNumber): bool =
## Variant of `hasItem()`
pcr[].hasItem(bn)

View File

@ -21,7 +21,7 @@ import
ethash,
stew/keyed_queue
{.push raises: [Defect].}
{.push raises: [].}
type
PowDatasetItemRef* = ref object
@ -102,7 +102,7 @@ proc new*(T: type PowDatasetRef; maxItems = nItemsMax): T =
# ------------------------------------------------------------------------------
proc get*(pd: var PowDataset; bn: BlockNumber): PowDatasetItemRef
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [].} =
## Return a cache derived from argument `blockNumber` ready to be used
## for the `hashimotoLight()` method.
let
@ -115,7 +115,7 @@ proc get*(pd: var PowDataset; bn: BlockNumber): PowDatasetItemRef
let
# note that `getDataSize()` and `getCacheSize()` depend on
# `key * EPOCH_LENGTH` rather than the original block number.
top = key * EPOCH_LENGTH
# top = key * EPOCH_LENGTH -- notused
cache = pd.cache.get(bn)
pair = PowDatasetItemRef(
size: cache.size,
@ -124,18 +124,18 @@ proc get*(pd: var PowDataset; bn: BlockNumber): PowDatasetItemRef
pd.dataset.lruAppend(key, pair, pd.datasetMax)
proc get*(pdr: PowDatasetRef; bn: BlockNumber): PowDatasetItemRef
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [].} =
## Variant of `getCache()`
pdr[].get(bn)
proc hasItem*(pd: var PowDataset; bn: BlockNumber): bool
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [].} =
##Returns true if there is a cache entry for argument `bn`.
pd.dataset.hasKey(bn.toKey)
proc hasItem*(pdr: PowDatasetRef; bn: BlockNumber): bool
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [].} =
## Variant of `hasItem()`
pdr[].hasItem(bn)

View File

@ -9,7 +9,7 @@
# according to those terms.
import
std/[times, tables, typetraits],
std/[times, typetraits],
pkg/[chronos,
stew/results,
chronicles,
@ -22,7 +22,7 @@ import
tx_pool,
casper,
validate],
"."/clique/[clique_defs,
"."/clique/[
clique_desc,
clique_cfg,
clique_sealer],

View File

@ -461,7 +461,7 @@ export
tx_tabs.local,
tx_tabs.remote
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "tx-pool"
@ -471,7 +471,7 @@ logScope:
# ------------------------------------------------------------------------------
proc maintenanceProcessing(xp: TxPoolRef)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Tasks to be done after add/del txs processing
# Purge expired items
@ -490,7 +490,7 @@ proc maintenanceProcessing(xp: TxPoolRef)
xp.pDirtyBuckets = false
proc setHead(xp: TxPoolRef; val: BlockHeader)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Update cached block chain insertion point. This will also update the
## internally cached `baseFee` (depends on the block chain state.)
if xp.chain.head != val:
@ -504,7 +504,7 @@ proc setHead(xp: TxPoolRef; val: BlockHeader)
# ------------------------------------------------------------------------------
proc new*(T: type TxPoolRef; com: CommonRef; miner: EthAddress): T
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Constructor, returns a new tx-pool descriptor. The `miner` argument is
## the fee beneficiary for informational purposes only.
new result
@ -517,7 +517,7 @@ proc new*(T: type TxPoolRef; com: CommonRef; miner: EthAddress): T
# core/tx_pool.go(848): func (pool *TxPool) AddLocals(txs []..
# core/tx_pool.go(864): func (pool *TxPool) AddRemotes(txs []..
proc add*(xp: TxPoolRef; txs: openArray[Transaction]; info = "")
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Add a list of transactions to be processed and added to the buckets
## database. It is OK pass an empty list in which case some maintenance
## check can be forced.
@ -533,12 +533,12 @@ proc add*(xp: TxPoolRef; txs: openArray[Transaction]; info = "")
# core/tx_pool.go(854): func (pool *TxPool) AddLocals(txs []..
# core/tx_pool.go(883): func (pool *TxPool) AddRemotes(txs []..
proc add*(xp: TxPoolRef; tx: Transaction; info = "")
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Variant of `add()` for a single transaction.
xp.add(@[tx], info)
proc smartHead*(xp: TxPoolRef; pos: BlockHeader; blindMode = false): bool
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## This function moves the internal head cache (i.e. tx insertion point,
## vmState) and ponts it to a now block on the chain.
##
@ -582,7 +582,7 @@ proc smartHead*(xp: TxPoolRef; pos: BlockHeader; blindMode = false): bool
return true
proc triggerReorg*(xp: TxPoolRef)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## This function triggers a tentative bucket re-org action by setting the
## `dirtyBuckets` parameter. This re-org action eventually happens only if
## the `autoUpdateBucketsDB` flag is also set.
@ -603,7 +603,7 @@ proc dirtyBuckets*(xp: TxPoolRef): bool =
xp.pDirtyBuckets
proc ethBlock*(xp: TxPoolRef): EthBlock
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Getter, retrieves a packed block ready for mining and signing depending
## on the internally cached block chain head, the txs in the pool and some
## tuning parameters. The following block header fields are left
@ -621,14 +621,12 @@ proc ethBlock*(xp: TxPoolRef): EthBlock
for (_,nonceList) in xp.txDB.packingOrderAccounts(txItemPacked):
result.txs.add toSeq(nonceList.incNonce).mapIt(it.tx)
proc gasCumulative*(xp: TxPoolRef): GasInt
{.gcsafe,raises: [Defect,CatchableError].} =
proc gasCumulative*(xp: TxPoolRef): GasInt =
## Getter, retrieves the gas that will be burned in the block after
## retrieving it via `ethBlock`.
xp.chain.gasUsed
proc gasTotals*(xp: TxPoolRef): TxTabsGasTotals
{.gcsafe,raises: [Defect,CatchableError].} =
proc gasTotals*(xp: TxPoolRef): TxTabsGasTotals =
## Getter, retrieves the current gas limit totals per bucket.
xp.txDB.gasTotals
@ -679,8 +677,7 @@ proc minTipPrice*(xp: TxPoolRef): GasPrice =
# core/tx_pool.go(1728): func (t *txLookup) Count() int {
# core/tx_pool.go(1737): func (t *txLookup) LocalCount() int {
# core/tx_pool.go(1745): func (t *txLookup) RemoteCount() int {
proc nItems*(xp: TxPoolRef): TxTabsItemsCount
{.gcsafe,raises: [Defect,CatchableError].} =
proc nItems*(xp: TxPoolRef): TxTabsItemsCount =
## Getter, retrieves the current number of items per bucket and
## some totals.
xp.txDB.nItems
@ -706,7 +703,7 @@ proc trgGasLimit*(xp: TxPoolRef): GasInt =
# ------------------------------------------------------------------------------
proc `baseFee=`*(xp: TxPoolRef; val: GasPrice)
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Setter, sets `baseFee` explicitely witout triggering a packer update.
## Stil a database update might take place when updating account ranks.
##
@ -726,8 +723,7 @@ proc `lwmTrgPercent=`*(xp: TxPoolRef; val: int) =
gasCeil: xp.chain.lhwm.gasCeil
)
proc `flags=`*(xp: TxPoolRef; val: set[TxPoolFlags])
{.gcsafe,raises: [Defect,CatchableError].} =
proc `flags=`*(xp: TxPoolRef; val: set[TxPoolFlags]) =
## Setter, strategy symbols for how to process items and buckets.
xp.pFlags = val
@ -747,8 +743,7 @@ proc `maxRejects=`*(xp: TxPoolRef; val: int) =
xp.txDB.maxRejects = val
# core/tx_pool.go(444): func (pool *TxPool) SetGasPrice(price *big.Int) {
proc `minFeePrice=`*(xp: TxPoolRef; val: GasPrice)
{.gcsafe,raises: [Defect,CatchableError].} =
proc `minFeePrice=`*(xp: TxPoolRef; val: GasPrice) =
## Setter for `minFeePrice`. If there was a value change, this function
## implies `triggerReorg()`.
if xp.pMinFeePrice != val:
@ -777,15 +772,14 @@ proc `minTipPrice=`*(xp: TxPoolRef; val: GasPrice) =
# core/tx_pool.go(979): func (pool *TxPool) Get(hash common.Hash) ..
# core/tx_pool.go(985): func (pool *TxPool) Has(hash common.Hash) bool {
proc getItem*(xp: TxPoolRef; hash: Hash256): Result[TxItemRef,void]
{.gcsafe,raises: [Defect,CatchableError].} =
proc getItem*(xp: TxPoolRef; hash: Hash256): Result[TxItemRef,void] =
## Returns a transaction if it is contained in the pool.
xp.txDB.byItemID.eq(hash)
proc disposeItems*(xp: TxPoolRef; item: TxItemRef;
reason = txInfoExplicitDisposal;
otherReason = txInfoImpliedDisposal): int
{.discardable,gcsafe,raises: [Defect,CatchableError].} =
{.discardable,gcsafe,raises: [CatchableError].} =
## Move item to wastebasket. All items for the same sender with nonces
## greater than the current one are deleted, as well. The function returns
## the number of items eventally removed.
@ -835,7 +829,7 @@ proc accountRanks*(xp: TxPoolRef): TxTabsLocality =
proc addRemote*(xp: TxPoolRef;
tx: Transaction; force = false): Result[void,TxInfo]
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Adds the argument transaction `tx` to the buckets database.
##
## If the argument `force` is set `false` and the sender account of the
@ -869,7 +863,7 @@ proc addRemote*(xp: TxPoolRef;
proc addLocal*(xp: TxPoolRef;
tx: Transaction; force = false): Result[void,TxInfo]
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Adds the argument transaction `tx` to the buckets database.
##
## If the argument `force` is set `false` and the sender account of the

View File

@ -13,7 +13,7 @@
##
import
std/[sets, times],
std/times,
../../common/common,
../../constants,
../../db/accounts_cache,
@ -31,7 +31,7 @@ export
TxChainGasLimits,
TxChainGasLimitsPc
{.push raises: [Defect].}
{.push raises: [].}
const
TRG_THRESHOLD_PER_CENT = ##\
@ -71,7 +71,7 @@ type
# Private functions
# ------------------------------------------------------------------------------
proc prepareHeader(dh: TxChainRef; parent: BlockHeader)
{.gcsafe, raises: [Defect, CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
case dh.com.consensus
of ConsensusType.POW:
@ -90,7 +90,7 @@ proc prepareHeader(dh: TxChainRef; parent: BlockHeader)
of ConsensusType.POS:
dh.com.pos.prepare(dh.prepHeader)
proc prepareForSeal(dh: TxChainRef; header: var BlockHeader) =
proc prepareForSeal(dh: TxChainRef; header: var BlockHeader) {.gcsafe, raises: [].} =
case dh.com.consensus
of ConsensusType.POW:
# do nothing, tx pool was designed with POW in mind
@ -101,7 +101,7 @@ proc prepareForSeal(dh: TxChainRef; header: var BlockHeader) =
dh.com.pos.prepareForSeal(header)
proc resetTxEnv(dh: TxChainRef; parent: BlockHeader; fee: Option[UInt256])
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
dh.txEnv.reset
# do hardfork transition before
@ -125,7 +125,7 @@ proc resetTxEnv(dh: TxChainRef; parent: BlockHeader; fee: Option[UInt256])
dh.txEnv.stateRoot = dh.txEnv.vmState.parent.stateRoot
proc update(dh: TxChainRef; parent: BlockHeader)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
let
db = dh.com.db
@ -146,7 +146,7 @@ proc update(dh: TxChainRef; parent: BlockHeader)
# ------------------------------------------------------------------------------
proc new*(T: type TxChainRef; com: CommonRef; miner: EthAddress): T
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Constructor
new result
@ -162,8 +162,7 @@ proc new*(T: type TxChainRef; com: CommonRef; miner: EthAddress): T
# Public functions
# ------------------------------------------------------------------------------
proc getBalance*(dh: TxChainRef; account: EthAddress): UInt256
{.gcsafe,raises: [Defect,CatchableError].} =
proc getBalance*(dh: TxChainRef; account: EthAddress): UInt256 =
## Wrapper around `vmState.readOnlyStateDB.getBalance()` for a `vmState`
## descriptor positioned at the `dh.head`. This might differ from the
## `dh.vmState.readOnlyStateDB.getBalance()` which returnes the current
@ -171,8 +170,7 @@ proc getBalance*(dh: TxChainRef; account: EthAddress): UInt256
## procedure.
dh.roAcc.getBalance(account)
proc getNonce*(dh: TxChainRef; account: EthAddress): AccountNonce
{.gcsafe,raises: [Defect,CatchableError].} =
proc getNonce*(dh: TxChainRef; account: EthAddress): AccountNonce =
## Wrapper around `vmState.readOnlyStateDB.getNonce()` for a `vmState`
## descriptor positioned at the `dh.head`. This might differ from the
## `dh.vmState.readOnlyStateDB.getNonce()` which returnes the current balance
@ -180,7 +178,7 @@ proc getNonce*(dh: TxChainRef; account: EthAddress): AccountNonce
dh.roAcc.getNonce(account)
proc getHeader*(dh: TxChainRef): BlockHeader
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Generate a new header, a child of the cached `head`
let gasUsed = if dh.txEnv.receipts.len == 0: 0.GasInt
else: dh.txEnv.receipts[^1].cumulativeGasUsed
@ -206,7 +204,7 @@ proc getHeader*(dh: TxChainRef): BlockHeader
dh.prepareForSeal(result)
proc clearAccounts*(dh: TxChainRef)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Reset transaction environment, e.g. before packing a new block
dh.resetTxEnv(dh.txEnv.vmState.parent, dh.txEnv.vmState.fee)
@ -296,7 +294,7 @@ proc `baseFee=`*(dh: TxChainRef; val: GasPrice) =
dh.txEnv.vmState.fee = UInt256.none()
proc `head=`*(dh: TxChainRef; val: BlockHeader)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Setter, updates descriptor. This setter re-positions the `vmState` and
## account caches to a new insertion point on the block chain database.
dh.update(val)

View File

@ -18,7 +18,7 @@ import
../tx_item,
eth/eip1559
{.push raises: [Defect].}
{.push raises: [].}
const
INITIAL_BASE_FEE = EIP1559_INITIAL_BASE_FEE.truncate(uint64)

View File

@ -19,7 +19,7 @@ import
../../pow/header,
eth/[eip1559]
{.push raises: [Defect].}
{.push raises: [].}
type
TxChainGasLimitsPc* = tuple

View File

@ -22,7 +22,7 @@ import
./tx_tabs/tx_sender, # for verify()
eth/keys
{.push raises: [Defect].}
{.push raises: [].}
type
TxPoolCallBackRecursion* = object of Defect
@ -127,7 +127,7 @@ const
# ------------------------------------------------------------------------------
proc init*(xp: TxPoolRef; com: CommonRef; miner: EthAddress)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Constructor, returns new tx-pool descriptor. The `miner` argument is
## the fee beneficiary for informational purposes only.
xp.startDate = getTime().utc.toTime
@ -221,7 +221,7 @@ proc `pMinPlGasPrice=`*(xp: TxPoolRef; val: GasPrice) =
# ------------------------------------------------------------------------------
proc verify*(xp: TxPoolRef): Result[void,TxInfo]
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Verify descriptor and subsequent data structures.
block:

View File

@ -15,7 +15,7 @@
import
metrics
{.push raises: [Defect].}
{.push raises: [].}
const
# Provide some fall-back counters available for unit tests
@ -82,7 +82,7 @@ declareGauge localGauge, "n/a"
declareGauge slotsGauge, "n/a"
# core/tx_pool.go(129): reheapTimer = metrics.NewRegisteredTimer(..
declareGauge reheapTimer, "n/a"
# declareGauge reheapTimer, "n/a" -- notused
# ----------------------

View File

@ -11,7 +11,7 @@
## Transaction Pool Info Symbols & Error Codes
## ===========================================
{.push raises: [Defect].}
{.push raises: [].}
type
TxInfo* = enum

View File

@ -14,12 +14,12 @@
import
std/[hashes, sequtils, strutils, times],
../../utils/[ec_recover, utils_defs],
../../utils/ec_recover,
./tx_info,
eth/[common, keys],
stew/results
{.push raises: [Defect].}
{.push raises: [].}
type
GasPrice* = ##|
@ -112,7 +112,7 @@ proc init*(item: TxItemRef; status: TxItemStatus; info: string) =
item.reject = txInfoOk
proc new*(T: type TxItemRef; tx: Transaction; itemID: Hash256;
status: TxItemStatus; info: string): Result[T,void] =
status: TxItemStatus; info: string): Result[T,void] {.gcsafe,raises: [].} =
## Create item descriptor.
let rc = tx.ecRecover
if rc.isErr:
@ -125,7 +125,7 @@ proc new*(T: type TxItemRef; tx: Transaction; itemID: Hash256;
status: status))
proc new*(T: type TxItemRef; tx: Transaction;
reject: TxInfo; status: TxItemStatus; info: string): T =
reject: TxInfo; status: TxItemStatus; info: string): T {.gcsafe,raises: [].} =
## Create incomplete item descriptor, so meta-data can be stored (e.g.
## for holding in the waste basket to be investigated later.)
T(tx: tx,

View File

@ -20,11 +20,11 @@ import
eth/[common, keys],
stew/[keyed_queue, keyed_queue/kq_debug, results, sorted_set]
{.push raises: [Defect].}
{.push raises: [].}
export
# bySender/byStatus index operations
any, eq, ge, gt, le, len, lt, nItems, gasLimits
sub, eq, ge, gt, le, len, lt, nItems, gasLimits
type
TxTabsItemsCount* = tuple
@ -82,7 +82,7 @@ const
# ------------------------------------------------------------------------------
proc deleteImpl(xp: TxTabsRef; item: TxItemRef): bool
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Delete transaction (and wrapping container) from the database. If
## successful, the function returns the wrapping container that was just
## removed.
@ -100,7 +100,7 @@ proc deleteImpl(xp: TxTabsRef; item: TxItemRef): bool
return true
proc insertImpl(xp: TxTabsRef; item: TxItemRef): Result[void,TxInfo]
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
if not xp.bySender.insert(item):
return err(txInfoErrSenderNonceIndex)
@ -118,7 +118,7 @@ proc insertImpl(xp: TxTabsRef; item: TxItemRef): Result[void,TxInfo]
# Public functions, constructor
# ------------------------------------------------------------------------------
proc new*(T: type TxTabsRef): T =
proc new*(T: type TxTabsRef): T {.gcsafe,raises: [].} =
## Constructor, returns new tx-pool descriptor.
new result
result.maxRejects = txTabMaxRejects
@ -141,7 +141,7 @@ proc insert*(
tx: var Transaction;
status = txItemPending;
info = ""): Result[void,TxInfo]
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Add new transaction argument `tx` to the database. If accepted and added
## to the database, a `key` value is returned which can be used to retrieve
## this transaction direcly via `tx[key].tx`. The following holds for the
@ -173,7 +173,7 @@ proc insert*(
ok()
proc insert*(xp: TxTabsRef; item: TxItemRef): Result[void,TxInfo]
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Variant of `insert()` with fully qualified `item` argument.
if xp.byItemID.hasKey(item.itemID):
return err(txInfoErrAlreadyKnown)
@ -181,7 +181,7 @@ proc insert*(xp: TxTabsRef; item: TxItemRef): Result[void,TxInfo]
proc reassign*(xp: TxTabsRef; item: TxItemRef; status: TxItemStatus): bool
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Variant of `reassign()` for the `TxItemStatus` flag.
# make sure that the argument `item` is not some copy
let rc = xp.byItemID.eq(item.itemID)
@ -196,8 +196,7 @@ proc reassign*(xp: TxTabsRef; item: TxItemRef; status: TxItemStatus): bool
return true
proc flushRejects*(xp: TxTabsRef; maxItems = int.high): (int,int)
{.gcsafe,raises: [Defect,KeyError].} =
proc flushRejects*(xp: TxTabsRef; maxItems = int.high): (int,int) =
## Flush/delete at most `maxItems` oldest items from the waste basket and
## return the numbers of deleted and remaining items (a waste basket item
## is considered older if it was moved there earlier.)
@ -213,7 +212,7 @@ proc flushRejects*(xp: TxTabsRef; maxItems = int.high): (int,int)
proc dispose*(xp: TxTabsRef; item: TxItemRef; reason: TxInfo): bool
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Move argument `item` to rejects queue (aka waste basket.)
if xp.deleteImpl(item):
if xp.maxRejects <= xp.byRejects.len:
@ -223,8 +222,7 @@ proc dispose*(xp: TxTabsRef; item: TxItemRef; reason: TxInfo): bool
return true
proc reject*(xp: TxTabsRef; tx: var Transaction;
reason: TxInfo; status = txItemPending; info = "")
{.gcsafe,raises: [Defect,KeyError].} =
reason: TxInfo; status = txItemPending; info = "") =
## Similar to dispose but for a tx without the item wrapper, the function
## imports the tx into the waste basket (e.g. after it could not
## be inserted.)
@ -233,8 +231,7 @@ proc reject*(xp: TxTabsRef; tx: var Transaction;
let item = TxItemRef.new(tx, reason, status, info)
xp.byRejects[item.itemID] = item
proc reject*(xp: TxTabsRef; item: TxItemRef; reason: TxInfo)
{.gcsafe,raises: [Defect,KeyError].} =
proc reject*(xp: TxTabsRef; item: TxItemRef; reason: TxInfo) =
## Variant of `reject()` with `item` rather than `tx` (assuming
## `item` is not in the database.)
if xp.maxRejects <= xp.byRejects.len:
@ -243,8 +240,7 @@ proc reject*(xp: TxTabsRef; item: TxItemRef; reason: TxInfo)
xp.byRejects[item.itemID] = item
proc reject*(xp: TxTabsRef; tx: Transaction;
reason: TxInfo; status = txItemPending; info = "")
{.gcsafe,raises: [Defect,KeyError].} =
reason: TxInfo; status = txItemPending; info = "") =
## Variant of `reject()`
var ty = tx
xp.reject(ty, reason, status)
@ -274,7 +270,7 @@ proc remote*(lc: TxTabsLocality): seq[EthAddress] =
# ------------------------------------------------------------------------------
proc `baseFee=`*(xp: TxTabsRef; val: GasPrice)
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Setter, update may cause database re-org
if xp.bySender.baseFee != val:
xp.bySender.baseFee = val
@ -300,16 +296,14 @@ proc hasTx*(xp: TxTabsRef; tx: Transaction): bool =
## paradigm for accessing a transaction container.
xp.byItemID.hasKey(tx.itemID)
proc nItems*(xp: TxTabsRef): TxTabsItemsCount
{.gcsafe,raises: [Defect,KeyError].} =
proc nItems*(xp: TxTabsRef): TxTabsItemsCount =
result.pending = xp.byStatus.eq(txItemPending).nItems
result.staged = xp.byStatus.eq(txItemStaged).nItems
result.packed = xp.byStatus.eq(txItemPacked).nItems
result.total = xp.byItemID.len
result.disposed = xp.byRejects.len
proc gasTotals*(xp: TxTabsRef): TxTabsGasTotals
{.gcsafe,raises: [Defect,KeyError].} =
proc gasTotals*(xp: TxTabsRef): TxTabsGasTotals =
result.pending = xp.byStatus.eq(txItemPending).gasLimits
result.staged = xp.byStatus.eq(txItemStaged).gasLimits
result.packed = xp.byStatus.eq(txItemPacked).gasLimits
@ -357,7 +351,7 @@ proc flushLocals*(xp: TxTabsRef) =
iterator incAccount*(xp: TxTabsRef; bucket: TxItemStatus;
fromRank = TxRank.low): (EthAddress,TxStatusNonceRef)
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Walk accounts with increasing ranks and return a nonce-ordered item list.
let rcBucket = xp.byStatus.eq(bucket)
if rcBucket.isOk:
@ -379,7 +373,7 @@ iterator incAccount*(xp: TxTabsRef; bucket: TxItemStatus;
iterator decAccount*(xp: TxTabsRef; bucket: TxItemStatus;
fromRank = TxRank.high): (EthAddress,TxStatusNonceRef)
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Walk accounts with decreasing ranks and return the nonce-ordered item list.
let rcBucket = xp.byStatus.eq(bucket)
if rcBucket.isOk:
@ -400,7 +394,7 @@ iterator decAccount*(xp: TxTabsRef; bucket: TxItemStatus;
iterator packingOrderAccounts*(xp: TxTabsRef; bucket: TxItemStatus):
(EthAddress,TxStatusNonceRef)
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Loop over accounts from a particular bucket ordered by
## + local ranks, higest one first
## + remote ranks, higest one first
@ -419,7 +413,7 @@ iterator packingOrderAccounts*(xp: TxTabsRef; bucket: TxItemStatus):
iterator incAccount*(xp: TxTabsRef;
fromRank = TxRank.low): (EthAddress,TxSenderNonceRef)
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Variant of `incAccount()` without bucket restriction.
var rcRank = xp.byRank.ge(fromRank)
while rcRank.isOk:
@ -427,7 +421,7 @@ iterator incAccount*(xp: TxTabsRef;
# Try all sender adresses found
for account in addrList.keys:
yield (account, xp.bySender.eq(account).any.value.data)
yield (account, xp.bySender.eq(account).sub.value.data)
# Get next ranked address list (top down index walk)
rcRank = xp.byRank.gt(rank) # potenially modified database
@ -435,7 +429,7 @@ iterator incAccount*(xp: TxTabsRef;
iterator decAccount*(xp: TxTabsRef;
fromRank = TxRank.high): (EthAddress,TxSenderNonceRef)
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Variant of `decAccount()` without bucket restriction.
var rcRank = xp.byRank.le(fromRank)
while rcRank.isOk:
@ -443,7 +437,7 @@ iterator decAccount*(xp: TxTabsRef;
# Try all sender adresses found
for account in addrList.keys:
yield (account, xp.bySender.eq(account).any.value.data)
yield (account, xp.bySender.eq(account).sub.value.data)
# Get next ranked address list (top down index walk)
rcRank = xp.byRank.lt(rank) # potenially modified database
@ -453,8 +447,7 @@ iterator decAccount*(xp: TxTabsRef;
# -----------------------------------------------------------------------------
iterator incNonce*(nonceList: TxSenderNonceRef;
nonceFrom = AccountNonce.low): TxItemRef
{.gcsafe,raises: [Defect,KeyError].} =
nonceFrom = AccountNonce.low): TxItemRef =
## Second stage iterator inside `incAccount()` or `decAccount()`. The
## items visited are always sorted by least-nonce first.
var rc = nonceList.ge(nonceFrom)
@ -502,7 +495,7 @@ iterator decNonce*(nonceList: TxStatusNonceRef;
# ------------------------------------------------------------------------------
proc verify*(xp: TxTabsRef): Result[void,TxInfo]
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
## Verify descriptor and subsequent data structures.
block:
let rc = xp.bySender.verify

View File

@ -18,7 +18,7 @@ import
eth/[common],
stew/[results, sorted_set]
{.push raises: [Defect].}
{.push raises: [].}
type
TxRank* = ##\
@ -61,7 +61,7 @@ proc clear*(rt: var TxRankTab) =
# ------------------------------------------------------------------------------
proc insert*(rt: var TxRankTab; rank: TxRank; sender: EthAddress): bool
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Add or update a new ranked address. This function returns `true` it the
## address exists already with the current rank.
@ -93,7 +93,7 @@ proc insert*(rt: var TxRankTab; rank: TxRank; sender: EthAddress): bool
proc delete*(rt: var TxRankTab; sender: EthAddress): bool
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Delete argument address `sender` from rank table.
if rt.addrTab.hasKey(sender):
let
@ -111,7 +111,7 @@ proc delete*(rt: var TxRankTab; sender: EthAddress): bool
proc verify*(rt: var TxRankTab): Result[void,TxInfo]
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
var
seen: Table[EthAddress,TxRank]
@ -177,7 +177,7 @@ proc nItems*(rt: var TxRankTab): int =
proc eq*(rt: var TxRankTab; sender: EthAddress):
SortedSetResult[EthAddress,TxRank]
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
if rt.addrTab.hasKey(sender):
return toSortedSetResult(key = sender, data = rt.addrTab[sender])
err(rbNotFound)

View File

@ -19,7 +19,7 @@ import
eth/[common],
stew/[results, keyed_queue, keyed_queue/kq_debug, sorted_set]
{.push raises: [Defect].}
{.push raises: [].}
type
TxSenderNonceRef* = ref object ##\
@ -136,7 +136,7 @@ proc recalcProfit(nonceData: TxSenderNonceRef; baseFee: GasPrice) =
# ------------------------------------------------------------------------------
proc mkInxImpl(gt: var TxSenderTab; item: TxItemRef): Result[TxSenderInx,void]
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
var inxData: TxSenderInx
if gt.addrList.hasKey(item.sender):
@ -171,7 +171,7 @@ proc mkInxImpl(gt: var TxSenderTab; item: TxItemRef): Result[TxSenderInx,void]
proc getInxImpl(gt: var TxSenderTab; item: TxItemRef): Result[TxSenderInx,void]
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
var inxData: TxSenderInx
if not gt.addrList.hasKey(item.sender):
@ -202,7 +202,7 @@ proc init*(gt: var TxSenderTab) =
# ------------------------------------------------------------------------------
proc insert*(gt: var TxSenderTab; item: TxItemRef): bool
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Add transaction `item` to the list. The function has no effect if the
## transaction exists, already.
let rc = gt.mkInxImpl(item)
@ -223,7 +223,7 @@ proc insert*(gt: var TxSenderTab; item: TxItemRef): bool
proc delete*(gt: var TxSenderTab; item: TxItemRef): bool
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
let rc = gt.getInxImpl(item)
if rc.isOk:
let
@ -253,7 +253,7 @@ proc delete*(gt: var TxSenderTab; item: TxItemRef): bool
proc verify*(gt: var TxSenderTab): Result[void,TxInfo]
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Walk `EthAddress` > `TxSenderLocus` > `AccountNonce` > items
block:
@ -264,7 +264,7 @@ proc verify*(gt: var TxSenderTab): Result[void,TxInfo]
var totalCount = 0
for p in gt.addrList.nextPairs:
let schedData = p.data
var addrCount = 0
#var addrCount = 0 -- notused
# at least one of status lists must be available
if schedData.nActive == 0:
return err(txInfoVfySenderLeafEmpty)
@ -373,8 +373,7 @@ proc baseFee*(gt: var TxSenderTab): GasPrice =
# Public functions, setters
# ------------------------------------------------------------------------------
proc `baseFee=`*(gt: var TxSenderTab; val: GasPrice)
{.gcsafe,raises: [Defect,KeyError].} =
proc `baseFee=`*(gt: var TxSenderTab; val: GasPrice) =
## Setter. When invoked, there is *always* a re-calculation of the profit
## values stored with the sender address.
gt.baseFee = val
@ -404,7 +403,7 @@ proc nItems*(gt: var TxSenderTab): int =
proc rank*(gt: var TxSenderTab; sender: EthAddress): Result[int64,void]
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## The *rank* of the `sender` argument address is the
## ::
## maxProfit() / gasLimits()
@ -418,7 +417,7 @@ proc rank*(gt: var TxSenderTab; sender: EthAddress): Result[int64,void]
proc eq*(gt: var TxSenderTab; sender: EthAddress):
SortedSetResult[EthAddress,TxSenderSchedRef]
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
if gt.addrList.hasKey(sender):
return toSortedSetResult(key = sender, data = gt.addrList[sender])
err(rbNotFound)
@ -458,7 +457,7 @@ proc eq*(rc: SortedSetResult[EthAddress,TxSenderSchedRef];
err(rc.error)
proc any*(schedData: TxSenderSchedRef):
proc sub*(schedData: TxSenderSchedRef):
SortedSetResult[TxSenderSchedule,TxSenderNonceRef] =
## Return all-entries sub-list
let nonceData = schedData.allList
@ -466,11 +465,11 @@ proc any*(schedData: TxSenderSchedRef):
return err(rbNotFound)
toSortedSetResult(key = txSenderAny, data = nonceData)
proc any*(rc: SortedSetResult[EthAddress,TxSenderSchedRef]):
proc sub*(rc: SortedSetResult[EthAddress,TxSenderSchedRef]):
SortedSetResult[TxSenderSchedule,TxSenderNonceRef] =
## Return all-entries sub-list
if rc.isOk:
return rc.value.data.any
return rc.value.data.sub
err(rc.error)
@ -480,7 +479,7 @@ proc eq*(schedData: TxSenderSchedRef;
## Variant of `eq()` using unified key schedule
case key
of txSenderAny:
return schedData.any
return schedData.sub
of txSenderPending:
return schedData.eq(txItemPending)
of txSenderStaged:
@ -499,10 +498,6 @@ proc eq*(rc: SortedSetResult[EthAddress,TxSenderSchedRef];
# Public SortedSet ops -- `AccountNonce` (level 2)
# ------------------------------------------------------------------------------
proc len*(nonceData: TxSenderNonceRef): int =
let rc = nonceData.nonceList.len
proc nItems*(nonceData: TxSenderNonceRef): int =
## Getter, total number of items in the sub-list
nonceData.nonceList.len
@ -607,8 +602,7 @@ proc lt*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef];
# Public iterators
# ------------------------------------------------------------------------------
iterator accounts*(gt: var TxSenderTab): (EthAddress,int64)
{.gcsafe,raises: [Defect,KeyError].} =
iterator accounts*(gt: var TxSenderTab): (EthAddress,int64) =
## Sender account traversal, returns the account address and the rank
## for that account.
for p in gt.addrList.nextPairs:

View File

@ -18,7 +18,7 @@ import
eth/[common],
stew/[results, keyed_queue, keyed_queue/kq_debug, sorted_set]
{.push raises: [Defect].}
{.push raises: [].}
type
TxStatusNonceRef* = ref object ##\
@ -46,18 +46,18 @@ type
# Private helpers
# ------------------------------------------------------------------------------
proc `$`(rq: TxStatusNonceRef): string =
proc `$`(rq: TxStatusNonceRef): string {.gcsafe, raises: [].} =
## Needed by `rq.verify()` for printing error messages
$rq.nonceList.len
proc nActive(sq: TxStatusTab): int =
proc nActive(sq: TxStatusTab): int {.gcsafe, raises: [].} =
## Number of non-nil items
for status in TxItemStatus:
if not sq.statusList[status].isNil:
result.inc
proc mkInxImpl(sq: var TxStatusTab; item: TxItemRef): Result[TxStatusInx,void]
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Fails if item exists, already
var inx: TxStatusInx
@ -86,7 +86,7 @@ proc mkInxImpl(sq: var TxStatusTab; item: TxItemRef): Result[TxStatusInx,void]
proc getInxImpl(sq: var TxStatusTab; item: TxItemRef): Result[TxStatusInx,void]
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
var inx: TxStatusInx
# array of buckets (aka status) => senders
@ -105,14 +105,14 @@ proc getInxImpl(sq: var TxStatusTab; item: TxItemRef): Result[TxStatusInx,void]
# Public all-queue helpers
# ------------------------------------------------------------------------------
proc init*(sq: var TxStatusTab; size = 10) =
proc init*(sq: var TxStatusTab; size = 10) {.gcsafe, raises: [].} =
## Optional constructor
sq.size = 0
sq.statusList.reset
proc insert*(sq: var TxStatusTab; item: TxItemRef): bool
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Add transaction `item` to the list. The function has no effect if the
## transaction exists, already (apart from returning `false`.)
let rc = sq.mkInxImpl(item)
@ -125,7 +125,7 @@ proc insert*(sq: var TxStatusTab; item: TxItemRef): bool
proc delete*(sq: var TxStatusTab; item: TxItemRef): bool
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
let rc = sq.getInxImpl(item)
if rc.isOk:
let inx = rc.value
@ -145,7 +145,7 @@ proc delete*(sq: var TxStatusTab; item: TxItemRef): bool
proc verify*(sq: var TxStatusTab): Result[void,TxInfo]
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## walk `TxItemStatus` > `EthAddress` > `AccountNonce`
var totalCount = 0
@ -161,7 +161,8 @@ proc verify*(sq: var TxStatusTab): Result[void,TxInfo]
addrCount = 0
gasLimits = 0.GasInt
for p in addrData.addrList.nextPairs:
let (addrKey, nonceData) = (p.key, p.data)
# let (addrKey, nonceData) = (p.key, p.data) -- notused
let nonceData = p.data
block:
let rc = nonceData.nonceList.verify
@ -233,14 +234,14 @@ proc gasLimits*(rc: SortedSetResult[TxItemStatus,TxStatusSenderRef]): GasInt =
proc eq*(addrData: TxStatusSenderRef; sender: EthAddress):
SortedSetResult[EthAddress,TxStatusNonceRef]
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
if addrData.addrList.hasKey(sender):
return toSortedSetResult(key = sender, data = addrData.addrList[sender])
err(rbNotFound)
proc eq*(rc: SortedSetResult[TxItemStatus,TxStatusSenderRef];
sender: EthAddress): SortedSetResult[EthAddress,TxStatusNonceRef]
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
if rc.isOk:
return rc.value.data.eq(sender)
err(rc.error)

View File

@ -25,7 +25,7 @@ import
eth/[common, keys],
stew/[keyed_queue, sorted_set]
{.push raises: [Defect].}
{.push raises: [].}
type
TxAddStats* = tuple ##\
@ -56,7 +56,7 @@ logScope:
# ------------------------------------------------------------------------------
proc getItemList(tab: var AccouuntNonceTab; key: EthAddress): var NonceList
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
if not tab.hasKey(key):
tab[key] = NonceList.init
tab[key]
@ -66,12 +66,12 @@ proc getItemList(tab: var AccouuntNonceTab; key: EthAddress): var NonceList
# ------------------------------------------------------------------------------
proc supersede(xp: TxPoolRef; item: TxItemRef): Result[void,TxInfo]
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
var current: TxItemRef
block:
let rc = xp.txDB.bySender.eq(item.sender).any.eq(item.tx.nonce)
let rc = xp.txDB.bySender.eq(item.sender).sub.eq(item.tx.nonce)
if rc.isErr:
return err(txInfoErrUnspecified)
current = rc.value.data
@ -98,13 +98,13 @@ proc supersede(xp: TxPoolRef; item: TxItemRef): Result[void,TxInfo]
# ------------------------------------------------------------------------------
proc addTx*(xp: TxPoolRef; item: TxItemRef): bool
{.discardable,gcsafe,raises: [Defect,CatchableError].} =
{.discardable,gcsafe,raises: [CatchableError].} =
## Add a transaction item. It is tested and stored in either of the `pending`
## or `staged` buckets, or disposed into the waste basket. The function
## returns `true` if the item was added to the `staged` bucket.
var
stagedItemAdded = false
# stagedItemAdded = false -- notused
vetted = txInfoOk
# Leave this frame with `return`, or proceeed with error
@ -160,7 +160,7 @@ proc addTx*(xp: TxPoolRef; item: TxItemRef): bool
# core/tx_pool.go(889): func (pool *TxPool) addTxs(txs []*types.Transaction, ..
proc addTxs*(xp: TxPoolRef;
txs: openArray[Transaction]; info = ""): TxAddStats
{.discardable,gcsafe,raises: [Defect,CatchableError].} =
{.discardable,gcsafe,raises: [CatchableError].} =
## Add a list of transactions. The list is sorted after nonces and txs are
## tested and stored into either of the `pending` or `staged` buckets, or
## disposed o the waste basket. The function returns the tuple

View File

@ -27,7 +27,7 @@ import
eth/[common, keys],
stew/[sorted_set]
{.push raises: [Defect].}
{.push raises: [].}
const
minNonce = AccountNonce.low
@ -41,7 +41,7 @@ logScope:
proc bucketItemsReassignPending*(xp: TxPoolRef; labelFrom: TxItemStatus;
account: EthAddress; nonceFrom = minNonce)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Move all items in bucket `lblFrom` with nonces not less than `nonceFrom`
## to the `pending` bucket
let rc = xp.txDB.byStatus.eq(labelFrom).eq(account)
@ -51,13 +51,13 @@ proc bucketItemsReassignPending*(xp: TxPoolRef; labelFrom: TxItemStatus;
proc bucketItemsReassignPending*(xp: TxPoolRef; item: TxItemRef)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Variant of `bucketItemsReassignPending()`
xp.bucketItemsReassignPending(item.status, item.sender, item.tx.nonce)
proc bucketUpdateAll*(xp: TxPoolRef): bool
{.discardable,gcsafe,raises: [Defect,CatchableError].} =
{.discardable,gcsafe,raises: [CatchableError].} =
## Update all buckets. The function returns `true` if some items were added
## to the `staged` bucket.
@ -71,7 +71,7 @@ proc bucketUpdateAll*(xp: TxPoolRef): bool
for item in xp.pDoubleCheck:
if item.reject == txInfoOk:
# Check whether there was a gap when the head was moved backwards.
let rc = xp.txDB.bySender.eq(item.sender).any.gt(item.tx.nonce)
let rc = xp.txDB.bySender.eq(item.sender).sub.gt(item.tx.nonce)
if rc.isOk:
let nextItem = rc.value.data
if item.tx.nonce + 1 < nextItem.tx.nonce:
@ -81,7 +81,7 @@ proc bucketUpdateAll*(xp: TxPoolRef): bool
# For failed txs, make sure that the account state has not
# changed. Assuming that this list is complete, then there are
# no other account affected.
let rc = xp.txDB.bySender.eq(item.sender).any.ge(minNonce)
let rc = xp.txDB.bySender.eq(item.sender).sub.ge(minNonce)
if rc.isOk:
let firstItem = rc.value.data
if not xp.classifyValid(firstItem):
@ -159,7 +159,7 @@ proc bucketUpdateAll*(xp: TxPoolRef): bool
# ---------------------------
proc bucketFlushPacked*(xp: TxPoolRef)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Move all items from the `packed` bucket to the `pending` bucket
for (_,nonceList) in xp.txDB.decAccount(txItemPacked):
for item in nonceList.incNonce:

View File

@ -26,7 +26,7 @@ import
import ../../../transaction except GasPrice, GasPriceEx # already in tx_item
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "tx-pool classify"
@ -62,7 +62,7 @@ proc checkTxBasic(xp: TxPoolRef; item: TxItemRef): bool =
true
proc checkTxNonce(xp: TxPoolRef; item: TxItemRef): bool
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Make sure that there is only one contiuous sequence of nonces (per
## sender) starting at the account nonce.
@ -79,7 +79,7 @@ proc checkTxNonce(xp: TxPoolRef; item: TxItemRef): bool
# for an existing account, nonces must come in increasing consecutive order
let rc = xp.txDB.bySender.eq(item.sender)
if rc.isOk:
if rc.value.data.any.eq(item.tx.nonce - 1).isErr:
if rc.value.data.sub.eq(item.tx.nonce - 1).isErr:
debug "invalid tx: account nonces gap",
txNonce = item.tx.nonce,
accountNonce
@ -92,7 +92,7 @@ proc checkTxNonce(xp: TxPoolRef; item: TxItemRef): bool
# ------------------------------------------------------------------------------
proc txNonceActive(xp: TxPoolRef; item: TxItemRef): bool
{.gcsafe,raises: [Defect,KeyError].} =
{.gcsafe,raises: [KeyError].} =
## Make sure that nonces appear as a contiuous sequence in `staged` bucket
## probably preceeded in `packed` bucket.
let rc = xp.txDB.bySender.eq(item.sender)
@ -125,8 +125,7 @@ proc txFeesCovered(xp: TxPoolRef; item: TxItemRef): bool =
return false
true
proc txCostInBudget(xp: TxPoolRef; item: TxItemRef): bool
{.gcsafe,raises: [Defect,CatchableError].} =
proc txCostInBudget(xp: TxPoolRef; item: TxItemRef): bool =
## Check whether the worst case expense is covered by the price budget,
let
balance = xp.chain.getBalance(item.sender)
@ -179,7 +178,7 @@ proc txPostLondonAcceptableTipAndFees(xp: TxPoolRef; item: TxItemRef): bool =
# ------------------------------------------------------------------------------
proc classifyValid*(xp: TxPoolRef; item: TxItemRef): bool
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Check a (typically new) transaction whether it should be accepted at all
## or re-jected right away.
@ -192,7 +191,7 @@ proc classifyValid*(xp: TxPoolRef; item: TxItemRef): bool
true
proc classifyActive*(xp: TxPoolRef; item: TxItemRef): bool
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Check whether a valid transaction is ready to be held in the
## `staged` bucket in which case the function returns `true`.

View File

@ -23,7 +23,7 @@ import
eth/[common, keys],
stew/keyed_queue
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "tx-pool dispose expired"
@ -43,8 +43,8 @@ proc utcNow: Time =
# ------------------------------------------------------------------------------
proc deleteOtherNonces(xp: TxPoolRef; item: TxItemRef; newerThan: Time): bool
{.gcsafe,raises: [Defect,KeyError].} =
let rc = xp.txDB.bySender.eq(item.sender).any
{.gcsafe,raises: [KeyError].} =
let rc = xp.txDB.bySender.eq(item.sender).sub
if rc.isOk:
for other in rc.value.data.incNonce(item.tx.nonce):
# only delete non-expired items
@ -58,7 +58,7 @@ proc deleteOtherNonces(xp: TxPoolRef; item: TxItemRef; newerThan: Time): bool
# ------------------------------------------------------------------------------
# core/tx_pool.go(384): for addr := range pool.queue {
proc disposeExpiredItems*(xp: TxPoolRef) {.gcsafe,raises: [Defect,KeyError].} =
proc disposeExpiredItems*(xp: TxPoolRef) {.gcsafe,raises: [KeyError].} =
## Any non-local transaction old enough will be removed. This will not
## apply to items in the packed queue.
let
@ -96,12 +96,12 @@ proc disposeExpiredItems*(xp: TxPoolRef) {.gcsafe,raises: [Defect,KeyError].} =
proc disposeItemAndHigherNonces*(xp: TxPoolRef; item: TxItemRef;
reason, otherReason: TxInfo): int
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Move item and higher nonces per sender to wastebasket.
if xp.txDB.dispose(item, reason):
result = 1
# For the current sender, delete all items with higher nonces
let rc = xp.txDB.bySender.eq(item.sender).any
let rc = xp.txDB.bySender.eq(item.sender).sub
if rc.isOk:
let nonceList = rc.value.data
@ -111,7 +111,7 @@ proc disposeItemAndHigherNonces*(xp: TxPoolRef; item: TxItemRef;
proc disposeById*(xp: TxPoolRef; itemIDs: openArray[Hash256]; reason: TxInfo)
{.gcsafe,raises: [Defect,KeyError].}=
{.gcsafe,raises: [KeyError].}=
## Dispose items by item ID wihtout checking whether this makes other items
## unusable (e.g. with higher nonces for the same sender.)
for itemID in itemIDs:

View File

@ -24,7 +24,7 @@ import
eth/keys,
stew/keyed_queue
{.push raises: [Defect].}
{.push raises: [].}
type
TxHeadDiffRef* = ref object ##\
@ -47,13 +47,13 @@ logScope:
# use it as a stack/lifo as the ordering is reversed
proc insert(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
let db = xp.chain.com.db
for tx in db.getBlockBody(blockHash).transactions:
kq.addTxs[tx.itemID] = tx
proc remove(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
let db = xp.chain.com.db
for tx in db.getBlockBody(blockHash).transactions:
kq.remTxs[tx.itemID] = true
@ -68,7 +68,7 @@ proc new(T: type TxHeadDiffRef): T =
# core/tx_pool.go(218): func (pool *TxPool) reset(oldHead, newHead ...
proc headDiff*(xp: TxPoolRef;
newHead: BlockHeader): Result[TxHeadDiffRef,TxInfo]
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## This function caclulates the txs differences between the cached block
## chain head to a new head implied by the argument `newHeader`. Differences
## are returned as two tables for adding and removing txs. The tables table

View File

@ -32,7 +32,7 @@ import
eth/[keys, rlp, trie, trie/db],
stew/[sorted_set]
{.push raises: [Defect].}
{.push raises: [].}
type
TxPackerError* = object of CatchableError
@ -68,7 +68,7 @@ template safeExecutor(info: string; code: untyped) =
raise newException(TxPackerError, info & "(): " & $e.name & " -- " & e.msg)
proc persist(pst: TxPackerStateRef)
{.gcsafe,raises: [Defect,RlpError].} =
{.gcsafe,raises: [RlpError].} =
## Smart wrapper
if not pst.cleanState:
pst.xp.chain.vmState.stateDB.persist(clearCache = false)
@ -79,7 +79,7 @@ proc persist(pst: TxPackerStateRef)
# ------------------------------------------------------------------------------
proc runTx(pst: TxPackerStateRef; item: TxItemRef): GasInt
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Execute item transaction and update `vmState` book keeping. Returns the
## `gasUsed` after executing the transaction.
let
@ -96,7 +96,7 @@ proc runTx(pst: TxPackerStateRef; item: TxItemRef): GasInt
proc runTxCommit(pst: TxPackerStateRef; item: TxItemRef; gasBurned: GasInt)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Book keeping after executing argument `item` transaction in the VM. The
## function returns the next number of items `nItems+1`.
let
@ -154,7 +154,7 @@ proc runTxCommit(pst: TxPackerStateRef; item: TxItemRef; gasBurned: GasInt)
# ------------------------------------------------------------------------------
proc vmExecInit(xp: TxPoolRef): TxPackerStateRef
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
# Flush `packed` bucket
xp.bucketFlushPacked
@ -173,7 +173,7 @@ proc vmExecInit(xp: TxPoolRef): TxPackerStateRef
proc vmExecGrabItem(pst: TxPackerStateRef; item: TxItemRef): Result[bool,void]
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
## Greedily collect & compact items as long as the accumulated `gasLimit`
## values are below the maximum block size.
let
@ -199,7 +199,7 @@ proc vmExecGrabItem(pst: TxPackerStateRef; item: TxItemRef): Result[bool,void]
vmState.stateDB.commit(accTx)
vmState.stateDB.persist(clearCache = false)
let midRoot = vmState.stateDB.rootHash
# let midRoot = vmState.stateDB.rootHash -- notused
# Finish book-keeping and move item to `packed` bucket
pst.runTxCommit(item, gasUsed)
@ -208,7 +208,7 @@ proc vmExecGrabItem(pst: TxPackerStateRef; item: TxItemRef): Result[bool,void]
proc vmExecCommit(pst: TxPackerStateRef)
{.gcsafe,raises: [Defect,CatchableError].} =
{.gcsafe,raises: [CatchableError].} =
let
xp = pst.xp
vmState = xp.chain.vmState
@ -247,7 +247,7 @@ proc vmExecCommit(pst: TxPackerStateRef)
# Public functions
# ------------------------------------------------------------------------------
proc packerVmExec*(xp: TxPoolRef) {.gcsafe,raises: [Defect,CatchableError].} =
proc packerVmExec*(xp: TxPoolRef) {.gcsafe,raises: [CatchableError].} =
## Rebuild `packed` bucket by selection items from the `staged` bucket
## after executing them in the VM.
let db = xp.chain.com.db

View File

@ -21,7 +21,7 @@ import
eth/[common, keys],
stew/keyed_queue
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "tx-pool recover item"
@ -36,8 +36,7 @@ let
# ------------------------------------------------------------------------------
proc recoverItem*(xp: TxPoolRef; tx: Transaction; status = txItemPending;
info = ""; acceptExisting = false): Result[TxItemRef,TxInfo]
{.gcsafe,raises: [Defect,CatchableError].} =
info = ""; acceptExisting = false): Result[TxItemRef,TxInfo] =
## Recover item from waste basket or create new. It is an error if the item
## is in the buckets database, already.
##

View File

@ -10,16 +10,15 @@
import
std/[sequtils, sets, times],
../common/common,
../db/accounts_cache,
".."/[transaction, common/common],
".."/[vm_state, vm_types, errors],
".."/[errors, transaction, vm_state, vm_types],
"."/[dao, eip4844, gaslimit, withdrawals],
./pow/[difficulty, header],
./pow,
chronicles,
eth/[rlp],
nimcrypto/utils,
stew/[objects, results, endians2]
stew/[objects, results]
from stew/byteutils
import nil
@ -29,7 +28,7 @@ export
pow.new,
results
{.push raises: [Defect].}
{.push raises: [].}
const
daoForkBlockExtraData* =
@ -404,7 +403,7 @@ proc validateHeaderAndKinship*(
header: BlockHeader;
body: BlockBody;
checkSealOK: bool;
pow: PowRef): Result[void, string] {.gcsafe,raises: [CatchableError, Defect].} =
pow: PowRef): Result[void, string] =
com.validateHeaderAndKinship(
header, body.uncles, body.transactions.len, checkSealOK, pow)

View File

@ -12,10 +12,12 @@ import
stew/results,
../common/common
{.push raises: [].}
# https://eips.ethereum.org/EIPS/eip-4895
func validateWithdrawals*(
com: CommonRef, header: BlockHeader
): Result[void, string] {.raises: [Defect].} =
): Result[void, string] =
if header.withdrawalsRoot.isSome:
return err("Withdrawals not yet implemented")
return ok()

View File

@ -15,7 +15,7 @@ import
stew/[interval_set, sorted_set],
"."/[full/worker, sync_desc, sync_sched, protocol]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "full-sync"

View File

@ -16,7 +16,7 @@ import
../../utils/prettify,
../misc/timer_helper
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "full-ticker"

View File

@ -17,7 +17,7 @@ import
../misc/[best_pivot, block_queue],
./ticker
{.push raises:[Defect].}
{.push raises:[].}
logScope:
topics = "full-buddy"
@ -112,7 +112,7 @@ proc processStaged(buddy: FullBuddyRef): bool =
## Fetch a work item from the `staged` queue an process it to be
## stored on the persistent block chain.
let
ctx = buddy.ctx
ctx {.used.} = buddy.ctx
peer = buddy.peer
chainDb = buddy.ctx.chain.db
chain = buddy.ctx.chain
@ -125,7 +125,7 @@ proc processStaged(buddy: FullBuddyRef): bool =
return false
rc.value
startNumber = wi.headers[0].blockNumber
#startNumber = wi.headers[0].blockNumber -- unused
# Store in persistent database
try:
@ -296,7 +296,7 @@ proc runSingle*(buddy: FullBuddyRef) {.async.} =
##
let
ctx = buddy.ctx
peer = buddy.peer
peer {.used.} = buddy.peer
bq = buddy.data.bQueue
pv = buddy.data.pivot
@ -416,7 +416,7 @@ proc runMulti*(buddy: FullBuddyRef) {.async.} =
##
# Fetch work item
let
ctx = buddy.ctx
ctx {.used.} = buddy.ctx
bq = buddy.data.bQueue
rc = await bq.blockQueueWorker()
if rc.isErr:

View File

@ -18,7 +18,7 @@ import
../protocol/trace_config, # gossip noise control
../../core/[chain, tx_pool, tx_pool/tx_item]
{.push raises: [Defect].}
{.push raises: [].}
type
HashToTime = TableRef[Hash256, Time]
@ -74,19 +74,17 @@ const
# Private functions: helper functions
# ------------------------------------------------------------------------------
proc notEnabled(name: string) =
proc notEnabled(name: string) {.used.} =
debug "Wire handler method is disabled", meth = name
proc notImplemented(name: string) =
proc notImplemented(name: string) {.used.} =
debug "Wire handler method not implemented", meth = name
proc inPool(ctx: EthWireRef, txHash: Hash256): bool
{.gcsafe, raises: [Defect,CatchableError].} =
proc inPool(ctx: EthWireRef, txHash: Hash256): bool =
let res = ctx.txPool.getItem(txHash)
res.isOk
proc inPoolAndOk(ctx: EthWireRef, txHash: Hash256): bool
{.gcsafe, raises: [Defect,CatchableError].} =
proc inPoolAndOk(ctx: EthWireRef, txHash: Hash256): bool =
let res = ctx.txPool.getItem(txHash)
if res.isErr: return false
res.get().reject == txInfoOk
@ -94,7 +92,7 @@ proc inPoolAndOk(ctx: EthWireRef, txHash: Hash256): bool
proc successorHeader(db: ChainDBRef,
h: BlockHeader,
output: var BlockHeader,
skip = 0'u): bool {.gcsafe, raises: [Defect,RlpError].} =
skip = 0'u): bool {.gcsafe, raises: [RlpError].} =
let offset = 1 + skip.toBlockNumber
if h.blockNumber <= (not 0.toBlockNumber) - offset:
result = db.getBlockHeader(h.blockNumber + offset, output)
@ -102,7 +100,7 @@ proc successorHeader(db: ChainDBRef,
proc ancestorHeader(db: ChainDBRef,
h: BlockHeader,
output: var BlockHeader,
skip = 0'u): bool {.gcsafe, raises: [Defect,RlpError].} =
skip = 0'u): bool {.gcsafe, raises: [RlpError].} =
let offset = 1 + skip.toBlockNumber
if h.blockNumber >= offset:
result = db.getBlockHeader(h.blockNumber - offset, output)
@ -110,7 +108,7 @@ proc ancestorHeader(db: ChainDBRef,
proc blockHeader(db: ChainDBRef,
b: HashOrNum,
output: var BlockHeader): bool
{.gcsafe, raises: [Defect,RlpError].} =
{.gcsafe, raises: [RlpError].} =
if b.isHash:
db.getBlockHeader(b.hash, output)
else:
@ -120,8 +118,9 @@ proc blockHeader(db: ChainDBRef,
# Private functions: peers related functions
# ------------------------------------------------------------------------------
proc hash(peer: Peer): hashes.Hash =
hash(peer.remote)
when isMainModule:
proc hash(peer: Peer): hashes.Hash =
hash(peer.remote)
proc getPeers(ctx: EthWireRef, thisPeer: Peer): seq[Peer] =
# do not send back tx or txhash to thisPeer
@ -129,7 +128,7 @@ proc getPeers(ctx: EthWireRef, thisPeer: Peer): seq[Peer] =
if peer != thisPeer:
result.add peer
proc banExpiredReconnect(arg: pointer) {.gcsafe, raises: [Defect].} =
proc banExpiredReconnect(arg: pointer) =
# Reconnect to peer after ban period if pool is empty
try:
@ -382,7 +381,7 @@ proc txPoolEnabled*(ctx: EthWireRef; ena: bool) =
ctx.enableTxPool = if ena: Enabled else: Suspended
method getStatus*(ctx: EthWireRef): EthState
{.gcsafe, raises: [Defect,RlpError,EVMError].} =
{.gcsafe, raises: [RlpError,EVMError].} =
let
db = ctx.db
com = ctx.chain.com
@ -400,7 +399,7 @@ method getStatus*(ctx: EthWireRef): EthState
)
method getReceipts*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[seq[Receipt]]
{.gcsafe, raises: [Defect,RlpError].} =
{.gcsafe, raises: [RlpError].} =
let db = ctx.db
var header: BlockHeader
for blockHash in hashes:
@ -410,8 +409,7 @@ method getReceipts*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[seq[Receip
result.add @[]
trace "handlers.getReceipts: blockHeader not found", blockHash
method getPooledTxs*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[Transaction]
{.gcsafe, raises: [Defect,CatchableError].} =
method getPooledTxs*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[Transaction] =
let txPool = ctx.txPool
for txHash in hashes:
let res = txPool.getItem(txHash)
@ -421,7 +419,7 @@ method getPooledTxs*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[Transacti
trace "handlers.getPooledTxs: tx not found", txHash
method getBlockBodies*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[BlockBody]
{.gcsafe, raises: [Defect,RlpError].} =
{.gcsafe, raises: [RlpError].} =
let db = ctx.db
var body: BlockBody
for blockHash in hashes:
@ -432,7 +430,7 @@ method getBlockBodies*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[BlockBo
trace "handlers.getBlockBodies: blockBody not found", blockHash
method getBlockHeaders*(ctx: EthWireRef, req: BlocksRequest): seq[BlockHeader]
{.gcsafe, raises: [Defect,RlpError].} =
{.gcsafe, raises: [RlpError].} =
let db = ctx.db
var foundBlock: BlockHeader
result = newSeqOfCap[BlockHeader](req.maxResults)
@ -450,7 +448,7 @@ method getBlockHeaders*(ctx: EthWireRef, req: BlocksRequest): seq[BlockHeader]
result.add foundBlock
method handleAnnouncedTxs*(ctx: EthWireRef, peer: Peer, txs: openArray[Transaction])
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
if ctx.enableTxPool != Enabled:
when trMissingOrDisabledGossipOk:
notEnabled("handleAnnouncedTxs")
@ -491,8 +489,7 @@ method handleAnnouncedTxs*(ctx: EthWireRef, peer: Peer, txs: openArray[Transacti
asyncSpawn ctx.sendNewTxHashes(newTxHashes, peers[sendFull..^1])
method handleAnnouncedTxsHashes*(ctx: EthWireRef, peer: Peer, txHashes: openArray[Hash256])
{.gcsafe, raises: [Defect,CatchableError].} =
method handleAnnouncedTxsHashes*(ctx: EthWireRef, peer: Peer, txHashes: openArray[Hash256]) =
if ctx.enableTxPool != Enabled:
when trMissingOrDisabledGossipOk:
notEnabled("handleAnnouncedTxsHashes")
@ -523,7 +520,7 @@ method handleAnnouncedTxsHashes*(ctx: EthWireRef, peer: Peer, txHashes: openArra
asyncSpawn ctx.fetchTransactions(reqHashes, peer)
method handleNewBlock*(ctx: EthWireRef, peer: Peer, blk: EthBlock, totalDifficulty: DifficultyInt)
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
if ctx.chain.com.forkGTE(MergeFork):
debug "Dropping peer for sending NewBlock after merge (EIP-3675)",
peer, blockNumber=blk.header.blockNumber,
@ -538,7 +535,7 @@ method handleNewBlock*(ctx: EthWireRef, peer: Peer, blk: EthBlock, totalDifficul
)
method handleNewBlockHashes*(ctx: EthWireRef, peer: Peer, hashes: openArray[NewBlockHashesAnnounce])
{.gcsafe, raises: [Defect,CatchableError].} =
{.gcsafe, raises: [CatchableError].} =
if ctx.chain.com.forkGTE(MergeFork):
debug "Dropping peer for sending NewBlockHashes after merge (EIP-3675)",
peer, numHashes=hashes.len

View File

@ -15,7 +15,7 @@ import
./eth as handlers_eth,
./snap as handlers_snap
{.used, push raises: [Defect].}
{.used, push raises: [].}
# ------------------------------------------------------------------------------
# Public functions: convenience mappings for `eth`
@ -26,7 +26,7 @@ proc setEthHandlerNewBlocksAndHashes*(
blockHandler: NewBlockHandler;
hashesHandler: NewBlockHashesHandler;
arg: pointer;
) {.gcsafe, raises: [Defect,CatchableError].} =
) {.gcsafe, raises: [CatchableError].} =
let w = EthWireRef(node.protocolState protocol.eth)
w.setNewBlockHandler(blockHandler, arg)
w.setNewBlockHashesHandler(hashesHandler, arg)

View File

@ -16,7 +16,7 @@ import
../protocol/snap/snap_types,
../../core/chain
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "wire-protocol"

View File

@ -23,7 +23,7 @@ import
../utils/utils,
../common/common
{.push raises:[Defect].}
{.push raises:[].}
logScope:
topics = "legacy-sync"
@ -181,7 +181,7 @@ proc validateDifficulty(ctx: LegacySyncRef,
proc validateHeader(ctx: LegacySyncRef, header: BlockHeader,
height = none(BlockNumber)): bool
{.raises: [Defect,CatchableError].} =
{.raises: [CatchableError].} =
if header.parentHash == GENESIS_PARENT_HASH:
return true
@ -476,8 +476,7 @@ proc appendWorkItem(ctx: LegacySyncRef, hash: Hash256,
numBlocks : numBlocks,
state : Initial)
proc persistWorkItem(ctx: LegacySyncRef, wi: var WantedBlocks): ValidationResult
{.gcsafe, raises:[Defect,CatchableError].} =
proc persistWorkItem(ctx: LegacySyncRef, wi: var WantedBlocks): ValidationResult =
try:
result = ctx.chain.persistBlocks(wi.headers, wi.bodies)
except CatchableError as e:
@ -504,8 +503,7 @@ proc persistWorkItem(ctx: LegacySyncRef, wi: var WantedBlocks): ValidationResult
wi.headers = @[]
wi.bodies = @[]
proc persistPendingWorkItems(ctx: LegacySyncRef): (int, ValidationResult)
{.gcsafe, raises:[Defect,CatchableError].} =
proc persistPendingWorkItems(ctx: LegacySyncRef): (int, ValidationResult) =
var nextStartIndex = ctx.finalizedBlock + 1
var keepRunning = true
var hasOutOfOrderBlocks = false
@ -533,12 +531,11 @@ proc persistPendingWorkItems(ctx: LegacySyncRef): (int, ValidationResult)
ctx.hasOutOfOrderBlocks = hasOutOfOrderBlocks
proc returnWorkItem(ctx: LegacySyncRef, workItem: int): ValidationResult
{.gcsafe, raises:[Defect,CatchableError].} =
proc returnWorkItem(ctx: LegacySyncRef, workItem: int): ValidationResult =
let wi = addr ctx.workQueue[workItem]
let askedBlocks = wi.numBlocks.int
let receivedBlocks = wi.headers.len
let start = wi.startIndex
let start {.used.} = wi.startIndex
if askedBlocks == receivedBlocks:
trace "Work item complete",
@ -813,8 +810,8 @@ proc peersAgreeOnChain(a, b: Peer): Future[bool] {.async.} =
result = latestBlock.isSome and latestBlock.get.headers.len > 0
if latestBlock.isSome:
let blockNumber = if result: $latestBlock.get.headers[0].blockNumber
else: "missing"
let blockNumber {.used.} = if result: $latestBlock.get.headers[0].blockNumber
else: "missing"
trace trEthRecvReceivedBlockHeaders, peer=a,
count=latestBlock.get.headers.len, blockNumber
@ -937,7 +934,7 @@ proc onPeerDisconnected(ctx: LegacySyncRef, p: Peer) =
# ------------------------------------------------------------------------------
proc new*(T: type LegacySyncRef; ethNode: EthereumNode; chain: ChainRef): T
{.gcsafe, raises:[Defect,CatchableError].} =
{.gcsafe, raises:[CatchableError].} =
result = LegacySyncRef(
# workQueue: n/a
# endBlockNumber: n/a
@ -994,8 +991,7 @@ proc start*(ctx: LegacySyncRef) =
proc handleNewBlockHashes(ctx: LegacySyncRef,
peer: Peer,
hashes: openArray[NewBlockHashesAnnounce]) {.
gcsafe, raises: [Defect, CatchableError].} =
hashes: openArray[NewBlockHashesAnnounce]) =
trace trEthRecvNewBlockHashes,
numHash=hashes.len
@ -1049,7 +1045,7 @@ proc handleNewBlock(ctx: LegacySyncRef,
peer: Peer,
blk: EthBlock,
totalDifficulty: DifficultyInt) {.
gcsafe, raises: [Defect, CatchableError].} =
gcsafe, raises: [CatchableError].} =
trace trEthRecvNewBlock,
number=blk.header.blockNumber,
@ -1118,8 +1114,7 @@ proc handleNewBlock(ctx: LegacySyncRef,
proc newBlockHashesHandler*(arg: pointer,
peer: Peer,
hashes: openArray[NewBlockHashesAnnounce]) {.
gcsafe, raises: [Defect, CatchableError].} =
hashes: openArray[NewBlockHashesAnnounce]) =
let ctx = cast[LegacySyncRef](arg)
ctx.handleNewBlockHashes(peer, hashes)
@ -1127,7 +1122,7 @@ proc newBlockHandler*(arg: pointer,
peer: Peer,
blk: EthBlock,
totalDifficulty: DifficultyInt) {.
gcsafe, raises: [Defect, CatchableError].} =
gcsafe, raises: [CatchableError].} =
let ctx = cast[LegacySyncRef](arg)
ctx.handleNewBlock(peer, blk, totalDifficulty)

View File

@ -21,7 +21,7 @@ import
stew/byteutils,
".."/[protocol, sync_desc, types]
{.push raises:[Defect].}
{.push raises:[].}
logScope:
topics = "best-pivot"
@ -60,9 +60,9 @@ type
# Private helpers
# ------------------------------------------------------------------------------
proc hash(peer: Peer): Hash =
## Mixin `HashSet[Peer]` handler
hash(cast[pointer](peer))
#proc hash(peer: Peer): Hash =
# ## Mixin `HashSet[Peer]` handler
# hash(cast[pointer](peer))
template safeTransport(
bp: BestPivotWorkerRef;
@ -158,7 +158,7 @@ proc getBestHeader(
if hdrRespLen == 1:
let
header = hdrResp.get.headers[0]
blockNumber = header.blockNumber
blockNumber {.used.} = header.blockNumber
trace trEthRecvReceivedBlockHeaders, peer, hdrRespLen, blockNumber
bp.comFailCount = 0 # reset fail count
return ok(header)
@ -219,7 +219,7 @@ proc agreesOnChain(
if hdrResp.isSome:
let hdrRespLen = hdrResp.get.headers.len
if 0 < hdrRespLen:
let blockNumber = hdrResp.get.headers[0].blockNumber
let blockNumber {.used.} = hdrResp.get.headers[0].blockNumber
trace trEthRecvReceivedBlockHeaders, peer, start, fetch,
hdrRespLen, blockNumber
return ok()
@ -380,7 +380,7 @@ proc pivotNegotiate*(
if rx.isOk:
bp.global.trusted.incl peer
when extraTraceMessages:
let bestHeader =
let bestHeader {.used.} =
if bp.header.isSome: "#" & $bp.header.get.blockNumber
else: "nil"
trace "Accepting peer", peer, trusted=bp.global.trusted.len,
@ -397,7 +397,7 @@ proc pivotNegotiate*(
if bp.global.trusted.len == 0:
bp.global.trusted.incl peer
when extraTraceMessages:
let bestHeader =
let bestHeader {.used.} =
if bp.header.isSome: "#" & $bp.header.get.blockNumber
else: "nil"
trace "Assume initial trusted peer", peer,
@ -465,7 +465,7 @@ proc pivotNegotiate*(
# Evaluate status, finally
if bp.global.minPeers <= bp.global.trusted.len:
when extraTraceMessages:
let bestHeader =
let bestHeader {.used.} =
if bp.header.isSome: "#" & $bp.header.get.blockNumber
else: "nil"
trace "Peer trusted now", peer,

View File

@ -66,7 +66,7 @@ import
../../utils/utils,
".."/[protocol, sync_desc, types]
{.push raises:[Defect].}
{.push raises:[].}
logScope:
topics = "block-queue"
@ -334,7 +334,7 @@ proc fetchHeaders(
# Fetch headers from peer
var hdrResp: Option[blockHeadersObj]
block:
let reqLen = hdrReq.maxResults
let reqLen {.used.} = hdrReq.maxResults
qd.safeTransport("Error fetching block headers"):
hdrResp = await peer.getBlockHeaders(hdrReq)
# Beware of peer terminating the session
@ -507,7 +507,7 @@ proc blockQueueFetchStaged*(
return err(EmptyQueue)
let
peer = qd.peer
peer {.used.} = qd.peer
wi = rc.value.data
topAccepted = qd.global.topAccepted
startNumber = wi.headers[0].blockNumber
@ -618,7 +618,7 @@ proc blockQueueBacktrackWorker*(
var error = BacktrackDisabled
if qd.global.backtrack.isSome:
let
peer = qd.peer
peer {.used.} = qd.peer
wi = BlockItemRef(
# This dummy interval can savely merged back without any effect
blocks: highBlockRange,

View File

@ -11,7 +11,7 @@
import
chronos
{.push raises: [Defect].}
{.push raises: [].}
# Use `safeSetTimer` consistently, with a `ref T` argument if including one.
type

View File

@ -8,7 +8,6 @@
# those terms.
import
std/[tables],
chronicles,
chronos,
eth/p2p,

View File

@ -30,6 +30,9 @@ export
logScope:
topics = "eth66"
static:
const stopCompilerGossip {.used.} = Hash256().toHex
const
ethVersion* = 66
prettyEthProtoName* = "[eth/" & $ethVersion & "]"

View File

@ -30,6 +30,9 @@ export
logScope:
topics = "eth67"
static:
const stopCompilerGossip {.used.} = Hash256().toHex
const
ethVersion* = 67
prettyEthProtoName* = "[eth/" & $ethVersion & "]"

View File

@ -138,20 +138,20 @@ proc init(s: var FlowControlState,
s.minRecharge = minRecharge
s.lastUpdate = t
func canMakeRequest(s: FlowControlState,
maxCost: ReqCostInt): (LesTime, float64) =
## Returns the required waiting time before sending a request and
## the estimated buffer level afterwards (as a fraction of the limit)
const safetyMargin = 50
var maxCost = min(
maxCost + safetyMargin * s.minRecharge,
s.bufLimit)
if s.bufValue >= maxCost:
result[1] = float64(s.bufValue - maxCost) / float64(s.bufLimit)
else:
result[0] = (maxCost - s.bufValue) / s.minRecharge
#func canMakeRequest(s: FlowControlState,
# maxCost: ReqCostInt): (LesTime, float64) =
# ## Returns the required waiting time before sending a request and
# ## the estimated buffer level afterwards (as a fraction of the limit)
# const safetyMargin = 50
#
# var maxCost = min(
# maxCost + safetyMargin * s.minRecharge,
# s.bufLimit)
#
# if s.bufValue >= maxCost:
# result[1] = float64(s.bufValue - maxCost) / float64(s.bufLimit)
# else:
# result[0] = (maxCost - s.bufValue) / s.minRecharge
func canServeRequest(srv: LesNetwork): bool =
result = srv.reqCount < srv.maxReqCount and
@ -267,9 +267,9 @@ proc initFlowControl*(network: LesNetwork, les: ProtocolInfo,
warn "Failed to load persisted LES message stats. " &
"Flow control will be re-initilized."
proc canMakeRequest(peer: var LesPeer, maxCost: int): (LesTime, float64) =
peer.localFlowState.update now()
return peer.localFlowState.canMakeRequest(maxCost)
#proc canMakeRequest(peer: var LesPeer, maxCost: int): (LesTime, float64) =
# peer.localFlowState.update now()
# return peer.localFlowState.canMakeRequest(maxCost)
template getRequestCost(peer: LesPeer, localOrRemote: untyped,
msgId, costQuantity: int): ReqCostInt =

View File

@ -74,9 +74,9 @@ const
keyGenesisHash = "genesisHash"
## B_32: the hash of the Genesis block.
keyServeHeaders = "serveHeaders"
## (optional, no value)
## present if the peer can serve header chain downloads.
#keyServeHeaders = "serveHeaders"
# ## (optional, no value)
# ## present if the peer can serve header chain downloads.
keyServeChainSince = "serveChainSince"
## P (optional)
@ -160,7 +160,8 @@ proc getCostQuantity(fn: NimNode): tuple[quantityExpr, maxQuantity: NimNode] =
macro outgoingRequestDecorator(n: untyped): untyped =
result = n
let (costQuantity, maxQuantity) = n.getCostQuantity
#let (costQuantity, maxQuantity) = n.getCostQuantity
let (costQuantity, _) = n.getCostQuantity
result.body.add quote do:
trackOutgoingRequest(peer.networkState(les),

View File

@ -10,8 +10,9 @@
import
chronicles,
eth/[common, p2p/private/p2p_types]
# ../../types
eth/common
{.push raises: [].}
type
SnapAccount* = object

View File

@ -175,7 +175,7 @@ const
# recognise or set these hashes in `Account` when serialising RLP for `snap`.
proc snapRead*(rlp: var Rlp; T: type Account; strict: static[bool] = false): T
{.gcsafe, raises: [Defect, RlpError]} =
{.gcsafe, raises: [RlpError]} =
## RLP decoding for `Account`. The `snap` RLP representation of the account
## differs from standard `Account` RLP. Empty storage hash and empty code
## hash are each represented by an RLP zero-length string instead of the

View File

@ -17,7 +17,7 @@ import
./snap/[worker, worker_desc],
"."/[protocol, sync_desc, sync_sched]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-sync"

View File

@ -8,7 +8,7 @@
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [Defect].}
{.push raises: [].}
const
pivotTableLruEntriesMax* = 50

View File

@ -17,7 +17,7 @@ import
../protocol,
../types
{.push raises: [Defect].}
{.push raises: [].}
type
ByteArray32* = array[32,byte]
@ -157,7 +157,7 @@ proc init*(tag: var NodeTag; data: openArray[byte]): bool =
# ------------------------------------------------------------------------------
proc read*[T: NodeTag|NodeKey](rlp: var Rlp, W: type T): T
{.gcsafe, raises: [Defect,RlpError].} =
{.gcsafe, raises: [RlpError].} =
rlp.read(Hash256).to(T)
proc append*(writer: var RlpWriter, val: NodeTag|NodeKey) =

View File

@ -23,7 +23,7 @@ import
./worker/db/[hexary_desc, snapdb_desc, snapdb_pivot],
"."/[range_desc, worker_desc]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-buddy"
@ -239,10 +239,10 @@ proc runMulti*(buddy: SnapBuddyRef) {.async.} =
when extraTraceMessages:
block:
let
nAccounts = env.nAccounts
nSlotLists = env.nSlotLists
processed = env.fetchAccounts.processed.fullFactor.toPC(2)
nStoQu = env.fetchStorageFull.len + env.fetchStoragePart.len
nAccounts {.used.} = env.nAccounts
nSlotLists {.used.} = env.nSlotLists
processed {.used.} = env.fetchAccounts.processed.fullFactor.toPC(2)
nStoQu {.used.} = env.fetchStorageFull.len + env.fetchStoragePart.len
trace "Multi sync runner", peer, pivot, nAccounts, nSlotLists, processed,
nStoQu

View File

@ -13,6 +13,8 @@ import
../../../sync_desc,
../../constants
{.push raises: [].}
type
ComErrorStatsRef* = ref object
## particular error counters so connections will not be cut immediately

View File

@ -21,7 +21,7 @@ import
"../.."/[constants, range_desc, worker_desc],
./com_error
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-fetch"
@ -48,8 +48,9 @@ proc getAccountRangeReq(
root, iv.minPt.to(Hash256), iv.maxPt.to(Hash256), fetchRequestBytesLimit)
return ok(reply)
except CatchableError as e:
let error {.used.} = e.msg
trace trSnapRecvError & "waiting for GetAccountRange reply", peer, pivot,
error=e.msg
error
return err()
# ------------------------------------------------------------------------------
@ -64,7 +65,7 @@ proc getAccountRange*(
): Future[Result[GetAccountRange,ComError]] {.async.} =
## Fetch data using the `snap#` protocol, returns the range covered.
let
peer = buddy.peer
peer {.used.} = buddy.peer
if trSnapTracePacketsOk:
trace trSnapSendSending & "GetAccountRange", peer, pivot, accRange=iv

View File

@ -18,7 +18,7 @@ import
"../.."/[constants, range_desc, worker_desc],
./com_error
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-fetch"
@ -71,8 +71,9 @@ proc getStorageRangesReq(
return ok(reply)
except CatchableError as e:
let error {.used.} = e.msg
trace trSnapRecvError & "waiting for GetStorageRanges reply", peer, pivot,
error=e.msg
error
return err()
# ------------------------------------------------------------------------------
@ -93,7 +94,7 @@ proc getStorageRanges*(
## accounts are asked for without a range (non-zero `firstSlot` fields are
## ignored of later sequence items.)
let
peer = buddy.peer
peer {.used.} = buddy.peer
var
nAccounts = accounts.len

View File

@ -16,7 +16,7 @@ import
"../.."/[constants, range_desc, worker_desc],
./com_error
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-fetch"
@ -48,8 +48,9 @@ proc getTrieNodesReq(
return ok(reply)
except CatchableError as e:
let error {.used.} = e.msg
trace trSnapRecvError & "waiting for GetByteCodes reply", peer, pivot,
error=e.msg
error
return err()
# ------------------------------------------------------------------------------
@ -66,7 +67,7 @@ proc getTrieNodes*(
## Fetch data using the `snap#` protocol, returns the trie nodes requested
## (if any.)
let
peer = buddy.peer
peer {.used.} = buddy.peer
nPaths = paths.len
if nPaths == 0:

View File

@ -15,7 +15,7 @@ import
../../range_desc,
./hexary_error
{.push raises: [Defect].}
{.push raises: [].}
type
HexaryPpFn* = proc(key: RepairKey): string {.gcsafe.}

View File

@ -77,7 +77,7 @@ import
../../range_desc,
"."/[hexary_desc, hexary_error, hexary_nearby, hexary_paths]
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private helpers
@ -241,7 +241,7 @@ proc decomposeLeftImpl(
iv: NodeTagRange; # Proofed range of leaf paths
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
): Result[seq[NodeSpecs],HexaryError]
{.gcsafe, raises: [Defect,RlpError,KeyError].} =
{.gcsafe, raises: [RlpError,KeyError].} =
## Database agnostic implementation of `hexaryEnvelopeDecompose()`.
var nodeSpex: seq[NodeSpecs]
@ -272,7 +272,7 @@ proc decomposeRightImpl(
iv: NodeTagRange; # Proofed range of leaf paths
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
): Result[seq[NodeSpecs],HexaryError]
{.gcsafe, raises: [Defect,RlpError,KeyError].} =
{.gcsafe, raises: [RlpError,KeyError].} =
## Database agnostic implementation of `hexaryEnvelopeDecompose()`.
var nodeSpex: seq[NodeSpecs]
if iv.maxPt < env.maxPt:
@ -314,7 +314,7 @@ proc hexaryEnvelope*(node: NodeSpecs): NodeTagRange =
proc hexaryEnvelopeUniq*(
partialPaths: openArray[Blob];
): seq[Blob]
{.gcsafe, raises: [Defect,KeyError].} =
{.gcsafe, raises: [KeyError].} =
## Sort and simplify a list of partial paths by sorting envelopes while
## removing nested entries.
if partialPaths.len < 2:
@ -348,7 +348,7 @@ proc hexaryEnvelopeUniq*(
proc hexaryEnvelopeUniq*(
nodes: openArray[NodeSpecs];
): seq[NodeSpecs]
{.gcsafe, raises: [Defect,KeyError].} =
{.gcsafe, raises: [KeyError].} =
## Variant of `hexaryEnvelopeUniq` for sorting a `NodeSpecs` list by
## partial paths.
if nodes.len < 2:
@ -460,7 +460,7 @@ proc hexaryEnvelopeDecompose*(
iv: NodeTagRange; # Proofed range of leaf paths
db: HexaryTreeDbRef; # Database
): Result[seq[NodeSpecs],HexaryError]
{.gcsafe, raises: [Defect,KeyError].} =
{.gcsafe, raises: [KeyError].} =
## This function computes the decomposition of the argument `partialPath`
## relative to the argument range `iv`.
##
@ -523,7 +523,7 @@ proc hexaryEnvelopeDecompose*(
iv: NodeTagRange; # Proofed range of leaf paths
getFn: HexaryGetFn; # Database abstraction
): Result[seq[NodeSpecs],HexaryError]
{.gcsafe, raises: [Defect,RlpError].} =
{.gcsafe, raises: [RlpError].} =
## Variant of `hexaryEnvelopeDecompose()` for persistent database.
let env = partialPath.hexaryEnvelope
if iv.maxPt < env.minPt or env.maxPt < iv.minPt:

View File

@ -9,19 +9,19 @@
# except according to those terms.
import
std/[sequtils, sets, strutils, tables],
std/[sets, tables],
eth/[common, trie/nibbles],
../../range_desc,
"."/[hexary_desc, hexary_error]
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private debugging helpers
# ------------------------------------------------------------------------------
proc pp(q: openArray[byte]): string =
q.toSeq.mapIt(it.toHex(2)).join.toLowerAscii.pp(hex = true)
#proc pp(q: openArray[byte]): string =
# q.toSeq.mapIt(it.toHex(2)).join.toLowerAscii.pp(hex = true)
# ------------------------------------------------------------------------------
# Public
@ -33,7 +33,7 @@ proc hexaryImport*(
unrefNodes: var HashSet[RepairKey]; ## Keep track of freestanding nodes
nodeRefs: var HashSet[RepairKey]; ## Ditto
): HexaryNodeReport
{.gcsafe, raises: [Defect, RlpError, KeyError].} =
{.gcsafe, raises: [RlpError, KeyError].} =
## Decode a single trie item for adding to the table and add it to the
## database. Branch and exrension record links are collected.
if recData.len == 0:
@ -128,7 +128,7 @@ proc hexaryImport*(
db: HexaryTreeDbRef; ## Contains node table
rec: NodeSpecs; ## Expected key and value data pair
): HexaryNodeReport
{.gcsafe, raises: [Defect, RlpError, KeyError].} =
{.gcsafe, raises: [RlpError, KeyError].} =
## Ditto without referece checks but expected node key argument.
if rec.data.len == 0:
return HexaryNodeReport(error: RlpNonEmptyBlobExpected)

View File

@ -16,7 +16,7 @@ import
../../range_desc,
"."/[hexary_desc, hexary_paths]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-db"
@ -78,7 +78,7 @@ proc processLink(
inspect: var seq[(RepairKey,NibblesSeq)];
trail: NibblesSeq;
child: RepairKey;
) {.gcsafe, raises: [Defect,KeyError]} =
) =
## Helper for `hexaryInspect()`
if not child.isZero:
if not child.isNodeKey:
@ -99,7 +99,7 @@ proc processLink(
inspect: var seq[(NodeKey,NibblesSeq)];
trail: NibblesSeq;
child: Rlp;
) {.gcsafe, raises: [Defect,RlpError]} =
) {.gcsafe, raises: [RlpError]} =
## Ditto
if not child.isEmpty:
let childBlob = child.toBytes
@ -151,7 +151,7 @@ proc hexaryInspectTrie*(
stopAtLevel = 64u8; # Width-first depth level
maxDangling = high(int); # Maximal number of dangling results
): TrieNodeStat
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Starting with the argument list `paths`, find all the non-leaf nodes in
## the hexary trie which have at least one node key reference missing in
## the trie database. The references for these nodes are collected and
@ -226,7 +226,7 @@ proc hexaryInspectTrie*(
let
(rKey, parentTrail) = reVisit[n]
node = db.tab[rKey]
parent = rKey.convertTo(NodeKey)
# parent = rKey.convertTo(NodeKey) -- unused
case node.kind:
of Extension:
@ -267,7 +267,7 @@ proc hexaryInspectTrie*(
stopAtLevel = 64u8; # Width-first depth level
maxDangling = high(int); # Maximal number of dangling results
): TrieNodeStat
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Variant of `hexaryInspectTrie()` for persistent database.
when extraTraceMessages:
let nPaths = paths.len

View File

@ -15,13 +15,13 @@
## re-factored database layer.
import
std/[sequtils, strutils, tables],
std/[tables],
eth/[common, trie/nibbles],
stew/results,
../../range_desc,
"."/[hexary_desc, hexary_error, hexary_paths]
{.push raises: [Defect].}
{.push raises: [].}
type
RPathXStep = object
@ -34,13 +34,13 @@ type
# Private debugging helpers
# ------------------------------------------------------------------------------
proc pp(w: RPathXStep; db: HexaryTreeDbRef): string =
let y = if w.canLock: "lockOk" else: "noLock"
"(" & $w.pos & "," & y & "," & w.step.pp(db) & ")"
#proc pp(w: RPathXStep; db: HexaryTreeDbRef): string =
# let y = if w.canLock: "lockOk" else: "noLock"
# "(" & $w.pos & "," & y & "," & w.step.pp(db) & ")"
proc pp(w: seq[RPathXStep]; db: HexaryTreeDbRef; indent = 4): string =
let pfx = "\n" & " ".repeat(indent)
w.mapIt(it.pp(db)).join(pfx)
#proc pp(w: seq[RPathXStep]; db: HexaryTreeDbRef; indent = 4): string =
# let pfx = "\n" & " ".repeat(indent)
# w.mapIt(it.pp(db)).join(pfx)
# ------------------------------------------------------------------------------
# Private helpers
@ -72,14 +72,14 @@ proc `xPfx=`(node: RNodeRef, val: NibblesSeq) =
of Branch:
doAssert node.kind != Branch # Ooops
proc xData(node: RNodeRef): Blob =
case node.kind:
of Branch:
return node.bData
of Leaf:
return node.lData
of Extension:
doAssert node.kind != Extension # Ooops
#proc xData(node: RNodeRef): Blob =
# case node.kind:
# of Branch:
# return node.bData
# of Leaf:
# return node.lData
# of Extension:
# doAssert node.kind != Extension # Ooops
proc `xData=`(node: RNodeRef; val: Blob) =
case node.kind:
@ -200,7 +200,7 @@ proc rTreeInterpolate(
rPath: RPath;
db: HexaryTreeDbRef;
): RPath
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Extend path, add missing nodes to tree. The last node added will be
## a `Leaf` node if this function succeeds.
##
@ -273,7 +273,7 @@ proc rTreeInterpolate(
db: HexaryTreeDbRef;
payload: Blob;
): RPath
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Variant of `rTreeExtend()` which completes a `Leaf` record.
result = rPath.rTreeInterpolate(db)
if 0 < result.path.len and result.tail.len == 0:
@ -285,8 +285,7 @@ proc rTreeInterpolate(
proc rTreeUpdateKeys(
rPath: RPath;
db: HexaryTreeDbRef;
): Result[void,bool]
{.gcsafe, raises: [Defect,KeyError]} =
): Result[void,bool] =
## The argument `rPath` is assumed to organise database nodes as
##
## root -> ... -> () -> () -> ... -> () -> () ...
@ -442,7 +441,7 @@ proc rTreePrefill(
db: HexaryTreeDbRef;
rootKey: NodeKey;
dbItems: var seq[RLeafSpecs];
) {.gcsafe, raises: [Defect,KeyError].} =
) =
## Fill missing root node.
let nibbles = dbItems[^1].pathTag.to(NodeKey).ByteArray32.initNibbleRange
if dbItems.len == 1:
@ -452,7 +451,7 @@ proc rTreePrefill(
lPfx: nibbles,
lData: dbItems[^1].payload)
else:
let key = db.newRepairKey()
# let key = db.newRepairKey() -- notused
var node = RNodeRef(
state: TmpRoot,
kind: Branch)
@ -463,7 +462,7 @@ proc rTreeSquashRootNode(
db: HexaryTreeDbRef;
rootKey: NodeKey;
): RNodeRef
{.gcsafe, raises: [Defect,KeyError].} =
{.gcsafe, raises: [KeyError].} =
## Handle fringe case and return root node. This function assumes that the
## root node has been installed, already. This function will check the root
## node for a combination `Branch->Extension/Leaf` for a single child root
@ -513,7 +512,7 @@ proc hexaryInterpolate*(
dbItems: var seq[RLeafSpecs]; # List of path and leaf items
bootstrap = false; # Can create root node on-the-fly
): Result[void,HexaryError]
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## From the argument list `dbItems`, leaf nodes will be added to the hexary
## trie while interpolating the path for the leaf nodes by adding missing
## nodes. This action is typically not a full trie rebuild. Some partial node

View File

@ -16,13 +16,13 @@ import
../../range_desc,
"."/[hexary_desc, hexary_error, hexary_paths]
{.push raises: [Defect].}
{.push raises: [].}
proc hexaryNearbyRight*(path: RPath; db: HexaryTreeDbRef;
): Result[RPath,HexaryError] {.gcsafe, raises: [Defect,KeyError]}
): Result[RPath,HexaryError] {.gcsafe, raises: [KeyError]}
proc hexaryNearbyRight*(path: XPath; getFn: HexaryGetFn;
): Result[XPath,HexaryError] {.gcsafe, raises: [Defect,RlpError]}
): Result[XPath,HexaryError] {.gcsafe, raises: [RlpError]}
# ------------------------------------------------------------------------------
# Private helpers
@ -31,7 +31,7 @@ proc hexaryNearbyRight*(path: XPath; getFn: HexaryGetFn;
proc toBranchNode(
rlp: Rlp
): XNodeObj
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
var rlp = rlp
XNodeObj(kind: Branch, bLink: rlp.read(array[17,Blob]))
@ -39,14 +39,14 @@ proc toLeafNode(
rlp: Rlp;
pSegm: NibblesSeq
): XNodeObj
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
XNodeObj(kind: Leaf, lPfx: pSegm, lData: rlp.listElem(1).toBytes)
proc toExtensionNode(
rlp: Rlp;
pSegm: NibblesSeq
): XNodeObj
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
XNodeObj(kind: Extension, ePfx: pSegm, eLink: rlp.listElem(1).toBytes)
proc `<=`(a, b: NibblesSeq): bool =
@ -91,7 +91,7 @@ proc hexaryNearbyRightImpl(
rootKey: NodeKey; # State root
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,KeyError,RlpError]} =
{.gcsafe, raises: [KeyError,RlpError]} =
## Wrapper
let path = block:
let rc = baseTag.hexaryPath(rootKey, db).hexaryNearbyRight(db)
@ -111,7 +111,7 @@ proc hexaryNearbyLeftImpl(
rootKey: NodeKey; # State root
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,KeyError,RlpError]} =
{.gcsafe, raises: [KeyError,RlpError]} =
## Wrapper
let path = block:
let rc = baseTag.hexaryPath(rootKey, db).hexaryNearbyLeft(db)
@ -136,7 +136,7 @@ proc completeLeast(
db: HexaryTreeDbRef;
pathLenMax = 64;
): Result[RPath,HexaryError]
{.gcsafe, raises: [Defect,KeyError].} =
{.gcsafe, raises: [KeyError].} =
## Extend path using least nodes without recursion.
var rPath = RPath(path: path.path)
@ -184,7 +184,7 @@ proc completeLeast(
getFn: HexaryGetFn;
pathLenMax = 64;
): Result[XPath,HexaryError]
{.gcsafe, raises: [Defect,RlpError].} =
{.gcsafe, raises: [RlpError].} =
## Variant of `completeLeast()` for persistent database
var xPath = XPath(path: path.path)
@ -243,7 +243,7 @@ proc completeMost(
db: HexaryTreeDbRef;
pathLenMax = 64;
): Result[RPath,HexaryError]
{.gcsafe, raises: [Defect,KeyError].} =
{.gcsafe, raises: [KeyError].} =
## Extend path using max nodes without recursion.
var rPath = RPath(path: path.path)
@ -290,7 +290,7 @@ proc completeMost(
getFn: HexaryGetFn;
pathLenMax = 64;
): Result[XPath,HexaryError]
{.gcsafe, raises: [Defect,RlpError].} =
{.gcsafe, raises: [RlpError].} =
## Variant of `completeLeast()` for persistent database
var xPath = XPath(path: path.path)
@ -539,7 +539,7 @@ proc hexaryNearbyRightMissing*(
path: RPath;
db: HexaryTreeDbRef;
): bool
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Returns `true` if the maximally extended argument nodes `path` is the
## rightmost on the hexary trie database. It verifies that there is no more
## leaf entry to the right of the argument `path`.
@ -577,7 +577,7 @@ proc hexaryNearbyLeft*(
path: RPath; # Partially expanded path
db: HexaryTreeDbRef; # Database
): Result[RPath,HexaryError]
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Similar to `hexaryNearbyRight()`.
##
## This code is intended to be used for verifying a right-bound proof to
@ -619,10 +619,7 @@ proc hexaryNearbyLeft*(
let nextNibble = rPath.tail[0].int8
if 0 < nextNibble:
let
nextNode = db.tab[topLink]
rPathLen = rPath.path.len # in case of backtracking
rPathTail = rPath.tail
let nextNode = db.tab[topLink]
case nextNode.kind
of Leaf:
if nextNode.lPfx <= rPath.tail:
@ -675,7 +672,7 @@ proc hexaryNearbyLeft*(
path: XPath; # Partially expanded path
getFn: HexaryGetFn; # Database abstraction
): Result[XPath,HexaryError]
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Variant of `hexaryNearbyLeft()` for persistant database
# Some easy cases
@ -714,10 +711,7 @@ proc hexaryNearbyLeft*(
let nextNibble = xPath.tail[0].int8
if 0 < nextNibble:
let
nextNodeRlp = rlpFromBytes topLink.getFn()
xPathLen = xPath.path.len # in case of backtracking
xPathTail = xPath.tail
let nextNodeRlp = rlpFromBytes topLink.getFn()
case nextNodeRlp.listLen:
of 2:
if nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1] <= xPath.tail:
@ -773,7 +767,7 @@ proc hexaryNearbyRight*(
rootKey: NodeKey; # State root
db: HexaryTreeDbRef; # Database
): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Variant of `hexaryNearbyRight()` working with `NodeTag` arguments rather
## than `RPath()` ones.
noRlpErrorOops("hexaryNearbyRight"):
@ -784,7 +778,7 @@ proc hexaryNearbyRight*(
rootKey: NodeKey; # State root
getFn: HexaryGetFn; # Database abstraction
): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Variant of `hexaryNearbyRight()` for persistant database
noKeyErrorOops("hexaryNearbyRight"):
return baseTag.hexaryNearbyRightImpl(rootKey, getFn)
@ -795,7 +789,7 @@ proc hexaryNearbyLeft*(
rootKey: NodeKey; # State root
db: HexaryTreeDbRef; # Database
): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Similar to `hexaryNearbyRight()` for `NodeKey` arguments.
noRlpErrorOops("hexaryNearbyLeft"):
return baseTag.hexaryNearbyLeftImpl(rootKey, db)
@ -805,7 +799,7 @@ proc hexaryNearbyLeft*(
rootKey: NodeKey; # State root
getFn: HexaryGetFn; # Database abstraction
): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Variant of `hexaryNearbyLeft()` for persistant database
noKeyErrorOops("hexaryNearbyLeft"):
return baseTag.hexaryNearbyLeftImpl(rootKey, getFn)

View File

@ -17,19 +17,23 @@ import
../../range_desc,
./hexary_desc
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private debugging helpers
# ------------------------------------------------------------------------------
proc pp(w: Blob; db: HexaryTreeDbRef): string =
w.convertTo(RepairKey).pp(db)
#proc pp(w: Blob; db: HexaryTreeDbRef): string =
# w.convertTo(RepairKey).pp(db)
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc to(a: RepairKey; T: type RepairKey): RepairKey =
## Needed for generic function
a
proc convertTo(key: RepairKey; T: type NodeKey): T =
## Might be lossy, check before use
discard result.init(key.ByteArray33[1 .. 32])
@ -63,7 +67,7 @@ proc getNibblesImpl(path: XPath|RPath; start, maxLen: int): NibblesSeq =
proc toBranchNode(
rlp: Rlp
): XNodeObj
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
var rlp = rlp
XNodeObj(kind: Branch, bLink: rlp.read(array[17,Blob]))
@ -71,14 +75,14 @@ proc toLeafNode(
rlp: Rlp;
pSegm: NibblesSeq
): XNodeObj
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
XNodeObj(kind: Leaf, lPfx: pSegm, lData: rlp.listElem(1).toBytes)
proc toExtensionNode(
rlp: Rlp;
pSegm: NibblesSeq
): XNodeObj
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
XNodeObj(kind: Extension, ePfx: pSegm, eLink: rlp.listElem(1).toBytes)
# ------------------------------------------------------------------------------
@ -90,7 +94,7 @@ proc pathExtend(
key: RepairKey;
db: HexaryTreeDbRef;
): RPath
{.gcsafe, raises: [Defect,KeyError].} =
{.gcsafe, raises: [KeyError].} =
## For the given path, extend to the longest possible repair tree `db`
## path following the argument `path.tail`.
result = path
@ -128,7 +132,7 @@ proc pathExtend(
key: Blob;
getFn: HexaryGetFn;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Ditto for `XPath` rather than `RPath`
result = path
var key = key
@ -189,7 +193,7 @@ proc pathLeast(
key: Blob;
getFn: HexaryGetFn;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## For the partial path given, extend by branch nodes with least node
## indices.
result = path
@ -279,7 +283,7 @@ proc pathMost(
key: Blob;
getFn: HexaryGetFn;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## For the partial path given, extend by branch nodes with greatest node
## indices.
result = path
@ -424,11 +428,10 @@ proc hexaryPath*(
rootKey: NodeKey|RepairKey; # State root
db: HexaryTreeDbRef; # Database
): RPath
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Compute the longest possible repair tree `db` path matching the `nodeKey`
## nibbles. The `nodeNey` path argument comes before the `db` one for
## supporting a more functional notation.
proc to(a: RepairKey; T: type RepairKey): RepairKey = a
RPath(tail: partialPath).pathExtend(rootKey.to(RepairKey), db)
proc hexaryPath*(
@ -436,7 +439,7 @@ proc hexaryPath*(
rootKey: NodeKey|RepairKey;
db: HexaryTreeDbRef;
): RPath
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Variant of `hexaryPath` for a node key.
nodeKey.to(NibblesSeq).hexaryPath(rootKey, db)
@ -445,7 +448,7 @@ proc hexaryPath*(
rootKey: NodeKey|RepairKey;
db: HexaryTreeDbRef;
): RPath
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Variant of `hexaryPath` for a node tag.
nodeTag.to(NodeKey).hexaryPath(rootKey, db)
@ -464,7 +467,7 @@ proc hexaryPath*(
rootKey: NodeKey; # State root
getFn: HexaryGetFn; # Database abstraction
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Compute the longest possible path on an arbitrary hexary trie.
XPath(tail: partialPath).pathExtend(rootKey.to(Blob), getFn)
@ -473,7 +476,7 @@ proc hexaryPath*(
rootKey: NodeKey;
getFn: HexaryGetFn;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Variant of `hexaryPath` for a node key..
nodeKey.to(NibblesSeq).hexaryPath(rootKey, getFn)
@ -482,7 +485,7 @@ proc hexaryPath*(
rootKey: NodeKey;
getFn: HexaryGetFn;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Variant of `hexaryPath` for a node tag..
nodeTag.to(NodeKey).hexaryPath(rootKey, getFn)
@ -491,7 +494,7 @@ proc hexaryPath*(
rootKey: NodeKey;
getFn: HexaryGetFn;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Variant of `hexaryPath` for a hex encoded partial path.
partialPath.hexPrefixDecode[1].hexaryPath(rootKey, getFn)
@ -505,7 +508,7 @@ proc hexaryPathNodeKey*(
db: HexaryTreeDbRef; # Database
missingOk = false; # Also return key for missing node
): Result[NodeKey,void]
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Returns the `NodeKey` equivalent for the argment `partialPath` if this
## node is available in the database. If the argument flag `missingOk` is
## set`true` and the last node addressed by the argument path is missing,
@ -529,7 +532,7 @@ proc hexaryPathNodeKey*(
db: HexaryTreeDbRef; # Database
missingOk = false; # Also return key for missing node
): Result[NodeKey,void]
{.gcsafe, raises: [Defect,KeyError]} =
{.gcsafe, raises: [KeyError]} =
## Variant of `hexaryPathNodeKey()` for hex encoded partial path.
partialPath.hexPrefixDecode[1].hexaryPathNodeKey(rootKey, db, missingOk)
@ -540,7 +543,7 @@ proc hexaryPathNodeKey*(
getFn: HexaryGetFn; # Database abstraction
missingOk = false; # Also return key for missing node
): Result[NodeKey,void]
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Variant of `hexaryPathNodeKey()` for persistent database.
let steps = partialPath.hexaryPath(rootKey, getFn)
if 0 < steps.path.len and steps.tail.len == 0:
@ -561,7 +564,7 @@ proc hexaryPathNodeKey*(
getFn: HexaryGetFn; # Database abstraction
missingOk = false; # Also return key for missing node
): Result[NodeKey,void]
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Variant of `hexaryPathNodeKey()` for persistent database and
## hex encoded partial path.
partialPath.hexPrefixDecode[1].hexaryPathNodeKey(rootKey, getFn, missingOk)
@ -590,7 +593,7 @@ proc next*(
getFn: HexaryGetFn;
minDepth = 64;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Advance the argument `path` to the next leaf node (if any.). The
## `minDepth` argument requires the result of `next()` to satisfy
## `minDepth <= next().getNibbles.len`.
@ -621,7 +624,7 @@ proc prev*(
getFn: HexaryGetFn;
minDepth = 64;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [RlpError]} =
## Advance the argument `path` to the previous leaf node (if any.) The
## `minDepth` argument requires the result of `next()` to satisfy
## `minDepth <= next().getNibbles.len`.

View File

@ -16,7 +16,7 @@ import
../../range_desc,
"."/[hexary_desc, hexary_error, hexary_nearby, hexary_paths]
{.push raises: [Defect].}
{.push raises: [].}
type
RangeLeaf* = object

View File

@ -16,7 +16,7 @@ import
rocksdb,
../../../../db/[kvstore_rocksdb, select_backend]
{.push raises: [Defect].}
{.push raises: [].}
type
RockyBulkLoadRef* = ref object of RootObj
@ -54,7 +54,7 @@ proc init*(T: type RockyBulkLoadRef; db: RocksStoreRef): T =
RockyBulkLoadRef.init(db, rocksdb_envoptions_create())
proc clearCacheFile*(db: RocksStoreRef; fileName: string): bool
{.gcsafe, raises: [Defect,OSError].} =
{.gcsafe, raises: [OSError].} =
## Remove left-over cache file from an imcomplete previous session. The
## return value `true` indicated that a cache file was detected.
discard
@ -64,7 +64,7 @@ proc clearCacheFile*(db: RocksStoreRef; fileName: string): bool
filePath.removeFile
return true
proc destroy*(rbl: RockyBulkLoadRef) {.gcsafe, raises: [Defect,OSError].} =
proc destroy*(rbl: RockyBulkLoadRef) {.gcsafe, raises: [OSError].} =
## Destructor, free memory resources and delete temporary file. This function
## can always be called even though `finish()` will call `destroy()`
## automatically if successful.
@ -159,7 +159,7 @@ proc add*(
proc finish*(
rbl: RockyBulkLoadRef
): Result[int64,void]
{.gcsafe, raises: [Defect,OSError].} =
{.gcsafe, raises: [OSError].} =
## Commit collected and cached data to the database. This function implies
## `destroy()` if successful. Otherwise `destroy()` must be called
## explicitely, e.g. after error analysis.

View File

@ -18,7 +18,7 @@ import
hexary_interpolate, hexary_inspect, hexary_paths, snapdb_desc,
snapdb_persistent]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-db"
@ -70,7 +70,7 @@ proc persistentAccounts(
db: HexaryTreeDbRef; ## Current table
ps: SnapDbAccountsRef; ## For persistent database
): Result[void,HexaryError]
{.gcsafe, raises: [Defect,OSError,KeyError].} =
{.gcsafe, raises: [OSError,KeyError].} =
## Store accounts trie table on databse
if ps.rockDb.isNil:
let rc = db.persistentAccountsPut(ps.kvDb)
@ -85,8 +85,7 @@ proc collectAccounts(
peer: Peer, ## for log messages
base: NodeTag;
acc: seq[PackedAccount];
): Result[seq[RLeafSpecs],HexaryError]
{.gcsafe, raises: [Defect, RlpError].} =
): Result[seq[RLeafSpecs],HexaryError] =
## Repack account records into a `seq[RLeafSpecs]` queue. The argument data
## `acc` are as received with the snap message `AccountRange`).
##

View File

@ -17,7 +17,7 @@ import
"."/[hexary_desc, hexary_error, hexary_import, hexary_nearby,
hexary_paths, rocky_bulk_load]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-db"
@ -70,8 +70,8 @@ proc toKey(a: RepairKey; ps: SnapDbBaseRef): uint =
proc toKey(a: NodeKey; ps: SnapDbBaseRef): uint =
a.to(RepairKey).toKey(ps)
proc toKey(a: NodeTag; ps: SnapDbBaseRef): uint =
a.to(NodeKey).toKey(ps)
#proc toKey(a: NodeTag; ps: SnapDbBaseRef): uint =
# a.to(NodeKey).toKey(ps)
proc ppImpl(a: RepairKey; pv: SnapDbRef): string =
if a.isZero: "ø" else:"$" & $a.toKey(pv)
@ -216,7 +216,7 @@ proc mergeProofs*(
proof: seq[Blob]; ## Node records
freeStandingOk = false; ## Remove freestanding nodes
): Result[void,HexaryError]
{.gcsafe, raises: [Defect,RlpError,KeyError].} =
{.gcsafe, raises: [RlpError,KeyError].} =
## Import proof records (as received with snap message) into a hexary trie
## of the repair table. These hexary trie records can be extended to a full
## trie at a later stage and used for validating account data.
@ -255,7 +255,7 @@ proc verifyLowerBound*(
base: NodeTag; ## Before or at first account entry in `data`
first: NodeTag; ## First account key
): Result[void,HexaryError]
{.gcsafe, raises: [Defect, KeyError].} =
{.gcsafe, raises: [KeyError].} =
## Verify that `base` is to the left of the first leaf entry and there is
## nothing in between.
var error: HexaryError
@ -279,7 +279,7 @@ proc verifyNoMoreRight*(
peer: Peer; ## For log messages
base: NodeTag; ## Before or at first account entry in `data`
): Result[void,HexaryError]
{.gcsafe, raises: [Defect, KeyError].} =
{.gcsafe, raises: [KeyError].} =
## Verify that there is are no more leaf entries to the right of and
## including `base`.
let

View File

@ -16,7 +16,7 @@ import
../../range_desc,
"."/[hexary_desc, hexary_error, rocky_bulk_load, snapdb_desc]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-db"
@ -140,7 +140,7 @@ proc persistentStateRootPut*(
db: TrieDatabaseRef;
root: NodeKey;
data: Blob;
) {.gcsafe, raises: [Defect, RlpError].} =
) {.gcsafe, raises: [RlpError].} =
## Save or update state root registry data.
const
zeroKey = NodeKey.default
@ -180,7 +180,7 @@ proc persistentAccountsPut*(
db: HexaryTreeDbRef;
rocky: RocksStoreRef
): Result[void,HexaryError]
{.gcsafe, raises: [Defect,OSError,KeyError].} =
{.gcsafe, raises: [OSError,KeyError].} =
## SST based bulk load on `rocksdb`.
if rocky.isNil:
return err(NoRocksDbBackend)
@ -229,7 +229,7 @@ proc persistentStorageSlotsPut*(
db: HexaryTreeDbRef;
rocky: RocksStoreRef
): Result[void,HexaryError]
{.gcsafe, raises: [Defect,OSError,KeyError].} =
{.gcsafe, raises: [OSError,KeyError].} =
## SST based bulk load on `rocksdb`.
if rocky.isNil:
return err(NoRocksDbBackend)

View File

@ -14,7 +14,7 @@ import
../../range_desc,
"."/[hexary_error, snapdb_desc, snapdb_persistent]
{.push raises: [Defect].}
{.push raises: [].}
type
SnapDbPivotRegistry* = object
@ -28,7 +28,7 @@ type
slotAccounts*: seq[NodeKey] ## List of accounts with storage slots
const
extraTraceMessages = false or true
extraTraceMessages {.used.} = false or true
# ------------------------------------------------------------------------------
# Private helpers

View File

@ -19,7 +19,7 @@ import
hexary_inspect, hexary_interpolate, hexary_paths, snapdb_desc,
snapdb_persistent]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-db"
@ -39,15 +39,15 @@ type
proc to(h: Hash256; T: type NodeKey): T =
h.data.T
proc convertTo(data: openArray[byte]; T: type Hash256): T =
discard result.data.NodeKey.init(data) # size error => zero
#proc convertTo(data: openArray[byte]; T: type Hash256): T =
# discard result.data.NodeKey.init(data) # size error => zero
template noKeyError(info: static[string]; code: untyped) =
try:
code
except KeyError as e:
raiseAssert "Not possible (" & info & "): " & e.msg
#template noKeyError(info: static[string]; code: untyped) =
# try:
# code
# except KeyError as e:
# raiseAssert "Not possible (" & info & "): " & e.msg
template noRlpExceptionOops(info: static[string]; code: untyped) =
try:
@ -61,15 +61,15 @@ template noRlpExceptionOops(info: static[string]; code: untyped) =
except Exception as e:
raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
template noGenericExOrKeyError(info: static[string]; code: untyped) =
try:
code
except KeyError as e:
raiseAssert "Not possible (" & info & "): " & e.msg
except Defect as e:
raise e
except Exception as e:
raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
#template noGenericExOrKeyError(info: static[string]; code: untyped) =
# try:
# code
# except KeyError as e:
# raiseAssert "Not possible (" & info & "): " & e.msg
# except Defect as e:
# raise e
# except Exception as e:
# raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
# ------------------------------------------------------------------------------
# Private functions
@ -79,7 +79,7 @@ proc persistentStorageSlots(
db: HexaryTreeDbRef; ## Current table
ps: SnapDbStorageSlotsRef; ## For persistent database
): Result[void,HexaryError]
{.gcsafe, raises: [Defect,OSError,KeyError].} =
{.gcsafe, raises: [OSError,KeyError].} =
## Store accounts trie table on databse
if ps.rockDb.isNil:
let rc = db.persistentStorageSlotsPut(ps.kvDb)
@ -94,8 +94,7 @@ proc collectStorageSlots(
peer: Peer; ## for log messages
base: NodeTag; ## before or at first account entry in `data`
slotLists: seq[SnapStorage];
): Result[seq[RLeafSpecs],HexaryError]
{.gcsafe, raises: [Defect, RlpError].} =
): Result[seq[RLeafSpecs],HexaryError] =
## Similar to `collectAccounts()`
var rcSlots: seq[RLeafSpecs]
@ -138,7 +137,7 @@ proc importStorageSlots(
proof: SnapStorageProof; ## Storage slots proof data
noBaseBoundCheck = false; ## Ignore left boundary proof check if `true`
): Result[seq[NodeSpecs],HexaryError]
{.gcsafe, raises: [Defect,RlpError,KeyError].} =
{.gcsafe, raises: [RlpError,KeyError].} =
## Process storage slots for a particular storage root. See `importAccounts()`
## for comments on the return value.
let
@ -226,8 +225,6 @@ proc init*(
peer: Peer = nil
): T =
## Constructor, starts a new accounts session.
let db = pv.kvDb
new result
result.init(pv, root.to(NodeKey))
result.peer = peer
@ -446,7 +443,7 @@ proc inspectStorageSlotsTrie*(
## ...
## ctx = rc.value.resumeCtx
##
let peer = ps.peer
let peer {.used.} = ps.peer
var stats: TrieNodeStat
noRlpExceptionOops("inspectStorageSlotsTrie()"):
if persistent:
@ -498,7 +495,7 @@ proc getStorageSlotsData*(
## Fetch storage slots data.
##
## Caveat: There is no unit test yet
let peer = ps.peer
let peer {.used.} = ps.peer
var acc: Account
noRlpExceptionOops("getStorageSlotsData()"):

View File

@ -22,7 +22,7 @@ import
storage_queue_helper],
./ticker
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-pivot"
@ -341,7 +341,7 @@ proc pivotApprovePeer*(buddy: SnapBuddyRef) {.async.} =
## it will not proceed to the next scheduler task.
let
ctx = buddy.ctx
peer = buddy.peer
peer {.used.} = buddy.peer
beaconHeader = ctx.data.beaconHeader
var
pivotHeader: BlockHeader

View File

@ -60,7 +60,7 @@ import
"../.."/[constants, range_desc, worker_desc],
../db/[hexary_desc, hexary_envelope, hexary_inspect]
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private helpers

View File

@ -48,7 +48,7 @@ import
../db/[hexary_desc, hexary_envelope, hexary_error, snapdb_accounts],
"."/[find_missing_nodes, storage_queue_helper, swap_in]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-heal"
@ -95,7 +95,7 @@ proc healingCtx(
template discardRlpError(info: static[string]; code: untyped) =
try:
code
except RlpError as e:
except RlpError:
discard
template noExceptionOops(info: static[string]; code: untyped) =
@ -117,10 +117,10 @@ proc compileMissingNodesList(
## Find some missing glue nodes in accounts database.
let
ctx = buddy.ctx
peer = buddy.peer
peer {.used.} = buddy.peer
rootKey = env.stateHeader.stateRoot.to(NodeKey)
getFn = ctx.data.snapDb.getAccountFn
fa = env.fetchAccounts
fa {.used.} = env.fetchAccounts
# Import from earlier run
if ctx.swapInAccounts(env) != 0:
@ -128,7 +128,7 @@ proc compileMissingNodesList(
if not fa.processed.isFull:
noExceptionOops("compileMissingNodesList"):
let (missing, nLevel, nVisited) = fa.findMissingNodes(
let (missing, nLevel {.used.}, nVisited {.used.}) = fa.findMissingNodes(
rootKey, getFn, healAccountsInspectionPlanBLevel)
when extraTraceMessages:
@ -148,8 +148,8 @@ proc fetchMissingNodes(
## Extract from `nodes.missing` the next batch of nodes that need
## to be merged it into the database
let
ctx = buddy.ctx
peer = buddy.peer
ctx {.used.} = buddy.ctx
peer {.used.} = buddy.peer
stateRoot = env.stateHeader.stateRoot
pivot = "#" & $env.stateHeader.blockNumber # for logging
@ -202,7 +202,7 @@ proc kvAccountLeaf(
): (bool,NodeKey,Account) =
## Re-read leaf node from persistent database (if any)
let
peer = buddy.peer
peer {.used.} = buddy.peer
var
nNibbles = -1
@ -235,7 +235,7 @@ proc registerAccountLeaf(
## Process single account node as would be done with an interval by
## the `storeAccounts()` function
let
peer = buddy.peer
peer {.used.} = buddy.peer
pt = accKey.to(NodeTag)
# Register isolated leaf node
@ -263,7 +263,6 @@ proc accountsHealingImpl(
ctx = buddy.ctx
db = ctx.data.snapDb
peer = buddy.peer
fa = env.fetchAccounts
# Import from earlier runs (if any)
while ctx.swapInAccounts(env) != 0:
@ -332,8 +331,8 @@ proc healAccounts*(
) {.async.} =
## Fetching and merging missing account trie database nodes.
let
ctx = buddy.ctx
peer = buddy.peer
ctx {.used.} = buddy.ctx
peer {.used.} = buddy.peer
when extraTraceMessages:
trace logTxt "started", peer, ctx=buddy.healingCtx(env)

View File

@ -50,7 +50,7 @@ import
../db/[hexary_desc, hexary_envelope, snapdb_storage_slots],
"."/[find_missing_nodes, storage_queue_helper]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-heal"
@ -82,7 +82,7 @@ proc toPC(w: openArray[NodeSpecs]; n: static[int] = 3): string =
proc healingCtx(
buddy: SnapBuddyRef;
env: SnapPivotRef;
): string =
): string {.used.} =
"{" &
"pivot=" & "#" & $env.stateHeader.blockNumber & "," &
"runState=" & $buddy.ctrl.state & "," &
@ -131,15 +131,16 @@ proc compileMissingNodesList(
## Find some missing glue nodes in storage slots database.
let
ctx = buddy.ctx
peer = buddy.peer
peer {.used.} = buddy.peer
slots = kvp.data.slots
rootKey = kvp.key.to(NodeKey)
getFn = ctx.data.snapDb.getStorageSlotsFn(kvp.data.accKey)
if not slots.processed.isFull:
noExceptionOops("compileMissingNodesList"):
let (missing, nLevel, nVisited) = slots.findMissingNodes(
rootKey, getFn, healStorageSlotsInspectionPlanBLevel)
let (missing, nLevel {.used.}, nVisited {.used.}) =
slots.findMissingNodes(
rootKey, getFn, healStorageSlotsInspectionPlanBLevel)
when extraTraceMessages:
trace logTxt "missing nodes", peer,
@ -159,8 +160,8 @@ proc getNodesFromNetwork(
## Extract from `missing` the next batch of nodes that need
## to be merged it into the database
let
ctx = buddy.ctx
peer = buddy.peer
ctx {.used.} = buddy.ctx
peer {.used.} = buddy.peer
accPath = kvp.data.accKey.to(Blob)
storageRoot = kvp.key
fetchNodes = missing[0 ..< fetchRequestTrieNodesMax]
@ -282,8 +283,8 @@ proc healStorageSlots*(
) {.async.} =
## Fetching and merging missing slorage slots trie database nodes.
let
ctx = buddy.ctx
peer = buddy.peer
ctx {.used.} = buddy.ctx
peer {.used.} = buddy.peer
# Extract healing slot items from partial slots list
var toBeHealed: seq[SnapSlotsQueuePair]

View File

@ -54,7 +54,7 @@ import
../db/[hexary_envelope, snapdb_accounts],
"."/[storage_queue_helper, swap_in]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-range"
@ -70,8 +70,8 @@ const
template logTxt(info: static[string]): static[string] =
"Accounts range " & info
proc `$`(rs: NodeTagRangeSet): string =
rs.fullFactor.toPC(0)
#proc `$`(rs: NodeTagRangeSet): string =
# rs.fullFactor.toPC(0)
proc `$`(iv: NodeTagRange): string =
iv.fullFactor.toPC(3)
@ -79,7 +79,7 @@ proc `$`(iv: NodeTagRange): string =
proc fetchCtx(
buddy: SnapBuddyRef;
env: SnapPivotRef;
): string =
): string {.used.} =
"{" &
"pivot=" & "#" & $env.stateHeader.blockNumber & "," &
"runState=" & $buddy.ctrl.state & "," &
@ -146,7 +146,7 @@ proc accountsRangefetchImpl(
let
gotAccounts = dd.data.accounts.len # comprises `gotStorage`
gotStorage = dd.withStorage.len
gotStorage {.used.} = dd.withStorage.len
# Now, we fully own the scheduler. The original interval will savely be placed
# back for a moment (the `unprocessed` range set to be corrected below.)
@ -225,8 +225,8 @@ proc rangeFetchAccounts*(
if not fa.processed.isFull():
let
ctx = buddy.ctx
peer = buddy.peer
ctx {.used.} = buddy.ctx
peer {.used.} = buddy.peer
when extraTraceMessages:
trace logTxt "start", peer, ctx=buddy.fetchCtx(env)

View File

@ -73,7 +73,7 @@ import
../db/[hexary_error, snapdb_storage_slots],
./storage_queue_helper
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-range"
@ -93,7 +93,6 @@ proc fetchCtx(
env: SnapPivotRef;
): string =
let
ctx = buddy.ctx
nStoQu = (env.fetchStorageFull.len +
env.fetchStoragePart.len +
env.parkedStorage.len)
@ -142,8 +141,6 @@ proc storeStoragesSingleBatch(
peer, stoRange.data, noBaseBoundCheck = true)
if 0 < report.len:
let topStoRange = stoRange.data.storages.len - 1
if report[^1].slot.isNone:
# Failed to store on database, not much that can be done here
gotSlotLists.dec(report.len - 1) # for logging only
@ -225,7 +222,7 @@ proc rangeFetchStorageSlots*(
if 0 < env.fetchStorageFull.len or 0 < env.fetchStoragePart.len:
let
ctx = buddy.ctx
peer = buddy.peer
peer {.used.} = buddy.peer
when extraTraceMessages:
trace logTxt "start", peer, ctx=buddy.fetchCtx(env)
@ -235,7 +232,8 @@ proc rangeFetchStorageSlots*(
var delayed: seq[AccountSlotsHeader]
while buddy.ctrl.running:
# Pull out the next request list from the queue
let (req, nComplete, nPartial) = ctx.storageQueueFetchFull(env)
let (req, nComplete {.used.}, nPartial {.used.}) =
ctx.storageQueueFetchFull(env)
if req.len == 0:
break
@ -261,8 +259,8 @@ proc rangeFetchStorageSlots*(
when extraTraceMessages:
let
subRange = rc.value.subRange.get
account = rc.value.accKey
subRange {.used.} = rc.value.subRange.get
account {.used.} = rc.value.accKey
trace logTxt "fetch partial", peer, ctx=buddy.fetchCtx(env),
nStorageQuPart=env.fetchStoragePart.len, subRange, account

View File

@ -15,7 +15,7 @@ import
"../.."/[constants, range_desc, worker_desc],
../db/[hexary_inspect, snapdb_storage_slots]
{.push raises: [Defect].}
{.push raises: [].}
# ------------------------------------------------------------------------------
# Private helpers
@ -247,8 +247,8 @@ proc storageQueueFetchFull*(
# node. So it is complete and can be fully removed from the batch.
nComplete.inc # Update for logging
else:
# This item becomes a partially available slot
let data = env.storageQueueGetOrMakePartial accItem
# This item becomes a partially available slot
#let data = env.storageQueueGetOrMakePartial accItem -- notused
nPartial.inc # Update for logging
(rcList, nComplete, nPartial)

View File

@ -45,7 +45,7 @@ import
../db/[hexary_desc, hexary_envelope, hexary_error,
hexary_paths, snapdb_accounts]
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-swapin"
@ -264,7 +264,7 @@ proc swapInAccounts*(
return # nothing to do
let
pivot = "#" & $env.stateHeader.blockNumber # Logging & debugging
pivot {.used.} = "#" & $env.stateHeader.blockNumber # Logging & debugging
rootKey = env.stateHeader.stateRoot.to(NodeKey)
getFn = ctx.data.snapDb.getAccountFn

View File

@ -18,7 +18,7 @@ import
../../../utils/prettify,
../../misc/timer_helper
{.push raises: [Defect].}
{.push raises: [].}
logScope:
topics = "snap-tick"
@ -128,7 +128,7 @@ proc runLogTicker(t: TickerRef) {.gcsafe.} =
t.recovery != t.lastRecov or
tickerLogSuppressMax < (now - t.visited):
var
nAcc, nSto, bulk: string
nAcc, nSto: string
pv = "n/a"
bc = "n/a"
nStoQue = "n/a"

View File

@ -19,7 +19,7 @@ import
./worker/ticker,
./range_desc
{.push raises: [Defect].}
{.push raises: [].}
type
SnapAccountsList* = SortedSet[NodeTag,Hash256]

View File

@ -23,7 +23,7 @@ export
chain,
db_chain
{.push raises: [Defect].}
{.push raises: [].}
type
BuddyRunState* = enum

View File

@ -93,7 +93,7 @@ import
stew/keyed_queue,
"."/[handlers, sync_desc]
{.push raises: [Defect].}
{.push raises: [].}
static:
# type `EthWireRef` is needed in `initSync()`
@ -258,8 +258,8 @@ proc onPeerConnected[S,W](dsc: RunnerSyncRef[S,W]; peer: Peer) =
mixin runStart, runStop
# Check for known entry (which should not exist.)
let
maxWorkers = dsc.ctx.buddiesMax
nPeers = dsc.pool.len
maxWorkers {.used.} = dsc.ctx.buddiesMax
nPeers {.used.} = dsc.pool.len
nWorkers = dsc.buddies.len
if dsc.buddies.hasKey(peer.hash):
trace "Reconnecting zombie peer ignored", peer, nPeers, nWorkers, maxWorkers

View File

@ -13,7 +13,7 @@ import
eth/common/eth_types_rlp,
stew/byteutils
{.push raises: [Defect].}
{.push raises: [].}
type
BlockHash* = distinct Hash256