Mirror of https://github.com/status-im/nimbus-eth1.git
Store proved snap accounts (#1145)
* Relocated `IntervalSets` to nim-stew repo
* Accumulate accounts on temporary kv-DB
  why: Explore the data as returned from snap/1. Will be converted to an `eth/db` next.
  details: Verify and accumulate per state-root accounts downloaded via snap.
  also: Some unit tests
* Replace `Table` by `TrieDatabaseRef` for accounts accumulator
* Update ticker statistics
  details: mean/variance based counter update
* Allow persistent db for proved accounts
* Rebase, and globally activate unit test
* Fix statistics
This commit is contained in: parent e274b347ae, commit 134fe26997
@@ -15,12 +15,19 @@ type
    terminalHash
    safeHash
    finalizedHash
    snapSyncStatus
    snapSyncAccount
    snapSyncProof

  DbKey* = object
    # The first byte stores the key type. The rest are key-specific values
    data*: array[33, byte]
    dataEndPos*: uint8 # the last populated position in the data

  DbXKey* = object
    data*: array[65, byte]
    dataEndPos*: uint8 # the last populated position in the data

proc genericHashKey*(h: Hash256): DbKey {.inline.} =
  result.data[0] = byte ord(genericHash)
  result.data[1 .. 32] = h.data
@@ -79,10 +86,26 @@ proc finalizedHashKey*(): DbKey {.inline.} =
  result.data[0] = byte ord(finalizedHash)
  result.dataEndPos = uint8 1

template toOpenArray*(k: DbKey): openArray[byte] =
proc snapSyncStatusKey*(h: Hash256): DbKey =
  result.data[0] = byte ord(snapSyncStatus)
  result.data[1 .. 32] = h.data
  result.dataEndPos = uint8 32

proc snapSyncAccountKey*(h, b: Hash256): DbXKey =
  result.data[0] = byte ord(snapSyncAccount)
  result.data[1 .. 32] = h.data
  result.data[33 .. 64] = b.data
  result.dataEndPos = uint8 64

proc snapSyncProofKey*(h: Hash256): DbKey =
  result.data[0] = byte ord(snapSyncProof)
  result.data[1 .. 32] = h.data
  result.dataEndPos = uint8 32

template toOpenArray*(k: DbKey|DbXKey): openArray[byte] =
  k.data.toOpenArray(0, int(k.dataEndPos))

proc hash*(k: DbKey): Hash =
proc hash*(k: DbKey|DbXKey): Hash =
  result = hash(k.toOpenArray)

proc `==`*(a, b: DbKey): bool {.inline.} =
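A hypothetical usage sketch (not part of the diff) for the new key constructors: composing a per state-root account key for the kv-DB accumulator. Here `db`, `stateRoot`, `accHash` and `accBlob` are assumed to come from the surrounding sync code.

  let key = snapSyncAccountKey(stateRoot, accHash) # 65-byte snapSyncAccount key
  db.put(key.toOpenArray, accBlob)                 # accBlob: RLP-encoded account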
@@ -12,27 +12,8 @@
## This module implements `snap/1`, the `Ethereum Snapshot Protocol (SNAP)
## <https://github.com/ethereum/devp2p/blob/master/caps/snap.md>`_.
##
## Modifications for *Geth* compatibility
## --------------------------------------
##
## `GetAccountRange` and `GetStorageRanges` take parameters `origin` and
## `limit`, instead of a single `startingHash` parameter in the
## specification. The parameters `origin` and `limit` are 256-bit paths
## representing the starting hash and ending trie path, both inclusive.
##
## The `snap/1` specification doesn't match reality. If the specification is
## strictly followed omitting `limit`, *Geth 1.10* disconnects immediately, so
## this implementation strives to match the *Geth* behaviour.
##
## Results from either call may include one item with path `>= limit`. *Geth*
## fetches data from its internal database until it reaches this condition or
## the bytes threshold, then replies with what it fetched. Usually there is
## no item at the exact path `limit`, so there is one after.
##
##
## Modified `GetStorageRanges` (0x02) message syntax
## ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
##
## -------------------------------------------------
## As implemented here, the request message is encoded as
##
## `[reqID, rootHash, accountHashes, origin, limit, responseBytes]`
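A request sketch matching this encoding (illustration only, not part of the diff), assuming a connected `peer` speaking `snap/1`, a `stateRoot: Hash256` and one account hash `accHash: NodeTag`; the `getStorageRanges` proc itself is declared in the protocol definition further below. Empty `origin`/`limit` blobs stand for "all zeros" and "no limit":

  let reply = await peer.getStorageRanges(
    stateRoot,       # root of the account trie to serve
    @[accHash],      # storage of a single account
    newSeq[byte](),  # origin: empty blob, i.e. all zeros
    newSeq[byte](),  # limit: empty blob, i.e. no limit
    2 * 1024 * 1024) # responseBytes soft limit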
@@ -53,7 +34,6 @@
##
## Discussion of *Geth* `GetStorageRanges` behaviour
## ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
##
## - Parameters `origin` and `limit` may each be empty blobs, which mean "all
##   zeros" (0x00000...) or "no limit" (0xfffff...) respectively.
##
@@ -95,8 +75,7 @@
##
## * Avoid the condition by using `origin >= 1` when using `limit`.
##
## * Use trie node traversal (`snap` `GetTrieNodes` or `eth` `GetNodeData`)
##   to obtain the omitted proof.
## * Use trie node traversal (`snap` `GetTrieNodes`) to obtain the omitted proof.
##
## - When multiple accounts are requested with `origin > 0`, only one account's
##   storage is returned. There is no point requesting multiple accounts with
@@ -113,36 +92,12 @@
##   treated `origin` as applying to only the first account and `limit` to only
##   the last account, but it doesn't.)
##
## Modified `GetAccountRange` (0x00) packet syntax
## ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
##
## As implemented here, the request message is encoded as
##
## `[reqID, rootHash, origin, limit, responseBytes]`
##
## It requests an unknown number of accounts from a given account trie, starting
## at the specified account hash and capped by the maximum allowed response
## size in bytes. The intended purpose of this message is to fetch a large
## number of subsequent accounts from a remote node and reconstruct a state
## subtrie locally.
##
## The `GetAccountRange` parameters `origin` and `limit` must be 32 byte
## blobs. There is no reason why an empty limit is not allowed here when it is
## allowed for `GetStorageRanges`; it just isn't.
##
## * `reqID`: Request ID to match up responses with
## * `rootHash`: Root hash of the account trie to serve
## * `origin`: 32 byte account hash of the first to retrieve
## * `limit`: 32 byte account hash after which to stop serving
## * `responseBytes`: 64 bit number soft limit at which to stop returning data
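A matching request sketch (illustration only), again assuming a connected `peer` and a known `stateRoot: Hash256`; `low(NodeTag)`/`high(NodeTag)` serve as the 32 byte blob bounds:

  let reply = await peer.getAccountRange(
    stateRoot,       # root hash of the account trie to serve
    low(NodeTag),    # origin: first possible account hash
    high(NodeTag),   # limit: serve up to the end of the trie
    2 * 1024 * 1024) # responseBytes soft limit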
##
##
## Performance benefits
## --------------------
##
## `snap` is used for much higher performance transfer of the entire Ethereum
## execution state (accounts, storage, bytecode) compared with hexary trie
## traversal using `eth` `GetNodeData`.
## traversal using the now obsolete `eth/66` `GetNodeData`.
##
## It improves both network and local storage performance. The benefits are
## substantial, and summarised here:
@@ -160,32 +115,14 @@
## whichever network protocol is used. Nimbus uses `snap` protocol because it
## is a more efficient network protocol.
##
## Remote state and Beam sync benefits
## -----------------------------------
##
## `snap` was not intended for Beam sync, or "remote state on demand", used by
## transactions executing locally that fetch state from the network instead of
## local storage.
##
## Even so, as a lucky accident `snap` allows individual states to be fetched
## in fewer network round trips than `eth`. Often a single round trip,
## compared with about 10 round trips per account query over `eth`. This is
## because `eth` `GetNodeData` requires a trie traversal chasing hashes
## sequentially, while `snap` `GetTrieNodes` trie traversal can be done with
## predictable paths.
##
## Therefore `snap` can be used to accelerate remote states and Beam sync.
##
## Distributed hash table (DHT) building block
## -------------------------------------------
##
## Although `snap` was designed for bootstrapping clients with the entire
## Ethereum state, it is well suited to fetching only a subset of path ranges.
## This may be useful for bootstrapping distributed hash tables (DHTs).
##
## Path range metadata benefits
## ----------------------------
##
## Because data is handled in path ranges, this allows a compact metadata
## representation of what data is stored locally and what isn't, compared with
## the size of a representation of partially completed trie traversal with
@@ -212,13 +149,13 @@ logScope:

type
  SnapAccount* = object
    accHash*: LeafItem
    accHash*: NodeTag
    accBody* {.rlpCustomSerialization.}: Account

  SnapAccountProof* = seq[Blob]

  SnapStorage* = object
    slotHash*: LeafItem
    slotHash*: NodeTag
    slotData*: Blob

  SnapStorageProof* = seq[Blob]
@@ -249,50 +186,13 @@ const
  # avoids transmitting these hashes in about 90% of accounts. We need to
  # recognise or set these hashes in `Account` when serialising RLP for `snap`.

proc read(rlp: var Rlp, t: var SnapAccount, _: type Account): Account =
  ## RLP decoding for `SnapAccount`, which contains a path and account.
  ## The snap representation of the account differs from `Account` RLP.
  ## Empty storage hash and empty code hash are each represented by an
  ## RLP zero-length string instead of the full hash.
  rlp.tryEnterList()
  result.nonce = rlp.read(typeof(result.nonce))
  result.balance = rlp.read(typeof(result.balance))

  if rlp.blobLen != 0 or not rlp.isBlob:
    result.storageRoot = rlp.read(typeof(result.storageRoot))
    if result.storageRoot == BLANK_ROOT_HASH:
      raise newException(RlpTypeMismatch,
        "BLANK_ROOT_HASH not encoded as empty string in Snap protocol")
  else:
    rlp.skipElem()
    result.storageRoot = BLANK_ROOT_HASH

  if rlp.blobLen != 0 or not rlp.isBlob:
    result.codeHash = rlp.read(typeof(result.codeHash))
    if result.codeHash == EMPTY_SHA3:
      raise newException(RlpTypeMismatch,
        "EMPTY_SHA3 not encoded as empty string in Snap protocol")
  else:
    rlp.skipElem()
    result.codeHash = EMPTY_SHA3
proc read(rlp: var Rlp, t: var SnapAccount, T: type Account): T =
  ## RLP Mixin: decoding for `SnapAccount`.
  result = rlp.snapRead(T)

proc append(rlpWriter: var RlpWriter, t: SnapAccount, account: Account) =
  ## RLP encoding for `SnapAccount`, which contains a path and account.
  ## The snap representation of the account differs from `Account` RLP.
  ## Empty storage hash and empty code hash are each represented by an
  ## RLP zero-length string instead of the full hash.
  rlpWriter.append(account.nonce)
  rlpWriter.append(account.balance)

  if account.storageRoot == BLANK_ROOT_HASH:
    rlpWriter.append("")
  else:
    rlpWriter.append(account.storageRoot)

  if account.codeHash == EMPTY_SHA3:
    rlpWriter.append("")
  else:
    rlpWriter.append(account.codeHash)
  ## RLP Mixin: encoding for `SnapAccount`.
  rlpWriter.snapAppend(account)


p2pProtocol snap1(version = 1,
@@ -303,8 +203,7 @@ p2pProtocol snap1(version = 1,
    # User message 0x00: GetAccountRange.
    # Note: `origin` and `limit` differ from the specification to match Geth.
    proc getAccountRange(peer: Peer, rootHash: Hash256,
                         # Next line differs from spec to match Geth.
                         origin: LeafItem, limit: LeafItem,
                         origin: NodeTag, limit: NodeTag,
                         responseBytes: uint64) =
      trace trSnapRecvReceived & "GetAccountRange (0x00)", peer,
        accountRange=leafRangePp(origin, limit),
@@ -321,8 +220,7 @@
    # User message 0x02: GetStorageRanges.
    # Note: `origin` and `limit` differ from the specification to match Geth.
    proc getStorageRanges(peer: Peer, rootHash: Hash256,
                          accounts: openArray[LeafItem],
                          # Next line differs from spec to match Geth.
                          accounts: openArray[NodeTag],
                          origin: openArray[byte], limit: openArray[byte],
                          responseBytes: uint64) =
      when trSnapTracePacketsOk:
@@ -388,7 +286,7 @@
    # User message 0x06: GetTrieNodes.
    requestResponse:
      proc getTrieNodes(peer: Peer, rootHash: Hash256,
                        paths: openArray[InteriorPath], responseBytes: uint64) =
                        paths: openArray[PathSegment], responseBytes: uint64) =
        trace trSnapRecvReceived & "GetTrieNodes (0x06)", peer,
          nodePaths=paths.len, stateRoot=($rootHash), responseBytes

@@ -25,6 +25,7 @@ logScope:

type
  SnapSyncCtx* = ref object of Worker
    chain: AbstractChainDB
    buddies: KeyedQueue[Peer,WorkerBuddy] ## LRU cache with worker descriptors
    pool: PeerPool ## for starting the system

@@ -62,7 +63,7 @@ proc workerLoop(sp: WorkerBuddy) {.async.} =
    # Do something, work a bit
    await sp.workerExec

    trace "Peer worker done", peer=sp,
    trace "Peer worker done", peer=sp, ctrlState=sp.ctrl.state,
      peers=ns.pool.len, workers=ns.buddies.len, maxWorkers=ns.buddiesMax

@@ -79,17 +80,22 @@ proc onPeerConnected(ns: SnapSyncCtx, peer: Peer) =
  if not sp.workerStart():
    trace "Ignoring useless peer", peer,
      peers=ns.pool.len, workers=ns.buddies.len, maxWorkers=ns.buddiesMax
    asyncSpawn peer.disconnect(UselessPeer)
    sp.ctrl.zombie = true
    return

  # Check for table overflow. An overflow should not happen if the table is
  # as large as the peer connection table.
  if ns.buddiesMax <= ns.buddies.len:
    let leastPeer = ns.buddies.shift.value.data
    trace "Peer overflow! Deleting least used entry", leastPeer,
      peers=ns.pool.len, workers=ns.buddies.len, maxWorkers=ns.buddiesMax
    leastPeer.workerStop()
    asyncSpawn leastPeer.peer.disconnect(UselessPeer)
    if leastPeer.ctrl.zombie:
      trace "Dequeuing zombie peer", leastPeer,
        peers=ns.pool.len, workers=ns.buddies.len, maxWorkers=ns.buddiesMax
      discard
    else:
      trace "Peer table full! Dequeuing least used entry", leastPeer,
        peers=ns.pool.len, workers=ns.buddies.len, maxWorkers=ns.buddiesMax
      leastPeer.workerStop()
      leastPeer.ctrl.zombie = true

  # Add peer entry
  discard ns.buddies.lruAppend(sp.peer, sp, ns.buddiesMax)
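The `buddies` table doubles as an LRU cache. A minimal sketch of the `stew/keyed_queue` pattern used above (illustration only; exact eviction semantics assumed from the calls in this diff):

  import stew/keyed_queue
  var lru: KeyedQueue[string,int]
  lru.init(2)
  discard lru.lruAppend("a", 1, 2) # append, curbing the queue to 2 entries
  discard lru.lruAppend("b", 2, 2)
  discard lru.lruAppend("c", 3, 2) # evicts "a", the least recently used key
  doAssert lru.eq("a").isErr and lru.eq("c").isOk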
@@ -99,15 +105,20 @@ proc onPeerConnected(ns: SnapSyncCtx, peer: Peer) =


proc onPeerDisconnected(ns: SnapSyncCtx, peer: Peer) =
  let rc = ns.buddies.delete(peer)
  if rc.isOk:
    rc.value.data.workerStop()
    trace "Disconnected peer", peer,
      peers=ns.pool.len, workers=ns.buddies.len, maxWorkers=ns.buddiesMax
  else:
  let rc = ns.buddies.eq(peer)
  if rc.isErr:
    debug "Disconnected from unregistered peer", peer,
      peers=ns.pool.len, workers=ns.buddies.len, maxWorkers=ns.buddiesMax
    discard
    return
  let sp = rc.value
  if sp.ctrl.zombie:
    trace "Disconnected zombie peer", peer,
      peers=ns.pool.len, workers=ns.buddies.len, maxWorkers=ns.buddiesMax
  else:
    sp.workerStop()
    ns.buddies.del(peer)
    trace "Disconnected peer", peer,
      peers=ns.pool.len, workers=ns.buddies.len, maxWorkers=ns.buddiesMax

# ------------------------------------------------------------------------------
# Public functions
@@ -117,6 +128,7 @@ proc new*(T: type SnapSyncCtx; ethNode: EthereumNode; maxPeers: int): T =
  ## Constructor
  new result
  let size = max(1,maxPeers)
  result.chain = ethNode.chain
  result.buddies.init(size)
  result.buddiesMax = size
  result.pool = ethNode.peerPool
@@ -132,7 +144,7 @@ proc start*(ctx: SnapSyncCtx) =
      ctx.onPeerDisconnected(p))

  # Initialise sub-systems
  ctx.workerSetup()
  ctx.workerSetup(ctx.chain)
  po.setProtocol eth
  ctx.pool.addObserver(ctx, po)

@@ -10,220 +10,389 @@
# distributed except according to those terms.

import
  std/math,
  std/[math, sequtils, strutils, hashes],
  eth/common/eth_types,
  stew/byteutils,
  nimcrypto/keccak,
  stew/[byteutils, interval_set],
  stint,
  ../../utils/interval_set,
  ../../constants,
  ../types

{.push raises: [Defect].}

type
  LeafItem* =
  NodeTag* = ##\
    ## Trie leaf item, account hash etc.
    distinct UInt256

  LeafRange* = ##\
    ## Interval `[minPt,maxPt]` of `LeafItem` elements, can be managed in an
    ## Interval `[minPt,maxPt]` of `NodeTag` elements, can be managed in an
    ## `IntervalSet` data type.
    Interval[LeafItem,UInt256]
    Interval[NodeTag,UInt256]

  LeafRangeSet* = ##\
    ## Managed structure to handle non-adjacent `LeafRange` intervals
    IntervalSetRef[LeafItem,UInt256]
    IntervalSetRef[NodeTag,UInt256]

  LeafItemData* = ##\
    ## Serialisation of `LeafItem`
    array[32,byte]
  PathSegment* = object
    ## Path prefix or trailer for an interior node in a hexary trie. See also
    ## the implementation of `NibblesSeq` from `eth/trie/nibbles` for a more
    ## general implementation.
    bytes: seq[byte] ## <tag> + at most 32 bytes (aka 64 nibbles)

  InteriorPath* = object
    ## Path to an interior node in an Ethereum hexary trie.
    bytes: LeafItemData ## at most 64 nibbles (unused nibbles must be zero)
    nDigits: byte ## left prefix length, number of nibbles

#const
#  interiorPathMaxDepth = 2 * sizeof(LeafItemData)
  PathSegmentError = enum
    isNoError = 0
    isTooLongEvenLength ## More than 64 nibbles (even number)
    isTooLongOddLength ## More than 63 nibbles (odd number)
    isUnknownType ## Unknown encoding type

# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------

proc to*(lp: LeafItem; T: type LeafItemData): T =
  lp.UInt256.toBytesBE.T
proc to*(nid: NodeTag; T: type Hash256): T =
  result.data = nid.UInt256.toBytesBE

proc to*(data: LeafItemData; T: type LeafItem): T =
  UInt256.fromBytesBE(data).T
proc to*(nid: NodeTag; T: type NodeHash): T =
  nid.to(Hash256).T

proc to*(n: SomeUnsignedInt; T: type LeafItem): T =
proc to*(h: Hash256; T: type NodeTag): T =
  UInt256.fromBytesBE(h.data).T

proc to*(nh: NodeHash; T: type NodeTag): T =
  nh.Hash256.to(T)

proc to*(n: SomeUnsignedInt|UInt256; T: type NodeTag): T =
  n.u256.T

proc to*(hash: UInt256; T: type LeafItem): T =
  hash.T

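A conversion sketch for the helpers above (illustration only; the `==`, `+` and `-` operators are defined further below in this file):

  let tag = 0x1234u64.to(NodeTag)
  doAssert tag.to(Hash256).to(NodeTag) == tag # round trip through Hash256
  doAssert (tag + 1.u256) - tag == 1.u256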
#[
proc to*(ip: InteriorPath; T: type LeafItem): T =
  ip.bytes.to(T)

proc to*(lp: LeafItem; T: type InteriorPath): T =
  InteriorPath(bytes: lp.to(LeafItemData), numDigits: interiorPathMaxDepth)

# ------------------------------------------------------------------------------
# Public `InteriorPath` functions
# Public constructors
# ------------------------------------------------------------------------------

proc maxDepth*(_: InteriorPath | typedesc[InteriorPath]): int =
  interiorPathMaxDepth

proc depth*(ip: InteriorPath): int =
  ip.numDigits.int

proc digit*(ip: InteriorPath, index: int): int =
  doAssert 0 <= index and index < ip.depth
  let b = ip.bytes[index shr 1]
  (if (index and 1) == 0: (b shr 4) else: (b and 0x0f)).int

proc add*(path: var InteriorPath, digit: byte) =
  doAssert path.numDigits < interiorPathMaxDepth
  inc path.numDigits
  if (path.numDigits and 1) != 0:
    path.bytes[path.numDigits shr 1] = (digit shl 4)
proc new*(T: type NodeHash; ps: PathSegment): T =
  ## Import `PathSegment` argument into a `NodeHash`. Missing nibbles on the
  ## right will be zero padded.
  if (ps.bytes[0] and 0x10) == 0:
    for n in 1 ..< ps.bytes.len:
      result.Hash256.data[n-1] = ps.bytes[n]
  else:
    path.bytes[(path.numDigits shr 1) - 1] += (digit and 0x0f)
    for n in 0 ..< ps.bytes.len:
      result.Hash256.data[n] = (ps.bytes[n] shl 4) or (ps.bytes[n+1] shr 4)

proc addPair*(path: var InteriorPath, digitPair: byte) =
  doAssert path.numDigits < interiorPathMaxDepth - 1
  path.numDigits += 2
  if (path.numDigits and 1) == 0:
    path.bytes[(path.numDigits shr 1) - 1] = digitPair
proc new*(T: type NodeTag; ps: PathSegment): T =
  ## Import `PathSegment` argument into a `NodeTag`. Missing nibbles on the
  ## right will be zero padded.
  NodeHash.new(ps).to(NodeTag)

proc init*(nh: var NodeHash; data: openArray[byte]): bool =
  ## Import argument `data` into `nh` which must have length either `32` or `0`.
  ## The latter case is equivalent to an all zero byte array of size `32`.
  if data.len == 32:
    for n in 0 ..< 32:
      nh.Hash256.data[n] = data[n]
    return true
  elif data.len == 0:
    nh.reset
    return true

proc init*(nt: var NodeTag; data: openArray[byte]): bool =
  ## Similar to `init(nh: var NodeHash; data: openArray[byte])`
  var h: NodeHash
  if h.init(data):
    nt = h.to(NodeTag)
    return true

proc init*(ps: var PathSegment; data: openArray[byte]): bool =
  ## Import argument `data` into `ps` which must be a valid path as found
  ## in a trie extension or leaf node starting with:
  ## * 0x00, or 0x20: followed by at most 64 nibbles (i.e. by 32 bytes max).
  ##   Here, the data path is made up of at most 32 pairs of nibbles.
  ## * 0x1x, or 0x3x: followed by at most 62 nibbles (31 bytes max). Here the
  ##   data path value starts with the `x` followed by at most 62 pairs of
  ##   nibbles.
  if 0 < data.len:
    # Check first byte for marker
    if ((data[0] and 0xdf) == 0x00 and data.len <= 33) or # right nibble 0
       ((data[0] and 0xd0) == 0x10 and data.len <= 32): # right nibble 1st dgt
      ps.bytes = data.toSeq
      return true

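A worked example of the two accepted encodings (sketch; `init` is the proc above, `len` and `[]` are defined further below):

  var ps: PathSegment
  doAssert ps.init(@[0x00.byte, 0xab, 0xcd]) # even: 4 nibbles a b c d
  doAssert ps.len == 4 and ps[0] == 0xa
  doAssert ps.init(@[0x1a.byte, 0xbc])       # odd: 3 nibbles a b c
  doAssert ps.len == 3 and ps[0] == 0xa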
proc new*(T: type PathSegment; tag: NodeTag; isLeaf = false): T =
  ## Create `PathSegment` from `NodeTag`. If the `isLeaf` argument is set, the
  ## path segment is marked as a leaf node (trie prefix '0x20').
  result.bytes = @[0.byte] & tag.to(Hash256).data.toSeq

# ------------------------------------------------------------------------------
# Public `PathSegment` functions
# ------------------------------------------------------------------------------

proc verify*(ps: PathSegment): Result[void,PathSegmentError] =
  ## Check `ps` for consistency
  if ps.bytes.len == 0:
    return ok()
  if (ps.bytes[0] and 0xdf) == 0:
    if 33 < ps.bytes.len:
      return err(isTooLongEvenLength)
  elif (ps.bytes[0] and 0xd0) == 0x10:
    if 32 < ps.bytes.len:
      return err(isTooLongOddLength)
  else:
    path.bytes[(path.numDigits shr 1) - 1] += (digitPair shr 4)
    path.bytes[path.numDigits shr 1] = (digitPair shl 4)
    return err(isUnknownType)
  ok()

proc pop*(path: var InteriorPath) =
  doAssert 0 < path.numDigits
  dec path.numDigits
  path.bytes[path.numDigits shr 1] =
    if (path.numDigits and 1) == 0: 0.byte
    else: path.bytes[path.numDigits shr 1] and 0xf0

# ------------------------------------------------------------------------------
# Public comparison functions for `InteriorPath`
# ------------------------------------------------------------------------------

proc low*(T: type InteriorPath): T = low(UInt256).to(LeafItem).to(T)
proc high*(T: type InteriorPath): T = high(UInt256).to(LeafItem).to(T)

proc `==`*(path1, path2: InteriorPath): bool =
  # Paths are zero-padded to the end of the array, so comparison is easy.
  for i in 0 ..< (max(path1.numDigits, path2.numDigits).int + 1) shr 1:
    if path1.bytes[i] != path2.bytes[i]:
      return false
  return true

proc `<=`*(path1, path2: InteriorPath): bool =
  # Paths are zero-padded to the end of the array, so comparison is easy.
  for i in 0 ..< (max(path1.numDigits, path2.numDigits).int + 1) shr 1:
    if path1.bytes[i] != path2.bytes[i]:
      return path1.bytes[i] <= path2.bytes[i]
  return true

proc `<`*(path1, path2: InteriorPath): bool = not(path2 <= path1)

#proc cmp*(path1, path2: InteriorPath): int =
#  # Paths are zero-padded to the end of the array, so comparison is easy.
#  for i in 0 ..< (max(path1.numDigits, path2.numDigits).int + 1) shr 1:
#    if path1.bytes[i] != path2.bytes[i]:
#      return path1.bytes[i].int - path2.bytes[i].int
#  return 0

proc prefix*(lp: LeafItem; digits: byte): InteriorPath =
  ## From the argument item `lp`, return the prefix made up by preserving the
  ## leading `digits` nibbles (i.e. `(digits+1)/2` bytes).
  doAssert digits <= interiorPathMaxDepth
  result = InteriorPath(
    bytes: lp.to(LeafItemData),
    numDigits: digits)
  let tailInx = (digits + 1) shr 1
  # reset the tail to zero
  for inx in tailInx ..< interiorPathMaxDepth:
    result.bytes[inx] = 0.byte
  if (digits and 1) != 0: # clear the unused low nibble
    result.bytes[digits shr 1] = result.bytes[digits shr 1] and 0xf0.byte

proc `in`*(ip: InteriorPath; iv: LeafRange): bool =
  iv.minPt.prefix(ip.numDigits) <= ip and ip <= iv.maxPt.prefix(ip.numDigits)


proc toHex*(path: InteriorPath, withEllipsis = true): string =
  const hexChars = "0123456789abcdef"
  let digits = path.depth
  if not withEllipsis:
    result = newString(digits)
proc len*(ps: PathSegment): int =
  ## Returns the number of nibbles in the range 0..64.
  if ps.bytes.len == 0:
    0
  elif (ps.bytes[0] and 0x10) == 0:
    2 * ps.bytes.len - 2
  else:
    result = newString(min(digits + 3, 64))
    result[^3] = '.'
    result[^2] = '.'
    result[^1] = '.'
    for i in 0 ..< digits:
      result[i] = hexChars[path.digit(i)]
    2 * ps.bytes.len - 1

proc pathRange*(path1, path2: InteriorPath): string =
  path1.toHex(withEllipsis = false) & '-' & path2.toHex(withEllipsis = false)
proc setLen*(ps: var PathSegment; newLen: int) =
  ## Truncate or extend the length (i.e. the number of nibbles) of the argument
  ## `ps` to `newLen` between 0..63. When extending, new nibbles are zero
  ## initialised.
  ## This function throws an assertion defect if the `newLen` argument is
  ## outside the range 0..64.
  doAssert 0 <= newLen and newLen <= 64
  if ps.bytes.len == 0:
    ps.bytes = @[0.byte]
  if (ps.bytes[0] and 0x10) == 0:
    if (newLen and 1) == 0: # both, old and new lengths are even
      ps.bytes.setLen(1 + (newLen shr 1))
    else: # new length odd, need to shift nibbles
      let newBytesLen = (newLen + 1) shr 1
      ps.bytes[0] = ps.bytes[0] or 0x10
      if 1 < ps.bytes.len:
        ps.bytes[0] = ps.bytes[0] or (ps.bytes[1] shr 4)
        for n in 1 ..< min(ps.bytes.len-1, newBytesLen):
          ps.bytes[n] = (ps.bytes[n] shl 4) or (ps.bytes[n+1] shr 4)
      ps.bytes.setLen(newBytesLen)
  else:
    if (newLen and 1) == 1: # both, old and new lengths are odd
      ps.bytes.setLen((newLen + 1) shr 1)
    else: # new even length => shift nibbles right
      let oldBytesLen = ps.bytes.len
      ps.bytes.setLen((newLen shr 1) + 1)
      for n in countDown(min(ps.bytes.len-1,oldBytesLen),1):
        ps.bytes[n] = (ps.bytes[n-1] shl 4) or (ps.bytes[n] shr 4)
      ps.bytes[0] = ps.bytes[0] and 0xd0

proc `$`*(path: InteriorPath): string =
  path.toHex
proc `[]`*(ps: PathSegment; nibbleInx: int): int =
  ## Extract the nibble (aka hex digit) value at the argument position index
  ## `nibbleInx`. If the position index `nibbleInx` does not relate to a valid
  ## nibble position, `0` is returned.
  ##
  ## This function throws an assertion defect if the `nibbleInx` is outside
  ## the range 0..63.
  doAssert 0 <= nibbleInx and nibbleInx < 64
  if ps.bytes.len == 0:
    result = 0
  elif (ps.bytes[0] and 0x10) == 0:
    let byteInx = (nibbleInx shr 1) + 1
    if (nibbleInx and 1) == 0:
      result = ps.bytes[byteInx].int shr 4
    else:
      result = ps.bytes[byteInx].int and 0x0f
  else:
    let byteInx = (nibbleInx + 1) shr 1
    if (nibbleInx and 1) == 0:
      result = ps.bytes[byteInx].int and 0x0f
    else:
      result = ps.bytes[byteInx].int shr 4

proc `$`*(paths: (InteriorPath, InteriorPath)): string =
  pathRange(paths[0], paths[1])
#]#
proc `[]=`*(ps: var PathSegment; nibbleInx: int; value: int) =
  ## Assign a nibble (aka hex) value `value` at position `nibbleInx`. If the
  ## length of the argument `ps` was smaller than the `nibbleInx`, the length
  ## will be extended to include that nibble.
  ##
  ## This function throws an assertion defect if the `nibbleInx` is outside
  ## the range 0..63, or if `value` is outside 0..15.
  doAssert 0 <= nibbleInx and nibbleInx < 64
  doAssert 0 <= value and value < 16
  if ps.len <= nibbleInx:
    if ps.bytes.len == 0:
      ps.bytes = @[0.byte]
    ps.setLen(nibbleInx + 1)
  if (ps.bytes[0] and 0x10) == 0:
    let byteInx = (nibbleInx shr 1) + 1
    if (nibbleInx and 1) == 0:
      ps.bytes[byteInx] = (value.uint8 shl 4) or (ps.bytes[byteInx] and 0x0f)
    else:
      ps.bytes[byteInx] = (ps.bytes[byteInx] and 0xf0) or value.uint8
  else:
    let byteInx = (nibbleInx + 1) shr 1
    if (nibbleInx and 1) == 0:
      ps.bytes[byteInx] = (ps.bytes[byteInx] and 0xf0) or value.uint8
    else:
      ps.bytes[byteInx] = (value.uint8 shl 4) or (ps.bytes[byteInx] and 0x0f)

proc `$`*(ps: PathSegment): string =
  $ps.len & "#" & ps.bytes.mapIt(it.toHex(2)).join.toLowerAscii

# ------------------------------------------------------------------------------
# Public `LeafItem` and `LeafRange` functions
# Public rlp support
# ------------------------------------------------------------------------------

proc u256*(lp: LeafItem): UInt256 = lp.UInt256
proc low*(T: type LeafItem): T = low(UInt256).T
proc high*(T: type LeafItem): T = high(UInt256).T

proc `+`*(a: LeafItem; b: UInt256): LeafItem = (a.u256+b).LeafItem
proc `-`*(a: LeafItem; b: UInt256): LeafItem = (a.u256-b).LeafItem
proc `-`*(a, b: LeafItem): UInt256 = (a.u256 - b.u256)

proc `==`*(a, b: LeafItem): bool = a.u256 == b.u256
proc `<=`*(a, b: LeafItem): bool = a.u256 <= b.u256
proc `<`*(a, b: LeafItem): bool = a.u256 < b.u256


# RLP serialisation for `LeafItem`.
proc read*(rlp: var Rlp, T: type LeafItem): T
proc read*(rlp: var Rlp, T: type NodeTag): T
    {.gcsafe, raises: [Defect,RlpError]} =
  rlp.read(LeafItemData).to(T)
  rlp.read(Hash256).to(T)

proc append*(rlpWriter: var RlpWriter, leafPath: LeafItem) =
  rlpWriter.append(leafPath.to(LeafItemData))
proc append*(writer: var RlpWriter, nid: NodeTag) =
  writer.append(nid.to(Hash256))

# -------------

proc snapRead*(rlp: var Rlp; T: type Account; strict: static[bool] = false): T
    {.gcsafe, raises: [Defect, RlpError]} =
  ## RLP decoding for `Account`. The `snap` RLP representation of the account
  ## differs from standard `Account` RLP. Empty storage hash and empty code
  ## hash are each represented by an RLP zero-length string instead of the
  ## full hash.
  ##
  ## Normally, this read function will silently handle both standard encoding
  ## and `snap` encoding. Setting the argument `strict` to `true`, the function
  ## will throw an exception if the `snap` encoding is violated.
  rlp.tryEnterList()
  result.nonce = rlp.read(typeof(result.nonce))
  result.balance = rlp.read(typeof(result.balance))
  if rlp.blobLen != 0 or not rlp.isBlob:
    result.storageRoot = rlp.read(typeof(result.storageRoot))
    when strict:
      if result.storageRoot == BLANK_ROOT_HASH:
        raise newException(RlpTypeMismatch,
          "BLANK_ROOT_HASH not encoded as empty string in Snap protocol")
  else:
    rlp.skipElem()
    result.storageRoot = BLANK_ROOT_HASH
  if rlp.blobLen != 0 or not rlp.isBlob:
    result.codeHash = rlp.read(typeof(result.codeHash))
    when strict:
      if result.codeHash == EMPTY_SHA3:
        raise newException(RlpTypeMismatch,
          "EMPTY_SHA3 not encoded as empty string in Snap protocol")
  else:
    rlp.skipElem()
    result.codeHash = EMPTY_SHA3

proc snapAppend*(writer: var RlpWriter; account: Account) =
  ## RLP encoding for `Account`. The snap RLP representation of the account
  ## differs from standard `Account` RLP. Empty storage hash and empty code
  ## hash are each represented by an RLP zero-length string instead of the
  ## full hash.
  writer.startList(4)
  writer.append(account.nonce)
  writer.append(account.balance)
  if account.storageRoot == BLANK_ROOT_HASH:
    writer.append("")
  else:
    writer.append(account.storageRoot)
  if account.codeHash == EMPTY_SHA3:
    writer.append("")
  else:
    writer.append(account.codeHash)

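A round-trip sketch for the `snap` account representation (illustration only), assuming some `account: Account` value; `initRlpWriter` and `rlpFromBytes` come from `eth/rlp`:

  var writer = initRlpWriter()
  writer.snapAppend(account) # empty hashes leave as zero-length strings
  var rlp = rlpFromBytes(writer.finish)
  let decoded = rlp.snapRead(Account)
  doAssert decoded.storageRoot == account.storageRoot and
           decoded.codeHash == account.codeHash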
# -------------

proc compactRead*(rlp: var Rlp, T: type PathSegment): T
    {.gcsafe, raises: [Defect,RlpError]} =
  ## Read compact encoded path segment
  rlp.tryEnterList()
  let
    path = rlp.read(array[32, byte])
    length = rlp.read(byte)
  if 64 < length:
    raise newException(
      MalformedRlpError, "More than 64 nibbles for PathSegment")
  if (length and 1) == 0:
    # initialise as even extension
    result.bytes.setLen(1 + (length shr 1))
    for n in 1 ..< result.bytes.len:
      result.bytes[n] = path[n-1]
  else:
    # initialise as odd extension
    result.bytes.setLen((length + 1) shr 1)
    result.bytes[0] = 0x10 or (path[0] shl 4)
    for n in 1 ..< result.bytes.len:
      result.bytes[n] = (path[n-1] shl 4) or (path[n] shr 4)

proc compactAppend*(writer: var RlpWriter, ps: PathSegment) =
  ## Append compact encoded path segment
  var path: array[32, byte]
  if (ps.bytes[0] and 0x10) == 0:
    for n in 1 ..< ps.bytes.len:
      path[n-1] = ps.bytes[n]
  else:
    for n in 1 ..< ps.bytes.len:
      path[n-1] = (ps.bytes[n-1] shl 4) or (ps.bytes[n] shr 4)
    path[ps.bytes.len-1] = ps.bytes[^1] shl 4
  writer.startList(2)
  writer.append(path)
  writer.append(ps.len.byte)

# -------------

proc dbRead*(rlp: var Rlp, T: type PathSegment): T
    {.gcsafe, raises: [Defect,RlpError]} =
  ## Read as stored in the database
  result.bytes = rlp.read(Blob)

proc dbAppend*(writer: var RlpWriter, ps: PathSegment) =
  ## Append in database record format
  writer.append(ps.bytes)

# ------------------------------------------------------------------------------
# Public `NodeTag` and `LeafRange` functions
# ------------------------------------------------------------------------------

proc u256*(lp: NodeTag): UInt256 = lp.UInt256
proc low*(T: type NodeTag): T = low(UInt256).T
proc high*(T: type NodeTag): T = high(UInt256).T

proc `+`*(a: NodeTag; b: UInt256): NodeTag = (a.u256+b).NodeTag
proc `-`*(a: NodeTag; b: UInt256): NodeTag = (a.u256-b).NodeTag
proc `-`*(a, b: NodeTag): UInt256 = (a.u256 - b.u256)

proc `==`*(a, b: NodeTag): bool = a.u256 == b.u256
proc `<=`*(a, b: NodeTag): bool = a.u256 <= b.u256
proc `<`*(a, b: NodeTag): bool = a.u256 < b.u256

proc hash*(a: NodeTag): Hash =
  ## Mixin for `Table` or `KeyedQueue`
  a.to(Hash256).data.hash

proc digestTo*(data: Blob; T: type NodeTag): T =
  ## Hash the `data` argument
  keccak256.digest(data).to(T)

proc freeFactor*(lrs: LeafRangeSet): float =
  ## Free factor, i.e. `#items-free / 2^256` to be used in statistics
  if 0 < lrs.total:
    ((high(LeafItem) - lrs.total).u256 + 1).to(float) / (2.0^256)
    ((high(NodeTag) - lrs.total).u256 + 1).to(float) / (2.0^256)
  elif lrs.chunks == 0:
    1.0
  else:
    0.0

# Printing & pretty printing
proc toHex*(lp: LeafItem): string = lp.to(LeafItemData).toHex
proc `$`*(lp: LeafItem): string = lp.toHex
proc `$`*(nt: NodeTag): string =
  if nt == high(NodeTag):
    "high(NodeTag)"
  elif nt == 0.u256.NodeTag:
    "0"
  else:
    nt.to(Hash256).data.toHex

proc leafRangePp*(a, b: LeafItem): string =
proc leafRangePp*(a, b: NodeTag): string =
  ## Needed for macro generated DSL files like `snap.nim` because the
  ## `distinct` flavour of `LeafItem` is discarded there.
  ## `distinct` flavour of `NodeTag` is discarded there.
  result = "[" & $a
  if a < b:
    result &= ',' & (if b < high(LeafItem): $b else: "high")
  if a != b:
    result &= ',' & $b
  result &= "]"

proc `$`*(a, b: LeafItem): string =
proc `$`*(a, b: NodeTag): string =
  ## Prettified prototype
  leafRangePp(a,b)

@@ -545,9 +545,9 @@ proc peerSyncChainNonEmptyReply(
# Public start/stop and admin functions
# ------------------------------------------------------------------------------

proc workerSetup*(ns: Worker) =
proc workerSetup*(ns: Worker; chainDb: AbstractChainDB) =
  ## Global set up
  ns.fetchSetup()
  ns.fetchSetup(chainDb)

proc workerRelease*(ns: Worker) =
  ## Global clean up
@@ -559,7 +559,7 @@ proc workerStart*(sp: WorkerBuddy): bool =
     sp.peer.supports(protocol.eth) and
     sp.peer.state(protocol.eth).initialized:

    sp.ctrl.init(fullyRunning = true)
    sp.ctrl.init(running = true)

    # Initialise data retrieval
    sp.fetchStart()
@@ -575,8 +575,9 @@ proc workerStart*(sp: WorkerBuddy): bool =

proc workerStop*(sp: WorkerBuddy) =
  ## Clean up this peer
  sp.ctrl.stopped = true
  sp.fetchStop()
  if not sp.ctrl.stopped:
    sp.ctrl.stopped = true
    sp.fetchStop()

proc workerLockedOk*(sp: WorkerBuddy): bool =
  sp.hunt.syncMode == SyncLocked
@@ -611,7 +612,7 @@ proc workerExec*(sp: WorkerBuddy) {.async.} =
    trace trEthRecvError & "waiting for GetBlockHeaders reply", peer=sp,
      error=e.msg
    inc sp.stats.major.networkErrors
    sp.ctrl.stopped = true
    sp.workerStop()
    return

  if reply.isNone:

@@ -10,29 +10,32 @@
# except according to those terms.

import
  std/math,
  chronos,
  eth/[common/eth_types, p2p],
  nimcrypto/keccak,
  stew/keyed_queue,
  stew/[interval_set, keyed_queue],
  stint,
  ../../../utils/interval_set,
  ../../types,
  ../path_desc,
  ./fetch/fetch_accounts,
  ./fetch/[fetch_accounts, proof_db],
  "."/[ticker, worker_desc]

{.push raises: [Defect].}

type
  FetchEx = ref object of WorkerFetchBase
    accTab: AccLruCache ## Global worker data
    quCount: uint64 ## Count visited roots
    lastPivot: LeafItem ## Used for calculating pivots
    accTab: AccLruCache ## Global worker data
    quCount: uint64 ## Count visited roots
    lastPivot: NodeTag ## Used for calculating pivots
    accRangeMaxLen: UInt256 ## Maximal interval length, high(u256)/#peers
    pdb: ProofDb ## Proof processing

  AccTabEntryRef = ref object
    ## Global worker table
    avail: LeafRangeSet ## Accounts to visit (organised as ranges)
    pivot: LeafItem ## Where to start fetching from
    avail: LeafRangeSet ## Accounts to visit (organised as ranges)
    pivot: NodeTag ## Where to start fetching from
    base: WorkerFetchBase ## Back reference (`FetchEx` not working, here)

  AccLruCache =
    KeyedQueue[TrieHash,AccTabEntryRef]
@@ -43,7 +46,7 @@ logScope:
const
  accRangeMaxLen = ##\
    ## ask for that many accounts at once (note that the range is sparse)
    (high(LeafItem) - low(LeafItem)) div 1000
    (high(NodeTag) - low(NodeTag)) div 1000

  pivotAccIncrement = ##\
    ## increment when `lastPivot` would stay put
@@ -53,12 +56,9 @@ const
# Private helpers
# ------------------------------------------------------------------------------

proc withMaxLen(iv: LeafRange): LeafRange =
  ## Reduce accounts interval to maximal size
  if 0 < iv.len and iv.len < accRangeMaxLen:
    iv
  else:
    LeafRange.new(iv.minPt, iv.minPt + (accRangeMaxLen - 1).u256)
proc `==`(a, b: AccTabEntryRef): bool =
  ## Just to make things clear, should be default action anyway
  cast[pointer](a) == cast[pointer](b)

proc fetchEx(ns: Worker): FetchEx =
  ## Getter
@@ -68,6 +68,14 @@ proc fetchEx(sp: WorkerBuddy): FetchEx =
  ## Getter
  sp.ns.fetchEx

proc withMaxLen(atb: AccTabEntryRef; iv: LeafRange): LeafRange =
  ## Reduce accounts interval to maximal size
  let maxLen = atb.base.FetchEx.accRangeMaxLen
  if 0 < iv.len and iv.len <= maxLen:
    iv
  else:
    LeafRange.new(iv.minPt, iv.minPt + maxLen - 1.u256)

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
@@ -76,7 +84,7 @@ proc getAccTab(sp: WorkerBuddy; key: TrieHash): AccTabEntryRef =
  ## Fetch LRU table item, create a new one if missing.
  # fetch existing table (if any)
  block:
    let rc = sp.ns.fetchEx.accTab.lruFetch(key)
    let rc = sp.fetchEx.accTab.lruFetch(key)
    if rc.isOk:
      # Item was moved to the end of queue
      return rc.value
@@ -84,41 +92,51 @@ proc getAccTab(sp: WorkerBuddy; key: TrieHash): AccTabEntryRef =
  # Calculate some new start address for the range fetcher
  while true:
    # Derive pivot from last interval set in table
    let rc = sp.ns.fetchEx.accTab.last
    let rc = sp.fetchEx.accTab.last
    if rc.isErr:
      break # no more => stop
    # Check last interval
    let blkRc = rc.value.data.avail.le() # rightmost interval
    if blkRc.isErr:
      # Delete useless interval set, repeat
      sp.ns.fetchEx.accTab.del(rc.value.key)
      sp.fetchEx.accTab.del(rc.value.key)
      continue
    # use increasing `pivot` values
    if sp.ns.fetchEx.lastPivot < blkRc.value.minPt:
    if sp.fetchEx.lastPivot < blkRc.value.minPt:
      sp.ns.fetchEx.lastPivot = blkRc.value.minPt
      break
    if sp.ns.fetchEx.lastPivot < high(LeafItem) - pivotAccIncrement:
      sp.ns.fetchEx.lastPivot = sp.ns.fetchEx.lastPivot + pivotAccIncrement
    if sp.fetchEx.lastPivot < high(NodeTag) - pivotAccIncrement:
      sp.fetchEx.lastPivot = sp.ns.fetchEx.lastPivot + pivotAccIncrement
      break
    # Otherwise start at 0
    sp.ns.fetchEx.lastPivot = 0.to(LeafItem)
    sp.fetchEx.lastPivot = 0.to(NodeTag)
    break

  let accRange = AccTabEntryRef(
    pivot: sp.ns.fetchEx.lastPivot,
    avail: LeafRangeSet.init())
    pivot: sp.fetchEx.lastPivot,
    avail: LeafRangeSet.init(),
    base: sp.fetchEx)

  trace "New accounts list for syncing",
    peer=sp, stateRoot=key, pivot=sp.ns.fetchEx.lastPivot
    peer=sp, stateRoot=key, pivot=sp.fetchEx.lastPivot

  # Statistics
  sp.ns.fetchEx.quCount.inc
  sp.fetchEx.quCount.inc

  # Pre-filled with the largest possible interval
  discard accRange.avail.merge(low(LeafItem),high(LeafItem))
  discard accRange.avail.merge(low(NodeTag),high(NodeTag))

  # Append and curb LRU table as needed
  return sp.ns.fetchEx.accTab.lruAppend(key, accRange, sp.ns.buddiesMax)
  return sp.fetchEx.accTab.lruAppend(key, accRange, sp.ns.buddiesMax)


proc sameAccTab(sp: WorkerBuddy; key: TrieHash; accTab: AccTabEntryRef): bool =
  ## Verify that account list entry has not changed.
  let rc = sp.fetchEx.accTab.eq(key)
  if rc.isErr:
    return accTab.isNil
  if not accTab.isNil:
    return accTab == rc.value

proc fetchAccRange(atb: AccTabEntryRef): Result[LeafRange,void] =
@@ -137,14 +155,14 @@ proc fetchAccRange(atb: AccTabEntryRef): Result[LeafRange,void] =
  # Take the next interval to the right
  let rc = atb.avail.ge(atb.pivot)
  if rc.isOk:
    let iv = rc.value.withMaxLen
    let iv = atb.withMaxLen(rc.value)
    discard atb.avail.reduce(iv)
    return ok(iv)

  # Otherwise wrap around
  let rc = atb.avail.ge()
  if rc.isOk:
    let iv = rc.value.withMaxLen
    let iv = atb.withMaxLen(rc.value)
    discard atb.avail.reduce(iv)
    return ok(iv)

@@ -154,29 +172,47 @@ proc fetchAccRange(atb: AccTabEntryRef): Result[LeafRange,void] =
proc putAccRange(atb: AccTabEntryRef; iv: LeafRange) =
  discard atb.avail.merge(iv)

proc putAccRange(atb: AccTabEntryRef; a, b: LeafItem) =
proc putAccRange(atb: AccTabEntryRef; a, b: NodeTag) =
  discard atb.avail.merge(a, b)


proc haveAccRange(atb: AccTabEntryRef): bool =
  0 < atb.avail.chunks


proc meanStdDev(sum, sqSum: float; length: int): (float,float) =
  if 0 < length:
    result[0] = sum / length.float
    result[1] = sqrt(sqSum / length.float - result[0] * result[0])

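The ticker statistics below use the usual one-pass accumulation: keep a running sum and sum of squares, then mean = sum/n and (population) standard deviation = sqrt(sqSum/n - mean^2). A quick sanity check of `meanStdDev` (sketch):

  let (mean, sigma) = meanStdDev(1.0 + 2.0 + 3.0, 1.0 + 4.0 + 9.0, 3)
  doAssert mean == 2.0                          # (1 + 2 + 3) / 3
  doAssert abs(sigma - sqrt(2.0/3.0)) < 1e-12   # std deviation of {1, 2, 3}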
proc tickerStats(ns: Worker): TickerStats {.gcsafe.} =
|
||||
result.totalQueues = ns.fetchEx.quCount
|
||||
for it in ns.fetchEx.accTab.nextValues:
|
||||
if 0 < it.avail.chunks:
|
||||
result.avFillFactor += it.avail.freeFactor
|
||||
result.activeQueues.inc
|
||||
var aSum, aSqSum, uSum, uSqSum: float
|
||||
for kvp in ns.fetchEx.accTab.nextPairs:
|
||||
|
||||
# Accounts mean & variance
|
||||
let aLen = ns.fetchEx.pdb.accountsLen(kvp.key).float
|
||||
aSum += aLen
|
||||
aSqSum += aLen * aLen
|
||||
|
||||
# Fill utilisation mean & variance
|
||||
let fill = kvp.data.avail.freeFactor
|
||||
uSum += fill
|
||||
uSqSum += fill * fill
|
||||
|
||||
result.activeQueues = ns.fetchEx.accTab.len
|
||||
result.flushedQueues = ns.fetchEx.quCount.int64 - result.activeQueues
|
||||
result.accounts = meanStdDev(aSum, aSqSum, result.activeQueues)
|
||||
result.fillFactor = meanStdDev(uSum, uSqSum, result.activeQueues)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public start/stop and admin functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc fetchSetup*(ns: Worker) =
|
||||
proc fetchSetup*(ns: Worker; chainDb: AbstractChainDB) =
|
||||
## Global set up
|
||||
ns.fetchBase = FetchEx()
|
||||
ns.fetchEx.accTab.init(ns.buddiesMax)
|
||||
ns.fetchEx.accRangeMaxLen = high(UInt256) div ns.buddiesMax.u256
|
||||
ns.fetchEx.pdb.init(chainDb.getTrieDB)
|
||||
ns.tickerSetup(cb = tickerStats)
|
||||
|
||||
proc fetchRelease*(ns: Worker) =
|
||||
@ -226,63 +262,76 @@ proc fetch*(sp: WorkerBuddy) {.async.} =
|
||||
trace "Fetching from peer", peer=sp, ctrlState=sp.ctrl.state
|
||||
sp.tickerStartPeer()
|
||||
|
||||
var
|
||||
stateRoot = sp.ctrl.stateRoot.get
|
||||
accTab = sp.getAccTab(stateRoot)
|
||||
|
||||
while not sp.ctrl.stopped:
|
||||
|
||||
if not accTab.haveAccRange():
|
||||
trace "Nothing more to sync from this peer", peer=sp
|
||||
while not accTab.haveAccRange():
|
||||
await sleepAsync(5.seconds) # TODO: Use an event trigger instead.
|
||||
|
||||
# We need a state root and an access range list (depending on state root)
|
||||
if sp.ctrl.stateRoot.isNone:
|
||||
trace "No current state root for this peer", peer=sp
|
||||
trace "Currently no state root", peer=sp
|
||||
# Wait for a new state root
|
||||
while not sp.ctrl.stopped and
|
||||
accTab.haveAccRange() and
|
||||
sp.ctrl.stateRoot.isNone:
|
||||
await sleepAsync(5.seconds) # TODO: Use an event trigger instead.
|
||||
await sleepAsync(5.seconds)
|
||||
continue
|
||||
|
||||
if stateRoot != sp.ctrl.stateRoot.get:
|
||||
# Ok, here is the `stateRoot`, tentatively try the access range list
|
||||
let
|
||||
stateRoot = sp.ctrl.stateRoot.get
|
||||
accTab = sp.getAccTab(stateRoot)
|
||||
sp.ctrl.stopped = false
|
||||
|
||||
if sp.ctrl.stopRequest:
|
||||
trace "Pausing sync until we get a new state root", peer=sp
|
||||
if not accTab.haveAccRange():
|
||||
trace "Currently no account ranges", peer=sp
|
||||
# Account ranges exhausted, wait for a new state root
|
||||
while not sp.ctrl.stopped and
|
||||
accTab.haveAccRange() and
|
||||
sp.ctrl.stateRoot.isSome and
|
||||
stateRoot == sp.ctrl.stateRoot.get:
|
||||
await sleepAsync(5.seconds) # TODO: Use an event trigger instead.
|
||||
stateRoot == sp.ctrl.stateRoot.get and
|
||||
sp.sameAccTab(stateRoot, accTab) and
|
||||
not accTab.haveAccRange():
|
||||
await sleepAsync(5.seconds)
|
||||
continue
|
||||
|
||||
if sp.ctrl.stopped:
|
||||
continue
|
||||
|
||||
# Rotate LRU table
|
||||
accTab = sp.getAccTab(stateRoot)
|
||||
|
||||
# Get a new interval, a range of accounts to visit
|
||||
# Get a range of accounts to fetch from
|
||||
let iv = block:
|
||||
let rc = accTab.fetchAccRange()
|
||||
if rc.isErr:
|
||||
continue
|
||||
rc.value
|
||||
|
||||
# Fetch data for this interval, `fetchAccounts()` returns the range covered
|
||||
let rc = await sp.fetchAccounts(stateRoot, iv)
|
||||
if rc.isErr:
|
||||
accTab.putAccRange(iv) # fail => interval back to pool
|
||||
elif rc.value.maxPt < iv.maxPt:
|
||||
# Fetch data for this range delegated to `fetchAccounts()`
|
||||
let dd = block:
|
||||
let rc = await sp.fetchAccounts(stateRoot, iv)
|
||||
if rc.isErr:
|
||||
accTab.putAccRange(iv) # fail => interval back to pool
|
||||
case rc.error:
|
||||
of NetworkProblem, MissingProof, AccountsMinTooSmall,
|
||||
AccountsMaxTooLarge:
|
||||
# Mark this peer dead, i.e. avoid fetching from this peer for a while
|
||||
sp.stats.major.networkErrors.inc()
|
||||
sp.ctrl.zombie = true
|
||||
of NothingSerious:
|
||||
discard
|
||||
of NoAccountsForStateRoot:
|
||||
# One could wait for a new state root but this may result in a
|
||||
# temporary standstill if all `fetch()` instances do the same. So
|
||||
# waiting for a while here might be preferable in the hope that the
|
||||
# situation changes at the peer.
|
||||
await sleepAsync(5.seconds)
|
||||
continue
|
||||
rc.value
|
||||
|
||||
# Register consumed accounts range
|
||||
if dd.consumed < iv.len:
|
||||
# return some unused range
|
||||
accTab.putAccRange(rc.value.maxPt + 1.u256, iv.maxPt)
|
||||
accTab.putAccRange(iv.minPt + dd.consumed.u256, iv.maxPt)
|
||||
|
||||
# Process data
|
||||
block:
|
||||
let rc = sp.ns.fetchEx.pdb.mergeProved(stateRoot, iv.minPt, dd.data)
|
||||
if rc.isErr:
|
||||
discard # ??
|
||||
|
||||
# while end
|
||||
|
||||
trace "No more sync available from this peer", peer=sp
|
||||
trace "Done syncing for this peer", peer=sp, ctrlState=sp.ctrl.state
|
||||
sp.tickerStopPeer()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -12,22 +12,15 @@
|
||||
## This module fetches the Ethereum account state trie from network peers by
|
||||
## traversing leaves of the trie in leaf path order, making network requests
|
||||
## using the `snap` protocol.
|
||||
##
|
||||
## From the leaves it is possible to reconstruct parts of a full trie. With a
|
||||
## separate trie traversal process it is possible to efficiently update the
|
||||
## leaf states for related tries (new blocks), and merge partial data from
|
||||
## different related tries (blocks at different times) together in a way that
|
||||
## eventually becomes a full trie for a single block.
|
||||
|
||||
import
|
||||
std/sets,
|
||||
chronos,
|
||||
eth/[common/eth_types, p2p],
|
||||
nimcrypto/keccak,
|
||||
../../../../utils/interval_set,
|
||||
stew/interval_set,
|
||||
"../../.."/[protocol, protocol/trace_config, types],
|
||||
../../path_desc,
|
||||
".."/[ticker, worker_desc]
|
||||
../worker_desc
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
@ -38,6 +31,19 @@ const
|
||||
snapRequestBytesLimit = 2 * 1024 * 1024
|
||||
## Soft bytes limit to request in `snap` protocol calls.
|
||||
|
||||
type
|
||||
FetchError* = enum
|
||||
NothingSerious,
|
||||
MissingProof,
|
||||
AccountsMinTooSmall,
|
||||
AccountsMaxTooLarge,
|
||||
NoAccountsForStateRoot,
|
||||
NetworkProblem
|
||||
|
||||
FetchAccounts* = object
|
||||
consumed*: UInt256 ## Leftmost accounts used from argument range
|
||||
data*: WorkerAccountRange ## reply data
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
@ -46,7 +52,7 @@ proc getAccountRange(
|
||||
sp: WorkerBuddy;
|
||||
root: TrieHash;
|
||||
iv: LeafRange
|
||||
): Future[Result[Option[accountRangeObj],void]] {.async.} =
|
||||
): Future[Result[Option[WorkerAccountRange],void]] {.async.} =
|
||||
try:
|
||||
let reply = await sp.peer.getAccountRange(
|
||||
root.to(Hash256), iv.minPt, iv.maxPt, snapRequestBytesLimit)
|
||||
@ -65,79 +71,96 @@ proc fetchAccounts*(
|
||||
peer: WorkerBuddy;
|
||||
stateRoot: TrieHash;
|
||||
iv: LeafRange
|
||||
): Future[Result[LeafRange,void]] {.async.} =
|
||||
): Future[Result[FetchAccounts,FetchError]] {.async.} =
|
||||
## Fetch data using the `snap#` protocol, returns the range covered.
|
||||
if trSnapTracePacketsOk:
|
||||
trace trSnapSendSending & "GetAccountRange", peer,
|
||||
accRange=iv, stateRoot, bytesLimit=snapRequestBytesLimit
|
||||
|
||||
let reply = block:
|
||||
var dd = block:
|
||||
let rc = await peer.getAccountRange(stateRoot, iv)
|
||||
if rc.isErr:
|
||||
inc peer.stats.major.networkErrors
|
||||
peer.ctrl.stopped = true
|
||||
return err()
|
||||
return err(NetworkProblem)
|
||||
if rc.value.isNone:
|
||||
trace trSnapRecvTimeoutWaiting & "for reply to GetAccountRange", peer
|
||||
return err()
|
||||
rc.value.get
|
||||
return err(NothingSerious)
|
||||
FetchAccounts(
|
||||
consumed: iv.len,
|
||||
data: rc.value.get)
|
||||

  let
    accounts = reply.accounts
    nAccounts = accounts.len

    # TODO: We're not currently verifying boundary proofs, but we do depend on
    #       whether there is a proof supplied. Unlike Snap sync, the Pie sync
    #       algorithm doesn't verify most boundary proofs at this stage.
    proof = reply.proof
    nProof = proof.len
    nAccounts = dd.data.accounts.len
    nProof = dd.data.proof.len

  if nAccounts == 0:
    # If there's no proof, this reply means the peer has no accounts available
    # in the range for this query. But if there's a proof, this reply means
    # there are no more accounts starting at path `origin` up to max path.
    # This makes all the difference to terminating the fetch. For now we'll
    # trust the mere existence of the proof rather than verifying it.
    # github.com/ethereum/devp2p/blob/master/caps/snap.md#getaccountrange-0x00:
    # Notes:
    # * Nodes must always respond to the query.
    # * If the node does not have the state for the requested state root, it
    #   must return an empty reply. It is the responsibility of the caller to
    #   query a state not older than 128 blocks.
    # * The responding node is allowed to return less data than requested (own
    #   QoS limits), but the node must return at least one account. If no
    #   accounts exist between startingHash and limitHash, then the first (if
    #   any) account after limitHash must be provided.
    if nProof == 0:
      trace trSnapRecvReceived & "EMPTY AccountRange message", peer,
      # Maybe try another peer
      trace trSnapRecvReceived & "EMPTY AccountRange reply", peer,
        nAccounts, nProof, accRange="n/a", reqRange=iv, stateRoot
      # Don't keep retrying snap for this state.
      peer.ctrl.stopRequest = true
      return err()
    else:
      let accRange = LeafRange.new(iv.minPt, high(LeafItem))
      trace trSnapRecvReceived & "END AccountRange message", peer,
        nAccounts, nProof, accRange, reqRange=iv, stateRoot
      # Current slicer can't accept more result data than was requested, so
      # just leave the requested slice claimed and update statistics.
      return ok(iv)
      return err(NoAccountsForStateRoot)

    # So there is no data, otherwise an account beyond the interval end
    # `iv.maxPt` would have been returned.
    trace trSnapRecvReceived & "END AccountRange message", peer,
      nAccounts, nProof, accRange=LeafRange.new(iv.minPt, high(NodeTag)),
      reqRange=iv, stateRoot
    dd.consumed = high(NodeTag) - iv.minPt
    return ok(dd)

  let (accMinPt, accMaxPt) =
    (dd.data.accounts[0].accHash, dd.data.accounts[^1].accHash)

  if nProof == 0:
    # github.com/ethereum/devp2p/blob/master/caps/snap.md#accountrange-0x01
    # Notes:
    # * If the account range is the entire state (requested origin was 0x00..0
    #   and all accounts fit into the response), no proofs should be sent along
    #   the response. This is unlikely for accounts, but since it's a common
    #   situation for storage slots, this clause keeps the behavior the same
    #   across both.
    if 0.to(NodeTag) < iv.minPt:
      trace trSnapRecvProtocolViolation & "missing proof in AccountRange", peer,
        nAccounts, nProof, accRange=LeafRange.new(iv.minPt, accMaxPt),
        reqRange=iv, stateRoot
      return err(MissingProof)
    # TODO: How do I know that the full accounts list is correct?

  if accMinPt < iv.minPt:
    # Not allowed
    trace trSnapRecvProtocolViolation & "min too small in AccountRange", peer,
      nAccounts, nProof, accRange=LeafRange.new(accMinPt, accMaxPt),
      reqRange=iv, stateRoot
    return err(AccountsMinTooSmall)

  if iv.maxPt < accMaxPt:
    # github.com/ethereum/devp2p/blob/master/caps/snap.md#getaccountrange-0x00:
    # Notes:
    # * [..]
    # * [..]
    # * [..] If no accounts exist between startingHash and limitHash, then the
    #   first (if any) account after limitHash must be provided.
    if 1 < nAccounts:
      trace trSnapRecvProtocolViolation & "max too large in AccountRange", peer,
        nAccounts, nProof, accRange=LeafRange.new(iv.minPt, accMaxPt),
        reqRange=iv, stateRoot
      return err(AccountsMaxTooLarge)

  let accRange = LeafRange.new(iv.minPt, accounts[^1].accHash)
  trace trSnapRecvReceived & "AccountRange message", peer,
    accounts=accounts.len, proofs=proof.len, accRange,
    nAccounts, nProof, accRange=LeafRange.new(iv.minPt, accMaxPt),
    reqRange=iv, stateRoot

  # Missing proof isn't allowed, unless `minPt` is min path in which case
  # there might be no proof if the result spans the entire range.
  if proof.len == 0 and iv.minPt != low(LeafItem):
    trace trSnapRecvProtocolViolation & "missing proof in AccountRange", peer,
      nAccounts, nProof, accRange, reqRange=iv, stateRoot
    return err()

  if accRange.maxPt < iv.maxPt:
    peer.tickerCountAccounts(0, nAccounts)
    return ok(LeafRange.new(iv.minPt, accRange.maxPt))

  var keepAccounts = nAccounts
  # Current slicer can't accept more result data than was requested.
  # So truncate to limit before updating statistics.
  while iv.maxPt < accounts[keepAccounts-1].accHash:
    dec keepAccounts
    if keepAccounts == 0:
      break

  peer.tickerCountAccounts(0, keepAccounts)
  return ok(iv) # all of `iv` used
  dd.consumed = (accMaxPt - iv.minPt) + 1
  return ok(dd)
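(Editorial note: the three protocol checks above can be summarised as a
stand-alone predicate. An illustrative sketch under the module's types, not
code from the commit; `checkReply` is a hypothetical name.)

  proc checkReply(iv: LeafRange; accMinPt, accMaxPt: NodeTag;
                  nAccounts, nProof: int): Result[void,FetchError] =
    if nProof == 0 and 0.to(NodeTag) < iv.minPt:
      return err(MissingProof)          # proof only omissible for whole range
    if accMinPt < iv.minPt:
      return err(AccountsMinTooSmall)   # nothing below the requested base
    if iv.maxPt < accMaxPt and 1 < nAccounts:
      return err(AccountsMaxTooLarge)   # at most one account past the limit
    ok()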

# ------------------------------------------------------------------------------
# End

780 nimbus/sync/snap/worker/fetch/proof_db.nim Normal file
@ -0,0 +1,780 @@
# Nimbus - Fetch account and storage states from peers by snapshot traversal
#
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

import
  std/[algorithm, hashes, options, sequtils, sets, strutils, strformat, tables],
  chronos,
  eth/[common/eth_types, p2p, rlp, trie/db],
  nimcrypto/keccak,
  stew/[byteutils, interval_set],
  stint,
  ../../../../db/storage_types,
  "../../.."/[protocol, types],
  ../../path_desc,
  ../worker_desc

{.push raises: [Defect].}

logScope:
  topics = "snap-proof"

const
  RowColumnParserDump = false
  NibbleFollowDump = false # true

type
  ProofError* = enum
    RlpEncoding
    RlpBlobExpected
    RlpNonEmptyBlobExpected
    RlpBranchLinkExpected
    RlpListExpected
    Rlp2Or17ListEntries
    RlpExtPathEncoding
    RlpLeafPathEncoding
    RlpRecTypeError
    ImpossibleKeyError
    RowUnreferenced
    AccountSmallerThanBase
    AccountsNotSrictlyIncreasing
    LastAccountProofFailed
    MissingMergeBeginDirective
    StateRootDiffers

  ProofRecType = enum
    Branch,
    Extension,
    Leaf

  StatusRec = object
    nAccounts: int
    nProofs: int

  AccountRec = ##\
    ## Account entry record
    distinct Account

  ProofRec = object
    ## Proofs entry record
    case kind: ProofRecType
    of Branch:
      vertex: array[16,NodeTag]
      value: Blob                # name starts with a `v` as in vertex
    of Extension:
      extend: PathSegment
      follow: NodeTag
    of Leaf:
      path: PathSegment
      payload: Blob              # name starts with a `p` as in path

  ProofKvp = object
    key: NodeTag
    data: Option[ProofRec]

  ProofDb* = object
    keyMap: Table[NodeTag,uint]       ## For debugging only

    rootTag: NodeTag                  ## Current root node
    rootHash: TrieHash                ## Root node as hash
    stat: StatusRec                   ## table statistics

    db: TrieDatabaseRef               ## general database
    dbTx: DbTransaction               ## Rollback state capture

    newAccs: seq[(NodeTag,NodeTag)]   ## New accounts group: (base,last)
    newProofs: seq[NodeTag]           ## Newly added proofs records
    refPool: HashSet[NodeTag]         ## New proofs references recs

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

template noRlpError(info: static[string]; code: untyped) =
  try:
    code
  except RlpError as e:
    raiseAssert "Inconceivable (" & info & "): " & e.msg

proc read(rlp: var Rlp; T: type ProofRec): T =
  ## RLP mixin
  noRlpError("read(ProofRec)"):
    result.kind = rlp.read(typeof result.kind)
    rlp.tryEnterList()
    case result.kind:
    of Branch:
      result.vertex = rlp.read(typeof result.vertex)
      result.value = rlp.read(typeof result.value)
    of Extension:
      result.extend = rlp.dbRead(typeof result.extend)
      result.follow = rlp.read(typeof result.follow)
    of Leaf:
      result.path = rlp.dbRead(typeof result.path)
      result.payload = rlp.read(typeof result.payload)

proc append(writer: var RlpWriter; rec: ProofRec) =
  ## RLP mixin
  append(writer, rec.kind)
  startList(writer, 2)
  case rec.kind:
  of Branch:
    append(writer, rec.vertex)
    append(writer, rec.value)
  of Extension:
    dbAppend(writer, rec.extend)
    append(writer, rec.follow)
  of Leaf:
    dbAppend(writer, rec.path)
    append(writer, rec.payload)

proc to(w: TrieHash; T: type NodeTag): T =
  ## Syntactic sugar
  w.Hash256.to(T)

proc to(w: AccountRec; T: type Account): T =
  ## Syntactic sugar
  w.T

proc to(w: Account; T: type AccountRec): T =
  ## Syntactic sugar
  w.T
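(Editorial note: an illustrative round trip through the two RLP mixins
above, assuming the module context; `rlp.encode` and `decode` come from
`eth/rlp` as imported at the top of this file.)

  let rec = ProofRec(kind: Branch)                # default branch record
  let wire = rlp.encode(rec)                      # uses the `append()` mixin
  doAssert wire.decode(ProofRec).kind == Branch   # uses the `read()` mixin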


func nibble(a: array[32,byte]; inx: int): int =
  let byteInx = inx shr 1
  if byteInx < 32:
    if (inx and 1) == 0:
      result = (a[byteInx] shr 4).int
    else:
      result = (a[byteInx] and 15).int
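(Editorial example for `nibble`: an even index selects the high nibble of a
byte, an odd index the low nibble.)

  var a: array[32,byte]
  a[0] = 0x6f
  doAssert a.nibble(0) == 0x6 and a.nibble(1) == 0xf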

proc clearJournal(pv: var ProofDb) =
  pv.newAccs.setLen(0)
  pv.newProofs.setLen(0)
  pv.refPool.clear

# ------------------------------------------------------------------------------
# Private debugging helpers
# ------------------------------------------------------------------------------

import
  ../../../../constants

template noPpError(info: static[string]; code: untyped) =
  try:
    code
  except ValueError as e:
    raiseAssert "Inconceivable (" & info & "): " & e.msg
  except KeyError as e:
    raiseAssert "Not possible (" & info & "): " & e.msg

proc pp(s: string; hex = false): string =
  if hex:
    let n = (s.len + 1) div 2
    (if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. s.len-1]) &
      "[" & (if 0 < n: "#" & $n else: "") & "]"
  elif s.len <= 30:
    s
  else:
    (if (s.len and 1) == 0: s[0 ..< 8] else: "0" & s[0 ..< 7]) &
      "..(" & $s.len & ").." & s[s.len-16 ..< s.len]

proc pp(a: Hash256; collapse = true): string =
  if not collapse:
    a.data.mapIt(it.toHex(2)).join.toLowerAscii
  elif a == ZERO_HASH256:
    "ZERO_HASH256"
  elif a == BLANK_ROOT_HASH:
    "BLANK_ROOT_HASH"
  elif a == EMPTY_UNCLE_HASH:
    "EMPTY_UNCLE_HASH"
  elif a == EMPTY_SHA3:
    "EMPTY_SHA3"
  else:
    a.data.mapIt(it.toHex(2)).join[56 .. 63].toLowerAscii

proc pp(a: NodeHash|TrieHash; collapse = true): string =
  a.Hash256.pp(collapse)

proc pp(a: NodeTag; collapse = true): string =
  a.to(Hash256).pp(collapse)

proc toKey(a: NodeTag; pv: var ProofDb): uint =
  noPpError("pp(NodeTag)"):
    if not pv.keyMap.hasKey(a):
      pv.keyMap[a] = pv.keyMap.len.uint + 1
    result = pv.keyMap[a]

proc pp(a: NodeTag; pv: var ProofDb): string =
  $a.toKey(pv)

proc pp(q: openArray[byte]; noHash = false): string =
  if q.len == 32 and not noHash:
    var a: array[32,byte]
    for n in 0..31: a[n] = q[n]
    ($Hash256(data: a)).pp
  else:
    q.toSeq.mapIt(it.toHex(2)).join.toLowerAscii.pp(hex = true)

proc pp(blob: Blob): string =
  blob.mapIt(it.toHex(2)).join

proc pp(a: Account): string =
  noPpError("pp(Account)"):
    result = &"({a.nonce},{a.balance},{a.storageRoot},{a.codeHash})"

proc pp(sa: SnapAccount): string =
  "(" & $sa.accHash & "," & sa.accBody.pp & ")"

proc pp(al: seq[SnapAccount]): string =
  result = " @["
  noPpError("pp(seq[SnapAccount])"):
    for n,rec in al:
      result &= &"| # <{n}>| {rec.pp},"
  if 10 < result.len:
    result[^1] = ']'
  else:
    result &= "]"

proc pp(blobs: seq[Blob]): string =
  result = " @["
  noPpError("pp(seq[Blob])"):
    for n,rec in blobs:
      result &= "| # <" & $n & ">| \"" & rec.pp & "\".hexToSeqByte,"
  if 10 < result.len:
    result[^1] = ']'
  else:
    result &= "]"

proc pp(hs: seq[NodeTag]; pv: var ProofDb): string =
  "<" & hs.mapIt(it.pp(pv)).join(",") & ">"

proc pp(hs: HashSet[NodeTag]; pv: var ProofDb): string =
  "{" & toSeq(hs.items).mapIt(it.toKey(pv)).sorted.mapIt($it).join(",") & "}"

proc pp(rec: ProofRec; pv: var ProofDb): string =
  noPpError("pp(ProofRec)"):
    case rec.kind:
    of Branch: result &=
      "b(" & rec.vertex.mapIt(it.pp(pv)).join(",") & "," &
        rec.value.pp.pp(true) & ")"
    of Leaf: result &=
      "l(" & ($rec.path).pp(true) & "," & rec.payload.pp.pp(true) & ")"
    of Extension: result &=
      "x(" & ($rec.extend).pp(true) & "," & rec.follow.pp(pv) & ")"

proc pp(rec: Option[ProofRec]; pv: var ProofDb): string =
  if rec.isSome:
    rec.get.pp(pv)
  else:
    "n/a"

proc pp(q: seq[ProofKvp]; pv: var ProofDb): string =
  result="@["
  for kvp in q:
    result &= "(" & kvp.key.pp(pv) & "," & kvp.data.pp(pv) & "),"
  if q.len == 0:
    result &= "]"
  else:
    result[^1] = ']'

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

template mkProofKey(pv: ProofDb; tag: NodeTag): openArray[byte] =
  tag.to(Hash256).snapSyncProofKey.toOpenArray

proc getProofsRec(pv: ProofDb; tag: NodeTag): Result[ProofRec,void] =
  let recData = pv.db.get(pv.mkProofKey(tag))
  if 0 < recData.len:
    return ok(recData.decode(ProofRec))
  err()

proc hasProofsRec(pv: ProofDb; tag: NodeTag): bool =
  pv.db.contains(pv.mkProofKey(tag))

proc collectRefs(pv: var ProofDb; rec: ProofRec) =
  case rec.kind:
  of Branch:
    for v in rec.vertex:
      pv.refPool.incl v
  of Extension:
    pv.refPool.incl rec.follow
  of Leaf:
    discard

proc collectRefs(pv: var ProofDb; tag: NodeTag) =
  let rc = pv.getProofsRec(tag)
  if rc.isOk:
    pv.collectRefs(rc.value)

proc addProofsRec(pv: var ProofDb; tag: NodeTag; rec: ProofRec) =
  #debug "addProofsRec", size=pv.nProofs, tag=tag.pp(pv), rec=rec.pp(pv)
  if not pv.hasProofsRec(tag):
    pv.db.put(pv.mkProofKey(tag), rlp.encode(rec))
    pv.stat.nProofs.inc
    pv.newProofs.add tag # to be committed
  # Always add references, the rec might have been added earlier outside
  # the current transaction.
  pv.collectRefs(rec)

# -----------

template mkAccKey(pv: ProofDb; tag: NodeTag): openArray[byte] =
  snapSyncAccountKey(tag.to(Hash256), pv.rootHash.Hash256).toOpenArray

proc hasAccountRec(pv: ProofDb; tag: NodeTag): bool =
  pv.db.contains(pv.mkAccKey(tag))

proc getAccountRec(pv: ProofDb; tag: NodeTag): Result[AccountRec,void] =
  let rec = pv.db.get(pv.mkAccKey(tag))
  if 0 < rec.len:
    noRlpError("read(AccountRec)"):
      return ok(rec.decode(Account).to(AccountRec))
  err()

proc addAccountRec(pv: var ProofDb; tag: NodeTag; rec: AccountRec) =
  if not pv.hasAccountRec(tag):
    pv.db.put(pv.mkAccKey(tag), rlp.encode(rec.to(Account)))
    pv.stat.nAccounts.inc

# -----------

template mkStatusKey(pv: ProofDb; root: TrieHash): openArray[byte] =
  snapSyncStatusKey(root.Hash256).toOpenArray

proc hasStatusRec(pv: ProofDb; root: TrieHash): bool =
  pv.db.contains(pv.mkStatusKey(root))

proc getStatusRec(pv: ProofDb; root: TrieHash): Result[StatusRec,void] =
  let rec = pv.db.get(pv.mkStatusKey(root))
  if 0 < rec.len:
    noRlpError("getStatusRec"):
      return ok(rec.decode(StatusRec))
  err()

proc useStatusRec(pv: ProofDb; root: TrieHash): StatusRec =
  let rec = pv.db.get(pv.mkStatusKey(root))
  if 0 < rec.len:
    noRlpError("findStatusRec"):
      return rec.decode(StatusRec)

proc putStatusRec(pv: ProofDb; root: TrieHash; rec: StatusRec) =
  pv.db.put(pv.mkStatusKey(root), rlp.encode(rec))

# Example trie from https://eth.wiki/en/fundamentals/patricia-tree
#
#   lookup data:
#     "do":    "verb"
#     "dog":   "puppy"
#     "dodge": "coin"
#     "horse": "stallion"
#
#   trie DB:
#     root: [16 A]
#     A:    [* * * * B * * * [20+"orse" "stallion"] * * * * * * * *]
#     B:    [00+"o" D]
#     D:    [* * * * * * E * * * * * * * * * "verb"]
#     E:    [17 [* * * * * * [35 "coin"] * * * * * * * * * "puppy"]]
#
#   with first nibble of two-column rows:
#     hex bits | node type  length
#     ---------+------------------
#      0  0000 | extension   even
#      1  0001 | extension   odd
#      2  0010 | leaf        even
#      3  0011 | leaf        odd
#
#   and key path:
#     "do":     6 4 6 f
#     "dog":    6 4 6 f 6 7
#     "dodge":  6 4 6 f 6 7 6 5
#     "horse":  6 8 6 f 7 2 7 3 6 5
#
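(Editorial sketch matching the nibble table above: classifying the first
nibble of a compact-encoded path. `hpKind` is a hypothetical helper, not
part of the commit.)

  proc hpKind(first: byte): tuple[isLeaf, isOdd: bool] =
    let nib = int(first shr 4)
    (2 <= nib, (nib and 1) == 1)

  doAssert hpKind(0x20) == (isLeaf: true,  isOdd: false)  # leaf "20"+"orse"
  doAssert hpKind(0x17) == (isLeaf: false, isOdd: true)   # odd extension "17"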

proc parse(pv: ProofDb; rlpData: Blob): Result[ProofKvp,ProofError]
    {.gcsafe, raises: [Defect, RlpError].} =
  ## Decode a single trie item for adding to the table

  let recTag = rlpData.digestTo(NodeTag)
  when RowColumnParserDump:
    debug "Rlp column parser", recTag
  if pv.hasProofsRec(recTag):
    # No need to do this rec again
    return ok(ProofKvp(key: recTag, data: none(ProofRec)))

  var
    # Input data
    rlp = rlpData.rlpFromBytes

    # Result data
    blobs = newSeq[Blob](2)         # temporary, cache
    rec = ProofRec(kind: Branch)    # part of output, default type
    top = 0                         # count entries

  # Collect lists of either 2 or 17 blob entries.
  for w in rlp.items:
    when RowColumnParserDump:
      debug "Rlp column parser", col=top, data=w.toBytes.pp
    case top
    of 0, 1:
      if not w.isBlob:
        return err(RlpBlobExpected)
      blobs[top] = rlp.read(Blob)
    of 2 .. 15:
      if not rec.vertex[top].init(rlp.read(Blob)):
        return err(RlpBranchLinkExpected)
    of 16:
      if not w.isBlob:
        return err(RlpBlobExpected)
      rec.value = rlp.read(Blob)
    else:
      return err(Rlp2Or17ListEntries)
    top.inc

  when RowColumnParserDump:
    debug "Rlp column parser done collecting columns", col=top

  # Assemble collected data
  case top:
  of 2:
    if blobs[0].len == 0:
      return err(RlpNonEmptyBlobExpected)
    case blobs[0][0] shr 4:
    of 0, 1:
      rec.kind = Extension
      if not (rec.extend.init(blobs[0]) and rec.follow.init(blobs[1])):
        return err(RlpExtPathEncoding)
    of 2, 3:
      rec.kind = Leaf
      if not rec.path.init(blobs[0]):
        return err(RlpLeafPathEncoding)
      rec.payload = blobs[1]
    else:
      return err(RlpRecTypeError)
  of 17:
    # Branch entry, complete the first two vertices
    for n,blob in blobs:
      if not rec.vertex[n].init(blob):
        return err(RlpBranchLinkExpected)
  else:
    return err(Rlp2Or17ListEntries)

  ok(ProofKvp(key: recTag, data: some(rec)))


proc parse(pv: var ProofDb; proof: SnapAccountProof): Result[void,ProofError] =
  ## Decode a list of RLP encoded trie entries and add it to the rec pool
  try:
    for n,rlpRec in proof:
      when RowColumnParserDump:
        debug "Rlp rec parser", rec=n, data=rec.pp

      let kvp = block:
        let rc = pv.parse(rlpRec)
        if rc.isErr:
          return err(rc.error)
        rc.value

      if kvp.data.isNone: # avoids dups, still collects references
        pv.collectRefs(kvp.key)
      else:
        pv.addProofsRec(kvp.key, kvp.data.get)
  except RlpError:
    return err(RlpEncoding)
  except KeyError:
    return err(ImpossibleKeyError)

  ok()

proc follow(pv: ProofDb; path: NodeTag): (int, Blob) =
  ## Returns the number of matching digits/nibbles from the argument `path`
  ## found in the proofs trie.
  var
    inTop = 0
    inPath = path.UInt256.toBytesBE
    recTag = pv.rootTag
    leafBlob: Blob

  when NibbleFollowDump:
    trace "follow", root=pv.rootTag, path

  noRlpError("follow"):
    block loop:
      while true:

        let rec = block:
          let rc = pv.getProofsRec(recTag)
          if rc.isErr:
            break loop
          rc.value

        let recType = rec.kind
        case recType:
        of Branch:
          let
            nibble = inPath.nibble(inTop)
            newTag = rec.vertex[nibble]
          when NibbleFollowDump:
            trace "follow branch", recType, recTag, inTop, nibble, newTag
          recTag = newTag

        of Leaf:
          for n in 0 ..< rec.path.len:
            if rec.path[n] != inPath.nibble(inTop + n):
              inTop += n
              when NibbleFollowDump:
                let tail = rec.path
                trace "follow leaf failed", recType, recTag, tail
              break loop
          inTop += rec.path.len
          leafBlob = rec.payload
          when NibbleFollowDump:
            trace "follow leaf", recType, recTag, inTop, done=true
          break loop

        of Extension:
          for n in 0 ..< rec.extend.len:
            if rec.extend[n] != inPath.nibble(inTop + n):
              inTop += n
              when NibbleFollowDump:
                let tail = rec.extend
                trace "follow extension failed", recType, recTag, tail
              break loop
          inTop += rec.extend.len
          let newTag = rec.follow
          when NibbleFollowDump:
            trace "follow extension", recType, recTag, inTop, newTag
          recTag = newTag

        # end case
        inTop.inc

      # end while
      inTop.dec

  when NibbleFollowDump:
    trace "follow done", path, inTop

  (inTop, leafBlob)
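(Editorial note: how `follow()` is used by the validation code further down.
A full 64-nibble match means the account leaf was reached and its RLP
payload can be decoded; a shorter match only proves a path prefix. `accTag`
is an assumed input.)

  let (nDigits, leafData) = pv.follow(accTag)
  if nDigits == 64:
    discard leafData.decode(Account)   # boundary account, proved by the trie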

# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------

proc init*(pv: var ProofDb; db: TrieDatabaseRef) =
  pv = ProofDb(db: db)

# ------------------------------------------------------------------------------
# Public functions, transaction frame
# ------------------------------------------------------------------------------

proc isMergeTx*(pv: ProofDb): bool =
  ## The function returns `true` exactly if a merge transaction was initialised
  ## with `mergeBegin()`.
  not pv.dbTx.isNil

proc mergeBegin*(pv: var ProofDb; root: TrieHash): bool =
  ## Prepare the system for accepting data input unless there is an open
  ## transaction, already. The function returns `true` if
  ## * There was no transaction initialised, yet
  ## * There is an open transaction for the same state root argument `root`
  ## In all other cases, `false` is returned.
  if pv.dbTx.isNil:
    # Update state root
    pv.rootTag = root.to(NodeTag)
    pv.rootHash = root
    # Fetch status record for this `root`
    pv.stat = pv.useStatusRec(root)
    # New DB transaction
    pv.dbTx = pv.db.beginTransaction
    return true
  # Make sure that the state roots are the same
  pv.rootHash == root

proc mergeCommit*(pv: var ProofDb): bool =
  ## Accept merges and clear rollback journal if there was a transaction
  ## initialised with `mergeBegin()`. If successful, `true` is returned, and
  ## `false` otherwise.
  if not pv.dbTx.isNil:
    pv.dbTx.commit
    pv.dbTx = nil
    pv.clearJournal()
    pv.putStatusRec(pv.rootHash, pv.stat) # persistent new status for this root
    return true

proc mergeRollback*(pv: var ProofDb): bool =
  ## Rewind, discarding merges, and clear the rollback journal if there was a
  ## transaction initialised with `mergeBegin()`. If successful, `true` is
  ## returned, and `false` otherwise.
  if not pv.dbTx.isNil:
    pv.dbTx.rollback
    pv.dbTx = nil
    # restore previous status for this root
    pv.stat = pv.useStatusRec(pv.rootHash)
    pv.clearJournal()
    return true
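(Editorial usage sketch of the transaction frame above; `stateRoot`, `base`
and `reply` are assumed inputs, and `newMemoryDB()` comes from `eth/trie/db`.
The `mergeProved()` wrapper further down bundles exactly this
begin/merge/validate/commit sequence.)

  var pv: ProofDb
  pv.init(newMemoryDB())
  if pv.mergeBegin(stateRoot):
    if pv.merge(reply.proof).isOk and
       pv.merge(base, reply.accounts).isOk and
       pv.mergeValidate().isOk:
      discard pv.mergeCommit()
    else:
      discard pv.mergeRollback()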

proc merge*(
    pv: var ProofDb;
    proofs: SnapAccountProof
      ): Result[void,ProofError] =
  ## Merge account proofs (as received with the snap message `AccountRange`)
  ## into the database. A rollback journal is maintained so that this operation
  ## can be reverted.
  if pv.dbTx.isNil:
    return err(MissingMergeBeginDirective)
  let rc = pv.parse(proofs)
  if rc.isErr:
    trace "Merge() proof failed", proofs=proofs.len, error=rc.error
    return err(rc.error)
  ok()

proc merge*(
    pv: var ProofDb;
    base: NodeTag;
    acc: seq[SnapAccount]
      ): Result[void,ProofError] =
  ## Merge accounts (as received with the snap message `AccountRange`) into
  ## the database. A rollback journal is maintained so that this operation
  ## can be reverted.
  if pv.dbTx.isNil:
    return err(MissingMergeBeginDirective)
  if acc.len != 0:
    # Verify lower bound
    if acc[0].accHash < base:
      return err(AccountSmallerThanBase)
    # Verify strictly increasing account hashes
    for n in 1 ..< acc.len:
      if acc[n].accHash <= acc[n-1].accHash:
        return err(AccountsNotSrictlyIncreasing)
    # Add to database
    for sa in acc:
      pv.addAccountRec(sa.accHash, sa.accBody.to(AccountRec))
    # Stash boundary values, needed for later boundary proof
    pv.newAccs.add (base, acc[^1].accHash)
  ok()

proc mergeValidate*(pv: ProofDb): Result[void,ProofError] =
  ## Verify non-committed accounts and proofs:
  ## * The proofs entries must all be referenced from within the rollback
  ##   journal
  ## * For each group of accounts, the base `NodeTag` must be found in the
  ##   proof database with a partial path of length ???
  ## * The last entry in a group of accounts must have the `accBody` in the
  ##   proof database
  if pv.dbTx.isNil:
    return err(MissingMergeBeginDirective)

  # Make sure that all recs are referenced
  if 0 < pv.newProofs.len:
    #debug "Ref check",refPool=pv.refPool.pp(pv),newProofs=pv.newProofs.pp(pv)
    for tag in pv.newProofs:
      if tag notin pv.refPool and tag != pv.rootTag:
        #debug "Unreferenced proofs rec", tag, tag=tag.pp(pv)
        return err(RowUnreferenced)

  # Verify accounts
  for (baseTag,accTag) in pv.newAccs:

    # Validate increasing accounts

    # Base and last account must be in database
    let
      nBaseDgts = pv.follow(baseTag)[0]
      (nAccDgts, accData) = pv.follow(accTag)

    # Verify account base
    # ...

    # Verify last account
    if nAccDgts == 64:
      let rc = pv.getAccountRec(accTag)
      if rc.isOk:
        noRlpError("validate(Account)"):
          if accData.decode(Account) == rc.value.to(Account):
            continue

    # This account list did not verify
    return err(LastAccountProofFailed)

  ok()

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc mergeProved*(
    pv: var ProofDb;
    root: TrieHash;
    base: NodeTag;
    data: WorkerAccountRange
      ): Result[void,ProofError] =
  ## Validate and merge accounts and proofs (as received with the snap message
  ## `AccountRange`) into the database. Any open transaction initialised with
  ## `mergeBegin()` is continued and finished.
  if not pv.mergeBegin(root):
    return err(StateRootDiffers)

  block:
    let rc = pv.merge(data.proof)
    if rc.isErr:
      trace "Merge proofs failed",
        proof=data.proof.len, error=rc.error
      discard pv.mergeRollback()
      return err(rc.error)
  block:
    let rc = pv.merge(base, data.accounts)
    if rc.isErr:
      trace "Merge accounts failed",
        accounts=data.accounts.len, error=rc.error
      discard pv.mergeRollback()
      return err(rc.error)
  block:
    let rc = pv.mergeValidate()
    if rc.isErr:
      trace "Proofs or accounts do not validate",
        accounts=data.accounts.len, error=rc.error
      discard pv.mergeRollback()
      return err(rc.error)

  #trace "Merge accounts and proofs ok",
  #  root=pv.rootTag, base=base, accounts=data.accounts.pp, proof=data.proof.pp
  discard pv.mergeCommit()
  ok()

proc proofsLen*(pv: ProofDb; root: TrieHash): int =
  ## Number of entries in the proofs table for the argument state root `root`.
  if pv.rootHash == root:
    pv.stat.nProofs
  else:
    pv.useStatusRec(root).nProofs

proc accountsLen*(pv: ProofDb; root: TrieHash): int =
  ## Number of entries in the accounts table for the argument state root `root`.
  if pv.rootHash == root:
    pv.stat.nAccounts
  else:
    pv.useStatusRec(root).nAccounts

proc journalLen*(pv: ProofDb): (bool,int,int,int) =
  ## Size of the current rollback journal:
  ## * open transaction, see `mergeBegin()`
  ## * number of added recs
  ## * number of added references implied by recs
  ## * number of added accounts
  (not pv.dbTx.isNil, pv.newProofs.len, pv.refPool.len, pv.newAccs.len)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
@ -10,6 +10,7 @@
# except according to those terms.

import
  std/[strformat, strutils],
  chronos,
  chronicles,
  eth/[common/eth_types, p2p],
@ -24,21 +25,17 @@ logScope:

type
  TickerStats* = object
    activeQueues*: uint
    totalQueues*: uint64
    avFillFactor*: float
    accounts*: (float,float)      ## mean and standard deviation
    fillFactor*: (float,float)    ## mean and standard deviation
    activeQueues*: int
    flushedQueues*: int64

  TickerStatsUpdater* =
    proc(ns: Worker): TickerStats {.gcsafe, raises: [Defect].}

  AccountsStats = object
    counted: int64
    bytes: int64

  TickerEx = ref object of WorkerTickerBase
    ## Account fetching state that is shared among all peers.
    ns: Worker
    accounts: AccountsStats
    peersActive: int
    statsCb: TickerStatsUpdater
    logTicker: TimerCallback
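(Editorial note: the `(mean, standard deviation)` tuples in `TickerStats`
above suggest a running-statistics accumulator behind the stats callback. A
minimal Welford-style sketch, not code from this commit:)

  import std/math

  type RunningStat = object
    n: int
    mean, m2: float

  proc push(rs: var RunningStat; x: float) =
    rs.n.inc
    let d = x - rs.mean
    rs.mean += d / rs.n.float
    rs.m2 += d * (x - rs.mean)     # Welford update, numerically stable

  proc stdDev(rs: RunningStat): float =
    if rs.n < 2: 0.0 else: sqrt(rs.m2 / rs.n.float)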
@ -52,21 +49,33 @@ const
# Private functions: ticking log messages
# ------------------------------------------------------------------------------

template noFmtError(info: static[string]; code: untyped) =
  try:
    code
  except ValueError as e:
    raiseAssert "Inconceivable (" & info & "): " & e.msg

proc setLogTicker(sf: TickerEx; at: Moment) {.gcsafe.}

proc runLogTicker(sf: TickerEx) {.gcsafe.} =
  var
    avAccounts = ""
    avUtilisation = ""
  let
    y = sf.statsCb(sf.ns)
    fill = if 0 < y.activeQueues: y.avFillFactor/y.activeQueues.float else: 0.0
    utilisation = fill.toPC(rounding = 0)
    tick = sf.tick.toSI
    peers = sf.peersActive

  info "Sync accounts progress",
    tick = sf.tick.toSI,
    peers = sf.peersActive,
    accounts = sf.accounts.counted,
    states = y.totalQueues,
    queues = y.activeQueues,
    utilisation
    y = sf.statsCb(sf.ns)
    queues = y.activeQueues
    flushed = y.flushedQueues
    mem = getTotalMem().uint.toSI

  noFmtError("runLogTicker"):
    avAccounts = (&"{(y.accounts[0]+0.5).int64}({(y.accounts[1]+0.5).int64})")
    avUtilisation = &"{y.fillFactor[0]*100.0:.2f}%({y.fillFactor[1]*100.0:.2f}%)"

  info "Sync queue average statistics",
    tick, peers, queues, avAccounts, avUtilisation, flushed, mem

  sf.tick.inc
  sf.setLogTicker(Moment.fromNow(tickerLogInterval))
@ -121,10 +130,6 @@ proc tickerStop*(ns: Worker) =
# Public functions
# ------------------------------------------------------------------------------

proc tickerCountAccounts*(sp: WorkerBuddy; bytes: SomeInteger; nAcc = 1) =
  sp.ns.tickerEx.accounts.counted += nAcc
  sp.ns.tickerEx.accounts.bytes += bytes

proc tickerStartPeer*(sp: WorkerBuddy) =
  if sp.ns.tickerEx.peersActive <= 0:
    sp.ns.tickerEx.peersActive = 1

@ -15,7 +15,7 @@ import
  nimcrypto/hash,
  stew/[byteutils, keyed_queue],
  ../../../constants,
  ../../types
  "../.."/[protocol/snap1, types]

{.push raises: [Defect].}

@ -32,10 +32,10 @@ type
  BuddyRunState = enum
    ## Combined state of two boolean values (`stopped`,`stopThisState`) as used
    ## in the original source set up (should be double checked and simplified.)
    FullyRunning   ## running, not requested to stop
    StopRequested  ## running, stop request
    SingularStop   ## stopped, no stop request (for completeness)
    FullyStopped   ## stopped, stop request
    Running        ## running, not requested to stop
    Stopped        ## stopped, stop request
    ZombieStop     ## abandon/ignore (LRU tab overflow, odd packets)
    ZombieRun      ## additional zombie state to potentially recover from

  WorkerBuddyStats* = tuple
    ## Statistics counters for events associated with this peer.
@ -94,6 +94,11 @@ type
    fetchBase*: WorkerFetchBase     ## Opaque object reference
    tickerBase*: WorkerTickerBase   ## Opaque object reference

    # -------

  WorkerAccountRange* = accountRangeObj
    ## Syntactic sugar, type defined in `snap1`

# ------------------------------------------------------------------------------
# Public Constructor
# ------------------------------------------------------------------------------
@ -102,10 +107,10 @@ proc new*(T: type WorkerBuddy; ns: Worker; peer: Peer): T =
  ## Initial state all default settings.
  T(ns: ns, peer: peer)

proc init*(ctrl: var WorkerBuddyCtrl; fullyRunning: bool) =
  ## Set initial running state `fullyRunning` if the argument `fullyRunning`
  ## is `true`. Otherwise the running state is set `fullyStopped`.
  ctrl.runState = if fullyRunning: FullyRunning else: FullyStopped
proc init*(ctrl: var WorkerBuddyCtrl; running: bool) =
  ## Set initial running state `Running` if the argument `running`
  ## is `true`. Otherwise the running state is set `Stopped`.
  ctrl.runState = if running: Running else: Stopped

# ------------------------------------------------------------------------------
# Public functions
@ -124,67 +129,56 @@ proc state*(ctrl: WorkerBuddyCtrl): BuddyRunState =
  ## Getter (logging only, details of `BuddyRunState` are private)
  ctrl.runState

proc fullyRunning*(ctrl: WorkerBuddyCtrl): bool =
  ## Getter, if `true`, `stopped` and `stopRequest` are `false`
  ctrl.runState == FullyRunning

proc fullyStopped*(ctrl: WorkerBuddyCtrl): bool =
  ## Getter, if `true`, `stopped` and `stopRequest` are `true`
  ctrl.runState == FullyStopped
proc running*(ctrl: WorkerBuddyCtrl): bool =
  ## Getter, `true` if `ctrl.state()` is `Running`
  ctrl.runState == Running

proc stopped*(ctrl: WorkerBuddyCtrl): bool =
  ## Getter, not running (ignoring pending stop request)
  ctrl.runState in {FullyStopped,SingularStop}
  ## Getter, `true` if `ctrl.state()` is not `Running`
  ctrl.runState in {Stopped, ZombieStop, ZombieRun}

proc stopRequest*(ctrl: WorkerBuddyCtrl): bool =
  ## Getter, pending stop request (ignoring running state)
  ctrl.runState in {StopRequested,FullyStopped}
proc zombie*(ctrl: WorkerBuddyCtrl): bool =
  ## Getter, `true` if `ctrl.state()` is one of the zombie states (these
  ## also count as `stopped()`)
  ctrl.runState in {ZombieStop, ZombieRun}

# ------------------------------------------------------------------------------
# Public setters, `BuddyRunState` execution control functions
# ------------------------------------------------------------------------------

proc `zombie=`*(ctrl: var WorkerBuddyCtrl; value: bool) =
  ## Setter
  if value:
    case ctrl.runState:
    of Running:
      ctrl.runState = ZombieRun
    of Stopped:
      ctrl.runState = ZombieStop
    else:
      discard
  else:
    case ctrl.runState:
    of ZombieRun:
      ctrl.runState = Running
    of ZombieStop:
      ctrl.runState = Stopped
    else:
      discard
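(Editorial example of the transitions encoded by the setters and getters
here: a zombie peer counts as stopped, and clearing the zombie flag restores
the previous run state.)

  var ctrl: WorkerBuddyCtrl
  ctrl.init(running = true)
  doAssert ctrl.running
  ctrl.zombie = true              # Running -> ZombieRun
  doAssert ctrl.zombie and ctrl.stopped
  ctrl.zombie = false             # ZombieRun -> Running
  doAssert ctrl.running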

proc `stopped=`*(ctrl: var WorkerBuddyCtrl; value: bool) =
  ## Setter
  if value:
    case ctrl.runState:
    of FullyRunning:
      ctrl.runState = SingularStop
    of StopRequested:
      ctrl.runState = FullyStopped
    of SingularStop, FullyStopped:
    of Running:
      ctrl.runState = Stopped
    else:
      discard
  else:
    case ctrl.runState:
    of FullyRunning, StopRequested:
    of Stopped:
      ctrl.runState = Running
    else:
      discard
    of SingularStop:
      ctrl.runState = FullyRunning
    of FullyStopped:
      ctrl.runState = StopRequested

proc `stopRequest=`*(ctrl: var WorkerBuddyCtrl; value: bool) =
  ## Setter, stop request (ignoring running state)
  if value:
    case ctrl.runState:
    of FullyRunning:
      ctrl.runState = StopRequested
    of StopRequested:
      discard
    of SingularStop:
      ctrl.runState = FullyStopped
    of FullyStopped:
      discard
  else:
    case ctrl.runState:
    of FullyRunning:
      discard
    of StopRequested:
      ctrl.runState = FullyRunning
    of SingularStop:
      discard
    of FullyStopped:
      ctrl.runState = SingularStop

# ------------------------------------------------------------------------------
# Public functions, debugging helpers (will go away eventually)
@ -12,7 +12,6 @@
import
  std/[math, strutils, hashes],
  eth/common/eth_types,
  nimcrypto/keccak,
  stew/byteutils

{.push raises: [Defect].}
@ -69,19 +68,15 @@ proc to*(num: SomeInteger; T: type float): T =
  ## Convert to float
  num.T

proc to*(w: TrieHash|NodeHash|BlockHash; T: type Hash256): T =
proc to*(w: TrieHash|NodeHash|BlockHash|TxHash; T: type Hash256): T =
  ## Get rid of `distinct` harness (needed for `snap1` and `eth1` protocol
  ## driver access.)
  w.Hash256

proc to*(w: seq[NodeHash|NodeHash]; T: type seq[Hash256]): T =
proc to*(w: seq[TrieHash|NodeHash|BlockHash|TxHash]; T: type seq[Hash256]): T =
  ## Ditto
  cast[seq[Hash256]](w)

proc to*(data: Blob; T: type NodeHash): T =
  ## Convert argument `data` to `NodeHash`
  keccak256.digest(data).T

proc to*(bh: BlockHash; T: type HashOrNum): T =
  ## Convert argument block hash `bh` to `HashOrNum`
  T(isHash: true, hash: bh.Hash256)
@ -100,9 +95,9 @@ proc `==`*(a,b: TrieHash): bool {.borrow.}
proc `==`*(a,b: NodeHash): bool {.borrow.}
proc `==`*(a,b: BlockHash): bool {.borrow.}

proc hash*(root: TrieHash): Hash =
proc hash*(root: TrieHash|NodeHash|BlockHash): Hash =
  ## Mixin for `Table` or `keyedQueue`
  root.to(Hash256).data.hash
  root.Hash256.data.hash

# ------------------------------------------------------------------------------
# Public printing and pretty printing
@ -129,11 +124,11 @@ proc toSI*(num: SomeUnsignedInt): string =
  const
    siUnits = [
      #                    <limit>                <multiplier>  <symbol>
      (                  10_000u64,                    1000f64, 'k'),
      (              10_000_000u64,                1000_000f64, 'm'),
      (          10_000_000_000u64,            1000_000_000f64, 'g'),
      (      10_000_000_000_000u64,        1000_000_000_000f64, 't'),
      (  10_000_000_000_000_000u64,    1000_000_000_000_000f64, 'p'),
      (                 100_000u64,                    1000f64, 'k'),
      (             100_000_000u64,                1000_000f64, 'm'),
      (         100_000_000_000u64,            1000_000_000f64, 'g'),
      (     100_000_000_000_000u64,        1000_000_000_000f64, 't'),
      ( 100_000_000_000_000_000u64,    1000_000_000_000_000f64, 'p'),
      (10_000_000_000_000_000_000u64, 1000_000_000_000_000_000f64, 'e')]

    lastUnit =
@ -155,16 +150,12 @@ proc toSI*(num: SomeUnsignedInt): string =

  result.insert(".", result.len - 3)


func toHex*(hash: Hash256): string =
  ## Shortcut for `byteutils.toHex(hash.data)`
  hash.data.toHex

func `$`*(th: TrieHash|NodeHash): string =
  th.Hash256.toHex

func `$`*(hash: Hash256): string =
  hash.toHex
func `$`*(h: TrieHash|NodeHash|BlockHash|TxHash): string =
  $h.Hash256.data.toHex

func `$`*(blob: Blob): string =
  blob.toHex

(File diff suppressed because it is too large)

@ -13,8 +13,8 @@ cliBuilder:
  import ./test_code_stream,
    ./test_accounts_cache,
    ./test_custom_network,
    ./test_sync_snap,
    ./test_jwt_auth,
    ./test_interval_set,
    ./test_gas_meter,
    ./test_memory,
    ./test_stack,

@ -142,6 +142,10 @@ proc flushDbDir(s: string) =
      if (dataDir / "data").dirExists:
        # Typically under Windows: there might be stale file locks.
        try: dataDir.removeDir except: discard
  block dontClearUnlessEmpty:
    for w in s.walkDir:
      break dontClearUnlessEmpty
    try: s.removeDir except: discard

proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  if noisy:

@ -1,319 +0,0 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.

import
  chronicles,
  stint,
  stew/results,
  unittest2,
  ../nimbus/utils/interval_set

type
  FancyPoint = distinct UInt256 # instead of BlockNumber
  FancyRanges = IntervalSetRef[FancyPoint,UInt256]
  FancyInterval = Interval[FancyPoint,UInt256]

const
  uHigh = high(uint64)
  uLow = low(uint64)

let
  ivError = IntervalRc[FancyPoint,UInt256].err()

# ------------------------------------------------------------------------------
# Private data type interface
# ------------------------------------------------------------------------------

proc to(num: uint64; _: type FancyPoint): FancyPoint = num.u256.FancyPoint

# use a sub-range for `FancyPoint` elements
proc high(T: type FancyPoint): T = uHigh.to(FancyPoint)
proc low(T: type FancyPoint): T = uLow.to(FancyPoint)

proc u256(num: FancyPoint): UInt256 = num.UInt256
proc `$`(num: FancyPoint): string = $num.u256

proc `+`*(a: FancyPoint; b: UInt256): FancyPoint = (a.u256+b).FancyPoint
proc `-`*(a: FancyPoint; b: UInt256): FancyPoint = (a.u256-b).FancyPoint
proc `-`*(a, b: FancyPoint): UInt256 = (a.u256 - b.u256)

proc `==`*(a, b: FancyPoint): bool = a.u256 == b.u256
proc `<=`*(a, b: FancyPoint): bool = a.u256 <= b.u256
proc `<`*(a, b: FancyPoint): bool = a.u256 < b.u256

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc truncate(num: FancyPoint; T: type uint64): uint64 =
  num.u256.truncate(uint64)

proc merge(br: FancyRanges; left, right: uint64): uint64 =
  let (a, b) = (left.to(FancyPoint), right.to(FancyPoint))
  br.merge(a, b).truncate(uint64)

proc reduce(br: FancyRanges; left, right: uint64): uint64 =
  let (a, b) = (left.to(FancyPoint), right.to(FancyPoint))
  br.reduce(a, b).truncate(uint64)

proc covered(br: FancyRanges; left, right: uint64): uint64 =
  let (a, b) = (left.to(FancyPoint), right.to(FancyPoint))
  br.covered(a, b).truncate(uint64)

proc delete(br: FancyRanges; start: uint64): Result[FancyInterval,void] =
  br.delete(start.to(FancyPoint))

proc le(br: FancyRanges; start: uint64): Result[FancyInterval,void] =
  br.le(start.to(FancyPoint))

proc ge(br: FancyRanges; start: uint64): Result[FancyInterval,void] =
  br.ge(start.to(FancyPoint))

proc iv(left, right: uint64): FancyInterval =
  FancyInterval.new(left.to(FancyPoint), right.to(FancyPoint))

proc setTraceLevel* =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.TRACE)

proc setErrorLevel* =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)

# ------------------------------------------------------------------------------
# Test Runner
# ------------------------------------------------------------------------------

proc intervalSetRunner(noisy = true) =

  suite "IntervalSet: Intervals of FancyPoint entries over UInt256":
    let br = FancyRanges.init()
    var dup: FancyRanges

    test "Verify max interval handling":
      br.clear()
      check br.merge(0,uHigh) == 0
      check br.chunks == 1
      check br.total == 0
      check br.verify.isOk

      check br.reduce(uHigh,uHigh) == 1
      check br.chunks == 1
      check br.total == uHigh.u256
      check br.verify.isOk

    test "Verify handling of maximal interval points (edge cases)":
      br.clear()
      check br.merge(0,uHigh) == 0
      check br.reduce(uHigh-1,uHigh-1) == 1
      check br.verify.isOk
      check br.chunks == 2
      check br.total == uHigh.u256

      check br.le(uHigh) == iv(uHigh,uHigh)
      check br.le(uHigh-1) == iv(0,uHigh-2)
      check br.le(uHigh-2) == iv(0,uHigh-2)
      check br.le(uHigh-3) == ivError

      check br.ge(0) == iv(0,uHigh-2)
      check br.ge(1) == iv(uHigh,uHigh)
      check br.ge(uHigh-3) == iv(uHigh,uHigh)
      check br.ge(uHigh-2) == iv(uHigh,uHigh)
      check br.ge(uHigh-3) == iv(uHigh,uHigh)
      check br.ge(uHigh) == iv(uHigh,uHigh)

      check br.reduce(0,uHigh-2) == uHigh-1
      check br.verify.isOk
      check br.chunks == 1
      check br.total == 1.u256

      check br.le(uHigh) == iv(uHigh,uHigh)
      check br.le(uHigh-1) == ivError
      check br.le(uHigh-2) == ivError
      check br.le(0) == ivError

      check br.ge(uHigh) == iv(uHigh,uHigh)
      check br.ge(uHigh-1) == iv(uHigh,uHigh)
      check br.ge(uHigh-2) == iv(uHigh,uHigh)
      check br.ge(0) == iv(uHigh,uHigh)

      br.clear()
      check br.total == 0 and br.chunks == 0
      check br.merge(0,uHigh) == 0
      check br.reduce(0,9999999) == 10000000
      check br.total.truncate(uint64) == (uHigh - 10000000) + 1
      check br.verify.isOk

      check br.merge(uHigh,uHigh) == 0
      check br.verify.isOk

      check br.reduce(uHigh,uHigh-1) == 1 # same as reduce(uHigh,uHigh)
      check br.total.truncate(uint64) == (uHigh - 10000000)
      check br.verify.isOk
      check br.merge(uHigh,uHigh-1) == 1 # same as merge(uHigh,uHigh)
      check br.total.truncate(uint64) == (uHigh - 10000000) + 1
      check br.verify.isOk

      #interval_set.noisy = true
      #interval_set.noisy = false

test "Merge disjunct intervals on 1st set":
|
||||
br.clear()
|
||||
check br.merge( 0, 99) == 100
|
||||
check br.merge(200, 299) == 100
|
||||
check br.merge(400, 499) == 100
|
||||
check br.merge(600, 699) == 100
|
||||
check br.merge(800, 899) == 100
|
||||
check br.total == 500
|
||||
check br.chunks == 5
|
||||
check br.verify.isOk
|
||||
|
||||
test "Reduce non overlapping intervals on 1st set":
|
||||
check br.reduce(100, 199) == 0
|
||||
check br.reduce(300, 399) == 0
|
||||
check br.reduce(500, 599) == 0
|
||||
check br.reduce(700, 799) == 0
|
||||
check br.verify.isOk
|
||||
|
||||
test "Clone a 2nd set and verify covered data ranges":
|
||||
dup = br.clone
|
||||
check dup.covered( 0, 99) == 100
|
||||
check dup.covered(100, 199) == 0
|
||||
check dup.covered(200, 299) == 100
|
||||
check dup.covered(300, 399) == 0
|
||||
check dup.covered(400, 499) == 100
|
||||
check dup.covered(500, 599) == 0
|
||||
check dup.covered(600, 699) == 100
|
||||
check dup.covered(700, 799) == 0
|
||||
check dup.covered(800, 899) == 100
|
||||
check dup.covered(900, uint64.high) == 0
|
||||
|
||||
check dup.covered(200, 599) == 200
|
||||
check dup.covered(200, 799) == 300
|
||||
check dup.total == 500
|
||||
check dup.chunks == 5
|
||||
check dup.verify.isOk
|
||||
|
||||
test "Merge overlapping intervals on 2nd set":
|
||||
check dup.merge( 50, 250) == 100
|
||||
check dup.merge(450, 850) == 200
|
||||
check dup.verify.isOk
|
||||
|
||||
test "Verify covered data ranges on 2nd set":
|
||||
check dup.covered( 0, 299) == 300
|
||||
check dup.covered(300, 399) == 0
|
||||
check dup.covered(400, 899) == 500
|
||||
check dup.covered(900, uint64.high) == 0
|
||||
check dup.total == 800
|
||||
check dup.chunks == 2
|
||||
check dup.verify.isOk
|
||||
|
||||
test "Verify 1st and 2nd set differ":
|
||||
check br != dup
|
||||
|
||||
test "Reduce overlapping intervals on 2nd set":
|
||||
check dup.reduce(100, 199) == 100
|
||||
check dup.reduce(500, 599) == 100
|
||||
check dup.reduce(700, 799) == 100
|
||||
# interval_set.noisy = true
|
||||
check dup.verify.isOk
|
||||
|
||||
test "Verify 1st and 2nd set equal":
|
||||
check br == dup
|
||||
check br == br
|
||||
check dup == dup
|
||||
|
||||
test "Find intervals in the 1st set":
|
||||
check br.le(100) == iv( 0, 99)
|
||||
check br.le(199) == iv( 0, 99)
|
||||
check br.le(200) == iv( 0, 99)
|
||||
check br.le(299) == iv(200, 299)
|
||||
check br.le(999) == iv(800, 899)
|
||||
check br.le(50) == ivError
|
||||
|
||||
check br.ge( 0) == iv( 0, 99)
|
||||
check br.ge( 1) == iv(200, 299)
|
||||
check br.ge(800) == iv(800, 899)
|
||||
check br.ge(801) == ivError
|
||||
|
||||
test "Delete intervals from the 2nd set":
|
||||
check dup.delete(200) == iv(200, 299)
|
||||
check dup.delete(800) == iv(800, 899)
|
||||
check dup.verify.isOk
|
||||
|
||||
test "Interval intersections":
|
||||
check iv(100, 199) * iv(150, 249) == iv(150, 199)
|
||||
check iv(150, 249) * iv(100, 199) == iv(150, 199)
|
||||
|
||||
check iv(100, 199) * iv(200, 299) == ivError
|
||||
check iv(200, 299) * iv(100, 199) == ivError
|
||||
|
||||
check iv(200, uHigh) * iv(uHigh,uHigh) == iv(uHigh,uHigh)
|
||||
check iv(uHigh, uHigh) * iv(200,uHigh) == iv(uHigh,uHigh)
|
||||
|
||||
check iv(100, 199) * iv(150, 249) * iv(100, 170) == iv(150, 170)
|
||||
check (iv(100, 199) * iv(150, 249)) * iv(100, 170) == iv(150, 170)
|
||||
check iv(100, 199) * (iv(150, 249) * iv(100, 170)) == iv(150, 170)
|
||||
|
||||
test "Join intervals":
|
||||
check iv(100, 199) + iv(150, 249) == iv(100, 249)
|
||||
check iv(150, 249) + iv(100, 199) == iv(100, 249)
|
||||
|
||||
check iv(100, 198) + iv(202, 299) == ivError
|
||||
check iv(100, 199) + iv(200, 299) == iv(100, 299)
|
||||
check iv(100, 200) + iv(200, 299) == iv(100, 299)
|
||||
check iv(100, 201) + iv(200, 299) == iv(100, 299)
|
||||
|
||||
check iv(200, 299) + iv(100, 198) == ivError
|
||||
check iv(200, 299) + iv(100, 199) == iv(100, 299)
|
||||
check iv(200, 299) + iv(100, 200) == iv(100, 299)
|
||||
check iv(200, 299) + iv(100, 201) == iv(100, 299)
|
||||
|
||||
check iv(200, uHigh) + iv(uHigh,uHigh) == iv(200,uHigh)
|
||||
check iv(uHigh, uHigh) + iv(200,uHigh) == iv(200,uHigh)
|
||||
|
||||
check iv(150, 249) + iv(100, 149) + iv(200, 299) == iv(100, 299)
|
||||
check (iv(150, 249) + iv(100, 149)) + iv(200, 299) == iv(100, 299)
|
||||
check iv(150, 249) + (iv(100, 149) + iv(200, 299)) == ivError
|
||||
|
||||
test "Cut off intervals by other intervals":
|
||||
check iv(100, 199) - iv(150, 249) == iv(100, 149)
|
||||
check iv(150, 249) - iv(100, 199) == iv(200, 249)
|
||||
check iv(100, 199) - iv(200, 299) == iv(100, 199)
|
||||
check iv(200, 299) - iv(100, 199) == iv(200, 299)
|
||||
|
||||
check iv(200, 399) - iv(250, 349) == ivError
|
||||
check iv(200, 299) - iv(200, 299) == ivError
|
||||
check iv(200, 299) - iv(200, 399) == ivError
|
||||
check iv(200, 299) - iv(100, 299) == ivError
|
||||
check iv(200, 299) - iv(100, 399) == ivError
|
||||
|
||||
check iv(200, 299) - iv(100, 199) - iv(150, 249) == iv(250, 299)
|
||||
check (iv(200, 299) - iv(100, 199)) - iv(150, 249) == iv(250, 299)
|
||||
check iv(200, 299) - (iv(100, 199) - iv(150, 249)) == iv(200, 299)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Main function(s)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc intervalSetMain*(noisy = defined(debug)) =
|
||||
noisy.intervalSetRunner
|
||||
|
||||
when isMainModule:
|
||||
let noisy = defined(debug) or true
|
||||
setTraceLevel()
|
||||
noisy.intervalSetRunner
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
254 tests/test_sync_snap.nim Normal file
@ -0,0 +1,254 @@
# Nimbus - Types, data structures and shared utilities used in network sync
|
||||
#
|
||||
# Copyright (c) 2018-2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or
|
||||
# distributed except according to those terms.
|
||||
|
||||
## Snap sync components tester
|
||||
|
||||
import
|
||||
std/[distros, os, random, sequtils, strformat, strutils],
|
||||
chronicles,
|
||||
eth/[common/eth_types, trie/db],
|
||||
stint,
|
||||
stew/results,
|
||||
unittest2,
|
||||
../nimbus/db/select_backend,
|
||||
../nimbus/sync/[types, protocol/snap1],
|
||||
../nimbus/sync/snap/path_desc,
|
||||
../nimbus/sync/snap/worker/[fetch/proof_db, worker_desc],
|
||||
./replay/pp,
|
||||
./test_sync_snap/accounts_and_proofs
|
||||
|
||||
const
|
||||
baseDir = [".", "..", ".."/"..", $DirSep]
|
||||
repoDir = ["tests"/"replay", "tests"/"test_sync_snap"]
|
||||
|
||||

type
  TestSample = tuple ## sample format from `accounts_and_proofs`
    base: Hash256
    accounts: seq[(Hash256,uint64,UInt256,Hash256,Hash256)]
    proofs: seq[Blob]

  TestItem = object ## palatable input format for tests
    base: NodeTag
    data: WorkerAccountRange

  TestDbInstances =
    array[3,TrieDatabaseRef]

  TestDbs = object
    persistent: bool
    dbDir: string
    inst: TestDbInstances

when defined(linux):
  # The `detectOs(Ubuntu)` directive is not Windows compatible; it causes an
  # error when running the system command `lsb_release -d` in the background.
  let isUbuntu32bit = detectOs(Ubuntu) and int.sizeof == 4
else:
  const isUbuntu32bit = false

let
  # Forces `check()` to print the error (as opposed to when using `isOk()`)
  OkProof = Result[void,ProofError].ok()

  # There was a problem with the Github/CI which resulted in spurious crashes
  # when leaving the `runner()` if the persistent BaseChainDB initialisation
  # was present, see `test_custom_network` for more details.
  disablePersistentDB = isUbuntu32bit

# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------

proc findFilePath(file: string): Result[string,void] =
  for dir in baseDir:
    for repo in repoDir:
      let path = dir / repo / file
      if path.fileExists:
        return ok(path)
  err()

proc pp(w: TrieHash): string =
  pp.pp(w.Hash256) # `pp()` also available from `worker_desc`

proc setTraceLevel =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.TRACE)

proc setErrorLevel =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)
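As a usage sketch (file name as used by `accountsRunner` below, output hypothetical), `findFilePath` probes every `baseDir`/`repoDir` combination, so the tester resolves its sample files whether it is started from the repository root, from `tests/`, or from a build directory:

  # Illustrative only; mirrors how `accountsRunner` locates the sample module:
  let dataPath = "accounts_and_proofs.nim".findFilePath
  if dataPath.isOk:
    echo "sample data found at ", dataPath.value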

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc to(data: seq[TestSample]; T: type seq[TestItem]): T =
  ## Convert test data into usable format
  for r in data:
    result.add TestItem(
      base: r.base.to(NodeTag),
      data: WorkerAccountRange(
        proof: r.proofs,
        accounts: r.accounts.mapIt(
          SnapAccount(
            accHash: it[0].to(NodeTag),
            accBody: Account(
              nonce: it[1],
              balance: it[2],
              storageRoot: it[3],
              codeHash: it[4])))))

proc permute(r: var Rand; qLen: int): seq[int] =
  ## Return `0 ..< qLen` with each left-half entry swapped against a
  ## randomly chosen right-half entry
  result = (0 ..< qLen).toSeq
  let
    halfLen = result.len shr 1
    randMax = result.len - halfLen - 1
  for left in 0 ..< halfLen:
    let right = halfLen + r.rand(randMax)
    result[left].swap(result[right])
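A quick sketch of `permute` in action; the seed 42 matches the transposed-rows test below, though the printed order shown here is made up for illustration:

  var rng = initRand(42)
  echo rng.permute(6)   # some permutation of 0..5, e.g. @[3, 5, 4, 0, 1, 2]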

proc flushDbDir(s: string) =
  if s != "":
    let baseDir = s / "tmp"
    for n in 0 ..< TestDbInstances.len:
      let instDir = baseDir / $n
      if (instDir / "nimbus" / "data").dirExists:
        # Typically under Windows: there might be stale file locks.
        try: instDir.removeDir except: discard
    block dontClearUnlessEmpty:
      for w in baseDir.walkDir:
        break dontClearUnlessEmpty
      try: baseDir.removeDir except: discard

proc testDbs(workDir = ""): TestDbs =
  if disablePersistentDB or workDir == "":
    result.persistent = false
    result.dbDir = "*notused*"
  else:
    result.persistent = true
    result.dbDir = workDir / "tmp"
  if result.persistent:
    result.dbDir.flushDbDir
  for n in 0 ..< result.inst.len:
    if not result.persistent:
      result.inst[n] = newMemoryDB()
    else:
      result.inst[n] = (result.dbDir / $n).newChainDB.trieDB

proc lastTwo(a: openArray[string]): seq[string] =
  if 1 < a.len: @[a[^2],a[^1]] else: a.toSeq

# ------------------------------------------------------------------------------
# Test Runners
# ------------------------------------------------------------------------------

proc accountsRunner(
    noisy = true; persistent: bool; root: TrieHash; data: seq[TestSample]) =
  let
    lst = data.to(seq[TestItem])
    tmpDir = "accounts_and_proofs.nim".findFilePath.value.splitFile.dir
    db = if persistent: tmpDir.testDbs() else: testDbs()
    dbDir = db.dbDir.split($DirSep).lastTwo.join($DirSep)
    info = if db.persistent: &"persistent db on \"{dbDir}\""
           else: "in-memory db"

  defer:
    if db.persistent:
      tmpDir.flushDbDir

  suite &"SyncSnap: accounts and proofs for {info}":
    var
      desc: ProofDb
      nRows: seq[int]

    test &"Merging {lst.len} proofs for state root ..{root.pp}":
      desc.init(db.inst[0])
      check desc.mergeBegin(root)
      for proofs in lst.mapIt(it.data.proof):
        check desc.merge(proofs) == OkProof
        check desc.mergeValidate == OkProof
        nRows.add desc.proofsLen(root)
      check 1 < nRows.len # otherwise test makes no sense
      check 0 < nRows[^1]

    test "Rollback full database":
      check desc.mergeRollback()
      check desc.proofsLen(root) == 0
      check desc.accountsLen(root) == 0
      check desc.journalLen == (false,0,0,0)

    test "Merging and committing all except the last":
      for n,proofs in lst.mapIt(it.data.proof):
        check desc.mergeBegin(root)
        check desc.merge(proofs) == OkProof
        check nRows[n] == desc.proofsLen(root)
        check desc.mergeValidate == OkProof
        if n < nRows.len - 1:
          check desc.mergeCommit
        check nRows[n] == desc.proofsLen(root)
      check desc.mergeRollback
      check 1 < nRows.len and nRows[^2] == desc.proofsLen(root)

    test &"Merging/committing {lst.len} proofs, transposed rows":
      desc.init(db.inst[1])
      check desc.proofsLen(root) == 0
      check desc.journalLen == (false,0,0,0)
      var r = initRand(42)
      for n,proofs in lst.mapIt(it.data.proof):
        let permProof = r.permute(proofs.len).mapIt(proofs[it])
        check desc.mergeBegin(root)
        check desc.merge(permProof) == OkProof
        check desc.mergeValidate == OkProof
        check desc.mergeCommit
        check nRows[n] == desc.proofsLen(root)

    test &"Merging {lst.len} proved account groups" &
        &" for state root ..{root.pp}":
      desc.init(db.inst[2])
      for n,w in lst:
        check desc.mergeProved(root, w.base, w.data) == OkProof
        check desc.journalLen == (false,0,0,0)
        check nRows[n] == desc.proofsLen(root)
      check desc.journalLen == (false,0,0,0)
      check 1 < nRows.len # otherwise test makes no sense
      check 0 < nRows[^1]
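In outline, the `ProofDb` session protocol driven by these tests is begin, merge, validate, then either commit or roll back. A minimal sketch, assuming the same names and result conventions as the checks above; `someDb`, `root` and `proofs` are placeholders:

  var pdb: ProofDb
  pdb.init(someDb)                  # bind to one of the test `TrieDatabaseRef`s
  doAssert pdb.mergeBegin(root)     # open a journalled session for this state root
  doAssert pdb.merge(proofs).isOk   # stage the downloaded proof nodes
  doAssert pdb.mergeValidate.isOk   # verify the staged data against `root`
  doAssert pdb.mergeCommit          # persist; `mergeRollback` would drop the journal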

# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------

proc syncSnapMain*(noisy = defined(debug)) =
  noisy.accountsRunner(persistent = true, testRoot.TrieHash, testSamples)

when isMainModule:
  const noisy = defined(debug) or true

  when true: # false:
    # Import additional data from test data repo
    import ../../nimbus-eth1-blobs/replay/accounts_and_proofs_ex
  else:
    const
      testRootEx = testRoot
      testSamplesEx = newSeq[TestSample]()

  setTraceLevel()

  # Verify sample state roots
  doAssert testRoot == testRootEx

  let samplesList = (testSamples & testSamplesEx)
  noisy.accountsRunner(persistent = true, testRoot.TrieHash, samplesList)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
134
tests/test_sync_snap/accounts_and_proofs.nim
Normal file
@ -0,0 +1,134 @@
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.

## Collected snap/1 accounts and proofs when fetching accounts

import
  std/[sequtils],
  eth/common/eth_types,
  nimcrypto/hash,
  stew/byteutils,
  ../../nimbus/constants

const
  root =
    "b538f1067958013728e013b52c3e37eaecf86ddc83fe5f7b4a045e50deb08810".toDigest

  rec0 = (
    ZERO_HASH256,
    @[
      # <0>
      ("00000013653234c2d78dcdc645c5141e358ef2e590fe5278778ba729ff5ffd95".toDigest,
       1u64,
       "7931794000000000".parse(Uint256),
       "56E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421".toDigest,
       "C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470".toDigest),

      # <1>
      ("0000008c38d769d75c1ad1de6660da51edc10394c11c50ff9a0ca9e8b8b35dc2".toDigest,
       9u64,
       "143314596029971".parse(Uint256),
       "56E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421".toDigest,
       "C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470".toDigest),

      # <43695>
      ("001048467d0933750604fb19cf5dd096f02f60279cc0d9cf03f9b3424a7fb95f".toDigest,
       4u64,
       0.u256,
       "56E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421".toDigest,
       "C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470".toDigest)],
    @[
      # <0>
"F90211A0FFBB364A8CB0D565F70271627D91A255FB50D751D52A3348B61C9D7B4E98AB28A0FB1ED3251BBD153BFE1415F9946ABFF90C207678BC09EB006C2538D5EE181944A04EC56E4DC562B7C1C3DC3B0D917CE07B4975D9D4459B3F433EAF5D466DA5FF05A0F5283E423E1112E8E50A68D03E339F914F0D18883C4128571B1D14A64F2C9F2DA09414298F9C3AC243DD13F0412CFE069020D4268767E37ADC529D8923312E6519A07978D8ADDBF59DF2D472783308BB735D8BF9FC951FC694E4249B268F0B67CA67A0A17D1D539EEF8747147C2B77281AC355FF16FA42D6941489AB3A11B46D06DD2EA0D7D9CD27EDEEA84EDD53B201DEC05DDB8F6ADD8CDDC77628FFDE9CABBE4F6C1DA03C45D84EEFF0128C6D19BE1D8CAF2797C7332D5E12040B87E1F4E7E17D9D4977A0A8B7AA500844BCA70F76E20BB231291A54CBC71039D183DA4B1FB058FC79FC69A087682429DABD006289751A0EA2C05AA1FD277DA89BF8C7E26DBAEBC774F246A8A0DA0F3AAB84168AF6C0101C0994B881B4FC9EDC4E99E7F28BA90C26F65EE0C819A0A3D9721D23C8B8118B50FAAA40FB3424B8C2BA1B91A2EAC0AAD29868B74B8497A0D1C18AA65CCA65A7118E2C42C429BADE088FC61987B86575145B5A84CA5379A3A0AD509B03FDE185D3ED8CC884A4D0AC03390E7DB8FEC78EC3127DB28CEB670353A0403A13695F15EAAA0588C74282DFF5A9C05BD9039F44336F10BA5590E087043780".hexToSeqByte,
      # <1>
"F90211A050B2D95C218D12F40BE4549EE50709E479B19157BA095501AA94293F662DCA7FA00FB68AA15AD8AD8E2773DC61A370AFE82DAB79EDFDEE1A076F9C3C39B90A30B2A0F7E3E89841383EA0264230C6E5F8BB7A9383E31B13D4333F7417AC389C47C368A0ADF864ED54A756828CA1E8B3C0D434C495CAE24FA3547B10D4B037628BEBD1F4A0ADEEBF028C5D866AC5E6F0D91234E599B2606D89FCFD90F3CF332B46A03AB057A065433307CF0FF284D6A3A7E6D0D434BCD3D732757CCCFA1020B21E5F4762BA5EA0FEC203B690FB1AB74055EF240EA0F9A0E5F27AE2FFED6DA9D2B64883AB456EFEA03C4C09C4F72C15A1CE661C60EB7725403337B46D979FEE259AA3BCC6B0AD6161A05E9BE5120BDF2E94D6E64D1DE4CBAFB7C12E830498228A7D12F6CE2461E93990A066B7F2AD805E04708D28CA06AEE29F88700EB51AB38F4BC109DD9F4ABAAC041BA02C2413B49872A483B1F6B97A741B4B8C59040F4F4759AE444643330CD9907F29A08651CEF990EF0A0C5AB52F4FA4AD4265ACB8F7D2358ABE9AC87BC926F04C1B24A0D29829A570067E446DA03CDFFA7203766F8365C65FBF0E23BF3233BB96D6C658A00F68202899DB482FAFF2AAB2635EDB72E43EBD170D4F0554BAF828E970C4DBC7A06D3D6F17ED39FBB36A6065AC34BE5C633F1B8928B5514DEFFD5A0FFA501AF202A0BE7035655FB3801E87902731C9F0041D8CAFBE471B48F0212293ACCD8C40CACC80".hexToSeqByte,
      # <2>
"F90211A000F354080583902374DBAD850D0E8E33803412F22C7BA81CBC2778A3B3350761A0809A2CF3A2F87798CE2B4047BB288F17C000307BC7C57FA251CD2C7292596AECA04B40B0EF7E160F09D3DA0EA477C385A49074D35C937A2B74B3F51ABD8A5F9BCAA0F972583DC80407F31517059FCC80E3D31262D8637BB819E94F2D2CD0736A324CA033539BA750904CED59B6B37F3879FDB62AAA850DCF7994DA64DA41B3A9E78850A0B4F5AA77948FC84800F8A9365531A166B56D3D1C5BBC3367554D0C1DC7766811A0FF28D18F41F88909A7CDF60CE8F5B56171B6FFFC97CF299730AC08436AD562B1A0F83235BB3F0C276386895A503BEF8B61F7A8D65D9F6ED4A92C4FD0616E05DE1EA05DC6F966B54000C0B830DF4BB61E47D4D4357822FE4474823DF4862F92602E2AA067D7F3201504A6BC2CF96A010A856CABC4489BEE2F325AB40C6E3ED230898D68A082FCBFA9FCB388E1EC0CC70F14072B3011CACADC212FFB11DCA1A0387064427FA03F4EB0BC6BB0AF1B9AC31A64FB77C6B75F41301DEFBB3803A660E526D7A8D577A01813C0B7A37EBAA16B78E63E23D6E1EF58B9646C572723FCBAF706EFB0958C77A00E112F5A43F599A7858758D3783525C8BC57CFA1BC9D973045363A6091721A28A0879D607834EC77D3D62A5C30BE61615BFB9DAA671DABCC7294C7C3A8633DB6AFA05876CE1DD19DB3F2FCDE622F6C1AF61898481DD6C72BD9273106835A49C5248480".hexToSeqByte,
      # <3>
"F90211A062A4606CBB57887CC2C4B9B686DF69A45791F8518E9B62FB6E86130B4B1C6D13A082126F32BE01C3EF18630C686074A2A393B323B8EC3C964705781631358B8E57A08A47D9820D50F48362B4EC12BCBCD759AC42B2F703401B40AA7699964ABA7B40A0C39C5E09856C11DCC6B3739D8846C5406F8FD462EB79095C419F233749A167C8A009D8A1308EBB7522740233D015BA1910584A7A0360BCFAA3C997FFDA7DB648FBA08728CFDBED8454545FAB8D7A587E24CBCA0AA9AF04F883F954399B1859EF91C1A082EE3DB9657745B7147DB490C653358D0E162C4C28F35D7416F7D720EBA69F48A0E301E9D346E2666B8E97D8A3719388B5FCF68B672D0ECEDC6ABACC3F6F906224A03FF691E4BCEB8DD2B604483F1A116FF8EAB64325F5F10AD426B431EDAE7C5ECEA0F92D8622AFA94F9C1A3C147A491897E39C522060C7FA283E21CD5FE23DA2A911A05995EFA81B02A46AD062F21F5C27E01CC16338AACD382DC796FF48D2B06B8A54A024EFE4D36BF0E3DD5587EB85B960051F2AD4F7E4522C770E5A76186F55A0CBF5A0E90C35D8AD9DEEEC60713B687F2830A9A20C705E46442A4EAFF7D867359F9009A0C2F59F608A467ABB02907A45B7446B5F375C660C256E63E20749C98FFD157299A02F08BF5CE0278F3A28A89D18BD44B2712792C362BF755BC545BC3C35E6895BB2A0BB6EDC6F46D9C76AE0CCDEBA9F1C18CA7F0EE94A561350BDAC731364007A525480".hexToSeqByte,
      # <4>
"F90211A05A745895FC96B94DB4D0A16D034CF33F38059F2357ED7CB2EA35DB00DD92EF65A03FB18F877D09F9E2021D5EE638E2F2919E24CAEA933ED4AC51818323F7DDC29EA004E520D01462FC4C79A9A74FEE948EC2F36E708575E7AD4CD9C47F6B0B87F519A09CCEB99ADBC31003D903A288D6EE7DF583A4947FB4ADF0F22886E84A86D4D7E5A070D1C2E44D89E9A140F51F5D1C1E9906EF24A3E70A90614B4A8ACB1AEAB35A5CA001831185E3DBBAA2AEB1C18ED3E0F77B41F2E98E09490F399D8D2FAAB92CB3C3A067F57C912E5826406002CAC84732BF505702CA103ACB63A1024ED53F8AAC86C7A05D0D61D6026C41DFCF47FE3B19F277FC9FEBD94E4C2FF0AA12F15C4035B651B4A05CC597DD4F21C32D0EA8831C9CB585310C5B22CA8FAFEA15A4B3F93C2BAAF394A084807A504C68F016A6DBAB22370B77FAB6AD339DD8C6BFBE54BFD137C808D0CDA0ED42536EE6357BB3AA7DDC2A06FBB4E1D8DE2152A657183979151A8D04EFCA2FA078A48BF0F38B4F0972FBD2D876DD685F4FE8CCEFF168F1C6201C973ACEF1C1C8A0DBFAFB4F768292C4B23EB8F6F81CD29D956702E78390F2C4417F0C4E8F6C2A17A0E9B44679701E0B0F2EF944853AEAFB8CF9FFAC1CE6A52159AF74845E46F47125A0E41FC974110569D17E12190B596DE2C0E3C8B3BB451DC4C91154A0C2645D797AA01199389984707B7EC7157340E1D8B1174F5F47CE465FF4F8449F22E28EA56D0A80".hexToSeqByte,
      # <5>
"F90211A050C20A151922301F0D1998EE0141A22B7C919BD0D716794EE7E3A5E0EC48DEC8A0AB31DFBEF2AC74B4E501DCE89315A6A89B10F20CBA64F761993A1037418613A7A0BF0D6EE592B2CAA6F302B27999438103809FAF702A2B30E5E1403965EF87B35EA0135D8AFE5EB5D20A1927A91F535BA7684490658EF272C933115BF0BF060CF5E6A0A1EE2F87381EA364E75D651631B5B00E62B1E7E2008458ACF29E0831D7760AFDA040AC269BEA082940F45ED7D813C47E21B930C88EF2B28BF187AE83789EF94BC5A02A03448BD5B58065042A47DB4310F364915C5D4E5FBDF32D292D6FB6BDD0708CA0E0C204B11B2EECD0A4986EEFD3413AD49DDDE082FEE6FAD50AD5C310EB7F22CDA0DA59FED5F92CC206DC2EA7BAD2EA92CC2E9C9FFE5F3A816BBF4EE2E585CE3DCAA073B2EB114312AAB7D2A765DC7923977FB60AF0447ECC7B381F193F68F658A1B7A031045FF0797D0309A4073C19A00172A2737B1D345269149A4AA4A82E6D202B4EA060BEC4F79BB8D0BCAF2B2E362B75845651A2FCC8876A3511D7843CA4E6D18DFDA01D4D7E3E579AA0FBADD67C3B9F09931DB5D733852B36F6201F9D7CF0961E1C17A0AAE7C5916D9FC01F0E7B27543F3991C0B48302A739F83030A951DA296BFCE7B8A0E3519D65CC3E19949E0D25424D5B3BFBD9DF02C84744FD39360471D197570803A0FD9C31C068D34B2701C0CDD3346012D84BB43E982BE5451E98CE50F9DB2D99DE80".hexToSeqByte,
      # <6>
"F8B180A00642D4EC5F43A113E2E0C470A87AB859178187341C0BFA059AABBE2384BDFB62808080808080A0E5652B61A18FE7AC7F7C71DE1AE97B8CA1CF742CA89BE4BBE191A2F492C606C5808080A0B203CB3B8CF8825505939914BCBF2B7B7606768E503682093E9B08B99DB3B38AA014C13E3F5D55A90B2C7C654B7C1C34FC65D2268EEC0EB6296B3A8F6199154F4EA0A0CE8026B7C397771620B3B3620E2B0528157DB901C02EDB86BA87DF8DC23E268080".hexToSeqByte,
      # <7>
"F90211A0175A0B1AEB1FFC698C61412794315A73B4C360EFA558142223EB50FDFB7C03E1A04AA803B66B8E66D570D504405C088CAFDE3F1568046360FD8AA0A365A4120214A0E6377FAD331E7EDD34F932BFFBDD7B0A3B828BBB7D2D6C73133B6919D9A49E20A0E7C4D8894D251DBDCE60AB039BF2B6B877FC032465CEEA46775BBD436630823CA0B5637ED98AF879C025DF5C7C0969A45BDD4061A891742DA7A5A95EF148A41623A05E301F8CA8969599E168E4C36276D7EA0CE70F88206CE550CBD211C5F011ED88A079C3DE527358AA2F1052BFDDBBCA68434044644E75EDD214D24281D6A0C58752A0086F191941F619D0B54136FD6E2582AB891066C8EB42113065E7E2ADF84FD5C1A01D1F5BE41598FF2FCAF1BA2A5C081120D1D70105DF11FA96729CBED928BBA2DEA07E7F5873A68A712E0D4952A9AE5216169488D12EB318FE4705396C58F1F0C88EA07585C154BFFE63F1600CD996C3551BB63E9ABF02470D9B92287A7746D6F87D30A090DE8B996508F04459B3FC3F75C65FC7F66CD7F0CB6E48D58B9853EC7DBD1F58A0F8D482EE79E5E29B6741861FE044346F6E5EA80BFD8A1378CCC73824A59EBB3EA0FDAD4E6FC2F866901830B8EB6FCD19ABC1AE8028BDC85C5A20D78D002F1F117CA0424A916B37993B3A8EAA67ABC370994D0F8931E2AD450F18FF820BCB5EBC88E3A032FE085815FE7CCA707217A0308A266BF6F63DEDEC466A8F89F7AE3F66876E7080".hexToSeqByte,
      # <8>
"F90211A06C58C81EA693CAC268BD2F0733BEB9A2784C75AA20C39B47644BB8A5E2189B27A05945F9ECE092095BD10E7814A81A14DBEDB25342BEABFAA588BFCDAF6444BCA6A007CAC5ABE793C070CE26F2A21CD0F29A573252D2C88051BC3CD58BECEA261EBEA068D6CE58650D53BBFE68284E09749907136BD52EE69332329DC0B86256987290A0BD948923CEB4704B381153432836A6534DC48C5161558B5B4B2747E9618922E9A075244FB6B65AEAC7B016CB659B04F4F144C7A9093175BBEBD844090BF6548240A0932890EF1AE3A79F55162780E5C6B32B7DEE1DA34F3FC2EBEEDDD1E96FCD433FA0E264A2922852C7C489F91DAA2FCFF0C5285A7DA1DD7891A3D9408D74F63B394BA0BF60414930AC9613A6CEF84FEDD860D0878DF5862626F52DDC7D250CDC79D2CEA026EB321595E04C1C43C33A937E4429F9F731CDC1A737FCFD12AACCF445F26748A0FAD955C9809991E80713C556A6AE30D425F36C62BA701DB863DB601341AB9664A0D48C5E648623C5FEF68B95A7761F7CC59B82FFF0231857158CBAB82C0839B846A0F33215586D176E00AA996ACE3C47E9C7517FF4B2D8CFA3AE69A57C5D767AE8C5A02FC3250268C96860E1D52E2391C43BF1EE881987F96730A750E61C6CD91E6870A02E95A4BF0044815926DF4C81B09BE500DCCBBF98CFC9E624BF2E248EF625E2D3A0F346E1C14D8B033A03E6B8BFD8318B0DBACCA7B138B6AE9A72D84E712A52603380".hexToSeqByte,
      # <9>
"F90211A0296F209978A24A9984C5E45D10D6B245947D83FA007DC2121C01A39577138663A055ACACB026401BA36C369FD68C335D30A0CCE097B25AD38819097CFE5D704867A031FF3A3298562295E6B44A13F79028A7DF49AB62CDBBC75B6B671B9265254A76A0BCA39714982A3DB01CF2211E5D13863968929DD3858000AA1970A577F14A5E8BA0A3E891D719D60B724951222664EAD2643E8B8A944BAF4EBAACAE702C8E1AEF42A0924AC4E2FC0B05E457C07660FBB4FC693FBE4ACA9F1290460A59057C742EB734A027C4975E9683463489E1BF7311A77689D566FFB7A9A0D9EBC944CFCE4265F2FBA0F7D781A27E6D5ABC32A251BAE37FCC0D87D6A021B3B24B22BF4E2EB184A9C397A0530A2DAD21BDF103E4A70448739D7B4905C5B23D6AC3505DF879960CEF80FAD6A0569BED1BACE3BF2BAF56C6A6EEE8E2D5722C5C3CB8116ECA93CB2F069BB1B2B2A06A040503F573725DAB1A804D4382492CF0E62AFAAC297AEF46D14D231AD07A24A05E08296620CBF5F661D98EC10AF23B81D3C42650657782E291D2EDE6FD0671C9A0A19F098F8D390CCF95A404C19B808C1E73BD637719458E43E3AA1AE667772657A007A61D4524CE417FD7E75A60C87E49D2ABE2B2F84DEB6195DC291E9227CF65E9A07EA8968C14011CD7F7ABE224736C000213124C9A0819B31E689CB8B534EC889CA004802E2FC25D0C7D828D66701727396F54AA6622453214DDA47F89ACA1616FDD80".hexToSeqByte,
      # <10>
"F901318080A0A9B034F6DF142723288C01ABC1D11C5836D0584FCEB53D7F61E0216881474224A0B2B840F666627E35696A5E8B7764FD8C232A2AA09B5C30D6C72FD5AB75143DC8A0EAA66386D22922CFDDC505D0E4FB3A5B7F4C0F589C7200E58AE336284FBB6C02A0579C4844BABBC6F1343B8A2F88A497463533D0B8FA6351CF95D8A61B1457997680A0297155BBC073059DC7C59FB9DB094034DF55D6B93EC5FDEECE12806D1AA8EBC080A0C2DF13436F789DB4517236E714733A08D55239E7C72D9E12D657E39AB47553A5A0BF9B5757D3F5BEEF22AF958A4DED7F5D0CAD563DC3DC7C9BD22F7B61B3885BB0A076CE7BBA69CAFCFE416BC453A064610A9946330563312DD6C57923A1EE46FCDC8080A0866DEA9CB87A66E9C92F86A84526298955FE32F35B17B78DB21A28DF55B67E128080".hexToSeqByte,
      # <11>
"F8518080A00AB67F3EB5F163D372289AF571D28A0CFE1EA9E5E8C1B12BED10B1F04ADD8163808080A0A43155ADC4D4F7C6F82A3AE1B2F5B0A91C13F8C8B91D2E4833BDCA163196CA2880808080808080808080".hexToSeqByte,
      # <12>
"F8669D207D0933750604FB19CF5DD096F02F60279CC0D9CF03F9B3424A7FB95FB846F8440480A056E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421A0C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470".hexToSeqByte])

  rec1 = (
    "001048467d0933750604fb19cf5dd096f02f60279cc0d9cf03f9b3424a7fb960".toDigest,
    @[
      # <0>
      ("00104852da00c6b5afbceb650f30322fc6e4406b508796d325ff4d3ef3a904e5".toDigest,
       1u64,
       4974.u256,
       "56E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421".toDigest,
       "C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470".toDigest),

      # <1>
      ("00104859ecda2e64d1a459062849603d4ea641c749f0c3bbbf9e9f5faf9c16ba".toDigest,
       13u64,
       0.u256,
       "56E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421".toDigest,
       "C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470".toDigest),

      # <43845>
      ("0020c98b155f8a165cdab1ec9865e8a96c2acd182a7f590593d48c9ef88b5d29".toDigest,
       1u64,
       "1549611000000000".parse(Uint256),
       "56E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421".toDigest,
       "C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470".toDigest)],
    @[
      # <0>
"F90211A0FFBB364A8CB0D565F70271627D91A255FB50D751D52A3348B61C9D7B4E98AB28A0FB1ED3251BBD153BFE1415F9946ABFF90C207678BC09EB006C2538D5EE181944A04EC56E4DC562B7C1C3DC3B0D917CE07B4975D9D4459B3F433EAF5D466DA5FF05A0F5283E423E1112E8E50A68D03E339F914F0D18883C4128571B1D14A64F2C9F2DA09414298F9C3AC243DD13F0412CFE069020D4268767E37ADC529D8923312E6519A07978D8ADDBF59DF2D472783308BB735D8BF9FC951FC694E4249B268F0B67CA67A0A17D1D539EEF8747147C2B77281AC355FF16FA42D6941489AB3A11B46D06DD2EA0D7D9CD27EDEEA84EDD53B201DEC05DDB8F6ADD8CDDC77628FFDE9CABBE4F6C1DA03C45D84EEFF0128C6D19BE1D8CAF2797C7332D5E12040B87E1F4E7E17D9D4977A0A8B7AA500844BCA70F76E20BB231291A54CBC71039D183DA4B1FB058FC79FC69A087682429DABD006289751A0EA2C05AA1FD277DA89BF8C7E26DBAEBC774F246A8A0DA0F3AAB84168AF6C0101C0994B881B4FC9EDC4E99E7F28BA90C26F65EE0C819A0A3D9721D23C8B8118B50FAAA40FB3424B8C2BA1B91A2EAC0AAD29868B74B8497A0D1C18AA65CCA65A7118E2C42C429BADE088FC61987B86575145B5A84CA5379A3A0AD509B03FDE185D3ED8CC884A4D0AC03390E7DB8FEC78EC3127DB28CEB670353A0403A13695F15EAAA0588C74282DFF5A9C05BD9039F44336F10BA5590E087043780".hexToSeqByte,
      # <1>
"F90211A050B2D95C218D12F40BE4549EE50709E479B19157BA095501AA94293F662DCA7FA00FB68AA15AD8AD8E2773DC61A370AFE82DAB79EDFDEE1A076F9C3C39B90A30B2A0F7E3E89841383EA0264230C6E5F8BB7A9383E31B13D4333F7417AC389C47C368A0ADF864ED54A756828CA1E8B3C0D434C495CAE24FA3547B10D4B037628BEBD1F4A0ADEEBF028C5D866AC5E6F0D91234E599B2606D89FCFD90F3CF332B46A03AB057A065433307CF0FF284D6A3A7E6D0D434BCD3D732757CCCFA1020B21E5F4762BA5EA0FEC203B690FB1AB74055EF240EA0F9A0E5F27AE2FFED6DA9D2B64883AB456EFEA03C4C09C4F72C15A1CE661C60EB7725403337B46D979FEE259AA3BCC6B0AD6161A05E9BE5120BDF2E94D6E64D1DE4CBAFB7C12E830498228A7D12F6CE2461E93990A066B7F2AD805E04708D28CA06AEE29F88700EB51AB38F4BC109DD9F4ABAAC041BA02C2413B49872A483B1F6B97A741B4B8C59040F4F4759AE444643330CD9907F29A08651CEF990EF0A0C5AB52F4FA4AD4265ACB8F7D2358ABE9AC87BC926F04C1B24A0D29829A570067E446DA03CDFFA7203766F8365C65FBF0E23BF3233BB96D6C658A00F68202899DB482FAFF2AAB2635EDB72E43EBD170D4F0554BAF828E970C4DBC7A06D3D6F17ED39FBB36A6065AC34BE5C633F1B8928B5514DEFFD5A0FFA501AF202A0BE7035655FB3801E87902731C9F0041D8CAFBE471B48F0212293ACCD8C40CACC80".hexToSeqByte,
      # <2>
"F90211A000F354080583902374DBAD850D0E8E33803412F22C7BA81CBC2778A3B3350761A0809A2CF3A2F87798CE2B4047BB288F17C000307BC7C57FA251CD2C7292596AECA04B40B0EF7E160F09D3DA0EA477C385A49074D35C937A2B74B3F51ABD8A5F9BCAA0F972583DC80407F31517059FCC80E3D31262D8637BB819E94F2D2CD0736A324CA033539BA750904CED59B6B37F3879FDB62AAA850DCF7994DA64DA41B3A9E78850A0B4F5AA77948FC84800F8A9365531A166B56D3D1C5BBC3367554D0C1DC7766811A0FF28D18F41F88909A7CDF60CE8F5B56171B6FFFC97CF299730AC08436AD562B1A0F83235BB3F0C276386895A503BEF8B61F7A8D65D9F6ED4A92C4FD0616E05DE1EA05DC6F966B54000C0B830DF4BB61E47D4D4357822FE4474823DF4862F92602E2AA067D7F3201504A6BC2CF96A010A856CABC4489BEE2F325AB40C6E3ED230898D68A082FCBFA9FCB388E1EC0CC70F14072B3011CACADC212FFB11DCA1A0387064427FA03F4EB0BC6BB0AF1B9AC31A64FB77C6B75F41301DEFBB3803A660E526D7A8D577A01813C0B7A37EBAA16B78E63E23D6E1EF58B9646C572723FCBAF706EFB0958C77A00E112F5A43F599A7858758D3783525C8BC57CFA1BC9D973045363A6091721A28A0879D607834EC77D3D62A5C30BE61615BFB9DAA671DABCC7294C7C3A8633DB6AFA05876CE1DD19DB3F2FCDE622F6C1AF61898481DD6C72BD9273106835A49C5248480".hexToSeqByte,
      # <3>
"F90211A0175A0B1AEB1FFC698C61412794315A73B4C360EFA558142223EB50FDFB7C03E1A04AA803B66B8E66D570D504405C088CAFDE3F1568046360FD8AA0A365A4120214A0E6377FAD331E7EDD34F932BFFBDD7B0A3B828BBB7D2D6C73133B6919D9A49E20A0E7C4D8894D251DBDCE60AB039BF2B6B877FC032465CEEA46775BBD436630823CA0B5637ED98AF879C025DF5C7C0969A45BDD4061A891742DA7A5A95EF148A41623A05E301F8CA8969599E168E4C36276D7EA0CE70F88206CE550CBD211C5F011ED88A079C3DE527358AA2F1052BFDDBBCA68434044644E75EDD214D24281D6A0C58752A0086F191941F619D0B54136FD6E2582AB891066C8EB42113065E7E2ADF84FD5C1A01D1F5BE41598FF2FCAF1BA2A5C081120D1D70105DF11FA96729CBED928BBA2DEA07E7F5873A68A712E0D4952A9AE5216169488D12EB318FE4705396C58F1F0C88EA07585C154BFFE63F1600CD996C3551BB63E9ABF02470D9B92287A7746D6F87D30A090DE8B996508F04459B3FC3F75C65FC7F66CD7F0CB6E48D58B9853EC7DBD1F58A0F8D482EE79E5E29B6741861FE044346F6E5EA80BFD8A1378CCC73824A59EBB3EA0FDAD4E6FC2F866901830B8EB6FCD19ABC1AE8028BDC85C5A20D78D002F1F117CA0424A916B37993B3A8EAA67ABC370994D0F8931E2AD450F18FF820BCB5EBC88E3A032FE085815FE7CCA707217A0308A266BF6F63DEDEC466A8F89F7AE3F66876E7080".hexToSeqByte,
      # <4>
"F90211A06C58C81EA693CAC268BD2F0733BEB9A2784C75AA20C39B47644BB8A5E2189B27A05945F9ECE092095BD10E7814A81A14DBEDB25342BEABFAA588BFCDAF6444BCA6A007CAC5ABE793C070CE26F2A21CD0F29A573252D2C88051BC3CD58BECEA261EBEA068D6CE58650D53BBFE68284E09749907136BD52EE69332329DC0B86256987290A0BD948923CEB4704B381153432836A6534DC48C5161558B5B4B2747E9618922E9A075244FB6B65AEAC7B016CB659B04F4F144C7A9093175BBEBD844090BF6548240A0932890EF1AE3A79F55162780E5C6B32B7DEE1DA34F3FC2EBEEDDD1E96FCD433FA0E264A2922852C7C489F91DAA2FCFF0C5285A7DA1DD7891A3D9408D74F63B394BA0BF60414930AC9613A6CEF84FEDD860D0878DF5862626F52DDC7D250CDC79D2CEA026EB321595E04C1C43C33A937E4429F9F731CDC1A737FCFD12AACCF445F26748A0FAD955C9809991E80713C556A6AE30D425F36C62BA701DB863DB601341AB9664A0D48C5E648623C5FEF68B95A7761F7CC59B82FFF0231857158CBAB82C0839B846A0F33215586D176E00AA996ACE3C47E9C7517FF4B2D8CFA3AE69A57C5D767AE8C5A02FC3250268C96860E1D52E2391C43BF1EE881987F96730A750E61C6CD91E6870A02E95A4BF0044815926DF4C81B09BE500DCCBBF98CFC9E624BF2E248EF625E2D3A0F346E1C14D8B033A03E6B8BFD8318B0DBACCA7B138B6AE9A72D84E712A52603380".hexToSeqByte,
      # <5>
"F90211A0296F209978A24A9984C5E45D10D6B245947D83FA007DC2121C01A39577138663A055ACACB026401BA36C369FD68C335D30A0CCE097B25AD38819097CFE5D704867A031FF3A3298562295E6B44A13F79028A7DF49AB62CDBBC75B6B671B9265254A76A0BCA39714982A3DB01CF2211E5D13863968929DD3858000AA1970A577F14A5E8BA0A3E891D719D60B724951222664EAD2643E8B8A944BAF4EBAACAE702C8E1AEF42A0924AC4E2FC0B05E457C07660FBB4FC693FBE4ACA9F1290460A59057C742EB734A027C4975E9683463489E1BF7311A77689D566FFB7A9A0D9EBC944CFCE4265F2FBA0F7D781A27E6D5ABC32A251BAE37FCC0D87D6A021B3B24B22BF4E2EB184A9C397A0530A2DAD21BDF103E4A70448739D7B4905C5B23D6AC3505DF879960CEF80FAD6A0569BED1BACE3BF2BAF56C6A6EEE8E2D5722C5C3CB8116ECA93CB2F069BB1B2B2A06A040503F573725DAB1A804D4382492CF0E62AFAAC297AEF46D14D231AD07A24A05E08296620CBF5F661D98EC10AF23B81D3C42650657782E291D2EDE6FD0671C9A0A19F098F8D390CCF95A404C19B808C1E73BD637719458E43E3AA1AE667772657A007A61D4524CE417FD7E75A60C87E49D2ABE2B2F84DEB6195DC291E9227CF65E9A07EA8968C14011CD7F7ABE224736C000213124C9A0819B31E689CB8B534EC889CA004802E2FC25D0C7D828D66701727396F54AA6622453214DDA47F89ACA1616FDD80".hexToSeqByte,
      # <6>
"F901318080A0A9B034F6DF142723288C01ABC1D11C5836D0584FCEB53D7F61E0216881474224A0B2B840F666627E35696A5E8B7764FD8C232A2AA09B5C30D6C72FD5AB75143DC8A0EAA66386D22922CFDDC505D0E4FB3A5B7F4C0F589C7200E58AE336284FBB6C02A0579C4844BABBC6F1343B8A2F88A497463533D0B8FA6351CF95D8A61B1457997680A0297155BBC073059DC7C59FB9DB094034DF55D6B93EC5FDEECE12806D1AA8EBC080A0C2DF13436F789DB4517236E714733A08D55239E7C72D9E12D657E39AB47553A5A0BF9B5757D3F5BEEF22AF958A4DED7F5D0CAD563DC3DC7C9BD22F7B61B3885BB0A076CE7BBA69CAFCFE416BC453A064610A9946330563312DD6C57923A1EE46FCDC8080A0866DEA9CB87A66E9C92F86A84526298955FE32F35B17B78DB21A28DF55B67E128080".hexToSeqByte,
      # <7>
"F8518080A00AB67F3EB5F163D372289AF571D28A0CFE1EA9E5E8C1B12BED10B1F04ADD8163808080A0A43155ADC4D4F7C6F82A3AE1B2F5B0A91C13F8C8B91D2E4833BDCA163196CA2880808080808080808080".hexToSeqByte,
      # <8>
"F8669D207D0933750604FB19CF5DD096F02F60279CC0D9CF03F9B3424A7FB95FB846F8440480A056E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421A0C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470".hexToSeqByte,
      # <9>
"F90211A0D8DF8DD801321FCCFF6BC008D582698662A15ED4B29431A1CD1CB540169476ACA03B76B2DB6EE17A52E226B2301A7DD2FF21A957909FCD26D81DBA762DF65B678EA015C7B6DB1A993A54976301D2FB6AC31CEB33ED48018E322C8487F2DB3DC264EDA044B53BF21F700419E1A6F5340880311240A356596B095E8D8944D1240513F4DBA0BD2245AC2DD694A1823AC008FC02F2ADB917F5D3B5AF0A02CD8257C25BC4A9E1A033BA8C1554270E029F0FF6D43E4C903E179F5FBE2189BABC4744B7ADAD96178AA037CA87F94F696DE439973B89FE6DCCB39F64C0D95FEE05CC72DADF7C1F4063E0A054B143D6C582F858EDF2301467D3A0A6E11F6BFBA2B93C3F49C5AB9E418AEEAAA0C82405BC0E336A9C8BD8B171E4569086EF20CCA24FA032161D7E38522F7A6E3AA0187182D43F5A0E84692C755ECE4C5763CABFF4B88F8D960D60DF39B4E3ED80C6A0BD00857DCEB2AEFF0B3E6D94204C466B62A2F945DC5BA1C629B27B0149106E18A0865BCFFB1D3C36173CE347878A7E4FBC2DAB1DF7BF9BA80B9F53E06FE00D5461A0973E5E630F4E4FC9E65F6EB23A9B7C516201C9DAB309B29CA33B379B07BA9D29A0CC49BF76F5D3A790F3B7EC2392AA88B30BFF500BF03A17E4AC0D9093DE27A313A0B3ED1849C31A2B81446D87F590707E1A352D3B5431A8281F593FD153B9B75E33A028872B9C072274D6AAD1D3FAAFDD163D393ADB405C7435815E9367B06645941A80".hexToSeqByte,
      # <10>
"F90211A0D50F2E39F45A2111D55147674DD3DFE9EF778723B49B17BBB5423338CF899966A0E30B43737727FF3F099B43478F3EB3F6CB58171A4D4914A49E18E4250BE7E17CA0EB0ADC11962D3A32FE947CBC5BD542D2E33BE338869CF2253E6EB4D5ABFFF308A00877B208516D96B6C8943261281568E787B0C4ED71C25F08DABE28B3CFA23311A07CE4931FB0A59EA544536ADD1C9731BF9A6806690CA5DE401304AABC57379E56A0C10B68793F9F0AEF92E0BC9511ADD7F32E64AE71325BE9FBC9A04ABE817C73F1A0E09B62F3EDDB32F66360E3CF098A11FAA5E6BA74FCBA58017B8AB1FBE322DC75A0A3D7CB9D94C9ABDCBE75EDDF9119EF6BA96EA469D4232EC16849C9F0D6A4D920A004CAC15D7CCEBBA9587B95F3656CEDAAD7F1180C63B47A7DCE5CEE3EB1F87655A0AEC2B3F123707530EDC5BB11DEF09AE196F3391DA1F7CD4555B3FB3200843B92A01B87103A5557E37231639416C5D76FD76E3D763C222198D6C554C64CF679F982A0D627D002CC4EE0C2F51E4304B350C16080FEBB3B1BB7267A42D245422C75826FA08F4307EB616ABD9A0FEDA83E3E12B41BDAF6F05215A6A92EE11CF68F830DF944A065D0E5A8EE025D5EFEBB60F767203221D4B7B243743283EA7EB745074098A2A1A04E4B7B9F232A245C7F7D1FD4AA5495DD8A6E27420366E4515A7C1A275764ADC3A0D53C6B29CB990E25619E4A1E4E92A1F35102985E6EC999D040896AFBD7D56AC480".hexToSeqByte,
      # <11>
"F90211A04A89D57B9CB42F6E269D51AC8DDA52101DEF5163BFB415E0C7D5D838544905DEA0B9EA602CEB85FEB7F9763B870AD24845DCC4C47DD12D7CB091C9B640CED04166A0B323570ABA3DD34B2AD3042BD7A7E7A10F36BA3007A6C9F81182526960FCB297A0C15BB5B84B2E0E0145566DC26C33992B27D90DDAE3CAE6AD71E4CC6AB8FEC13DA066D2E06E2F9F5E3FCCF972BA9508A4E29316CEB1AD3E643DE4582073E90C9653A0DEA30438B2AB11706FD4EB35A0BE6B4C2E1A1FEB4D6F3C9D2105123FE325AF3AA0A76442E86C30687060E41A70112ED2D4B7F00AFD537D9562BA50F3A6870667B2A037E3C627AB7C7B4EE4BC86754B9C950D4BB992AA5F271CDCB9FDB280CFF74E4FA00673239BAF1BDB51BBC1E504B844C9275E39F998BE78153A6674B002844459EBA0D8C03E5D9B3B5295ADE0C34520CDE8BA36D3D4DDB49EC5B3C1C1A04E1C8213C9A00C7364BE1AB503A7A9017021E915D2AAB82FFA3B58E4FA4EF8A36D1BBAEF035AA0846D08C50C7978D5234C5D77565D4D3386D9FA6BBC0F20F58D726EE4CACA8C73A0C052ED2FF80CD00B598AD906101D2B539F1DA5745E2E39C9D6E51B6AB419A6E4A07817251C528F0D7297E4BB855EB66B767EE817B4D697BDAD59107734C6A13352A0B69712AA6A7D9BDDB4462F71EB5688583B277A13453F5CD37947B4561A7D5F23A0C7EFCD12218C436D3055B9DB0121964412A981BDC224ACDB6D8382B3B433DC0980".hexToSeqByte,
      # <12>
"F9017180A00AB394B3794B7909B05FA99943D33B6C650A3EDA8278104EE6A815E63B23F294A00BEC2410E52705BCE45A0E7D536BC1BC720CCEAE09A82F259528EB52249BC7A580A0490DFF989D2CA627D0FFFE5A7624E5D1F93D13D148C8E895A533E637D72DF692A06963AF64D5582FF69C1AED0338830CFD29FEB6CA241D27CF2B003DF3872226AD80A051A0B35BC60C36717801FA70B97EF70F83DC87272C1BB1D8A018422A6881EFEBA0B240366AAF91CBE93688DF2012629A6E322BA7893BC155894912F1298727FC0280A0248DEDB0AE495BBDCC633729DB6755CF75154B487848EE1A2AF41B16651AB159A0F91E534C8C8B4C99673618DF0C8439134C5BE214CA88B31A9AF856F329280512A0740EF53248D750747B8A867C90B824C2D8D0B989BF5FBD9B0F85B1CC7EC8A5D780A02666267A3FF068897BDFE0B8CD363070ADC2A47F2B75245C5D95E2CE5C0BA665A04176569E211DE429567E85394E2851FC1858DB8AEADD2A9FDD76EA580BB62F2F80".hexToSeqByte,
      # <13>
"F871808080808080808080A0E27FB04D7FEE1421851667D4958BF753B72531863F37128B1524F79036DA3DBBA0FC79B7B936154EFB48ED672C1C45F7ADE8D90C37C6876CCF0A8E67DAFB42CF57A0362D97C46FED60D536848D8A8F02A0B897606DA3841A83B68A206486B80F508D8080808080".hexToSeqByte,
      # <14>
"F86D9D20155F8A165CDAB1EC9865E8A96C2ACD182A7F590593D48C9EF88B5D29B84DF84B018705815CED31EE00A056E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421A0C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470".hexToSeqByte])

# ----------

const
  testRoot* = root
  testSamples* = @[rec0, rec1]

# End
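A consumer-side shape check for the samples above; illustrative only, relying on the `TestSample` tuple layout used by `tests/test_sync_snap.nim`:

  for (base, accounts, proofs) in testSamples:
    doAssert 0 < accounts.len   # each sample carries proved accounts ...
    doAssert 0 < proofs.len     # ... plus the trie nodes proving them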
2
vendor/nim-eth
vendored
@ -1 +1 @@
Subproject commit dacf827a8653459429623be7ceaf6ecca20fcf35
Subproject commit 8761ea3222f8d4fbd7ebae6755665e791499d7f2
2
vendor/nim-json-serialization
vendored
@ -1 +1 @@
Subproject commit dbe0f1ae510d74a23f3693325635277bd0f0c1b9
Subproject commit 3509706517f3562cbcbe9d94988eccdd80474ab8
2
vendor/nim-serialization
vendored
@ -1 +1 @@
Subproject commit 1d33fa3ced6bc274ed43d99345ceb9cd6bb4dd24
Subproject commit 9631fbd1c81c8b25ff8740df440ca7ba87fa6131
2
vendor/nim-sqlite3-abi
vendored
@ -1 +1 @@
Subproject commit fda455cfea2df707dde052034411ce63de218453
Subproject commit 2f040a5bfcef78f29b72016dfef98706a0f6dc9f
2
vendor/nim-ssz-serialization
vendored
@ -1 +1 @@
Subproject commit da3c08c16da2e4d2e9f48556fbdbf90bfff22172
Subproject commit cd500484e054ead951f2d07aeb81c1c8c695db26
2
vendor/nim-stew
vendored
@ -1 +1 @@
Subproject commit 779ba052c827af46bea79ff8b12b159f68c0f14a
Subproject commit 4cab7b08793d25c311efe88d54f948815643bc41
2
vendor/nim-toml-serialization
vendored
@ -1 +1 @@
Subproject commit c2ce6e56c85d1db041b25b580ab0d83ce3ae35cd
Subproject commit 4e15e00ed9e27a8d28b40b69ef06c6a4a388ae93