Prepare snap server client test scenario cont1 (#1485)

* Renaming androgynous sub-object names according to where they belong

why:
  These objects are not explicitly dealt with. They give meaning to
  some generic wrapper objects. Naming them after their origin may
  help troubleshooting.

* Redefine proof nodes list data type for `snap/1` wire protocol

why:
  The current specification suffered from the fact that the basic data
  type for a proof node is an RLP encoded hexary node. This slightly
  confused the encoding/decoding magic.

details:
  This is the second attempt, now wrapping the `seq[Blob]` into a
  wrapper object of `seq[SnapProof]` for a distinct alias sequence.

  In the previous attempt, `SnapProof` was a wrapper object holding the
  `Blob` with magic applied to the `seq[]`. This needed the `append`
  mixin to strip the outer wrapper that was applied to the `Blob` already
  when it was passed as argument.

* Fix some prototype inconsistency

why:
  For easy reading, `getAccountRange()` handler return code should
  resemble the `accountRange()` arguments prototype.
This commit is contained in:
Jordan Hrycaj 2023-03-03 20:01:59 +00:00 committed by GitHub
parent f20f20f962
commit 10ad7867e4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 115 additions and 94 deletions

View File

@ -22,7 +22,7 @@ logScope:
topics = "full-sync"
type
FullSyncRef* = RunnerSyncRef[CtxData,BuddyData]
FullSyncRef* = RunnerSyncRef[FullCtxData,FullBuddyData]
const
extraTraceMessages = false # or true

View File

@ -25,12 +25,12 @@ type
FirstPivotUseRegardless ## Force pivot if available
PivotRunMode ## SNAFU after some magic
BuddyData* = object
FullBuddyData* = object
## Local descriptor data extension
pivot*: BestPivotWorkerRef ## Local pivot worker descriptor
bQueue*: BlockQueueWorkerRef ## Block queue worker
CtxData* = object
FullCtxData* = object
## Globally shared data extension
rng*: ref HmacDrbgContext ## Random generator, pre-initialised
pivot*: BestPivotCtxRef ## Global pivot descriptor
@ -40,10 +40,10 @@ type
suspendAt*: BlockNumber ## Suspend if persistent head is larger
ticker*: TickerRef ## Logger ticker
FullBuddyRef* = BuddyRef[CtxData,BuddyData]
FullBuddyRef* = BuddyRef[FullCtxData,FullBuddyData]
## Extended worker peer descriptor
FullCtxRef* = CtxRef[CtxData]
FullCtxRef* = CtxRef[FullCtxData]
## Extended global descriptor
# End

View File

@ -18,7 +18,7 @@ import
../../db/db_chain,
../../core/chain,
../snap/range_desc,
../snap/worker/db/[hexary_desc, hexary_range, snapdb_desc, snapdb_accounts],
../snap/worker/db/[hexary_desc, hexary_range],
../protocol,
../protocol/snap/snap_types
@ -43,12 +43,12 @@ proc proofNodesSizeMax*(n: int): int {.gcsafe.}
template logTxt(info: static[string]): static[string] =
"handlers.snap." & info
proc notImplemented(name: string) =
debug "snapWire: hHandler method not implemented", meth=name
proc notImplemented(name: string) {.used.} =
debug "Wire handler method not implemented", meth=name
proc append(writer: var RlpWriter; t: SnapProof; node: Blob) =
## RLP mixin, encoding
writer.snapAppend node
proc getAccountFn(chain: ChainRef): HexaryGetFn {.gcsafe.} =
let db = chain.com.db.db
return proc(key: openArray[byte]): Blob = db.get(key)
# ------------------------------------------------------------------------------
# Private functions: fetch leaf range
@ -168,7 +168,13 @@ proc proofNodesSizeMax*(n: int): int =
high(int)
proc proofEncode*(proof: seq[SnapProof]): Blob =
rlp.encode proof
var writer = initRlpWriter()
writer.snapAppend SnapProofNodes(nodes: proof)
writer.finish
proc proofDecode*(data: Blob): seq[SnapProof] {.gcsafe, raises: [RlpError].} =
var reader = data.rlpFromBytes
reader.snapRead(SnapProofNodes).nodes
# ------------------------------------------------------------------------------
# Public functions: snap wire protocol handlers
@ -180,11 +186,11 @@ method getAccountRange*(
origin: Hash256;
limit: Hash256;
replySizeMax: uint64;
): (seq[SnapAccount], seq[SnapProof])
): (seq[SnapAccount], SnapProofNodes)
{.gcsafe, raises: [CatchableError].} =
## Fetch accounts list from database
let
db = SnapDbRef.init(ctx.chain.com.db.db).getAccountFn
db = ctx.chain.getAccountFn
iv = NodeTagRange.new(origin.to(NodeTag), limit.to(NodeTag))
sizeMax = min(replySizeMax,high(int).uint64).int
@ -192,7 +198,8 @@ method getAccountRange*(
let rc = ctx.fetchLeafRange(db, root, iv, sizeMax)
if rc.isOk:
return (rc.value.leafs.mapIt(it.to(SnapAccount)), rc.value.proof)
result[0] = rc.value.leafs.mapIt(it.to(SnapAccount))
result[1] = SnapProofNodes(nodes: rc.value.proof)
method getStorageRanges*(
@ -202,7 +209,7 @@ method getStorageRanges*(
origin: openArray[byte];
limit: openArray[byte];
replySizeMax: uint64;
): (seq[seq[SnapStorage]], seq[SnapProof])
): (seq[seq[SnapStorage]], SnapProofNodes)
{.gcsafe.} =
notImplemented("getStorageRanges")

View File

@ -11,6 +11,7 @@
{.push raises: [].}
import
std/[hashes, sequtils],
chronicles,
eth/common,
../../../constants
@ -23,8 +24,12 @@ type
accHash*: Hash256
accBody* {.rlpCustomSerialization.}: Account
SnapProof* = object
data* {.rlpCustomSerialization.}: Blob
SnapProof* = distinct Blob
## Rlp coded node data, to be handled different from a generic `Blob`
SnapProofNodes* = object
## Wrapper around `seq[SnapProof]` for controlling serialisation.
nodes*: seq[SnapProof]
SnapStorage* = object
slotHash*: Hash256
@ -34,6 +39,21 @@ type
SnapPeerState* = ref object of RootRef
# ------------------------------------------------------------------------------
# Public `SnapProof` type helpers
# ------------------------------------------------------------------------------
proc to*(data: Blob; T: type SnapProof): T = data.T
proc to*(node: SnapProof; T: type Blob): T = node.T
proc hash*(sp: SnapProof): Hash =
## Mixin for Table/HashSet
sp.to(Blob).hash
proc `==`*(a,b: SnapProof): bool =
## Mixin for Table/HashSet
a.to(Blob) == b.to(Blob)
# ------------------------------------------------------------------------------
# Public serialisation helpers
# ------------------------------------------------------------------------------
@ -105,37 +125,25 @@ proc snapAppend*(
proc snapRead*(
rlp: var Rlp;
T: type Blob;
T: type SnapProofNodes;
): T
{.gcsafe, raises: [RlpError]} =
## Rlp decoding for a proof node.
rlp.read Blob
{.gcsafe, raises: [RlpError].} =
## RLP decoding for a wrapped `SnapProof` sequence. This extra wrapper is
## needed as the `SnapProof` items are `Blob` items at heart which is also
## the serialised destination data type.
if rlp.isList:
for w in rlp.items:
result.nodes.add w.rawData.toSeq.to(SnapProof)
elif rlp.isBlob:
result.nodes.add rlp.rawData.toSeq.to(SnapProof)
proc snapAppend*(
writer: var RlpWriter;
proofNode: Blob;
) =
## Rlp encoding for proof node.
var start = 0u8
# Need some magic to strip an extra layer that will be re-introduced by
# the RLP encoder as object wrapper. The problem is that the `proofNode`
# argument blob is encoded already and a second encoding must be avoided.
#
# This extra work is not an issue as the number of proof nodes in a list
# is typically small.
if proofNode.len < 57:
# <c0> + data(max 55)
start = 1u8
elif 0xf7 < proofNode[0]:
# <f7+sizeLen> + size + data ..
start = proofNode[0] - 0xf7 + 1
else:
# Oops, unexpected data -- encode as is
discard
writer.appendRawBytes proofNode[start ..< proofNode.len]
proc snapAppend*(writer: var RlpWriter; spn: SnapProofNodes) =
## RLP encoding for a wrapped `SnapProof` sequence. This extra wrapper is
## needed as the `SnapProof` items are `Blob` items at heart which is also
## the serialised destination data type.
writer.startList spn.nodes.len
for w in spn.nodes:
writer.appendRawBytes w.to(Blob)
# ------------------------------------------------------------------------------
# Public service stubs
@ -150,7 +158,7 @@ method getAccountRange*(
origin: Hash256;
limit: Hash256;
replySizeMax: uint64;
): (seq[SnapAccount], seq[SnapProof])
): (seq[SnapAccount], SnapProofNodes)
{.base, raises: [CatchableError].} =
notImplemented("getAccountRange")
@ -161,7 +169,7 @@ method getStorageRanges*(
origin: openArray[byte];
limit: openArray[byte];
replySizeMax: uint64;
): (seq[seq[SnapStorage]], seq[SnapProof])
): (seq[seq[SnapStorage]], SnapProofNodes)
{.base.} =
notImplemented("getStorageRanges")

View File

@ -171,7 +171,7 @@ proc read(rlp: var Rlp, t: var SnapAccount, T: type Account): T =
## RLP mixin, decoding
rlp.snapRead T
proc read(rlp: var Rlp; t: var SnapProof; T: type Blob): T =
proc read(rlp: var Rlp; T: type SnapProofNodes): T =
## RLP mixin, decoding
rlp.snapRead T
@ -179,9 +179,9 @@ proc append(writer: var RlpWriter, t: SnapAccount, account: Account) =
## RLP mixin, encoding
writer.snapAppend account
proc append(writer: var RlpWriter; t: SnapProof; node: Blob) =
proc append(writer: var RlpWriter; spn: SnapProofNodes) =
## RLP mixin, encoding
writer.snapAppend node
writer.snapAppend spn
p2pProtocol snap1(version = snapVersion,
@ -210,7 +210,7 @@ p2pProtocol snap1(version = snapVersion,
# For logging only
nAccounts = accounts.len
nProof = proof.len
nProof = proof.nodes.len
if nAccounts == 0 and nProof == 0:
trace trSnapSendReplying & "EMPTY AccountRange (0x01)", peer
@ -224,7 +224,7 @@ p2pProtocol snap1(version = snapVersion,
proc accountRange(
peer: Peer;
accounts: openArray[SnapAccount];
proof: openArray[SnapProof])
proof: SnapProofNodes)
requestResponse:
@ -249,7 +249,7 @@ p2pProtocol snap1(version = snapVersion,
# For logging only
nSlots = slots.len
nProof = proof.len
nProof = proof.nodes.len
if nSlots == 0 and nProof == 0:
trace trSnapSendReplying & "EMPTY StorageRanges (0x03)", peer
@ -264,7 +264,7 @@ p2pProtocol snap1(version = snapVersion,
proc storageRanges(
peer: Peer;
slotLists: openArray[seq[SnapStorage]];
proof: openArray[SnapProof])
proof: SnapProofNodes)
requestResponse:

View File

@ -23,7 +23,7 @@ logScope:
topics = "snap-sync"
type
SnapSyncRef* = RunnerSyncRef[CtxData,BuddyData]
SnapSyncRef* = RunnerSyncRef[SnapCtxData,SnapBuddyData]
const
extraTraceMessages = false # or true

View File

@ -12,6 +12,8 @@
## traversing leaves of the trie in leaf path order, making network requests
## using the `snap` protocol.
{.push raises: [].}
import
std/sequtils,
chronos,
@ -21,8 +23,6 @@ import
"../.."/[constants, range_desc, worker_desc],
./com_error
{.push raises: [].}
logScope:
topics = "snap-fetch"
@ -79,7 +79,7 @@ proc getAccountRange*(
let snAccRange = rc.value.get
GetAccountRange(
data: PackedAccountRange(
proof: snAccRange.proof,
proof: snAccRange.proof.nodes,
accounts: snAccRange.accounts
# Re-pack accounts data
.mapIt(PackedAccount(

View File

@ -9,6 +9,8 @@
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/[options, sequtils],
chronos,
@ -18,8 +20,6 @@ import
"../.."/[constants, range_desc, worker_desc],
./com_error
{.push raises: [].}
logScope:
topics = "snap-fetch"
@ -122,7 +122,7 @@ proc getStorageRanges*(
let
nSlotLists = snStoRanges.slotLists.len
nProof = snStoRanges.proof.len
nProof = snStoRanges.proof.nodes.len
if nSlotLists == 0:
# github.com/ethereum/devp2p/blob/master/caps/snap.md#getstorageranges-0x02:
@ -138,7 +138,9 @@ proc getStorageRanges*(
return err(ComNoStorageForAccounts)
# Assemble return structure for given peer response
var dd = GetStorageRanges(data: AccountStorageRange(proof: snStoRanges.proof))
var dd = GetStorageRanges(
data: AccountStorageRange(
proof: snStoRanges.proof.nodes))
# Set the left proof boundary (if any)
if 0 < nProof and iv.isSome:

View File

@ -8,6 +8,8 @@
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/[sequtils, sets, tables],
chronicles,
@ -17,8 +19,6 @@ import
../../range_desc,
"."/[hexary_desc, hexary_error, hexary_nearby, hexary_paths]
{.push raises: [].}
type
RangeLeaf* = object
key*: NodeKey ## Leaf node path
@ -54,7 +54,7 @@ proc nonLeafPathNodes(
baseTag: NodeTag; # Left boundary
rootKey: NodeKey|RepairKey; # State root
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
): HashSet[Blob]
): HashSet[SnapProof]
{.gcsafe, raises: [CatchableError]} =
## Helper for `updateProof()`
baseTag
@ -62,7 +62,7 @@ proc nonLeafPathNodes(
.path
.mapIt(it.node)
.filterIt(it.kind != Leaf)
.mapIt(it.convertTo(Blob))
.mapIt(it.convertTo(Blob).to(SnapProof))
.toHashSet
# ------------------------------------------------------------------------------
@ -142,12 +142,12 @@ template updateProof(
var rp = RangeProof(
leafs: leafList,
proof: mapIt(toSeq(proof), SnapProof(data: it)))
proof: toSeq(proof))
if 0 < nSizeUsed:
rp.leafsSize = hexaryRangeRlpSize nSizeUsed
if 0 < rp.proof.len:
rp.proofSize = hexaryRangeRlpSize rp.proof.foldl(a + b.data.len, 0)
rp.proofSize = hexaryRangeRlpSize rp.proof.foldl(a + b.to(Blob).len, 0)
rp

View File

@ -8,6 +8,8 @@
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/[sequtils, tables],
chronicles,
@ -18,8 +20,6 @@ import
"."/[hexary_desc, hexary_error, hexary_import, hexary_nearby,
hexary_paths, rocky_bulk_load]
{.push raises: [].}
logScope:
topics = "snap-db"
@ -226,7 +226,7 @@ proc mergeProofs*(
refs = @[ps.root.to(RepairKey)].toHashSet
for n,rlpRec in proof:
let report = db.hexaryImport(rlpRec.data, nodes, refs)
let report = db.hexaryImport(rlpRec.to(Blob), nodes, refs)
if report.error != NothingSerious:
let error = report.error
trace "mergeProofs()", peer, item=n, proofs=proof.len, error

View File

@ -8,6 +8,8 @@
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/hashes,
eth/[common, p2p],
@ -19,8 +21,6 @@ import
./worker/ticker,
./range_desc
{.push raises: [].}
type
SnapAccountsList* = SortedSet[NodeTag,Hash256]
## Sorted pair of `(account,state-root)` entries
@ -83,12 +83,12 @@ type
state*: SnapDbPivotRegistry ## Saved recovery context state
level*: int ## top level is zero
BuddyData* = object
SnapBuddyData* = object
## Per-worker local descriptor data extension
errors*: ComErrorStatsRef ## For error handling
pivotEnv*: SnapPivotRef ## Environment containing state root
CtxData* = object
SnapCtxData* = object
## Globally shared data extension
rng*: ref HmacDrbgContext ## Random generator
dbBackend*: ChainDB ## Low level DB driver access (if any)
@ -105,10 +105,10 @@ type
# Info
ticker*: TickerRef ## Ticker, logger
SnapBuddyRef* = BuddyRef[CtxData,BuddyData]
SnapBuddyRef* = BuddyRef[SnapCtxData,SnapBuddyData]
## Extended worker peer descriptor
SnapCtxRef* = CtxRef[CtxData]
SnapCtxRef* = CtxRef[SnapCtxData]
## Extended global descriptor
# ------------------------------------------------------------------------------

View File

@ -69,7 +69,7 @@ proc dumpAccounts*(
blob.mapIt(it.toHex(2)).join.toLowerAscii
proc ppStr(proof: SnapProof): string =
proof.data.ppStr
proof.to(Blob).ppStr
proc ppStr(hash: Hash256): string =
hash.data.mapIt(it.toHex(2)).join.toLowerAscii
@ -183,7 +183,7 @@ iterator undumpNextAccount*(gzFile: string): UndumpAccounts =
of UndumpProofs:
if flds.len == 1:
data.data.proof.add SnapProof(data: flds[0].toByteSeq)
data.data.proof.add flds[0].toByteSeq.to(SnapProof)
nProofs.dec
if nProofs <= 0:
state = UndumpCommit

View File

@ -69,7 +69,7 @@ proc dumpStorages*(
blob.mapIt(it.toHex(2)).join.toLowerAscii
proc ppStr(proof: SnapProof): string =
proof.data.ppStr
proof.to(Blob).ppStr
proc ppStr(hash: Hash256): string =
hash.data.mapIt(it.toHex(2)).join.toLowerAscii
@ -209,7 +209,7 @@ iterator undumpNextStorages*(gzFile: string): UndumpStorages =
of UndumpProofs:
if flds.len == 1:
data.data.proof.add SnapProof(data: flds[0].toByteSeq)
data.data.proof.add flds[0].toByteSeq.to(SnapProof)
nProofs.dec
if nProofs <= 0:
state = UndumpCommit

View File

@ -110,12 +110,16 @@ proc test_calcProofsListSizes*() =
for n in [0, 1, 2, 126, 127]:
let
nodeBlobsEncoded = SnapProof(data: nodeBlob).repeat(n).proofEncode
nodeSample = nodeBlob.to(SnapProof).repeat(n)
nodeBlobsEncoded = nodeSample.proofEncode
nodeBlobsDecoded = nodeBlobsEncoded.proofDecode
nodeBlobsHex = nodeBlobsEncoded.toHex
brNodesHex = brNode.repeat(n).convertTo(Blob).toHex
#echo "+++ ", n, " ", nodeBlobsEncoded.rlpFromBytes.inspect
#echo ">>> ", n, " ", nodeBlobsHex
#echo "<<< ", n, " ", brNodesHex
check nodeBlobsEncoded.len == n.proofNodesSizeMax
check nodeBlobsDecoded == nodeSample
check nodeBlobsHex == brNodesHex
# ------------------------------------------------------------------------------

View File

@ -16,7 +16,7 @@ import
eth/[common, p2p, trie/nibbles],
stew/[byteutils, interval_set, results],
unittest2,
../../nimbus/sync/[protocol, types],
../../nimbus/sync/[handlers, protocol, types],
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[
hexary_desc, hexary_envelope, hexary_error, hexary_interpolate,
@ -33,16 +33,16 @@ const
# Private helpers
# ------------------------------------------------------------------------------
proc ppNodeKeys(a: openArray[Blob], dbg = HexaryTreeDbRef(nil)): string =
proc ppNodeKeys(a: openArray[SnapProof], dbg = HexaryTreeDbRef(nil)): string =
result = "["
if dbg.isNil:
result &= a.mapIt(it.digestTo(NodeKey).pp(collapse=true)).join(",")
result &= a.mapIt(it.to(Blob).digestTo(NodeKey).pp(collapse=true)).join(",")
else:
result &= a.mapIt(it.digestTo(NodeKey).pp(dbg)).join(",")
result &= a.mapIt(it.to(Blob).digestTo(NodeKey).pp(dbg)).join(",")
result &= "]"
# ------------------------------------------------------------------------------
# Private functions
# Private functionsto(Blob)
# ------------------------------------------------------------------------------
proc print_data(
@ -206,7 +206,7 @@ proc verifyRangeProof(
# Import proof nodes
var unrefs, refs: HashSet[RepairKey] # values ignored
for rlpRec in proof:
let importError = xDb.hexaryImport(rlpRec.data, unrefs, refs).error
let importError = xDb.hexaryImport(rlpRec.to(Blob), unrefs, refs).error
if importError != HexaryError(0):
check importError == HexaryError(0)
return err(importError)
@ -224,7 +224,7 @@ proc verifyRangeProof(
#"\n",
#"\n unrefs=[", unrefs.toSeq.mapIt(it.pp(dbg)).join(","), "]",
#"\n refs=[", refs.toSeq.mapIt(it.pp(dbg)).join(","), "]",
"\n\n proof=", proof.mapIt(it.data).ppNodeKeys(dbg),
"\n\n proof=", proof.ppNodeKeys(dbg),
"\n\n first=", leafs[0].key,
"\n ", leafs[0].key.hexaryPath(rootKey,xDb).pp(dbg),
"\n\n last=", leafs[^1].key,
@ -412,7 +412,7 @@ proc test_NodeRangeProof*(
proof = rc.value.proof
# Some sizes to verify (full data list)
check rc.value.proofSize == proof.encode.len
check rc.value.proofSize == proof.proofEncode.len
check rc.value.leafsSize == leafsRlpLen
else:
# Make sure that the size calculation deliver the expected number
@ -424,7 +424,7 @@ proc test_NodeRangeProof*(
check rx.value.leafs.len == leafs.len
# Some size to verify (truncated data list)
check rx.value.proofSize == rx.value.proof.encode.len
check rx.value.proofSize == rx.value.proof.proofEncode.len
# Re-adjust proof
proof = db.hexaryRangeLeafsProof(rootKey, iv.minPt, leafs).proof
@ -442,7 +442,7 @@ proc test_NodeRangeProof*(
noisy.say "***", "n=", n,
" cutOff=", cutOff,
" leafs=", leafs.len,
" proof=", proof.mapIt(it.data).ppNodeKeys(dbg),
" proof=", proof.ppNodeKeys(dbg),
"\n\n ",
" base=", iv.minPt,
"\n ", iv.minPt.hexaryPath(rootKey,db).pp(dbg),