Objects inheritance reorg for snap sync (#1091)

* new: time_helper, types

* new: path_desc

* new: base_desc

* Re-organised objects inheritance

why:
  Previous code used macros to instantiate opaque object references. This
  has been re-implemented with OO inheritance based logic.

* Normalised trace macros

* Using distinct types for Hash256 aliases

why:
  Better control of the meaning of the hashes, all of the same format

caveat:
  The protocol handler DSL used by eth66.nim and snap1.nim uses the
  underlying type Hash256 and cannot handle the distinct alias in
  rlp and chronicles/log macros. So Hash256 is used directly (does
  not change readability as the type is clear by parameter names.)
This commit is contained in:
Jordan Hrycaj 2022-05-17 12:09:49 +01:00 committed by GitHub
parent 6a9d875fe7
commit 575c69e6ba
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 1189 additions and 879 deletions

View File

@ -36,10 +36,11 @@
## easily. When the hooks aren't set, default behaviour applies. ## easily. When the hooks aren't set, default behaviour applies.
import import
chronos, stint, chronicles, stew/byteutils, macros, chronicles,
eth/[common/eth_types, rlp, p2p], chronos,
eth/p2p/[rlpx, private/p2p_types, blockchain_utils], eth/[common/eth_types, p2p, p2p/private/p2p_types, p2p/blockchain_utils],
".."/[sync_types, trace_helper], stew/byteutils,
".."/trace_helper,
./pickeled_eth_tracers ./pickeled_eth_tracers
export export
@ -51,7 +52,7 @@ export
type type
NewBlockHashesAnnounce* = object NewBlockHashesAnnounce* = object
hash: BlockHash hash: Hash256
number: BlockNumber number: BlockNumber
NewBlockAnnounce* = EthBlock NewBlockAnnounce* = EthBlock
@ -62,11 +63,11 @@ type
PeerState* = ref object PeerState* = ref object
initialized*: bool initialized*: bool
bestBlockHash*: BlockHash bestBlockHash*: Hash256
bestDifficulty*: DifficultyInt bestDifficulty*: DifficultyInt
onGetNodeData*: onGetNodeData*:
proc (peer: Peer, hashes: openArray[NodeHash], proc (peer: Peer, hashes: openArray[Hash256],
data: var seq[Blob]) {.gcsafe.} data: var seq[Blob]) {.gcsafe.}
onNodeData*: onNodeData*:
proc (peer: Peer, data: openArray[Blob]) {.gcsafe.} proc (peer: Peer, data: openArray[Blob]) {.gcsafe.}
@ -95,7 +96,7 @@ p2pProtocol eth66(version = ethVersion,
forkHash: chainForkId.crc.toBytesBE, forkHash: chainForkId.crc.toBytesBE,
forkNext: chainForkId.nextFork.toBlockNumber) forkNext: chainForkId.nextFork.toBlockNumber)
traceSending "Status (0x00) " & prettyEthProtoName, traceSendSending "Status (0x00) " & prettyEthProtoName,
peer, td=bestBlock.difficulty, peer, td=bestBlock.difficulty,
bestHash=bestBlock.blockHash.toHex, bestHash=bestBlock.blockHash.toHex,
networkId=network.networkId, networkId=network.networkId,
@ -140,25 +141,22 @@ p2pProtocol eth66(version = ethVersion,
ethVersionArg: uint, ethVersionArg: uint,
networkId: NetworkId, networkId: NetworkId,
totalDifficulty: DifficultyInt, totalDifficulty: DifficultyInt,
bestHash: BlockHash, bestHash: Hash256,
genesisHash: BlockHash, genesisHash: Hash256,
forkId: ForkId) = forkId: ForkId) =
traceReceived "Status (0x00)", traceRecvReceived "Status (0x00)",
peer, td=totalDifficulty, peer, networkId, totalDifficulty, bestHash, genesisHash,
bestHash=bestHash.toHex,
networkId,
genesis=genesisHash.toHex,
forkHash=forkId.forkHash.toHex, forkNext=forkId.forkNext forkHash=forkId.forkHash.toHex, forkNext=forkId.forkNext
# User message 0x01: NewBlockHashes. # User message 0x01: NewBlockHashes.
proc newBlockHashes(peer: Peer, hashes: openArray[NewBlockHashesAnnounce]) = proc newBlockHashes(peer: Peer, hashes: openArray[NewBlockHashesAnnounce]) =
traceGossipDiscarding "NewBlockHashes (0x01)", traceSendGossipDiscarding "NewBlockHashes (0x01)",
peer, hashes=hashes.len peer, hashes=hashes.len
discard discard
# User message 0x02: Transactions. # User message 0x02: Transactions.
proc transactions(peer: Peer, transactions: openArray[Transaction]) = proc transactions(peer: Peer, transactions: openArray[Transaction]) =
traceGossipDiscarding "Transactions (0x02)", traceSendGossipDiscarding "Transactions (0x02)",
peer, transactions=transactions.len peer, transactions=transactions.len
discard discard
@ -167,18 +165,18 @@ p2pProtocol eth66(version = ethVersion,
proc getBlockHeaders(peer: Peer, request: BlocksRequest) = proc getBlockHeaders(peer: Peer, request: BlocksRequest) =
if tracePackets: if tracePackets:
if request.maxResults == 1 and request.startBlock.isHash: if request.maxResults == 1 and request.startBlock.isHash:
traceReceived "GetBlockHeaders/Hash (0x03)", traceRecvReceived "GetBlockHeaders/Hash (0x03)",
peer, blockHash=($request.startBlock.hash), count=1 peer, blockHash=($request.startBlock.hash), count=1
elif request.maxResults == 1: elif request.maxResults == 1:
traceReceived "GetBlockHeaders (0x03)", traceRecvReceived "GetBlockHeaders (0x03)",
peer, `block`=request.startBlock.number, count=1 peer, `block`=request.startBlock.number, count=1
elif request.startBlock.isHash: elif request.startBlock.isHash:
traceReceived "GetBlockHeaders/Hash (0x03)", traceRecvReceived "GetBlockHeaders/Hash (0x03)",
peer, firstBlockHash=($request.startBlock.hash), peer, firstBlockHash=($request.startBlock.hash),
count=request.maxResults, count=request.maxResults,
step=traceStep(request) step=traceStep(request)
else: else:
traceReceived "GetBlockHeaders (0x03)", traceRecvReceived "GetBlockHeaders (0x03)",
peer, firstBlock=request.startBlock.number, peer, firstBlock=request.startBlock.number,
count=request.maxResults, count=request.maxResults,
step=traceStep(request) step=traceStep(request)
@ -191,10 +189,10 @@ p2pProtocol eth66(version = ethVersion,
let headers = peer.network.chain.getBlockHeaders(request) let headers = peer.network.chain.getBlockHeaders(request)
if headers.len > 0: if headers.len > 0:
traceReplying "with BlockHeaders (0x04)", traceSendReplying "with BlockHeaders (0x04)",
peer, sent=headers.len, requested=request.maxResults peer, sent=headers.len, requested=request.maxResults
else: else:
traceReplying "EMPTY BlockHeaders (0x04)", traceSendReplying "EMPTY BlockHeaders (0x04)",
peer, sent=0, requested=request.maxResults peer, sent=0, requested=request.maxResults
await response.send(headers) await response.send(headers)
@ -204,8 +202,8 @@ p2pProtocol eth66(version = ethVersion,
requestResponse: requestResponse:
# User message 0x05: GetBlockBodies. # User message 0x05: GetBlockBodies.
proc getBlockBodies(peer: Peer, hashes: openArray[BlockHash]) = proc getBlockBodies(peer: Peer, hashes: openArray[Hash256]) =
traceReceived "GetBlockBodies (0x05)", traceRecvReceived "GetBlockBodies (0x05)",
peer, hashes=hashes.len peer, hashes=hashes.len
if hashes.len > maxBodiesFetch: if hashes.len > maxBodiesFetch:
debug "GetBlockBodies (0x05) requested too many bodies", debug "GetBlockBodies (0x05) requested too many bodies",
@ -215,10 +213,10 @@ p2pProtocol eth66(version = ethVersion,
let bodies = peer.network.chain.getBlockBodies(hashes) let bodies = peer.network.chain.getBlockBodies(hashes)
if bodies.len > 0: if bodies.len > 0:
traceReplying "with BlockBodies (0x06)", traceSendReplying "with BlockBodies (0x06)",
peer, sent=bodies.len, requested=hashes.len peer, sent=bodies.len, requested=hashes.len
else: else:
traceReplying "EMPTY BlockBodies (0x06)", traceSendReplying "EMPTY BlockBodies (0x06)",
peer, sent=0, requested=hashes.len peer, sent=0, requested=hashes.len
await response.send(bodies) await response.send(bodies)
@ -230,26 +228,26 @@ p2pProtocol eth66(version = ethVersion,
proc newBlock(peer: Peer, bh: EthBlock, totalDifficulty: DifficultyInt) = proc newBlock(peer: Peer, bh: EthBlock, totalDifficulty: DifficultyInt) =
# (Note, needs to use `EthBlock` instead of its alias `NewBlockAnnounce` # (Note, needs to use `EthBlock` instead of its alias `NewBlockAnnounce`
# because either `p2pProtocol` or RLPx doesn't work with an alias.) # because either `p2pProtocol` or RLPx doesn't work with an alias.)
traceGossipDiscarding "NewBlock (0x07)", traceSendGossipDiscarding "NewBlock (0x07)",
peer, totalDifficulty, peer, totalDifficulty,
blockNumber = bh.header.blockNumber, blockNumber = bh.header.blockNumber,
blockDifficulty = bh.header.difficulty blockDifficulty = bh.header.difficulty
discard discard
# User message 0x08: NewPooledTransactionHashes. # User message 0x08: NewPooledTransactionHashes.
proc newPooledTransactionHashes(peer: Peer, hashes: openArray[TxHash]) = proc newPooledTransactionHashes(peer: Peer, txHashes: openArray[Hash256]) =
traceGossipDiscarding "NewPooledTransactionHashes (0x08)", traceSendGossipDiscarding "NewPooledTransactionHashes (0x08)",
peer, hashes=hashes.len peer, hashes=txHashes.len
discard discard
requestResponse: requestResponse:
# User message 0x09: GetPooledTransactions. # User message 0x09: GetPooledTransactions.
proc getPooledTransactions(peer: Peer, hashes: openArray[TxHash]) = proc getPooledTransactions(peer: Peer, txHashes: openArray[Hash256]) =
traceReceived "GetPooledTransactions (0x09)", traceRecvReceived "GetPooledTransactions (0x09)",
peer, hashes=hashes.len peer, hashes=txHashes.len
traceReplying "EMPTY PooledTransactions (0x10)", traceSendReplying "EMPTY PooledTransactions (0x10)",
peer, sent=0, requested=hashes.len peer, sent=0, requested=txHashes.len
await response.send([]) await response.send([])
# User message 0x0a: PooledTransactions. # User message 0x0a: PooledTransactions.
@ -258,22 +256,22 @@ p2pProtocol eth66(version = ethVersion,
nextId 0x0d nextId 0x0d
# User message 0x0d: GetNodeData. # User message 0x0d: GetNodeData.
proc getNodeData(peer: Peer, hashes: openArray[NodeHash]) = proc getNodeData(peer: Peer, nodeHashes: openArray[Hash256]) =
traceReceived "GetNodeData (0x0d)", peer, traceRecvReceived "GetNodeData (0x0d)", peer,
hashes=hashes.len hashes=nodeHashes.len
var data: seq[Blob] var data: seq[Blob]
if not peer.state.onGetNodeData.isNil: if not peer.state.onGetNodeData.isNil:
peer.state.onGetNodeData(peer, hashes, data) peer.state.onGetNodeData(peer, nodeHashes, data)
else: else:
data = peer.network.chain.getStorageNodes(hashes) data = peer.network.chain.getStorageNodes(nodeHashes)
if data.len > 0: if data.len > 0:
traceReplying "with NodeData (0x0e)", peer, traceSendReplying "with NodeData (0x0e)", peer,
sent=data.len, requested=hashes.len sent=data.len, requested=nodeHashes.len
else: else:
traceReplying "EMPTY NodeData (0x0e)", peer, traceSendReplying "EMPTY NodeData (0x0e)", peer,
sent=0, requested=hashes.len sent=0, requested=nodeHashes.len
await peer.nodeData(data) await peer.nodeData(data)
@ -284,17 +282,17 @@ p2pProtocol eth66(version = ethVersion,
# know if this is a valid reply ("Got reply") or something else. # know if this is a valid reply ("Got reply") or something else.
peer.state.onNodeData(peer, data) peer.state.onNodeData(peer, data)
else: else:
traceDiscarding "NodeData (0x0e)", peer, traceSendDiscarding "NodeData (0x0e)", peer,
bytes=data.len bytes=data.len
requestResponse: requestResponse:
# User message 0x0f: GetReceipts. # User message 0x0f: GetReceipts.
proc getReceipts(peer: Peer, hashes: openArray[BlockHash]) = proc getReceipts(peer: Peer, hashes: openArray[Hash256]) =
traceReceived "GetReceipts (0x0f)", traceRecvReceived "GetReceipts (0x0f)", peer,
peer, hashes=hashes.len hashes=hashes.len
traceReplying "EMPTY Receipts (0x10)", traceSendReplying "EMPTY Receipts (0x10)", peer,
peer, sent=0, requested=hashes.len sent=0, requested=hashes.len
await response.send([]) await response.send([])
# TODO: implement `getReceipts` and reactivate this code # TODO: implement `getReceipts` and reactivate this code
# await response.send(peer.network.chain.getReceipts(hashes)) # await response.send(peer.network.chain.getReceipts(hashes))

View File

@ -9,15 +9,15 @@
# at your option. This file may not be copied, modified, or distributed # at your option. This file may not be copied, modified, or distributed
# except according to those terms. # except according to those terms.
template traceReceived*(msg: static[string], args: varargs[untyped]) = template traceRecvReceived*(msg: static[string], args: varargs[untyped]) =
tracePacket "<< " & prettyEthProtoName & " Received " & msg, tracePacket "<< " & prettyEthProtoName & " Received " & msg,
`args` `args`
template traceGot*(msg: static[string], args: varargs[untyped]) = template traceRecvGot*(msg: static[string], args: varargs[untyped]) =
tracePacket "<< " & prettyEthProtoName & " Got " & msg, tracePacket "<< " & prettyEthProtoName & " Got " & msg,
`args` `args`
template traceProtocolViolation*(msg: static[string], args: varargs[untyped]) = template traceRecvProtocolViolation*(msg: static[string], args: varargs[untyped]) =
tracePacketError "<< " & prettyEthProtoName & " Protocol violation, " & msg, tracePacketError "<< " & prettyEthProtoName & " Protocol violation, " & msg,
`args` `args`
@ -25,27 +25,27 @@ template traceRecvError*(msg: static[string], args: varargs[untyped]) =
traceNetworkError "<< " & prettyEthProtoName & " Error " & msg, traceNetworkError "<< " & prettyEthProtoName & " Error " & msg,
`args` `args`
template traceTimeoutWaiting*(msg: static[string], args: varargs[untyped]) = template traceRecvTimeoutWaiting*(msg: static[string], args: varargs[untyped]) =
traceTimeout "<< " & prettyEthProtoName & " Timeout waiting " & msg, traceTimeout "<< " & prettyEthProtoName & " Timeout waiting " & msg,
`args` `args`
template traceSending*(msg: static[string], args: varargs[untyped]) = template traceSendSending*(msg: static[string], args: varargs[untyped]) =
tracePacket ">> " & prettyEthProtoName & " Sending " & msg, tracePacket ">> " & prettyEthProtoName & " Sending " & msg,
`args` `args`
template traceReplying*(msg: static[string], args: varargs[untyped]) = template traceSendReplying*(msg: static[string], args: varargs[untyped]) =
tracePacket ">> " & prettyEthProtoName & " Replying " & msg, tracePacket ">> " & prettyEthProtoName & " Replying " & msg,
`args` `args`
template traceDelaying*(msg: static[string], args: varargs[untyped]) = template traceSendDelaying*(msg: static[string], args: varargs[untyped]) =
tracePacket ">>" & prettyEthProtoName & " Delaying " & msg, tracePacket ">>" & prettyEthProtoName & " Delaying " & msg,
`args` `args`
template traceGossipDiscarding*(msg: static[string], args: varargs[untyped]) = template traceSendGossipDiscarding*(msg: static[string], args: varargs[untyped]) =
traceGossip "<< " & prettyEthProtoName & " Discarding " & msg, traceGossip "<< " & prettyEthProtoName & " Discarding " & msg,
`args` `args`
template traceDiscarding*(msg: static[string], args: varargs[untyped]) = template traceSendDiscarding*(msg: static[string], args: varargs[untyped]) =
tracePacket "<< " & prettyEthProtoName & " Discarding " & msg, tracePacket "<< " & prettyEthProtoName & " Discarding " & msg,
`args` `args`

View File

@ -9,15 +9,15 @@
# at your option. This file may not be copied, modified, or distributed # at your option. This file may not be copied, modified, or distributed
# except according to those terms. # except according to those terms.
template traceReceived*(msg: static[string], args: varargs[untyped]) = template traceRecvReceived*(msg: static[string], args: varargs[untyped]) =
tracePacket "<< " & prettySnapProtoName & " Received " & msg, tracePacket "<< " & prettySnapProtoName & " Received " & msg,
`args` `args`
template traceGot*(msg: static[string], args: varargs[untyped]) = template traceRecvGot*(msg: static[string], args: varargs[untyped]) =
tracePacket "<< " & prettySnapProtoName & " Got " & msg, tracePacket "<< " & prettySnapProtoName & " Got " & msg,
`args` `args`
template traceProtocolViolation*(msg: static[string], args: varargs[untyped]) = template traceRecvProtocolViolation*(msg: static[string], args: varargs[untyped]) =
tracePacketError "<< " & prettySnapProtoName & " Protocol violation, " & msg, tracePacketError "<< " & prettySnapProtoName & " Protocol violation, " & msg,
`args` `args`
@ -25,15 +25,15 @@ template traceRecvError*(msg: static[string], args: varargs[untyped]) =
traceNetworkError "<< " & prettySnapProtoName & " Error " & msg, traceNetworkError "<< " & prettySnapProtoName & " Error " & msg,
`args` `args`
template traceTimeoutWaiting*(msg: static[string], args: varargs[untyped]) = template traceRecvTimeoutWaiting*(msg: static[string], args: varargs[untyped]) =
traceTimeout "<< " & prettySnapProtoName & " Timeout waiting " & msg, traceTimeout "<< " & prettySnapProtoName & " Timeout waiting " & msg,
`args` `args`
template traceSending*(msg: static[string], args: varargs[untyped]) = template traceSendSending*(msg: static[string], args: varargs[untyped]) =
tracePacket ">> " & prettySnapProtoName & " Sending " & msg, tracePacket ">> " & prettySnapProtoName & " Sending " & msg,
`args` `args`
template traceReplying*(msg: static[string], args: varargs[untyped]) = template traceSendReplying*(msg: static[string], args: varargs[untyped]) =
tracePacket ">> " & prettySnapProtoName & " Replying " & msg, tracePacket ">> " & prettySnapProtoName & " Replying " & msg,
`args` `args`

View File

@ -200,12 +200,10 @@ import
std/options, std/options,
chronicles, chronicles,
chronos, chronos,
eth/[common/eth_types, rlp, p2p], eth/[common/eth_types, p2p, p2p/private/p2p_types],
eth/p2p/[rlpx, private/p2p_types],
nimcrypto/hash, nimcrypto/hash,
stew/byteutils, stew/byteutils,
stint, ".."/[snap/path_desc, trace_helper],
".."/[sync_types, trace_helper],
../../constants, ../../constants,
./pickeled_snap_tracers ./pickeled_snap_tracers
@ -281,7 +279,7 @@ proc append(rlpWriter: var RlpWriter, t: SnapAccount, account: Account) =
# RLP serialisation for `LeafPath`. # RLP serialisation for `LeafPath`.
template read(rlp: var Rlp, _: type LeafPath): LeafPath = template read(rlp: var Rlp, T: type LeafPath): T =
rlp.read(array[sizeof(LeafPath().toBytes), byte]).toLeafPath rlp.read(array[sizeof(LeafPath().toBytes), byte]).toLeafPath
template append(rlpWriter: var RlpWriter, leafPath: LeafPath) = template append(rlpWriter: var RlpWriter, leafPath: LeafPath) =
@ -295,15 +293,15 @@ p2pProtocol snap1(version = 1,
requestResponse: requestResponse:
# User message 0x00: GetAccountRange. # User message 0x00: GetAccountRange.
# Note: `origin` and `limit` differs from the specification to match Geth. # Note: `origin` and `limit` differs from the specification to match Geth.
proc getAccountRange(peer: Peer, rootHash: TrieHash, proc getAccountRange(peer: Peer, rootHash: Hash256,
# Next line differs from spec to match Geth. # Next line differs from spec to match Geth.
origin: LeafPath, limit: LeafPath, origin: LeafPath, limit: LeafPath,
responseBytes: uint64) = responseBytes: uint64) =
traceReceived "GetAccountRange (0x00)", peer, traceRecvReceived "GetAccountRange (0x00)", peer,
accountRange=pathRange(origin, limit), accountRange=pathRange(origin, limit),
stateRoot=($rootHash), responseBytes stateRoot=($rootHash), responseBytes
traceReplying "EMPTY AccountRange (0x01)", peer, sent=0 traceSendReplying "EMPTY AccountRange (0x01)", peer, sent=0
await response.send(@[], @[]) await response.send(@[], @[])
# User message 0x01: AccountRange. # User message 0x01: AccountRange.
@ -313,7 +311,7 @@ p2pProtocol snap1(version = 1,
requestResponse: requestResponse:
# User message 0x02: GetStorageRanges. # User message 0x02: GetStorageRanges.
# Note: `origin` and `limit` differs from the specification to match Geth. # Note: `origin` and `limit` differs from the specification to match Geth.
proc getStorageRanges(peer: Peer, rootHash: TrieHash, proc getStorageRanges(peer: Peer, rootHash: Hash256,
accounts: openArray[LeafPath], accounts: openArray[LeafPath],
# Next line differs from spec to match Geth. # Next line differs from spec to match Geth.
origin: openArray[byte], limit: openArray[byte], origin: openArray[byte], limit: openArray[byte],
@ -339,12 +337,12 @@ p2pProtocol snap1(version = 1,
if definiteFullRange: if definiteFullRange:
# Fetching storage for multiple accounts. # Fetching storage for multiple accounts.
traceReceived "GetStorageRanges/A (0x02)", peer, traceRecvReceived "GetStorageRanges/A (0x02)", peer,
accountPaths=accounts.len, accountPaths=accounts.len,
stateRoot=($rootHash), responseBytes stateRoot=($rootHash), responseBytes
elif accounts.len == 1: elif accounts.len == 1:
# Fetching partial storage for one account, aka. "large contract". # Fetching partial storage for one account, aka. "large contract".
traceReceived "GetStorageRanges/S (0x02)", peer, traceRecvReceived "GetStorageRanges/S (0x02)", peer,
accountPaths=1, accountPaths=1,
storageRange=(describe(origin) & '-' & describe(limit)), storageRange=(describe(origin) & '-' & describe(limit)),
stateRoot=($rootHash), responseBytes stateRoot=($rootHash), responseBytes
@ -352,12 +350,12 @@ p2pProtocol snap1(version = 1,
# This branch is separated because these shouldn't occur. It's not # This branch is separated because these shouldn't occur. It's not
# really specified what happens when there are multiple accounts and # really specified what happens when there are multiple accounts and
# non-default path range. # non-default path range.
traceReceived "GetStorageRanges/AS?? (0x02)", peer, traceRecvReceived "GetStorageRanges/AS?? (0x02)", peer,
accountPaths=accounts.len, accountPaths=accounts.len,
storageRange=(describe(origin) & '-' & describe(limit)), storageRange=(describe(origin) & '-' & describe(limit)),
stateRoot=($rootHash), responseBytes stateRoot=($rootHash), responseBytes
traceReplying "EMPTY StorageRanges (0x03)", peer, sent=0 traceSendReplying "EMPTY StorageRanges (0x03)", peer, sent=0
await response.send(@[], @[]) await response.send(@[], @[])
# User message 0x03: StorageRanges. # User message 0x03: StorageRanges.
@ -367,12 +365,12 @@ p2pProtocol snap1(version = 1,
# User message 0x04: GetByteCodes. # User message 0x04: GetByteCodes.
requestResponse: requestResponse:
proc getByteCodes(peer: Peer, hashes: openArray[NodeHash], proc getByteCodes(peer: Peer, nodeHashes: openArray[Hash256],
responseBytes: uint64) = responseBytes: uint64) =
traceReceived "GetByteCodes (0x04)", peer, traceRecvReceived "GetByteCodes (0x04)", peer,
hashes=hashes.len, responseBytes hashes=nodeHashes.len, responseBytes
traceReplying "EMPTY ByteCodes (0x05)", peer, sent=0 traceSendReplying "EMPTY ByteCodes (0x05)", peer, sent=0
await response.send(@[]) await response.send(@[])
# User message 0x05: ByteCodes. # User message 0x05: ByteCodes.
@ -380,12 +378,12 @@ p2pProtocol snap1(version = 1,
# User message 0x06: GetTrieNodes. # User message 0x06: GetTrieNodes.
requestResponse: requestResponse:
proc getTrieNodes(peer: Peer, rootHash: TrieHash, proc getTrieNodes(peer: Peer, rootHash: Hash256,
paths: openArray[InteriorPath], responseBytes: uint64) = paths: openArray[InteriorPath], responseBytes: uint64) =
traceReceived "GetTrieNodes (0x06)", peer, traceRecvReceived "GetTrieNodes (0x06)", peer,
nodePaths=paths.len, stateRoot=($rootHash), responseBytes nodePaths=paths.len, stateRoot=($rootHash), responseBytes
traceReplying "EMPTY TrieNodes (0x07)", peer, sent=0 traceSendReplying "EMPTY TrieNodes (0x07)", peer, sent=0
await response.send(@[]) await response.send(@[])
# User message 0x07: TrieNodes. # User message 0x07: TrieNodes.

View File

@ -13,22 +13,47 @@ import
chronicles, chronicles,
chronos, chronos,
eth/[common/eth_types, p2p, rlp], eth/[common/eth_types, p2p, rlp],
eth/p2p/[rlpx, peer_pool, private/p2p_types], eth/p2p/[peer_pool, private/p2p_types, rlpx],
stint, stint,
"."/[protocol, sync_types], ./protocol,
./snap/[chain_head_tracker, get_nodedata] ./snap/[base_desc, chain_head_tracker, get_nodedata, types],
./snap/pie/[sync_desc, peer_desc]
{.push raises: [Defect].} {.push raises: [Defect].}
type type
SnapSyncCtx* = ref object of SnapSync SnapSyncCtx* = ref object of SnapSyncEx
peerPool: PeerPool peerPool: PeerPool
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc fetchPeerDesc(ns: SnapSyncCtx, peer: Peer): SnapPeerEx =
## Find matching peer and remove descriptor from list
for i in 0 ..< ns.syncPeers.len:
if ns.syncPeers[i].peer == peer:
result = ns.syncPeers[i].ex
ns.syncPeers.delete(i)
return
proc new(T: type SnapPeerEx; ns: SnapSyncCtx; peer: Peer): T =
T(
ns: ns,
peer: peer,
stopped: false,
# Initial state: hunt forward, maximum uncertainty range.
syncMode: SyncHuntForward,
huntLow: 0.toBlockNumber,
huntHigh: high(BlockNumber),
huntStep: 0,
bestBlockNumber: 0.toBlockNumber)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private functions # Private functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc syncPeerLoop(sp: SyncPeer) {.async.} = proc syncPeerLoop(sp: SnapPeerEx) {.async.} =
# This basic loop just runs the head-hunter for each peer. # This basic loop just runs the head-hunter for each peer.
while not sp.stopped: while not sp.stopped:
await sp.peerHuntCanonical() await sp.peerHuntCanonical()
@ -37,54 +62,41 @@ proc syncPeerLoop(sp: SyncPeer) {.async.} =
let delayMs = if sp.syncMode == SyncLocked: 1000 else: 50 let delayMs = if sp.syncMode == SyncLocked: 1000 else: 50
await sleepAsync(chronos.milliseconds(delayMs)) await sleepAsync(chronos.milliseconds(delayMs))
proc syncPeerStart(sp: SyncPeer) =
proc syncPeerStart(sp: SnapPeerEx) =
asyncSpawn sp.syncPeerLoop() asyncSpawn sp.syncPeerLoop()
proc syncPeerStop(sp: SyncPeer) = proc syncPeerStop(sp: SnapPeerEx) =
sp.stopped = true sp.stopped = true
# TODO: Cancel SyncPeers that are running. We need clean cancellation for # TODO: Cancel running `SnapPeerEx` instances. We need clean cancellation
# this. Doing so reliably will be addressed at a later time. # for this. Doing so reliably will be addressed at a later time.
proc onPeerConnected(ns: SnapSyncCtx, protocolPeer: Peer) =
let sp = SyncPeer(
ns: ns,
peer: protocolPeer,
stopped: false,
# Initial state: hunt forward, maximum uncertainty range.
syncMode: SyncHuntForward,
huntLow: 0.toBlockNumber,
huntHigh: high(BlockNumber),
huntStep: 0,
bestBlockNumber: 0.toBlockNumber)
trace "Sync: Peer connected", peer=sp
proc onPeerConnected(ns: SnapSyncCtx, peer: Peer) =
trace "Snap: Peer connected", peer
let sp = SnapPeerEx.new(ns, peer)
sp.setupGetNodeData() sp.setupGetNodeData()
if protocolPeer.state(eth).initialized: if peer.state(eth).initialized:
# We know the hash but not the block number. # We know the hash but not the block number.
sp.bestBlockHash = protocolPeer.state(eth).bestBlockHash sp.bestBlockHash = peer.state(eth).bestBlockHash.BlockHash
#TODO: Temporarily disabled because it's useful to test the head hunter. # TODO: Temporarily disabled because it's useful to test the head hunter.
#sp.syncMode = SyncOnlyHash # sp.syncMode = SyncOnlyHash
else: else:
trace "Sync: state(eth) not initialized!" trace "Snap: state(eth) not initialized!"
ns.syncPeers.add(sp) ns.syncPeers.add(sp)
sp.syncPeerStart() sp.syncPeerStart()
proc onPeerDisconnected(ns: SnapSyncCtx, protocolPeer: Peer) = proc onPeerDisconnected(ns: SnapSyncCtx, peer: Peer) =
trace "Sync: Peer disconnected", peer=protocolPeer trace "Snap: Peer disconnected", peer
# Find matching `sp` and remove from `ns.syncPeers`.
var sp: SyncPeer = nil
for i in 0 ..< ns.syncPeers.len:
if ns.syncPeers[i].peer == protocolPeer:
sp = ns.syncPeers[i]
ns.syncPeers.delete(i)
break
if sp.isNil:
debug "Sync: Unknown peer disconnected", peer=protocolPeer
return
sp.syncPeerStop() let sp = ns.fetchPeerDesc(peer)
if sp.isNil:
debug "Snap: Disconnected from unregistered peer", peer
else:
sp.syncPeerStop()
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions

View File

@ -0,0 +1,128 @@
# Nimbus - New sync approach - A fusion of snap, trie, beam and other methods
#
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
import
eth/[common/eth_types, p2p],
stew/[byteutils, keyed_queue, results],
./types
{.push raises: [Defect].}
const
seenBlocksMax = 500
## Internal size of LRU cache (for debugging)
type
SnapStat* = distinct int
SnapPeerStatsOk = object
reorgDetected*: SnapStat
getBlockHeaders*: SnapStat
getNodeData*: SnapStat
SnapPeerStatsMinor = object
timeoutBlockHeaders*: SnapStat
unexpectedBlockHash*: SnapStat
SnapPeerStatsMajor = object
networkErrors*: SnapStat
excessBlockHeaders*: SnapStat
wrongBlockHeader*: SnapStat
SnapPeerStats* = object
## Statistics counters for events associated with this peer.
## These may be used to recognise errors and select good peers.
ok*: SnapPeerStatsOk
minor*: SnapPeerStatsMinor
major*: SnapPeerStatsMajor
SnapPeerMode* = enum
## The current state of tracking the peer's canonical chain head.
## `bestBlockNumber` is only valid when this is `SyncLocked`.
SyncLocked
SyncOnlyHash
SyncHuntForward
SyncHuntBackward
SyncHuntRange
SyncHuntRangeFinal
SnapPeerBase* = ref object of RootObj
## Peer state tracking.
ns*: SnapSyncBase ## Opaque object reference
peer*: Peer ## eth p2pProtocol
stopped*: bool
pendingGetBlockHeaders*:bool
stats*: SnapPeerStats
# Peer canonical chain head ("best block") search state.
syncMode*: SnapPeerMode
bestBlockNumber*: BlockNumber
bestBlockHash*: BlockHash
huntLow*: BlockNumber ## Recent highest known present block.
huntHigh*: BlockNumber ## Recent lowest known absent block.
huntStep*: typeof(BlocksRequest.skip) # aka uint
# State root to fetch state for.
# This changes during sync and is slightly different for each peer.
syncStateRoot*: Option[TrieHash]
startedFetch*: bool
stopThisState*: bool
SnapSyncBase* = ref object of RootObj
## Shared state among all peers of a snap syncing node.
seenBlock: KeyedQueue[array[32,byte],BlockNumber]
## Temporary for pretty debugging, BlockHash keyed lru cache
syncPeers*: seq[SnapPeerBase]
## Peer state tracking
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc `$`*(sp: SnapPeerBase): string =
$sp.peer
proc inc(stat: var SnapStat) {.borrow.}
# ------------------------------------------------------------------------------
# Public functions, debugging helpers (will go away eventually)
# ------------------------------------------------------------------------------
proc pp*(sn: SnapSyncBase; bh: BlockHash): string =
## Pretty printer for debugging
let rc = sn.seenBlock.lruFetch(bh.untie.data)
if rc.isOk:
return "#" & $rc.value
$bh.untie.data.toHex
proc pp*(sn: SnapSyncBase; bh: BlockHash; bn: BlockNumber): string =
## Pretty printer for debugging
let rc = sn.seenBlock.lruFetch(bh.untie.data)
if rc.isOk:
return "#" & $rc.value
"#" & $sn.seenBlock.lruAppend(bh.untie.data, bn, seenBlocksMax)
proc pp*(sn: SnapSyncBase; bhn: HashOrNum): string =
if not bhn.isHash:
return "num(#" & $bhn.number & ")"
let rc = sn.seenBlock.lruFetch(bhn.hash.data)
if rc.isOk:
return "hash(#" & $rc.value & ")"
return "hash(" & $bhn.hash.data.toHex & ")"
proc seen*(sn: SnapSyncBase; bh: BlockHash; bn: BlockNumber) =
## Register for pretty printing
if not sn.seenBlock.lruFetch(bh.untie.data).isOk:
discard sn.seenBlock.lruAppend(bh.untie.data, bn, seenBlocksMax)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -58,16 +58,16 @@
## previously knew can become unavailable on the peer. So we must detect when ## previously knew can become unavailable on the peer. So we must detect when
## the current best block disappears and be able to reduce block number. ## the current best block disappears and be able to reduce block number.
{.push raises: [Defect].}
import import
std/bitops, std/bitops,
chronos, stint, chronicles, stew/byteutils, chronicles,
eth/[common/eth_types, rlp, p2p], chronos,
eth/p2p/[rlpx, private/p2p_types], eth/[common/eth_types, p2p, p2p/private/p2p_types],
../../p2p/chain/chain_desc, ../../p2p/chain/chain_desc,
".."/[protocol, protocol/pickeled_eth_tracers, sync_types, trace_helper], ".."/[protocol, protocol/pickeled_eth_tracers, trace_helper],
./pie/slicer "."/[base_desc, pie/peer_desc, pie/slicer, types]
{.push raises: [Defect].}
const const
syncLockedMinimumReply = 8 syncLockedMinimumReply = 8
@ -104,56 +104,86 @@ const
## Expansion factor during `SyncHuntBackward` exponential search. ## Expansion factor during `SyncHuntBackward` exponential search.
## 2 is chosen for better convergence when tracking a chain reorg. ## 2 is chosen for better convergence when tracking a chain reorg.
doAssert syncLockedMinimumReply >= 2 static:
doAssert syncLockedMinimumReply >= syncLockedQueryOverlap + 2 doAssert syncLockedMinimumReply >= 2
doAssert syncLockedQuerySize <= maxHeadersFetch doAssert syncLockedMinimumReply >= syncLockedQueryOverlap + 2
doAssert syncHuntQuerySize >= 1 and syncHuntQuerySize <= maxHeadersFetch doAssert syncLockedQuerySize <= maxHeadersFetch
doAssert syncHuntForwardExpandShift >= 1 and syncHuntForwardExpandShift <= 8 doAssert syncHuntQuerySize >= 1 and syncHuntQuerySize <= maxHeadersFetch
doAssert syncHuntBackwardExpandShift >= 1 and syncHuntBackwardExpandShift <= 8 doAssert syncHuntForwardExpandShift >= 1 and syncHuntForwardExpandShift <= 8
doAssert syncHuntBackwardExpandShift >= 1 and syncHuntBackwardExpandShift <= 8
# ------------------------------------------------------------------------------
# Private logging helpers
# ------------------------------------------------------------------------------
proc clearSyncStateRoot(sp: SyncPeer) = proc traceSyncLocked(sp: SnapPeerEx, bestNumber: BlockNumber,
bestHash: BlockHash) =
## Trace messages when peer canonical head is confirmed or updated.
let bestBlock = sp.ns.pp(bestHash,bestNumber)
if sp.syncMode != SyncLocked:
debug "Snap: Now tracking chain head of peer", peer=sp, bestBlock
elif bestNumber > sp.bestBlockNumber:
if bestNumber == sp.bestBlockNumber + 1:
debug "Snap: Peer chain head advanced one block", peer=sp,
advance=1, bestBlock
else:
debug "Snap: Peer chain head advanced some blocks", peer=sp,
advance=(sp.bestBlockNumber - bestNumber), bestBlock
elif bestNumber < sp.bestBlockNumber or bestHash != sp.bestBlockHash:
debug "Snap: Peer chain head reorg detected", peer=sp,
advance=(sp.bestBlockNumber - bestNumber), bestBlock
# proc peerSyncChainTrace(sp: SnapPeerEx) =
# ## To be called after `peerSyncChainRequest` has updated state.
# case sp.syncMode:
# of SyncLocked:
# trace "Snap: SyncLocked",
# bestBlock=sp.bestBlockNumber, bestBlockHash=($sp.bestBlockHash)
# of SyncOnlyHash:
# trace "Snap: OnlyHash", bestBlockHash=($sp.bestBlockHash)
# of SyncHuntForward:
# template highMax(n: BlockNumber): string =
# if n == high(BlockNumber): "max" else: $n
# trace "Snap: HuntForward",
# low=sp.huntLow, high=highMax(sp.huntHigh), step=sp.huntStep
# of SyncHuntBackward:
# trace "Snap: HuntBackward",
# low=sp.huntLow, high=sp.huntHigh, step=sp.huntStep
# of SyncHuntRange:
# trace "Snap: HuntRange",
# low=sp.huntLow, high=sp.huntHigh, step=sp.huntStep
# of SyncHuntRangeFinal:
# trace "Snap: HuntRangeFinal",
# low=sp.huntLow, high=sp.huntHigh, step=1
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc clearSyncStateRoot(sp: SnapPeerEx) =
if sp.syncStateRoot.isSome: if sp.syncStateRoot.isSome:
debug "Sync: Stopping state sync from this peer", peer=sp debug "Snap: Stopping state sync from this peer", peer=sp
sp.syncStateRoot = none(TrieHash) sp.syncStateRoot = none(TrieHash)
proc setSyncStateRoot(sp: SyncPeer, blockNumber: BlockNumber, proc setSyncStateRoot(sp: SnapPeerEx, blockNumber: BlockNumber,
blockHash: BlockHash, stateRoot: TrieHash) = blockHash: BlockHash, stateRoot: TrieHash) =
let thisBlock = sp.ns.pp(blockHash,blockNumber)
if sp.syncStateRoot.isNone: if sp.syncStateRoot.isNone:
debug "Sync: Starting state sync from this peer", peer=sp, debug "Snap: Starting state sync from this peer", peer=sp,
`block`=blockNumber, blockHash=($blockHash), stateRoot=($stateRoot) thisBlock, stateRoot
elif sp.syncStateRoot.unsafeGet != stateRoot: elif sp.syncStateRoot.unsafeGet != stateRoot:
trace "Sync: Adjusting state sync root from this peer", peer=sp, trace "Snap: Adjusting state sync root from this peer", peer=sp,
`block`=blockNumber, blockHash=($blockHash), stateRoot=($stateRoot) thisBlock, stateRoot
sp.syncStateRoot = some(stateRoot) sp.syncStateRoot = some(stateRoot)
if not sp.startedFetch: if not sp.startedFetch:
sp.startedFetch = true sp.startedFetch = true
trace "Sync: Starting to download block state", peer=sp, trace "Snap: Starting to download block state", peer=sp,
`block`=blockNumber, blockHash=($blockHash), stateRoot=($stateRoot) thisBlock, stateRoot
asyncSpawn sp.stateFetch() asyncSpawn sp.stateFetch()
proc traceSyncLocked(sp: SyncPeer, bestNumber: BlockNumber, proc setSyncLocked(sp: SnapPeerEx, bestNumber: BlockNumber,
bestHash: BlockHash) =
## Trace messages when peer canonical head is confirmed or updated.
if sp.syncMode != SyncLocked:
debug "Sync: Now tracking chain head of peer",
`block`=bestNumber, blockHash=($bestHash), peer=sp
elif bestNumber > sp.bestBlockNumber:
if bestNumber == sp.bestBlockNumber + 1:
debug "Sync: Peer chain head advanced one block", peer=sp,
advance=1, `block`=bestNumber, blockHash=($bestHash)
else:
debug "Sync: Peer chain head advanced some blocks", peer=sp,
advance=(sp.bestBlockNumber - bestNumber),
`block`=bestNumber, blockHash=($bestHash)
elif bestNumber < sp.bestBlockNumber or bestHash != sp.bestBlockHash:
debug "Sync: Peer chain head reorg detected", peer=sp,
advance=(sp.bestBlockNumber - bestNumber),
`block`=bestNumber, blockHash=($bestHash)
proc setSyncLocked(sp: SyncPeer, bestNumber: BlockNumber,
bestHash: BlockHash) = bestHash: BlockHash) =
## Actions to take when peer canonical head is confirmed or updated. ## Actions to take when peer canonical head is confirmed or updated.
sp.traceSyncLocked(bestNumber, bestHash) sp.traceSyncLocked(bestNumber, bestHash)
@ -161,7 +191,7 @@ proc setSyncLocked(sp: SyncPeer, bestNumber: BlockNumber,
sp.bestBlockHash = bestHash sp.bestBlockHash = bestHash
sp.syncMode = SyncLocked sp.syncMode = SyncLocked
proc setHuntBackward(sp: SyncPeer, lowestAbsent: BlockNumber) = proc setHuntBackward(sp: SnapPeerEx, lowestAbsent: BlockNumber) =
## Start exponential search mode backward due to new uncertainty. ## Start exponential search mode backward due to new uncertainty.
sp.syncMode = SyncHuntBackward sp.syncMode = SyncHuntBackward
sp.huntStep = 0 sp.huntStep = 0
@ -171,7 +201,7 @@ proc setHuntBackward(sp: SyncPeer, lowestAbsent: BlockNumber) =
sp.huntHigh = if lowestAbsent > 0: lowestAbsent else: 1.toBlockNumber sp.huntHigh = if lowestAbsent > 0: lowestAbsent else: 1.toBlockNumber
sp.clearSyncStateRoot() sp.clearSyncStateRoot()
proc setHuntForward(sp: SyncPeer, highestPresent: BlockNumber) = proc setHuntForward(sp: SnapPeerEx, highestPresent: BlockNumber) =
## Start exponential search mode forward due to new uncertainty. ## Start exponential search mode forward due to new uncertainty.
sp.syncMode = SyncHuntForward sp.syncMode = SyncHuntForward
sp.huntStep = 0 sp.huntStep = 0
@ -179,7 +209,7 @@ proc setHuntForward(sp: SyncPeer, highestPresent: BlockNumber) =
sp.huntHigh = high(BlockNumber) sp.huntHigh = high(BlockNumber)
sp.clearSyncStateRoot() sp.clearSyncStateRoot()
proc updateHuntAbsent(sp: SyncPeer, lowestAbsent: BlockNumber) = proc updateHuntAbsent(sp: SnapPeerEx, lowestAbsent: BlockNumber) =
## Converge uncertainty range backward. ## Converge uncertainty range backward.
if lowestAbsent < sp.huntHigh: if lowestAbsent < sp.huntHigh:
sp.huntHigh = lowestAbsent sp.huntHigh = lowestAbsent
@ -190,7 +220,7 @@ proc updateHuntAbsent(sp: SyncPeer, lowestAbsent: BlockNumber) =
sp.setHuntBackward(lowestAbsent) sp.setHuntBackward(lowestAbsent)
sp.clearSyncStateRoot() sp.clearSyncStateRoot()
proc updateHuntPresent(sp: SyncPeer, highestPresent: BlockNumber) = proc updateHuntPresent(sp: SnapPeerEx, highestPresent: BlockNumber) =
## Converge uncertainty range forward. ## Converge uncertainty range forward.
if highestPresent > sp.huntLow: if highestPresent > sp.huntLow:
sp.huntLow = highestPresent sp.huntLow = highestPresent
@ -201,7 +231,7 @@ proc updateHuntPresent(sp: SyncPeer, highestPresent: BlockNumber) =
sp.setHuntForward(highestPresent) sp.setHuntForward(highestPresent)
sp.clearSyncStateRoot() sp.clearSyncStateRoot()
proc peerSyncChainEmptyReply(sp: SyncPeer, request: BlocksRequest) = proc peerSyncChainEmptyReply(sp: SnapPeerEx, request: BlocksRequest) =
## Handle empty `GetBlockHeaders` reply. This means `request.startBlock` is ## Handle empty `GetBlockHeaders` reply. This means `request.startBlock` is
## absent on the peer. If it was `SyncLocked` there must have been a reorg ## absent on the peer. If it was `SyncLocked` there must have been a reorg
## and the previous canonical chain head has disappeared. If hunting, this ## and the previous canonical chain head has disappeared. If hunting, this
@ -213,15 +243,17 @@ proc peerSyncChainEmptyReply(sp: SyncPeer, request: BlocksRequest) =
if request.skip == 0 and not request.reverse and if request.skip == 0 and not request.reverse and
not request.startBlock.isHash and not request.startBlock.isHash and
request.startBlock.number == 1.toBlockNumber: request.startBlock.number == 1.toBlockNumber:
sp.setSyncLocked(0.toBlockNumber, sp.peer.network.chain.genesisHash) sp.setSyncLocked(0.toBlockNumber,
sp.setSyncStateRoot(0.toBlockNumber, sp.peer.network.chain.genesisHash, sp.peer.network.chain.genesisHash.BlockHash)
sp.peer.network.chain.Chain.genesisStateRoot) sp.setSyncStateRoot(0.toBlockNumber,
sp.peer.network.chain.genesisHash.BlockHash,
sp.peer.network.chain.Chain.genesisStateRoot.TrieHash)
return return
if sp.syncMode == SyncLocked or sp.syncMode == SyncOnlyHash: if sp.syncMode == SyncLocked or sp.syncMode == SyncOnlyHash:
inc sp.stats.ok.reorgDetected inc sp.stats.ok.reorgDetected
trace "Sync: Peer reorg detected, best block disappeared", peer=sp, trace "Snap: Peer reorg detected, best block disappeared", peer=sp,
`block`=request.startBlock startBlock=request.startBlock
let lowestAbsent = request.startBlock.number let lowestAbsent = request.startBlock.number
case sp.syncMode: case sp.syncMode:
@ -247,8 +279,9 @@ proc peerSyncChainEmptyReply(sp: SyncPeer, request: BlocksRequest) =
sp.bestBlockNumber = if lowestAbsent == 0.toBlockNumber: lowestAbsent sp.bestBlockNumber = if lowestAbsent == 0.toBlockNumber: lowestAbsent
else: lowestAbsent - 1.toBlockNumber else: lowestAbsent - 1.toBlockNumber
sp.bestBlockHash = default(typeof(sp.bestBlockHash)) sp.bestBlockHash = default(typeof(sp.bestBlockHash))
sp.ns.seen(sp.bestBlockHash,sp.bestBlockNumber)
proc peerSyncChainNonEmptyReply(sp: SyncPeer, request: BlocksRequest, proc peerSyncChainNonEmptyReply(sp: SnapPeerEx, request: BlocksRequest,
headers: openArray[BlockHeader]) = headers: openArray[BlockHeader]) =
## Handle non-empty `GetBlockHeaders` reply. This means `request.startBlock` ## Handle non-empty `GetBlockHeaders` reply. This means `request.startBlock`
## is present on the peer and in its canonical chain (unless the request was ## is present on the peer and in its canonical chain (unless the request was
@ -269,10 +302,10 @@ proc peerSyncChainNonEmptyReply(sp: SyncPeer, request: BlocksRequest,
if len < syncLockedMinimumReply and if len < syncLockedMinimumReply and
request.skip == 0 and not request.reverse and request.skip == 0 and not request.reverse and
len.uint < request.maxResults: len.uint < request.maxResults:
let blockHash = headers[highestIndex].blockHash let blockHash = headers[highestIndex].blockHash.BlockHash
sp.setSyncLocked(headers[highestIndex].blockNumber, blockHash) sp.setSyncLocked(headers[highestIndex].blockNumber, blockHash)
sp.setSyncStateRoot(headers[highestIndex].blockNumber, blockHash, sp.setSyncStateRoot(headers[highestIndex].blockNumber, blockHash,
headers[highestIndex].stateRoot) headers[highestIndex].stateRoot.TrieHash)
return return
# Be careful, this number is from externally supplied data and arithmetic # Be careful, this number is from externally supplied data and arithmetic
@ -302,9 +335,10 @@ proc peerSyncChainNonEmptyReply(sp: SyncPeer, request: BlocksRequest,
# still useful as a hint of what we knew recently, for example in displays. # still useful as a hint of what we knew recently, for example in displays.
if highestPresent > sp.bestBlockNumber: if highestPresent > sp.bestBlockNumber:
sp.bestBlockNumber = highestPresent sp.bestBlockNumber = highestPresent
sp.bestBlockHash = headers[highestIndex].blockHash sp.bestBlockHash = headers[highestIndex].blockHash.BlockHash
sp.ns.seen(sp.bestBlockHash,sp.bestBlockNumber)
proc peerSyncChainRequest(sp: SyncPeer, request: var BlocksRequest) = proc peerSyncChainRequest(sp: SnapPeerEx, request: var BlocksRequest) =
## Choose `GetBlockHeaders` parameters when hunting or following the canonical ## Choose `GetBlockHeaders` parameters when hunting or following the canonical
## chain of a peer. ## chain of a peer.
request = BlocksRequest( request = BlocksRequest(
@ -335,7 +369,7 @@ proc peerSyncChainRequest(sp: SyncPeer, request: var BlocksRequest) =
# We only have the hash of the recent head of the peer's canonical chain. # We only have the hash of the recent head of the peer's canonical chain.
# Like `SyncLocked`, query more than one item to detect when the # Like `SyncLocked`, query more than one item to detect when the
# canonical chain gets shorter, no change or longer. # canonical chain gets shorter, no change or longer.
request.startBlock = HashOrNum(isHash: true, hash: sp.bestBlockHash) request.startBlock = HashOrNum(isHash: true, hash: sp.bestBlockHash.untie)
request.maxResults = syncLockedQuerySize request.maxResults = syncLockedQuerySize
return return
@ -442,30 +476,11 @@ proc peerSyncChainRequest(sp: SyncPeer, request: var BlocksRequest) =
request.maxResults = syncHuntFinalSize request.maxResults = syncHuntFinalSize
sp.syncMode = SyncHuntRangeFinal sp.syncMode = SyncHuntRangeFinal
proc peerSyncChainTrace(sp: SyncPeer) = # ------------------------------------------------------------------------------
## To be called after `peerSyncChainRequest` has updated state. # Public functions
case sp.syncMode: # ------------------------------------------------------------------------------
of SyncLocked:
trace "Sync: SyncLocked",
bestBlock=sp.bestBlockNumber, bestBlockHash=($sp.bestBlockHash)
of SyncOnlyHash:
trace "Sync: OnlyHash", bestBlockHash=($sp.bestBlockHash)
of SyncHuntForward:
template highMax(n: BlockNumber): string =
if n == high(BlockNumber): "max" else: $n
trace "Sync: HuntForward",
low=sp.huntLow, high=highMax(sp.huntHigh), step=sp.huntStep
of SyncHuntBackward:
trace "Sync: HuntBackward",
low=sp.huntLow, high=sp.huntHigh, step=sp.huntStep
of SyncHuntRange:
trace "Sync: HuntRange",
low=sp.huntLow, high=sp.huntHigh, step=sp.huntStep
of SyncHuntRangeFinal:
trace "Sync: HuntRangeFinal",
low=sp.huntLow, high=sp.huntHigh, step=1
proc peerHuntCanonical*(sp: SyncPeer) {.async.} = proc peerHuntCanonical*(sp: SnapPeerEx) {.async.} =
## Query a peer to update our knowledge of its canonical chain and its best ## Query a peer to update our knowledge of its canonical chain and its best
## block, which is its canonical chain head. This can be called at any time ## block, which is its canonical chain head. This can be called at any time
## after a peer has negotiated the connection. ## after a peer has negotiated the connection.
@ -489,21 +504,8 @@ proc peerHuntCanonical*(sp: SyncPeer) {.async.} =
var request {.noinit.}: BlocksRequest var request {.noinit.}: BlocksRequest
sp.peerSyncChainRequest(request) sp.peerSyncChainRequest(request)
if tracePackets: traceSendSending "GetBlockHeaders", peer=sp, count=request.maxResults,
if request.maxResults == 1 and request.startBlock.isHash: startBlock=sp.ns.pp(request.startBlock), step=request.traceStep
traceSending "GetBlockHeaders/Hash", peer=sp,
blockHash=($request.startBlock.hash), count=1
elif request.maxResults == 1:
traceSending "GetBlockHeaders", peer=sp,
`block`=request.startBlock, count=1
elif request.startBlock.isHash:
traceSending "GetBlockHeaders/Hash", peer=sp,
firstBlockHash=request.startBlock, count=request.maxResults,
step=traceStep(request)
else:
traceSending "GetBlockHeaders", peer=sp,
firstBlock=request.startBlock, count=request.maxResults,
step=traceStep(request)
inc sp.stats.ok.getBlockHeaders inc sp.stats.ok.getBlockHeaders
var reply: typeof await sp.peer.getBlockHeaders(request) var reply: typeof await sp.peer.getBlockHeaders(request)
@ -517,34 +519,36 @@ proc peerHuntCanonical*(sp: SyncPeer) {.async.} =
return return
if reply.isNone: if reply.isNone:
traceTimeoutWaiting "for reply to GetBlockHeaders", traceRecvTimeoutWaiting "for reply to GetBlockHeaders", peer=sp
peer=sp
# TODO: Should disconnect? # TODO: Should disconnect?
inc sp.stats.minor.timeoutBlockHeaders inc sp.stats.minor.timeoutBlockHeaders
return return
let len = reply.get.headers.len let nHeaders = reply.get.headers.len
if tracePackets: if nHeaders == 0:
if len == 0: traceRecvGot "EMPTY reply BlockHeaders", peer=sp, got=0,
traceGot "EMPTY reply BlockHeaders", peer=sp, requested=request.maxResults
got=0, requested=request.maxResults else:
else: traceRecvGot "reply BlockHeaders", peer=sp, got=nHeaders,
let firstBlock = reply.get.headers[0].blockNumber requested=request.maxResults,
let lastBlock = reply.get.headers[len - 1].blockNumber firstBlock=reply.get.headers[0].blockNumber,
traceGot "reply BlockHeaders", peer=sp, lastBlock=reply.get.headers[^1].blockNumber
got=len, requested=request.maxResults, firstBlock, lastBlock
sp.pendingGetBlockHeaders = false sp.pendingGetBlockHeaders = false
if len > request.maxResults.int: if request.maxResults.int < nHeaders:
traceProtocolViolation "excess headers in BlockHeaders", traceRecvProtocolViolation "excess headers in BlockHeaders",
peer=sp, got=len, requested=request.maxResults peer=sp, got=nHeaders, requested=request.maxResults
# TODO: Should disconnect. # TODO: Should disconnect.
inc sp.stats.major.excessBlockHeaders inc sp.stats.major.excessBlockHeaders
return return
if len > 0: if 0 < nHeaders:
# TODO: Check this is not copying the `headers`. # TODO: Check this is not copying the `headers`.
sp.peerSyncChainNonEmptyReply(request, reply.get.headers) sp.peerSyncChainNonEmptyReply(request, reply.get.headers)
else: else:
sp.peerSyncChainEmptyReply(request) sp.peerSyncChainEmptyReply(request)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -65,68 +65,62 @@
import import
std/[sequtils, sets, tables, hashes], std/[sequtils, sets, tables, hashes],
chronos, chronos,
stint, eth/[common/eth_types, p2p],
nimcrypto/keccak, nimcrypto/keccak,
eth/[common/eth_types, rlp, p2p], stint,
".."/[protocol, protocol/pickeled_eth_tracers, sync_types] ".."/[protocol, protocol/pickeled_eth_tracers],
"."/[base_desc, path_desc, pie/peer_desc, timer_helper, types]
type type
NodeDataRequestQueue* = ref object of typeof SyncPeer().nodeDataRequestsBase NodeDataRequest = ref object of NodeDataRequestBase
liveRequests*: HashSet[NodeDataRequest] sp: SnapPeerEx
empties*: int hashes: seq[NodeHash]
# `OrderedSet` was considered instead of `seq` here, but it has a slow future: Future[NodeDataReply]
# implementation of `excl`, defeating the motivation for using it. timer: TimerCallback
waitingOnEmpties*: seq[NodeDataRequest] pathRange: (InteriorPath, InteriorPath)
beforeFirstHash*: seq[NodeDataRequest] fullHashed: bool
beforeFullHash*: HashSet[NodeDataRequest]
# We need to be able to lookup requests by the hash of reply data.
# `ptr NodeHash` is used here so the table doesn't require an independent
# copy of the hash. The hash is part of the request object.
itemHash*: Table[ptr NodeHash, (NodeDataRequest, int)]
NodeDataRequest* = ref object
sp*: SyncPeer
hashes*: seq[NodeHash]
future*: Future[NodeDataReply]
timer*: TimerCallback
pathRange*: (InteriorPath, InteriorPath)
fullHashed*: bool
NodeDataReply* = ref object NodeDataReply* = ref object
reverseMap: seq[int] # Access with `reversMap(i)` instead. reverseMap: seq[int] # Access with `reversMap(i)` instead.
hashVerifiedData*: seq[Blob] hashVerifiedData*: seq[Blob]
template reverseMap*(reply: NodeDataReply, index: int): int =
## Given an index into the request hash list, return index into the reply
## `hashVerifiedData`, or -1 if there is no data for that request hash.
if index < reply.reverseMap.len: reply.reverseMap[index] - 1
elif index < reply.hashVerifiedData.len: index
else: -1
template nodeDataRequests*(sp: SyncPeer): auto = proc ex(base: NodeDataRequestBase): NodeDataRequest =
## Make `sp.nodeDataRequests` available with the real object type. ## to extended object version
sync_types.nodeDataRequestsBase(sp).NodeDataRequestQueue base.NodeDataRequest
template nodeDataHash*(data: Blob): NodeHash = keccak256.digest(data).NodeHash proc ex(pair: (NodeDataRequestBase,int)): (NodeDataRequest, int) =
## to extended object version
(pair[0].ex, pair[1])
proc hash(request: NodeDataRequest|NodeDataRequestBase): Hash =
hash(cast[pointer](request))
proc hash(hash: ptr Hash256): Hash =
cast[ptr Hash](addr hash.data)[]
proc `==`(hash1, hash2: ptr Hash256): bool =
hash1[] == hash2[]
# ------------------------------------------------------------------------------
# Private logging helpers
# ------------------------------------------------------------------------------
template pathRange(request: NodeDataRequest): string = template pathRange(request: NodeDataRequest): string =
pathRange(request.pathRange[0], request.pathRange[1]) pathRange(request.pathRange[0], request.pathRange[1])
template `$`*(paths: (InteriorPath, InteriorPath)): string =
pathRange(paths[0], paths[1])
proc traceGetNodeDataSending(request: NodeDataRequest) = proc traceGetNodeDataSending(request: NodeDataRequest) =
traceSending "GetNodeData", peer=request.sp, traceSendSending "GetNodeData", peer=request.sp,
hashes=request.hashes.len, pathRange=request.pathRange hashes=request.hashes.len, pathRange=request.pathRange
proc traceGetNodeDataDelaying(request: NodeDataRequest) = proc traceGetNodeDataDelaying(request: NodeDataRequest) =
traceDelaying "GetNodeData", peer=request.sp, traceSendDelaying "GetNodeData", peer=request.sp,
hashes=request.hashes.len, pathRange=request.pathRange hashes=request.hashes.len, pathRange=request.pathRange
proc traceGetNodeDataSendError(request: NodeDataRequest, proc traceGetNodeDataSendError(request: NodeDataRequest,
e: ref CatchableError) = e: ref CatchableError) =
traceRecvError "sending GetNodeData", traceRecvError "sending GetNodeData", peer=request.sp,
peer=request.sp, error=e.msg, error=e.msg, hashes=request.hashes.len, pathRange=request.pathRange
hashes=request.hashes.len, pathRange=request.pathRange
proc traceNodeDataReplyError(request: NodeDataRequest, proc traceNodeDataReplyError(request: NodeDataRequest,
e: ref CatchableError) = e: ref CatchableError) =
@ -135,27 +129,26 @@ proc traceNodeDataReplyError(request: NodeDataRequest,
hashes=request.hashes.len, pathRange=request.pathRange hashes=request.hashes.len, pathRange=request.pathRange
proc traceNodeDataReplyTimeout(request: NodeDataRequest) = proc traceNodeDataReplyTimeout(request: NodeDataRequest) =
traceTimeoutWaiting "for reply to GetNodeData", traceRecvTimeoutWaiting "for reply to GetNodeData",
hashes=request.hashes.len, pathRange=request.pathRange, peer=request.sp hashes=request.hashes.len, pathRange=request.pathRange, peer=request.sp
proc traceGetNodeDataDisconnected(request: NodeDataRequest) = proc traceGetNodeDataDisconnected(request: NodeDataRequest) =
traceRecvError "peer disconnected, not sending GetNodeData", traceRecvError "peer disconnected, not sending GetNodeData",
peer=request.sp, hashes=request.hashes.len, pathRange=request.pathRange peer=request.sp, hashes=request.hashes.len, pathRange=request.pathRange
proc traceNodeDataReplyEmpty(sp: SyncPeer, request: NodeDataRequest) = proc traceNodeDataReplyEmpty(sp: SnapPeerEx, request: NodeDataRequest) =
# `request` can be `nil` because we don't always know which request # `request` can be `nil` because we don't always know which request
# the empty reply goes with. Therefore `sp` must be included. # the empty reply goes with. Therefore `sp` must be included.
if request.isNil: if request.isNil:
traceGot "EMPTY NodeData", peer=sp, traceRecvGot "EMPTY NodeData", peer=sp, got=0
got=0
else: else:
traceGot "NodeData", peer=sp, traceRecvGot "NodeData", peer=sp, got=0,
got=0, requested=request.hashes.len, pathRange=request.pathRange requested=request.hashes.len, pathRange=request.pathRange
proc traceNodeDataReplyUnmatched(sp: SyncPeer, got: int) = proc traceNodeDataReplyUnmatched(sp: SnapPeerEx, got: int) =
# There is no request for this reply. Therefore `sp` must be included. # There is no request for this reply. Therefore `sp` must be included.
traceProtocolViolation "non-reply NodeData", peer=sp, got traceRecvProtocolViolation "non-reply NodeData", peer=sp, got
debug "Sync: Warning: Unexpected non-reply NodeData from peer" debug "Snap: Warning: Unexpected non-reply NodeData from peer"
proc traceNodeDataReply(request: NodeDataRequest, proc traceNodeDataReply(request: NodeDataRequest,
got, use, unmatched, other, duplicates: int) = got, use, unmatched, other, duplicates: int) =
@ -165,11 +158,11 @@ proc traceNodeDataReply(request: NodeDataRequest,
logScope: pathRange=request.pathRange logScope: pathRange=request.pathRange
logScope: peer=request.sp logScope: peer=request.sp
if got > request.hashes.len and (unmatched + other) == 0: if got > request.hashes.len and (unmatched + other) == 0:
traceGot "EXCESS reply NodeData" traceRecvGot "EXCESS reply NodeData"
elif got == request.hashes.len or use != got: elif got == request.hashes.len or use != got:
traceGot "reply NodeData" traceRecvGot "reply NodeData"
elif got < request.hashes.len: elif got < request.hashes.len:
traceGot "TRUNCATED reply NodeData" traceRecvGot "TRUNCATED reply NodeData"
if use != got: if use != got:
logScope: logScope:
@ -180,22 +173,22 @@ proc traceNodeDataReply(request: NodeDataRequest,
pathRange=request.pathRange pathRange=request.pathRange
peer=request.sp peer=request.sp
if unmatched > 0: if unmatched > 0:
traceProtocolViolation "incorrect hashes in NodeData" traceRecvProtocolViolation "incorrect hashes in NodeData"
debug "Sync: Warning: NodeData has nodes with incorrect hashes" debug "Snap: Warning: NodeData has nodes with incorrect hashes"
elif other > 0: elif other > 0:
traceProtocolViolation "mixed request nodes in NodeData" traceRecvProtocolViolation "mixed request nodes in NodeData"
debug "Sync: Warning: NodeData has nodes from mixed requests" debug "Snap: Warning: NodeData has nodes from mixed requests"
elif got > request.hashes.len: elif got > request.hashes.len:
# Excess without unmatched/other is only possible with duplicates > 0. # Excess without unmatched/other is only possible with duplicates > 0.
traceProtocolViolation "excess nodes in NodeData" traceRecvProtocolViolation "excess nodes in NodeData"
debug "Sync: Warning: NodeData has more nodes than requested" debug "Snap: Warning: NodeData has more nodes than requested"
else: else:
traceProtocolViolation "duplicate nodes in NodeData" traceRecvProtocolViolation "duplicate nodes in NodeData"
debug "Sync: Warning: NodeData has duplicate nodes" debug "Snap: Warning: NodeData has duplicate nodes"
proc hash(hash: ptr Hash256): Hash = cast[ptr Hash](addr hash.data)[] # ------------------------------------------------------------------------------
proc `==`(hash1, hash2: ptr Hash256): bool = hash1[] == hash2[] # Private functions
proc hash(request: NodeDataRequest): Hash = hash(cast[pointer](request)) # ------------------------------------------------------------------------------
proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob], proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
reverseMap: var seq[int], reverseMap: var seq[int],
@ -219,7 +212,7 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
for i in 0 ..< data.len: for i in 0 ..< data.len:
var itemRequest: NodeDataRequest var itemRequest: NodeDataRequest
var index = 0 var index = 0
let hash = nodeDataHash(data[i]) let hash = data[i].toNodeHash
if i == 0: if i == 0:
# Efficiently guess the request belongs to the oldest queued request and # Efficiently guess the request belongs to the oldest queued request and
# the items are in requested order. This lets us skip storing any item # the items are in requested order. This lets us skip storing any item
@ -227,7 +220,7 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
# make sure we always find the oldest queued request first. # make sure we always find the oldest queued request first.
var j = 0 var j = 0
while j < rq.beforeFirstHash.len: while j < rq.beforeFirstHash.len:
let hashRequest = rq.beforeFirstHash[j] let hashRequest = rq.beforeFirstHash[j].NodeDataRequest
if hashRequest.hashes[0] == hash: if hashRequest.hashes[0] == hash:
itemRequest = hashRequest itemRequest = hashRequest
break break
@ -236,7 +229,7 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
# in the global request table when replies have items in requested # in the global request table when replies have items in requested
# order, even though replies themselves are out of order. # order, even though replies themselves are out of order.
if j == 0: if j == 0:
(itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash) (itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash).ex
if not itemRequest.isNil: if not itemRequest.isNil:
break break
rq.itemHash[addr hashRequest.hashes[0]] = (hashRequest, 0) rq.itemHash[addr hashRequest.hashes[0]] = (hashRequest, 0)
@ -254,7 +247,7 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
# If this succeeds, the reply must have items out of requested order. # If this succeeds, the reply must have items out of requested order.
# If it fails, a peer sent a bad reply. # If it fails, a peer sent a bad reply.
if itemRequest.isNil: if itemRequest.isNil:
(itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash) (itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash).ex
if itemRequest.isNil: if itemRequest.isNil:
# Hash and search items in the current request first, if there is one. # Hash and search items in the current request first, if there is one.
if not request.isNil and not request.fullHashed: if not request.isNil and not request.fullHashed:
@ -262,7 +255,7 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
for j in 0 ..< request.hashes.len: for j in 0 ..< request.hashes.len:
rq.itemHash[addr request.hashes[j]] = (request, j) rq.itemHash[addr request.hashes[j]] = (request, j)
(itemRequest, index) = (itemRequest, index) =
rq.itemHash.getOrDefault(unsafeAddr hash) rq.itemHash.getOrDefault(unsafeAddr hash).ex
if itemRequest.isNil: if itemRequest.isNil:
# Hash and search all items across all requests. # Hash and search all items across all requests.
if rq.beforeFirstHash.len + rq.beforeFullHash.len > 0: if rq.beforeFirstHash.len + rq.beforeFullHash.len > 0:
@ -270,12 +263,12 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
rq.beforeFirstHash.add(rq.beforeFullHash.toSeq) rq.beforeFirstHash.add(rq.beforeFullHash.toSeq)
rq.beforeFullHash.clear() rq.beforeFullHash.clear()
for hashRequest in rq.beforeFirstHash: for hashRequest in rq.beforeFirstHash:
if not hashRequest.fullHashed: if not hashRequest.ex.fullHashed:
hashRequest.fullHashed = true hashRequest.ex.fullHashed = true
for j in 0 ..< hashRequest.hashes.len: for j in 0 ..< hashRequest.ex.hashes.len:
rq.itemHash[addr hashRequest.hashes[j]] = (hashRequest, j) rq.itemHash[addr hashRequest.ex.hashes[j]] = (hashRequest, j)
rq.beforeFirstHash.setLen(0) rq.beforeFirstHash.setLen(0)
(itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash) (itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash).ex
if itemRequest.isNil: if itemRequest.isNil:
# Not found anywhere. # Not found anywhere.
inc unmatched inc unmatched
@ -332,7 +325,7 @@ proc nodeDataComplete(request: NodeDataRequest, reply: NodeDataReply,
# Subtle: Timer can trigger and its callback be added to Chronos run loop, # Subtle: Timer can trigger and its callback be added to Chronos run loop,
# then data event trigger and call `clearTimer()`. The timer callback # then data event trigger and call `clearTimer()`. The timer callback
# will then run but it must be ignored. # will then run but it must be ignored.
debug "Sync: Warning: Resolved timer race over NodeData reply" debug "Snap: Warning: Resolved timer race over NodeData reply"
else: else:
request.timer.clearTimer() request.timer.clearTimer()
request.future.complete(reply) request.future.complete(reply)
@ -358,17 +351,17 @@ proc nodeDataTryEmpties(rq: NodeDataRequestQueue) =
if rq.liveRequests.len > 0: if rq.liveRequests.len > 0:
# Careful: Use `.toSeq` below because we must not use the `HashSet` # Careful: Use `.toSeq` below because we must not use the `HashSet`
# iterator while the set is being changed. # iterator while the set is being changed.
for request in rq.liveRequests.toSeq: for request in rq.liveRequests.toSeq.mapIt(it.ex):
# Constructed reply object, because empty is different from timeout. # Constructed reply object, because empty is different from timeout.
request.nodeDataComplete(NodeDataReply(), true) request.nodeDataComplete(NodeDataReply(), true)
# Move all temporarily delayed requests to the live state, and send them. # Move all temporarily delayed requests to the live state, and send them.
if rq.waitingOnEmpties.len > 0: if rq.waitingOnEmpties.len > 0:
var tmpList: seq[NodeDataRequest] var tmpList: seq[NodeDataRequestBase]
swap(tmpList, rq.waitingOnEmpties) swap(tmpList, rq.waitingOnEmpties)
for i in 0 ..< tmpList.len: for i in 0 ..< tmpList.len:
asyncSpawn nodeDataEnqueueAndSend(tmpList[i]) asyncSpawn nodeDataEnqueueAndSend(tmpList[i].ex)
proc nodeDataNewRequest(sp: SyncPeer, hashes: seq[NodeHash], proc nodeDataNewRequest(sp: SnapPeerEx, hashes: seq[NodeHash],
pathFrom, pathTo: InteriorPath pathFrom, pathTo: InteriorPath
): NodeDataRequest = ): NodeDataRequest =
## Make a new `NodeDataRequest` to receive a reply or timeout in future. The ## Make a new `NodeDataRequest` to receive a reply or timeout in future. The
@ -406,14 +399,14 @@ proc nodeDataEnqueueAndSend(request: NodeDataRequest) {.async.} =
try: try:
# TODO: What exactly does this `await` do, wait for space in send buffer? # TODO: What exactly does this `await` do, wait for space in send buffer?
# TODO: Check if this copies the hashes redundantly. # TODO: Check if this copies the hashes redundantly.
await sp.peer.getNodeData(request.hashes) await sp.peer.getNodeData(request.hashes.untie)
except CatchableError as e: except CatchableError as e:
request.traceGetNodeDataSendError(e) request.traceGetNodeDataSendError(e)
inc sp.stats.major.networkErrors inc sp.stats.major.networkErrors
sp.stopped = true sp.stopped = true
request.future.fail(e) request.future.fail(e)
proc onNodeData(sp: SyncPeer, data: openArray[Blob]) = proc onNodeData(sp: SnapPeerEx, data: openArray[Blob]) =
## Handle an incoming `eth.NodeData` reply. ## Handle an incoming `eth.NodeData` reply.
## Practically, this is also where all the incoming packet trace messages go. ## Practically, this is also where all the incoming packet trace messages go.
let rq = sp.nodeDataRequests let rq = sp.nodeDataRequests
@ -427,7 +420,7 @@ proc onNodeData(sp: SyncPeer, data: openArray[Blob]) =
# through until the "non-reply" protocol violation error. # through until the "non-reply" protocol violation error.
if rq.liveRequests.len > 0: if rq.liveRequests.len > 0:
sp.traceNodeDataReplyEmpty(if rq.liveRequests.len != 1: nil sp.traceNodeDataReplyEmpty(if rq.liveRequests.len != 1: nil
else: rq.liveRequests.toSeq[0]) else: rq.liveRequests.toSeq[0].ex)
inc rq.empties inc rq.empties
# It may now be possible to match empty replies to earlier requests. # It may now be possible to match empty replies to earlier requests.
rq.nodeDataTryEmpties() rq.nodeDataTryEmpties()
@ -461,8 +454,13 @@ proc onNodeData(sp: SyncPeer, data: openArray[Blob]) =
doAssert reply.hashVerifiedData.len == use doAssert reply.hashVerifiedData.len == use
request.nodeDataComplete(reply) request.nodeDataComplete(reply)
proc getNodeData*(sp: SyncPeer, hashes: seq[NodeHash], # ------------------------------------------------------------------------------
pathFrom, pathTo: InteriorPath): Future[NodeDataReply] {.async.} = # Public functions
# ------------------------------------------------------------------------------
proc getNodeData*(sp: SnapPeerEx, hashes: seq[NodeHash],
pathFrom, pathTo: InteriorPath): Future[NodeDataReply]
{.async.} =
## Async function to send a `GetNodeData` request to a peer, and when the ## Async function to send a `GetNodeData` request to a peer, and when the
## peer replies, or on timeout or error, return `NodeDataReply`. ## peer replies, or on timeout or error, return `NodeDataReply`.
## ##
@ -495,8 +493,8 @@ proc getNodeData*(sp: SyncPeer, hashes: seq[NodeHash],
# always received just valid data with hashes already verified, or `nil`. # always received just valid data with hashes already verified, or `nil`.
return reply return reply
proc setupGetNodeData*(sp: SyncPeer) = proc setupGetNodeData*(sp: SnapPeerEx) =
## Initialise `SyncPeer` to support `getNodeData` calls. ## Initialise `SnapPeerEx` to support `GetNodeData` calls.
if sp.nodeDataRequests.isNil: if sp.nodeDataRequests.isNil:
sp.nodeDataRequests = NodeDataRequestQueue() sp.nodeDataRequests = NodeDataRequestQueue()
@ -506,7 +504,18 @@ proc setupGetNodeData*(sp: SyncPeer) =
{.gcsafe.}: onNodeData(sp, data) {.gcsafe.}: onNodeData(sp, data)
sp.peer.state(eth).onGetNodeData = sp.peer.state(eth).onGetNodeData =
proc (_: Peer, hashes: openArray[NodeHash], data: var seq[Blob]) = proc (_: Peer, hashes: openArray[Hash256], data: var seq[Blob]) =
# Return empty nodes result. This callback is installed to # Return empty nodes result. This callback is installed to
# ensure we don't reply with nodes from the chainDb. # ensure we don't reply with nodes from the chainDb.
discard discard
proc reverseMap*(reply: NodeDataReply, index: int): int =
## Given an index into the request hash list, return index into the reply
## `hashVerifiedData`, or -1 if there is no data for that request hash.
if index < reply.reverseMap.len: reply.reverseMap[index] - 1
elif index < reply.hashVerifiedData.len: index
else: -1
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,218 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
eth/[common/eth_types, p2p],
stew/byteutils,
stint
{.push raises: [Defect].}
type
  InteriorPath* = object
    ## Path to an interior node in an Ethereum hexary trie. This is a sequence
    ## of 0 to 64 hex digits. 0 digits means the root node, and 64 digits
    ## means a leaf node whose path hasn't been converted to `LeafPath` yet.
    bytes: array[32, byte]  # Digits packed two per byte, first digit in the high nibble; unused tail kept zero
    numDigits: byte         # Count of valid hex digits in `bytes`, 0..64
  LeafPath* = object
    ## Path to a leaf in an Ethereum hexary trie. Individually, each leaf path
    ## is a hash, but rather than being the hash of the contents, it's the hash
    ## of the item's address. Collectively, these hashes have some 256-bit
    ## numerical properties: ordering, intervals and meaningful difference.
    number: UInt256         # Big-endian numeric interpretation of the 32-byte path
  LeafRange* = object
    ## Inclusive range of leaf paths, `leafLow` .. `leafHigh`.
    leafLow*, leafHigh*: LeafPath
const
  interiorPathMaxDepth = 64                            # Max hex digits in an `InteriorPath` (32 bytes * 2)
  leafPathBytes = sizeof(LeafPath().number.toBytesBE)  # Byte width of a leaf path (32 for UInt256)
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc maxDepth*(_: InteriorPath | typedesc[InteriorPath]): int =
  ## Maximum number of hex digits an `InteriorPath` can hold (64).
  result = interiorPathMaxDepth
proc depth*(path: InteriorPath): int =
  ## Number of hex digits currently in `path`: 0 (root) up to 64 (leaf).
  result = int(path.numDigits)
proc digit*(path: InteriorPath, index: int): int =
  ## Return the hex digit at position `index` (0 = first/most significant).
  doAssert 0 <= index and index < path.depth
  let packed = path.bytes[index shr 1]
  if (index and 1) == 0:
    int(packed shr 4)      # even index lives in the high nibble
  else:
    int(packed and 0x0f)   # odd index lives in the low nibble
proc low*(_: LeafPath | type LeafPath): LeafPath =
  ## Smallest possible leaf path (numeric value 0).
  result = LeafPath(number: low(UInt256))

proc high*(_: LeafPath | type LeafPath): LeafPath =
  ## Largest possible leaf path (numeric value 2^256 - 1).
  result = LeafPath(number: high(UInt256))
# ------------------------------------------------------------------------------
# Public setters
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Public `InteriorPath` functions
# ------------------------------------------------------------------------------
proc toInteriorPath*(interiorPath: InteriorPath): InteriorPath =
  ## Identity conversion, provided so generic code can call `toInteriorPath`
  ## on either path type.
  result = interiorPath
proc toInteriorPath*(leafPath: LeafPath): InteriorPath =
  ## Convert a `LeafPath` into a full-depth (64 digit) `InteriorPath`.
  let raw = leafPath.number.toBytesBE
  doAssert sizeof(raw) * 2 == interiorPathMaxDepth
  doAssert sizeof(raw) == sizeof(InteriorPath().bytes)
  InteriorPath(bytes: raw, numDigits: interiorPathMaxDepth)
proc add*(path: var InteriorPath, digit: byte) =
  ## Append one hex digit to `path`.
  ## NOTE(review): `digit` is presumably expected in 0..15; in the odd branch
  ## any high bits are shifted out of the byte, in the even branch they are
  ## masked off -- confirm callers never pass larger values.
  doAssert path.numDigits < interiorPathMaxDepth
  inc path.numDigits
  if (path.numDigits and 1) != 0:
    # Odd digit count: the new digit starts a fresh byte (high nibble).
    path.bytes[path.numDigits shr 1] = (digit shl 4)
  else:
    # Even digit count: the new digit fills the low nibble of the last byte.
    path.bytes[(path.numDigits shr 1) - 1] += (digit and 0x0f)
proc addPair*(path: var InteriorPath, digitPair: byte) =
  ## Append two hex digits at once: the first digit is the high nibble of
  ## `digitPair`, the second the low nibble.
  doAssert path.numDigits < interiorPathMaxDepth - 1
  path.numDigits += 2
  if (path.numDigits and 1) == 0:
    # Path was byte-aligned before the append: the pair fills a whole byte.
    path.bytes[(path.numDigits shr 1) - 1] = digitPair
  else:
    # Path was mid-byte: split the pair across the boundary of two bytes.
    path.bytes[(path.numDigits shr 1) - 1] += (digitPair shr 4)
    path.bytes[path.numDigits shr 1] = (digitPair shl 4)
proc pop*(path: var InteriorPath) =
  ## Remove the last hex digit, zeroing the vacated nibble so the unused tail
  ## of `bytes` stays zero-padded (the `==`/`<=`/`cmp` procs rely on this).
  doAssert path.numDigits >= 1
  dec path.numDigits
  path.bytes[path.numDigits shr 1] =
    if (path.numDigits and 1) == 0: 0.byte
    else: path.bytes[path.numDigits shr 1] and 0xf0
# ------------------------------------------------------------------------------
# Public comparison functions for `InteriorPath`
# ------------------------------------------------------------------------------
proc `==`*(path1, path2: InteriorPath): bool =
  ## Byte-wise equality over the padded digit arrays.
  # Paths are zero-padded to the end of the array, so comparison is easy.
  # NOTE(review): `numDigits` itself is not compared, so a path and a deeper
  # path that extends it only with zero digits compare equal -- confirm this
  # is the intended semantics.
  for i in 0 ..< (max(path1.numDigits, path2.numDigits).int + 1) shr 1:
    if path1.bytes[i] != path2.bytes[i]:
      return false
  return true
proc `<=`*(path1, path2: InteriorPath): bool =
  ## Lexicographic order over the padded digit arrays; equal-prefix paths of
  ## different depth compare equal (see `==` above).
  # Paths are zero-padded to the end of the array, so comparison is easy.
  for i in 0 ..< (max(path1.numDigits, path2.numDigits).int + 1) shr 1:
    if path1.bytes[i] != path2.bytes[i]:
      # First differing byte decides the order.
      return path1.bytes[i] <= path2.bytes[i]
  return true
proc cmp*(path1, path2: InteriorPath): int =
  ## Three-way comparison: negative, zero, or positive like `system.cmp`.
  ## Paths are zero-padded to the end of the array, so a plain byte scan works.
  let byteCount = (max(path1.numDigits, path2.numDigits).int + 1) shr 1
  for i in 0 ..< byteCount:
    let delta = path1.bytes[i].int - path2.bytes[i].int
    if delta != 0:
      return delta
  0
# Remaining comparison operators, all derived from `==` and `<=` above.
proc `!=`*(path1, path2: InteriorPath): bool = not(path1 == path2)
proc `<`*(path1, path2: InteriorPath): bool = not(path2 <= path1)
proc `>=`*(path1, path2: InteriorPath): bool = path2 <= path1
proc `>`*(path1, path2: InteriorPath): bool = not(path1 <= path2)
# ------------------------------------------------------------------------------
# Public string output functions for `InteriorPath`
# ------------------------------------------------------------------------------
proc toHex*(path: InteriorPath, withEllipsis = true): string =
  ## Render `path` as lowercase hex digits. With `withEllipsis` (the default)
  ## a trailing "..." indicates deeper digits may follow.
  const hexChars = "0123456789abcdef"
  let digits = path.depth
  if not withEllipsis:
    result = newString(digits)
  else:
    # Output is capped at 64 chars; for depth >= 62 the digit loop below
    # overwrites part or all of the "..." markers.
    result = newString(min(digits + 3, 64))
    result[^3] = '.'
    result[^2] = '.'
    result[^1] = '.'
  for i in 0 ..< digits:
    result[i] = hexChars[path.digit(i)]
proc pathRange*(path1, path2: InteriorPath): string =
  ## Render a "from-to" range, both ends without ellipses.
  let first = path1.toHex(withEllipsis = false)
  let last = path2.toHex(withEllipsis = false)
  result = first & '-' & last

proc `$`*(path: InteriorPath): string =
  ## Hex digits with a trailing ellipsis (see `toHex`).
  result = path.toHex

proc `$`*(paths: (InteriorPath, InteriorPath)): string =
  ## Render a path pair as a range string.
  result = pathRange(paths[0], paths[1])
# ------------------------------------------------------------------------------
# Public `LeafPath` functions
# ------------------------------------------------------------------------------
proc toLeafPath*(leafPath: LeafPath): LeafPath =
  ## Identity conversion, provided so generic code can call `toLeafPath`
  ## on either path type.
  leafPath

proc toLeafPath*(interiorPath: InteriorPath): LeafPath =
  ## Convert a full-depth (64 digit) `InteriorPath` into a `LeafPath`.
  doAssert interiorPath.depth == InteriorPath.maxDepth
  doAssert sizeof(interiorPath.bytes) * 2 == InteriorPath.maxDepth
  doAssert sizeof(interiorPath.bytes) == leafPathBytes
  LeafPath(number: UInt256.fromBytesBE(interiorPath.bytes))

proc toLeafPath*(bytes: array[leafPathBytes, byte]): LeafPath =
  ## Interpret 32 big-endian bytes as a `LeafPath`.
  doAssert sizeof(bytes) == leafPathBytes
  LeafPath(number: UInt256.fromBytesBE(bytes))
proc toBytes*(leafPath: LeafPath): array[leafPathBytes, byte] =
  ## Big-endian byte representation of `leafPath` (inverse of the
  ## array-taking `toLeafPath`).
  doAssert sizeof(LeafPath().number.toBytesBE) == leafPathBytes
  leafPath.number.toBytesBE
# Note, `{.borrow.}` didn't work for these symbols (with Nim 1.2.12) when we
# defined `LeafPath = distinct UInt256`. The `==` didn't match any symbol to
# borrow from, and the auto-generated `<` failed to compile, with a peculiar
# type mismatch error.
# Comparison operators, all forwarded to the underlying `UInt256` value.
proc `==`*(path1, path2: LeafPath): bool = path1.number == path2.number
proc `!=`*(path1, path2: LeafPath): bool = path1.number != path2.number
proc `<`*(path1, path2: LeafPath): bool = path1.number < path2.number
proc `<=`*(path1, path2: LeafPath): bool = path1.number <= path2.number
proc `>`*(path1, path2: LeafPath): bool = path1.number > path2.number
proc `>=`*(path1, path2: LeafPath): bool = path1.number >= path2.number
proc cmp*(path1, path2: LeafPath): int = cmp(path1.number, path2.number)
# Interval arithmetic on leaf paths, forwarded to `UInt256`.
# NOTE(review): these wrap modulo 2^256 like the underlying `UInt256` ops --
# confirm callers guard against crossing `low`/`high`.
proc `-`*(path1, path2: LeafPath): UInt256 =
  ## Numeric distance between two leaf paths.
  path1.number - path2.number
proc `+`*(base: LeafPath, step: UInt256): LeafPath =
  ## Advance `base` by `step`.
  LeafPath(number: base.number + step)
proc `+`*(base: LeafPath, step: SomeInteger): LeafPath =
  ## Advance `base` by an ordinary integer `step`.
  LeafPath(number: base.number + step.u256)
proc `-`*(base: LeafPath, step: UInt256): LeafPath =
  ## Move `base` back by `step`.
  LeafPath(number: base.number - step)
proc `-`*(base: LeafPath, step: SomeInteger): LeafPath =
  ## Move `base` back by an ordinary integer `step`.
  LeafPath(number: base.number - step.u256)
# ------------------------------------------------------------------------------
# Public string output functions for `LeafPath`
# ------------------------------------------------------------------------------
proc toHex*(path: LeafPath): string =
  ## Full 64 hex digit representation of the leaf path.
  result = path.number.toBytesBE.toHex

proc `$`*(path: LeafPath): string =
  ## Same as `toHex`.
  result = toHex(path)

proc pathRange*(path1, path2: LeafPath): string =
  ## Render a "low-high" hex range.
  result = toHex(path1) & '-' & toHex(path2)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -2,111 +2,44 @@
# #
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) # http://www.apache.org/licenses/LICENSE-2.0)
# at your option. This file may not be copied, modified, or distributed except according to those terms. # * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
{.push raises: [Defect].} # at your option. This file may not be copied, modified, or distributed
# except according to those terms.
import import
std/[sets, sequtils, strutils], std/[sets, sequtils, strutils],
chronos, chronos,
eth/[common/eth_types, rlp, p2p], chronicles,
eth/[common/eth_types, p2p],
stint, stint,
../../sync_types ../path_desc,
"."/[peer_desc, sync_desc]
type {.push raises: [Defect].}
LeafRange* = object
leafLow*, leafHigh*: LeafPath
SharedFetchState* = ref object of typeof SyncPeer().sharedFetchBase proc hasSlice*(sp: SnapPeerEx): bool =
## Account fetching state that is shared among all peers.
# Leaf path ranges not fetched or in progress on any peer.
leafRanges: seq[LeafRange]
countAccounts*: int64
countAccountBytes*: int64
countRange*: UInt256
countRangeStarted*: bool
countRangeSnap*: UInt256
countRangeSnapStarted*: bool
countRangeTrie*: UInt256
countRangeTrieStarted*: bool
displayTimer: TimerCallback
template sharedFetch*(sp: SyncPeer): auto =
sync_types.sharedFetchBase(sp).SharedFetchState
proc rangeFraction(value: UInt256, discriminator: bool): int =
## Format a value in the range 0..2^256 as a percentage, 0-100%. As the
## top of the range 2^256 cannot be represented in `UInt256` it actually
## has the value `0: UInt256`, and with that value, `discriminator` is
## consulted to decide between 0% and 100%. For other values, the value is
## constrained to be between slightly above 0% and slightly below 100%,
## so that the endpoints are distinctive when displayed.
const multiplier = 10000
var fraction: int = 0 # Fixed point, fraction 0.0-1.0 multiplied up.
if value == 0:
return if discriminator: multiplier else: 0 # Either 100.00% or 0.00%.
const shift = 8 * (sizeof(value) - sizeof(uint64))
const wordHigh: uint64 = (high(typeof(value)) shr shift).truncate(uint64)
# Divide `wordHigh+1` by `multiplier`, rounding up, avoiding overflow.
const wordDiv: uint64 = 1 + ((wordHigh shr 1) div (multiplier.uint64 shr 1))
let wordValue: uint64 = (value shr shift).truncate(uint64)
let divided: uint64 = wordValue div wordDiv
return if divided >= multiplier: multiplier - 1
elif divided <= 0: 1
else: divided.int
proc percent(value: UInt256, discriminator: bool): string =
var str = intToStr(rangeFraction(value, discriminator), 3)
str.insert(".", str.len - 2)
str.add('%')
return str
proc setDisplayTimer(sp: SyncPeer, at: Moment) {.gcsafe.}
proc displayUpdate(sp: SyncPeer) {.gcsafe.} =
let sharedFetch = sp.sharedFetch
doAssert not sharedFetch.isNil
info "State: Account sync progress",
percent=percent(sharedFetch.countRange, sharedFetch.countRangeStarted),
accounts=sharedFetch.countAccounts,
snap=percent(sharedFetch.countRangeSnap, sharedFetch.countRangeSnapStarted),
trie=percent(sharedFetch.countRangeTrie, sharedFetch.countRangeTrieStarted)
sp.setDisplayTimer(Moment.fromNow(1.seconds))
proc setDisplayTimer(sp: SyncPeer, at: Moment) =
sp.sharedFetch.displayTimer = safeSetTimer(at, displayUpdate, sp)
proc newSharedFetchState(sp: SyncPeer): SharedFetchState =
result = SharedFetchState(
leafRanges: @[LeafRange(leafLow: low(LeafPath), leafHigh: high(LeafPath))]
)
result.displayTimer = safeSetTimer(Moment.fromNow(100.milliseconds),
displayUpdate, sp)
proc hasSlice*(sp: SyncPeer): bool =
## Return `true` iff `getSlice` would return a free slice to work on. ## Return `true` iff `getSlice` would return a free slice to work on.
if sp.sharedFetch.isNil: if sp.nsx.sharedFetch.isNil:
sp.sharedFetch = newSharedFetchState(sp) sp.nsx.sharedFetch = SharedFetchState.new
result = not sp.sharedFetch.isNil and sp.sharedFetch.leafRanges.len > 0 result = 0 < sp.nsx.sharedFetch.leafRanges.len
trace "Sync: hasSlice", peer=sp, hasSlice=result trace "Snap: hasSlice", peer=sp, hasSlice=result
proc getSlice*(sp: SyncPeer, leafLow, leafHigh: var LeafPath): bool = proc getSlice*(sp: SnapPeerEx, leafLow, leafHigh: var LeafPath): bool =
## Claim a free slice to work on. If a slice was available, it's claimed, ## Claim a free slice to work on. If a slice was available, it's claimed,
## `leadLow` and `leafHigh` are set to the slice range and `true` is ## `leadLow` and `leafHigh` are set to the slice range and `true` is
## returned. Otherwise `false` is returned. ## returned. Otherwise `false` is returned.
if sp.sharedFetch.isNil: if sp.nsx.sharedFetch.isNil:
sp.sharedFetch = newSharedFetchState(sp) sp.nsx.sharedFetch = SharedFetchState.new
let sharedFetch = sp.sharedFetch let sharedFetch = sp.nsx.sharedFetch
template ranges: auto = sharedFetch.leafRanges template ranges: auto = sharedFetch.leafRanges
const leafMaxFetchRange = (high(LeafPath) - low(LeafPath)) div 1000 const leafMaxFetchRange = (high(LeafPath) - low(LeafPath)) div 1000
if ranges.len == 0: if ranges.len == 0:
trace "Sync: getSlice", leafRange="none" trace "Snap: getSlice", leafRange="none"
return false return false
leafLow = ranges[0].leafLow leafLow = ranges[0].leafLow
if ranges[0].leafHigh - ranges[0].leafLow <= leafMaxFetchRange: if ranges[0].leafHigh - ranges[0].leafLow <= leafMaxFetchRange:
@ -115,16 +48,16 @@ proc getSlice*(sp: SyncPeer, leafLow, leafHigh: var LeafPath): bool =
else: else:
leafHigh = leafLow + leafMaxFetchRange leafHigh = leafLow + leafMaxFetchRange
ranges[0].leafLow = leafHigh + 1 ranges[0].leafLow = leafHigh + 1
trace "Sync: getSlice", peer=sp, leafRange=pathRange(leafLow, leafHigh) trace "Snap: getSlice", peer=sp, leafRange=pathRange(leafLow, leafHigh)
return true return true
proc putSlice*(sp: SyncPeer, leafLow, leafHigh: LeafPath) = proc putSlice*(sp: SnapPeerEx, leafLow, leafHigh: LeafPath) =
## Return a slice to the free list, merging with the rest of the list. ## Return a slice to the free list, merging with the rest of the list.
let sharedFetch = sp.sharedFetch let sharedFetch = sp.nsx.sharedFetch
template ranges: auto = sharedFetch.leafRanges template ranges: auto = sharedFetch.leafRanges
trace "Sync: putSlice", leafRange=pathRange(leafLow, leafHigh), peer=sp trace "Snap: putSlice", leafRange=pathRange(leafLow, leafHigh), peer=sp
var i = 0 var i = 0
while i < ranges.len and leafLow > ranges[i].leafHigh: while i < ranges.len and leafLow > ranges[i].leafHigh:
inc i inc i
@ -146,25 +79,25 @@ proc putSlice*(sp: SyncPeer, leafLow, leafHigh: LeafPath) =
if leafHigh > ranges[i].leafHigh: if leafHigh > ranges[i].leafHigh:
ranges[i].leafHigh = leafHigh ranges[i].leafHigh = leafHigh
template getSlice*(sp: SyncPeer, leafRange: var LeafRange): bool = template getSlice*(sp: SnapPeerEx, leafRange: var LeafRange): bool =
sp.getSlice(leafRange.leafLow, leafRange.leafHigh) sp.getSlice(leafRange.leafLow, leafRange.leafHigh)
template putSlice*(sp: SyncPeer, leafRange: LeafRange) = template putSlice*(sp: SnapPeerEx, leafRange: LeafRange) =
sp.putSlice(leafRange.leafLow, leafRange.leafHigh) sp.putSlice(leafRange.leafLow, leafRange.leafHigh)
proc countSlice*(sp: SyncPeer, leafLow, leafHigh: LeafPath, which: bool) = proc countSlice*(sp: SnapPeerEx, leafLow, leafHigh: LeafPath, which: bool) =
doAssert leafLow <= leafHigh doAssert leafLow <= leafHigh
sp.sharedFetch.countRange += leafHigh - leafLow + 1 sp.nsx.sharedFetch.countRange += leafHigh - leafLow + 1
sp.sharedFetch.countRangeStarted = true sp.nsx.sharedFetch.countRangeStarted = true
if which: if which:
sp.sharedFetch.countRangeSnap += leafHigh - leafLow + 1 sp.nsx.sharedFetch.countRangeSnap += leafHigh - leafLow + 1
sp.sharedFetch.countRangeSnapStarted = true sp.nsx.sharedFetch.countRangeSnapStarted = true
else: else:
sp.sharedFetch.countRangeTrie += leafHigh - leafLow + 1 sp.nsx.sharedFetch.countRangeTrie += leafHigh - leafLow + 1
sp.sharedFetch.countRangeTrieStarted = true sp.nsx.sharedFetch.countRangeTrieStarted = true
template countSlice*(sp: SyncPeer, leafRange: LeafRange, which: bool) = template countSlice*(sp: SnapPeerEx, leafRange: LeafRange, which: bool) =
sp.countSlice(leafRange.leafLow, leafRange.leafHigh, which) sp.countSlice(leafRange.leafLow, leafRange.leafHigh, which)
proc countAccounts*(sp: SyncPeer, len: int) = proc countAccounts*(sp: SnapPeerEx, len: int) =
sp.sharedFetch.countAccounts += len sp.nsx.sharedFetch.countAccounts += len

View File

@ -24,18 +24,19 @@
import import
std/sets, std/sets,
chronos, chronos,
eth/[common/eth_types, rlp, p2p], eth/[common/eth_types, p2p],
nimcrypto/keccak, nimcrypto/keccak,
stint, stint,
"../.."/[protocol, protocol/pickeled_snap_tracers, sync_types, trace_helper], "../.."/[protocol, protocol/pickeled_snap_tracers, trace_helper],
./common ".."/[base_desc, path_desc, types],
"."/[common, peer_desc]
const const
snapRequestBytesLimit = 2 * 1024 * 1024 snapRequestBytesLimit = 2 * 1024 * 1024
## Soft bytes limit to request in `snap` protocol calls. ## Soft bytes limit to request in `snap` protocol calls.
proc snapFetch*(sp: SyncPeer, stateRoot: TrieHash, proc snapFetch*(sp: SnapPeerEx, stateRoot: TrieHash, leafRange: LeafRange)
leafRange: LeafRange) {.async.} = {.async.} =
var origin = leafRange.leafLow var origin = leafRange.leafLow
var limit = leafRange.leafHigh var limit = leafRange.leafHigh
const responseBytes = 2 * 1024 * 1024 const responseBytes = 2 * 1024 * 1024
@ -47,15 +48,15 @@ proc snapFetch*(sp: SyncPeer, stateRoot: TrieHash,
sp.putSlice(leafRange) sp.putSlice(leafRange)
if tracePackets: if tracePackets:
traceSending "GetAccountRange", traceSendSending "GetAccountRange",
accountRange=pathRange(origin, limit), accountRange=pathRange(origin, limit),
stateRoot=($stateRoot), bytesLimit=snapRequestBytesLimit, peer=sp stateRoot=($stateRoot), bytesLimit=snapRequestBytesLimit, peer=sp
var reply: typeof await sp.peer.getAccountRange(stateRoot, origin, limit, var
snapRequestBytesLimit) reply: Option[accountRangeObj]
try: try:
reply = await sp.peer.getAccountRange(stateRoot, origin, limit, reply = await sp.peer.getAccountRange(
snapRequestBytesLimit) stateRoot.untie, origin, limit, snapRequestBytesLimit)
except CatchableError as e: except CatchableError as e:
traceRecvError "waiting for reply to GetAccountRange", traceRecvError "waiting for reply to GetAccountRange",
peer=sp, error=e.msg peer=sp, error=e.msg
@ -65,7 +66,7 @@ proc snapFetch*(sp: SyncPeer, stateRoot: TrieHash,
return return
if reply.isNone: if reply.isNone:
traceTimeoutWaiting "for reply to GetAccountRange", traceRecvTimeoutWaiting "for reply to GetAccountRange",
peer=sp peer=sp
sp.putSlice(leafRange) sp.putSlice(leafRange)
return return
@ -88,14 +89,14 @@ proc snapFetch*(sp: SyncPeer, stateRoot: TrieHash,
# This makes all the difference to terminating the fetch. For now we'll # This makes all the difference to terminating the fetch. For now we'll
# trust the mere existence of the proof rather than verifying it. # trust the mere existence of the proof rather than verifying it.
if proof.len == 0: if proof.len == 0:
traceGot "EMPTY reply AccountRange", peer=sp, traceRecvGot "EMPTY reply AccountRange", peer=sp,
got=len, proofLen=proof.len, gotRange="-", got=len, proofLen=proof.len, gotRange="-",
requestedRange=pathRange(origin, limit), stateRoot=($stateRoot) requestedRange=pathRange(origin, limit), stateRoot=($stateRoot)
sp.putSlice(leafRange) sp.putSlice(leafRange)
# Don't keep retrying snap for this state. # Don't keep retrying snap for this state.
sp.stopThisState = true sp.stopThisState = true
else: else:
traceGot "END reply AccountRange", peer=sp, traceRecvGot "END reply AccountRange", peer=sp,
got=len, proofLen=proof.len, gotRange=pathRange(origin, high(LeafPath)), got=len, proofLen=proof.len, gotRange=pathRange(origin, high(LeafPath)),
requestedRange=pathRange(origin, limit), stateRoot=($stateRoot) requestedRange=pathRange(origin, limit), stateRoot=($stateRoot)
# Current slicer can't accept more result data than was requested, so # Current slicer can't accept more result data than was requested, so
@ -104,14 +105,14 @@ proc snapFetch*(sp: SyncPeer, stateRoot: TrieHash,
return return
var lastPath = accounts[len-1].accHash var lastPath = accounts[len-1].accHash
traceGot "reply AccountRange", peer=sp, traceRecvGot "reply AccountRange", peer=sp,
got=len, proofLen=proof.len, gotRange=pathRange(origin, lastPath), got=len, proofLen=proof.len, gotRange=pathRange(origin, lastPath),
requestedRange=pathRange(origin, limit), stateRoot=($stateRoot) requestedRange=pathRange(origin, limit), stateRoot=($stateRoot)
# Missing proof isn't allowed, unless `origin` is min path in which case # Missing proof isn't allowed, unless `origin` is min path in which case
# there might be no proof if the result spans the entire range. # there might be no proof if the result spans the entire range.
if proof.len == 0 and origin != low(LeafPath): if proof.len == 0 and origin != low(LeafPath):
traceProtocolViolation "missing proof in AccountRange", traceRecvProtocolViolation "missing proof in AccountRange",
peer=sp, got=len, proofLen=proof.len, gotRange=pathRange(origin,lastPath), peer=sp, got=len, proofLen=proof.len, gotRange=pathRange(origin,lastPath),
requestedRange=pathRange(origin, limit), stateRoot=($stateRoot) requestedRange=pathRange(origin, limit), stateRoot=($stateRoot)
sp.putSlice(leafRange) sp.putSlice(leafRange)
@ -133,5 +134,5 @@ proc snapFetch*(sp: SyncPeer, stateRoot: TrieHash,
sp.countAccounts(keepAccounts) sp.countAccounts(keepAccounts)
proc peerSupportsSnap*(sp: SyncPeer): bool = proc peerSupportsSnap*(sp: SnapPeerEx): bool =
not sp.stopped and sp.peer.supports(snap) not sp.stopped and sp.peer.supports(snap)

View File

@ -22,37 +22,15 @@
## the entire Ethereum state to be fetched, even following hash trie ## the entire Ethereum state to be fetched, even following hash trie
## pointers, without significant random access database I/O. ## pointers, without significant random access database I/O.
{.push raises: [Defect].}
import import
std/[sets, tables, algorithm], std/[sets, tables, algorithm],
chronos, chronos,
eth/[common/eth_types, rlp, p2p], eth/[common/eth_types, p2p],
stint, ../../trace_helper,
"../.."/[sync_types, trace_helper], ".."/[base_desc, get_nodedata, path_desc, types, validate_trienode],
".."/[get_nodedata, validate_trienode], "."/[common, peer_desc, sync_desc]
./common
type {.push raises: [Defect].}
FetchState = ref object of typeof SyncPeer().fetchBase
## Account fetching state on a single peer.
sp: SyncPeer
nodeGetQueue: seq[SingleNodeRequest]
nodeGetsInFlight: int
scheduledBatch: bool
progressPrefix: string
progressCount: int
nodesInFlight: int
getNodeDataErrors: int
leafRange: LeafRange
unwindAccounts: int64
unwindAccountBytes: int64
finish: Future[void]
SingleNodeRequest = ref object
hash: NodeHash
path: InteriorPath
future: Future[Blob]
const const
maxBatchGetNodeData = 384 maxBatchGetNodeData = 384
@ -61,12 +39,27 @@ const
maxParallelGetNodeData = 32 maxParallelGetNodeData = 32
## Maximum number of `GetNodeData` requests in parallel to a single peer. ## Maximum number of `GetNodeData` requests in parallel to a single peer.
template fetch*(sp: SyncPeer): auto = type
sync_types.fetchBase(sp).FetchState SingleNodeRequestEx = ref object of SingleNodeRequestBase
hash: NodeHash
path: InteriorPath
future: Future[Blob]
proc hash(n: SingleNodeRequestBase): NodeHash =
n.SingleNodeRequestEx.hash
proc path(n: SingleNodeRequestBase): InteriorPath =
n.SingleNodeRequestEx.path
proc future(n: SingleNodeRequestBase): Future[Blob] =
n.SingleNodeRequestEx.future
# Forward declaration. # Forward declaration.
proc scheduleBatchGetNodeData(fetch: FetchState) {.gcsafe.} proc scheduleBatchGetNodeData(fetch: FetchState) {.gcsafe.}
# ---
proc wrapCallGetNodeData(fetch: FetchState, hashes: seq[NodeHash], proc wrapCallGetNodeData(fetch: FetchState, hashes: seq[NodeHash],
futures: seq[Future[Blob]], futures: seq[Future[Blob]],
pathFrom, pathTo: InteriorPath) {.async.} = pathFrom, pathTo: InteriorPath) {.async.} =
@ -140,14 +133,14 @@ proc batchGetNodeData(fetch: FetchState) =
# use them in lookups, many remote databases store the leaves in some way # use them in lookups, many remote databases store the leaves in some way
# indexed by path. If so, this order will greatly improve lookup locality, # indexed by path. If so, this order will greatly improve lookup locality,
# which directly reduces the amount of storage I/O time and latency. # which directly reduces the amount of storage I/O time and latency.
# - The left-to-right order is beneficial to the local database writes as well. # - The left-to-right order is beneficial to the local database writes too.
# - If the local database indexes by path, the left-to-right write order will # - If the local database indexes by path, the left-to-right write order will
# increase storage density by a lot in a B-tree compared with random order. # increase storage density by a lot in a B-tree compared with random order.
# - If the local database doesn't index by path at all, but does use "rowid" # - If the local database doesn't index by path at all, but does use "rowid"
# internally (like SQLite by default), the left-to-right write order will # internally (like SQLite by default), the left-to-right write order will
# improve read performance when other peers sync reading this local node. # improve read performance when other peers sync reading this local node.
proc cmpSingleNodeRequest(x, y: SingleNodeRequest): int = proc cmpSingleNodeRequest(x, y: SingleNodeRequestBase): int =
# `x` and `y` are deliberately swapped to get descending order. See above. # `x` and `y` are deliberately swapped to get descending order. See above.
cmp(y.path, x.path) cmp(y.path, x.path)
sort(fetch.nodeGetQueue, cmpSingleNodeRequest) sort(fetch.nodeGetQueue, cmpSingleNodeRequest)
@ -199,16 +192,15 @@ proc getNodeData(fetch: FetchState,
hash: TrieHash, path: InteriorPath): Future[Blob] {.async.} = hash: TrieHash, path: InteriorPath): Future[Blob] {.async.} =
## Request _one_ item of trie node data asynchronously. This function ## Request _one_ item of trie node data asynchronously. This function
## batches requested into larger `eth.GetNodeData` requests efficiently. ## batches requested into larger `eth.GetNodeData` requests efficiently.
if traceIndividualNodes: traceIndividualNode "> Fetching individual NodeData", peer=fetch.sp,
trace "> Fetching individual NodeData", peer=fetch.sp, depth=path.depth, path, hash=($hash)
depth=path.depth, path, hash=($hash)
let future = newFuture[Blob]() let future = newFuture[Blob]()
fetch.nodeGetQueue.add(SingleNodeRequest( fetch.nodeGetQueue.add SingleNodeRequestEx(
hash: hash, hash: hash.NodeHash,
path: path, path: path,
future: future future: future)
))
if not fetch.scheduledBatch: if not fetch.scheduledBatch:
fetch.scheduleBatchGetNodeData() fetch.scheduleBatchGetNodeData()
let nodeBytes = await future let nodeBytes = await future
@ -217,17 +209,16 @@ proc getNodeData(fetch: FetchState,
return nodebytes return nodebytes
if tracePackets: if tracePackets:
doAssert nodeBytes.len == 0 or nodeDataHash(nodeBytes) == hash doAssert nodeBytes.len == 0 or nodeBytes.toNodeHash == hash
if traceIndividualNodes: if nodeBytes.len > 0:
if nodeBytes.len > 0: traceIndividualNode "< Received individual NodeData", peer=fetch.sp,
trace "< Received individual NodeData", peer=fetch.sp, depth=path.depth, path, hash=($hash),
depth=path.depth, path, hash=($hash), nodeLen=nodeBytes.len, nodeBytes
nodeLen=nodeBytes.len, nodeBytes=nodeBytes.toHex else:
else: traceIndividualNode "< Received EMPTY individual NodeData", peer=fetch.sp,
trace "< Received EMPTY individual NodeData", peer=fetch.sp, depth=path.depth, path, hash,
depth=path.depth, path, hash=($hash), nodeLen=nodeBytes.len
nodeLen=nodeBytes.len
return nodeBytes return nodeBytes
proc pathInRange(fetch: FetchState, path: InteriorPath): bool = proc pathInRange(fetch: FetchState, path: InteriorPath): bool =
@ -257,7 +248,7 @@ proc traverse(fetch: FetchState, hash: NodeHash, path: InteriorPath,
if fetch.sp.stopThisState or fetch.sp.stopped: if fetch.sp.stopThisState or fetch.sp.stopped:
errorReturn() errorReturn()
let nodeBytes = await fetch.getNodeData(hash, path) let nodeBytes = await fetch.getNodeData(hash.TrieHash, path)
# If something triggered stop, clean up now. # If something triggered stop, clean up now.
if fetch.sp.stopThisState or fetch.sp.stopped: if fetch.sp.stopThisState or fetch.sp.stopped:
@ -305,14 +296,15 @@ proc traverse(fetch: FetchState, hash: NodeHash, path: InteriorPath,
template leafBytes: auto = leafPtr[2] template leafBytes: auto = leafPtr[2]
inc fetch.unwindAccounts inc fetch.unwindAccounts
fetch.unwindAccountBytes += leafBytes.len fetch.unwindAccountBytes += leafBytes.len
inc fetch.sp.sharedFetch.countAccounts inc fetch.sp.nsx.sharedFetch.countAccounts
fetch.sp.sharedFetch.countAccountBytes += leafBytes.len fetch.sp.nsx.sharedFetch.countAccountBytes += leafBytes.len
dec fetch.nodesInFlight dec fetch.nodesInFlight
if fetch.nodesInFlight == 0: if fetch.nodesInFlight == 0:
fetch.finish.complete() fetch.finish.complete()
proc probeGetNodeData(sp: SyncPeer, stateRoot: TrieHash): Future[bool] {.async.} = proc probeGetNodeData(sp: SnapPeerEx, stateRoot: TrieHash): Future[bool]
{.async.} =
# Before doing real trie traversal on this peer, send a probe request for # Before doing real trie traversal on this peer, send a probe request for
# `stateRoot` to see if it's worth pursuing at all. We will avoid reserving # `stateRoot` to see if it's worth pursuing at all. We will avoid reserving
# a slice of leafspace, even temporarily, if no traversal will take place. # a slice of leafspace, even temporarily, if no traversal will take place.
@ -331,15 +323,15 @@ proc probeGetNodeData(sp: SyncPeer, stateRoot: TrieHash): Future[bool] {.async.}
# send an empty reply. We don't want to cut off a peer for other purposes # send an empty reply. We don't want to cut off a peer for other purposes
# such as a source of blocks and transactions, just because it doesn't # such as a source of blocks and transactions, just because it doesn't
# reply to `GetNodeData`. # reply to `GetNodeData`.
let reply = await sp.getNodeData(@[stateRoot], let reply = await sp.getNodeData(
rootInteriorPath, rootInteriorPath) @[stateRoot.NodeHash], InteriorPath(), InteriorPath())
return not reply.isNil and reply.hashVerifiedData.len == 1 return not reply.isNil and reply.hashVerifiedData.len == 1
proc trieFetch*(sp: SyncPeer, stateRoot: TrieHash, proc trieFetch*(sp: SnapPeerEx, stateRoot: TrieHash,
leafRange: LeafRange) {.async.} = leafRange: LeafRange) {.async.} =
if sp.fetch.isNil: if sp.fetchState.isNil:
sp.fetch = FetchState(sp: sp) sp.fetchState = FetchState(sp: sp)
template fetch: auto = sp.fetch template fetch: auto = sp.fetchState
fetch.leafRange = leafRange fetch.leafRange = leafRange
fetch.finish = newFuture[void]() fetch.finish = newFuture[void]()
@ -347,14 +339,15 @@ proc trieFetch*(sp: SyncPeer, stateRoot: TrieHash,
fetch.unwindAccountBytes = 0 fetch.unwindAccountBytes = 0
inc fetch.nodesInFlight inc fetch.nodesInFlight
await fetch.traverse(stateRoot.NodeHash, rootInteriorPath, false) await fetch.traverse(stateRoot.NodeHash, InteriorPath(), false)
await fetch.finish await fetch.finish
if fetch.getNodeDataErrors == 0: if fetch.getNodeDataErrors == 0:
sp.countSlice(leafRange, false) sp.countSlice(leafRange, false)
else: else:
sp.sharedFetch.countAccounts -= fetch.unwindAccounts sp.nsx.sharedFetch.countAccounts -= fetch.unwindAccounts
sp.sharedFetch.countAccountBytes -= fetch.unwindAccountBytes sp.nsx.sharedFetch.countAccountBytes -= fetch.unwindAccountBytes
sp.putSlice(leafRange) sp.putSlice(leafRange)
proc peerSupportsGetNodeData*(sp: SyncPeer): bool = proc peerSupportsGetNodeData*(sp: SnapPeerEx): bool =
template fetch(sp): FetchState = sp.fetchState
not sp.stopped and (sp.fetch.isNil or sp.fetch.getNodeDataErrors == 0) not sp.stopped and (sp.fetch.isNil or sp.fetch.getNodeDataErrors == 0)

View File

@ -0,0 +1,78 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
std/[sets, tables],
chronos,
stint,
".."/[base_desc, path_desc, types]
type
  NodeDataRequestBase* = ref object of RootObj
    ## Stub object, to be inherited

  SingleNodeRequestBase* = ref object of RootObj
    ## Stub object, to be inherited

  NodeDataRequestQueue* = ref object
    ## Book-keeping for `GetNodeData` requests on a single peer.
    liveRequests*: HashSet[NodeDataRequestBase]
      ## Requests currently outstanding -- presumably awaiting a reply;
      ## TODO confirm against the request driver module
    empties*: int
      ## Counter related to empty replies -- NOTE(review): exact semantics
      ## not visible here, verify in the get_nodedata module
    # `OrderedSet` was considered instead of `seq` here, but it has a slow
    # implementation of `excl`, defeating the motivation for using it.
    waitingOnEmpties*: seq[NodeDataRequestBase]
    beforeFirstHash*: seq[NodeDataRequestBase]
    beforeFullHash*: HashSet[NodeDataRequestBase]
    # We need to be able to lookup requests by the hash of reply data.
    # `ptr NodeHash` is used here so the table doesn't require an independent
    # copy of the hash. The hash is part of the request object.
    itemHash*: Table[ptr NodeHash, (NodeDataRequestBase, int)]

  FetchState* = ref object
    ## Account fetching state on a single peer.
    sp*: SnapPeerEx
      ## Back reference to the owning peer descriptor
    nodeGetQueue*: seq[SingleNodeRequestBase]
      ## Pending single-node requests, batched into `GetNodeData` calls
    nodeGetsInFlight*: int
      ## Number of batched requests currently on the wire
    scheduledBatch*: bool
      ## A batching pass has been scheduled but not run yet
    progressPrefix*: string
    progressCount*: int
    nodesInFlight*: int
      ## Trie nodes being traversed; fetch finishes when this drops to zero
    getNodeDataErrors*: int
      ## Error counter; non-zero triggers account count rollback
    leafRange*: LeafRange
      ## Leaf path slice this peer is responsible for
    unwindAccounts*: int64
      ## Accounts to subtract from shared totals on error
    unwindAccountBytes*: int64
      ## Account bytes to subtract from shared totals on error
    finish*: Future[void]
      ## Completed when all in-flight nodes have been processed

  SnapPeerEx* = ref object of SnapPeerBase
    ## Extended peer descriptor carrying snap-sync fetch state.
    nodeDataRequests*: NodeDataRequestQueue
    fetchState*: FetchState
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc `$`*(sp: SnapPeerEx): string =
  ## Render via the base descriptor's string representation.
  `$`(sp.SnapPeerBase)
# ------------------------------------------------------------------------------
# Public getter
# ------------------------------------------------------------------------------
proc ex*(base: SnapPeerBase): SnapPeerEx =
  ## Down-cast a base peer descriptor to the extended variant.
  SnapPeerEx(base)
# ------------------------------------------------------------------------------
# Public setter
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -16,29 +16,29 @@ import
chronos, chronos,
nimcrypto/keccak, nimcrypto/keccak,
stint, stint,
eth/[common/eth_types, p2p, rlp], eth/[common/eth_types, p2p],
../../sync_types, ".."/[path_desc, base_desc, types],
"."/[common, fetch_trie, fetch_snap] "."/[common, fetch_trie, fetch_snap, peer_desc]
# Note: To test disabling snap (or trie), modify `peerSupportsGetNodeData` or # Note: To test disabling snap (or trie), modify `peerSupportsGetNodeData` or
# `peerSupportsSnap` where those are defined. # `peerSupportsSnap` where those are defined.
proc stateFetch*(sp: SyncPeer) {.async.} = proc stateFetch*(sp: SnapPeerEx) {.async.} =
var stateRoot = sp.syncStateRoot.get var stateRoot = sp.syncStateRoot.get
trace "Sync: Syncing from stateRoot", peer=sp, stateRoot=($stateRoot) trace "Snap: Syncing from stateRoot", peer=sp, stateRoot
while true: while true:
if not sp.peerSupportsGetNodeData() and not sp.peerSupportsSnap(): if not sp.peerSupportsGetNodeData() and not sp.peerSupportsSnap():
trace "Sync: Cannot sync more from this peer", peer=sp trace "Snap: Cannot sync more from this peer", peer=sp
return return
if not sp.hasSlice(): if not sp.hasSlice():
trace "Sync: Nothing more to sync from this peer", peer=sp trace "Snap: Nothing more to sync from this peer", peer=sp
while not sp.hasSlice(): while not sp.hasSlice():
await sleepAsync(5.seconds) # TODO: Use an event trigger instead. await sleepAsync(5.seconds) # TODO: Use an event trigger instead.
if sp.syncStateRoot.isNone: if sp.syncStateRoot.isNone:
trace "Sync: No current state root for this peer", peer=sp trace "Snap: No current state root for this peer", peer=sp
while sp.syncStateRoot.isNone and while sp.syncStateRoot.isNone and
(sp.peerSupportsGetNodeData() or sp.peerSupportsSnap()) and (sp.peerSupportsGetNodeData() or sp.peerSupportsSnap()) and
sp.hasSlice(): sp.hasSlice():
@ -46,12 +46,12 @@ proc stateFetch*(sp: SyncPeer) {.async.} =
continue continue
if stateRoot != sp.syncStateRoot.get: if stateRoot != sp.syncStateRoot.get:
trace "Sync: Syncing from new stateRoot", peer=sp, stateRoot=($stateRoot) trace "Snap: Syncing from new stateRoot", peer=sp, stateRoot
stateRoot = sp.syncStateRoot.get stateRoot = sp.syncStateRoot.get
sp.stopThisState = false sp.stopThisState = false
if sp.stopThisState: if sp.stopThisState:
trace "Sync: Pausing sync until we get a new state root", peer=sp trace "Snap: Pausing sync until we get a new state root", peer=sp
while sp.syncStateRoot.isSome and stateRoot == sp.syncStateRoot.get and while sp.syncStateRoot.isSome and stateRoot == sp.syncStateRoot.get and
(sp.peerSupportsGetNodeData() or sp.peerSupportsSnap()) and (sp.peerSupportsGetNodeData() or sp.peerSupportsSnap()) and
sp.hasSlice(): sp.hasSlice():
@ -69,13 +69,11 @@ proc stateFetch*(sp: SyncPeer) {.async.} =
if sp.peerSupportsSnap() and allowSnap: if sp.peerSupportsSnap() and allowSnap:
discard sp.getSlice(leafRange) discard sp.getSlice(leafRange)
trace "Sync: snap.GetAccountRange segment", peer=sp, trace "Snap: snap.GetAccountRange segment", peer=sp,
leafRange=pathRange(leafRange.leafLow, leafRange.leafHigh), leafRange=pathRange(leafRange.leafLow, leafRange.leafHigh), stateRoot
stateRoot=($stateRoot)
await sp.snapFetch(stateRoot, leafRange) await sp.snapFetch(stateRoot, leafRange)
elif sp.peerSupportsGetNodeData(): elif sp.peerSupportsGetNodeData():
discard sp.getSlice(leafRange) discard sp.getSlice(leafRange)
trace "Sync: eth.GetNodeData segment", peer=sp, trace "Snap: eth.GetNodeData segment", peer=sp,
leafRange=pathRange(leafRange.leafLow, leafRange.leafHigh), leafRange=pathRange(leafRange.leafLow, leafRange.leafHigh), stateRoot
stateRoot=($stateRoot)
await sp.trieFetch(stateRoot, leafRange) await sp.trieFetch(stateRoot, leafRange)

View File

@ -0,0 +1,110 @@
# Nimbus - Fetch account and storage states from peers efficiently
#
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
import
std/[sets, strutils],
chronos,
chronicles,
eth/[common/eth_types, p2p],
stint,
".."/[base_desc, path_desc, timer_helper]
{.push raises: [Defect].}
type
  SharedFetchState* = ref object
    ## Account fetching state that is shared among all peers.
    # Leaf path ranges not fetched or in progress on any peer.
    leafRanges*: seq[LeafRange]
    countAccounts*: int64
      ## Number of account leaves fetched so far
    countAccountBytes*: int64
      ## Total bytes of account leaf data fetched so far
    countRange*: UInt256
      ## Leaf path range covered overall (wraps to zero at 100%)
    countRangeStarted*: bool
      ## Distinguishes 0% from 100% when `countRange` is zero
    countRangeSnap*: UInt256
      ## Portion covered via the `snap` protocol
    countRangeSnapStarted*: bool
      ## Distinguishes 0% from 100% when `countRangeSnap` is zero
    countRangeTrie*: UInt256
      ## Portion covered via `eth.GetNodeData` trie traversal
    countRangeTrieStarted*: bool
      ## Distinguishes 0% from 100% when `countRangeTrie` is zero
    logTicker: TimerCallback
      ## Timer handle driving the periodic progress log

  SnapSyncEx* = ref object of SnapSyncBase
    ## Extended sync descriptor carrying the shared fetch state.
    sharedFetch*: SharedFetchState
# ------------------------------------------------------------------------------
# Private timer helpers
# ------------------------------------------------------------------------------
proc rangeFraction(value: UInt256, discriminator: bool): int =
  ## Scale a value in the range 0..2^256 to a fixed-point fraction of
  ## `multiplier`, i.e. 0..10000 representing 0.00%..100.00%. As the top of
  ## the range 2^256 cannot be represented in `UInt256` it actually has the
  ## value `0: UInt256`, and with that value, `discriminator` is consulted
  ## to decide between 0% and 100%. For other values, the result is
  ## constrained to be between slightly above 0% and slightly below 100%,
  ## so that the endpoints are distinctive when displayed.
  const multiplier = 10000
  if value == 0:
    return if discriminator: multiplier else: 0 # Either 100.00% or 0.00%.
  # Only the topmost 64 bits of `value` matter for a 4-digit result.
  const shift = 8 * (sizeof(value) - sizeof(uint64))
  const wordHigh: uint64 = (high(typeof(value)) shr shift).truncate(uint64)
  # Divide `wordHigh+1` by `multiplier`, rounding up, avoiding overflow.
  const wordDiv: uint64 = 1 + ((wordHigh shr 1) div (multiplier.uint64 shr 1))
  let wordValue: uint64 = (value shr shift).truncate(uint64)
  let divided: uint64 = wordValue div wordDiv
  return if divided >= multiplier: multiplier - 1
         elif divided <= 0: 1
         else: divided.int
proc percent(value: UInt256, discriminator: bool): string =
  ## Render `value` as a percentage string with two decimals, e.g. "07.25%".
  var text = intToStr(rangeFraction(value, discriminator), 3)
  text.insert(".", text.len - 2)
  text.add '%'
  text
# Forward declaration: `runLogTicker` and `setLogTicker` re-arm each other.
proc setLogTicker(sf: SharedFetchState; at: Moment) {.gcsafe.}

proc runLogTicker(sf: SharedFetchState) {.gcsafe.} =
  ## Timer callback: log a one-line account sync progress summary, then
  ## re-arm the timer so the report repeats every second.
  doAssert not sf.isNil
  info "State: Account sync progress",
    percent = percent(sf.countRange, sf.countRangeStarted),
    accounts = sf.countAccounts,
    snap = percent(sf.countRangeSnap, sf.countRangeSnapStarted),
    trie = percent(sf.countRangeTrie, sf.countRangeTrieStarted)
  sf.setLogTicker(Moment.fromNow(1.seconds))

proc setLogTicker(sf: SharedFetchState; at: Moment) =
  ## Schedule `runLogTicker` to fire at `at`, keeping the timer handle
  ## on the shared state object.
  sf.logTicker = safeSetTimer(at, runLogTicker, sf)
# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------
proc new*(T: type SharedFetchState; startLoggingAfter = 100.milliseconds): T =
  ## Create a shared fetch state covering the full leaf path range and
  ## arm the periodic progress-logging timer after `startLoggingAfter`.
  let fullRange = LeafRange(
    leafLow: LeafPath.low,
    leafHigh: LeafPath.high)
  result = SharedFetchState(leafRanges: @[fullRange])
  result.logTicker = safeSetTimer(
    Moment.fromNow(startLoggingAfter), runLogTicker, result)
# ------------------------------------------------------------------------------
# Public getters
# ------------------------------------------------------------------------------
proc nsx*[T](sp: T): SnapSyncEx =
  ## Convenience accessor: the peer's `ns` descriptor down-cast to
  ## `SnapSyncEx`. Typically used with `T` instantiated as `SnapPeerEx`.
  SnapSyncEx(sp.ns)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,46 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
chronos
{.push raises: [Defect].}
# Use `safeSetTimer` consistently, with a `ref T` argument if including one.
type
  SafeCallbackFunc*[T] = proc (objectRef: ref T) {.gcsafe, raises: [Defect].}
    ## Timer callback receiving a typed `ref T` argument.
  SafeCallbackFuncVoid* = proc () {.gcsafe, raises: [Defect].}
    ## Timer callback taking no arguments.
proc safeSetTimer*[T](at: Moment, cb: SafeCallbackFunc[T],
                      objectRef: ref T = nil): TimerCallback =
  ## Variant of `setTimer` that forwards a typed `ref T` to the callback.
  ## The `ref` is captured in a closure (rather than smuggled through the
  ## raw `udata` pointer), so the garbage collector keeps the object alive
  ## until the timer fires, avoiding memory corruption.
  proc fire(udata: pointer) = cb(objectRef)
  setTimer(at, fire)
proc safeSetTimer*(at: Moment, cb: SafeCallbackFuncVoid): TimerCallback =
  ## Like `setTimer` but takes no pointer argument. The callback function
  ## takes no arguments.
  ##
  ## Note: the previous signature carried a generic parameter `[T]` that
  ## appeared nowhere in the parameter list, so it could never be inferred
  ## and the proc was uncallable without explicit instantiation. Dropping
  ## it fixes that while remaining call-compatible.
  proc chronosTimerSafeCb(udata: pointer) = cb()
  return setTimer(at, chronosTimerSafeCb)
# Compile-time guard: shadow the three-argument `chronos.setTimer` overload
# so callers cannot pass a raw pointer by accident.
proc setTimer*(at: Moment, cb: CallbackFunc, udata: pointer): TimerCallback
  {.error: "Do not use setTimer with a `pointer` type argument".}
  ## `setTimer` with a non-nil pointer argument is dangerous because
  ## the pointed-to object is often freed or garbage collected before the
  ## timer callback runs. Call `setTimer` with a `ref` argument instead.
proc setTimer*(at: Moment, cb: CallbackFunc): TimerCallback =
  ## Pointer-free form, forwarding to `chronos.setTimer` with `nil` data.
  return chronos.setTimer(at, cb, nil)
# End

View File

@ -0,0 +1,81 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
eth/common/eth_types,
nimcrypto/keccak,
stew/byteutils
{.push raises: [Defect].}
type
  TxHash* = distinct Hash256
    ## Hash of a transaction.
    ##
    ## Note that the `ethXX` protocol driver always uses the
    ## underlying `Hash256` type which needs to be converted to `TxHash`.

  NodeHash* = distinct Hash256
    ## Hash of a trie node or other blob carried over `NodeData` account trie
    ## nodes, storage trie nodes, contract code.
    ##
    ## Note that the `ethXX` and `snapXX` protocol drivers always use the
    ## underlying `Hash256` type which needs to be converted to `NodeHash`.

  BlockHash* = distinct Hash256
    ## Hash of a block, goes with `BlockNumber`.
    ##
    ## Note that the `ethXX` protocol driver always uses the
    ## underlying `Hash256` type which needs to be converted to `BlockHash`.

  TrieHash* = distinct Hash256
    ## Hash of a trie root: accounts, storage, receipts or transactions.
    ##
    ## Note that the `snapXX` protocol driver always uses the underlying
    ## `Hash256` type which needs to be converted to `TrieHash`.
# ------------------------------------------------------------------------------
# Public Constructor
# ------------------------------------------------------------------------------
proc new*(T: type NodeHash): T = Hash256().T
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc untie*(w: TrieHash|NodeHash|BlockHash): Hash256 =
  ## Strip the `distinct` harness, needed for `snap1` and `eth1` protocol
  ## driver access.
  Hash256(w)
proc untie*(w: seq[TrieHash|NodeHash|BlockHash]): seq[Hash256] =
  ## Ditto for sequences. The element typeclass previously read
  ## `NodeHash|NodeHash` -- an obvious duplication typo -- and is widened
  ## here to all three `Hash256` aliases. The cast is zero-copy: each alias
  ## is a `distinct Hash256`, so the memory layout is identical.
  cast[seq[Hash256]](w)
# Mixed equality: a node hash may be compared against a trie root directly.
proc `==`*(a: NodeHash; b: TrieHash): bool = a.Hash256 == b.Hash256

# Same-type equality is borrowed from the underlying `Hash256`.
proc `==`*(a,b: TrieHash): bool {.borrow.}
proc `==`*(a,b: NodeHash): bool {.borrow.}
proc `==`*(a,b: BlockHash): bool {.borrow.}
proc toNodeHash*(data: Blob): NodeHash =
  ## Keccak-256 digest of `data`, wrapped as a `NodeHash`.
  NodeHash(keccak256.digest(data))
# ------------------------------------------------------------------------------
# Public debugging helpers
# ------------------------------------------------------------------------------
proc `$`*(th: TrieHash|NodeHash): string =
  ## Hex dump of the underlying 32-byte hash, for debugging/logging.
  toHex(Hash256(th).data)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -2,9 +2,12 @@
# #
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) # http://www.apache.org/licenses/LICENSE-2.0)
# at your option. This file may not be copied, modified, or distributed except according to those terms. # * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## This module parses Ethereum hexary trie nodes from bytes received over the ## This module parses Ethereum hexary trie nodes from bytes received over the
## network. The data is untrusted, and a non-canonical RLP encoding of the ## network. The data is untrusted, and a non-canonical RLP encoding of the
@ -27,11 +30,12 @@
## `try..except RlpError as e` outside its trie node parsing loop, and pass the ## `try..except RlpError as e` outside its trie node parsing loop, and pass the
## exception to `parseTrieNodeError` if it occurs. ## exception to `parseTrieNodeError` if it occurs.
{.push raises: [Defect].}
import import
eth/[common/eth_types, rlp, p2p], eth/[common/eth_types, p2p, rlp],
".."/[sync_types, trace_helper] ../trace_helper,
"."/[base_desc, path_desc, types]
{.push raises: [Defect].}
type type
TrieNodeParseContext* = object TrieNodeParseContext* = object
@ -39,44 +43,46 @@ type
leafQueue*: seq[(LeafPath, NodeHash, Blob)] leafQueue*: seq[(LeafPath, NodeHash, Blob)]
errors*: int errors*: int
template read(rlp: var Rlp, T: type NodeHash): T =
rlp.read(Hash256).T
template maybeHash(nodeHash: NodeHash, nodeBytes: Blob): string = template maybeHash(nodeHash: NodeHash, nodeBytes: Blob): string =
if nodeBytes.len >= 32: $nodeHash else: "selfEncoded" if nodeBytes.len >= 32: $nodeHash else: "selfEncoded"
proc combinePaths(nodePath, childPath: InteriorPath): string = proc combinePaths(nodePath, childPath: InteriorPath): string =
let nodeHex = nodePath.toHex(false) let nodeHex = nodePath.toHex(withEllipsis = false)
let childHex = childPath.toHex(true) let childHex = childPath.toHex(withEllipsis = true)
nodeHex & "::" & childHex[nodeHex.len..^1] nodeHex & "::" & childHex[nodeHex.len..^1]
template leafError(msg: string{lit}, more: varargs[untyped]) = template leafError(msg: string{lit}, more: varargs[untyped]) =
mixin sp, leafPath, nodePath, nodeHash, nodeBytes, context mixin sp, leafPath, nodePath, nodeHash, nodeBytes, context
debug "Trie leaf data error: " & msg, peer=sp, debug "Trie leaf data error: " & msg, peer=sp,
depth=nodePath.depth, leafDepth=leafPath.depth, `more`, depth=nodePath.depth, leafDepth=leafPath.depth,
path=combinePaths(nodePath, leafPath), `more`, path=combinePaths(nodePath, leafPath),
hash=maybeHash(nodeHash, nodeBytes), hash=maybeHash(nodeHash, nodeBytes), nodeLen=nodeBytes.len, nodeBytes
nodeLen=nodeBytes.len, nodeBytes=nodeBytes.toHex #echo inspect(rlpFromBytes(nodeBytes))
echo inspect(rlpFromBytes(nodeBytes))
inc context.errors inc context.errors
template childError(msg: string{lit}, more: varargs[untyped]) = template childError(msg: string{lit}, more: varargs[untyped]) =
mixin sp, childPath, nodePath, nodeHash, nodeBytes, context mixin sp, childPath, nodePath, nodeHash, nodeBytes, context
debug "Trie data error: " & msg, peer=sp, debug "Trie data error: " & msg, peer=sp,
depth=nodePath.depth, childDepth=childPath.depth, `more`, depth=nodePath.depth, childDepth=childPath.depth,
path=combinePaths(nodePath, childPath), `more`, path=combinePaths(nodePath, childPath),
hash=maybeHash(nodeHash, nodeBytes), hash=maybeHash(nodeHash, nodeBytes), nodeLen=nodeBytes.len, nodeBytes
nodeLen=nodeBytes.len, nodeBytes=nodeBytes.toHex #echo inspect(rlpFromBytes(nodeBytes))
echo inspect(rlpFromBytes(nodeBytes))
inc context.errors inc context.errors
template nodeError(msg: string{lit}, more: varargs[untyped]) = template nodeError(msg: string{lit}, more: varargs[untyped]) =
mixin sp, nodePath, nodeHash, nodeBytes, context mixin sp, nodePath, nodeHash, nodeBytes, context
debug "Trie data error: " & msg, peer=sp, debug "Trie data error: " & msg, peer=sp,
depth=nodePath.depth, `more`, depth=nodePath.depth,
path=nodePath.toHex(true), hash=maybeHash(nodeHash, nodeBytes), `more`, path=nodePath.toHex(withEllipsis = true),
nodeLen=nodeBytes.len, nodeBytes=nodeBytes.toHex hash=maybeHash(nodeHash, nodeBytes), nodeLen=nodeBytes.len, nodeBytes
echo inspect(rlpFromBytes(nodeBytes)) #echo inspect(rlpFromBytes(nodeBytes))
inc context.errors inc context.errors
proc parseLeafValue(sp: SyncPeer, proc parseLeafValue(sp: SnapPeerBase,
nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob, nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob,
nodeRlp: var Rlp, leafPath: InteriorPath, nodeRlp: var Rlp, leafPath: InteriorPath,
context: var TrieNodeParseContext context: var TrieNodeParseContext
@ -113,17 +119,17 @@ proc parseLeafValue(sp: SyncPeer,
trace "Trie: Account leaf found", peer=sp, trace "Trie: Account leaf found", peer=sp,
path=combinePaths(nodePath, leafPath), path=combinePaths(nodePath, leafPath),
nodeHash=maybeHash(nodeHash, nodeBytes), nodeHash=maybeHash(nodeHash, nodeBytes),
leafLen, leafBytes=leafBytes.toHex leafLen, leafBytes
# echo inspect(rlpFromBytes(leafBytes)) # echo inspect(rlpFromBytes(leafBytes))
# Forward declaration, used for bounded, rare recursion. # Forward declaration, used for bounded, rare recursion.
proc parseTrieNode*(sp: SyncPeer, proc parseTrieNode*(sp: SnapPeerBase,
nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob, nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob,
fromExtension: bool, fromExtension: bool,
context: var TrieNodeParseContext context: var TrieNodeParseContext
) {.gcsafe, raises: [Defect, RlpError].} ) {.gcsafe, raises: [Defect, RlpError].}
proc parseExtensionChild(sp: SyncPeer, proc parseExtensionChild(sp: SnapPeerBase,
nodePath: InteriorPath, nodeHash: NodeHash, nodePath: InteriorPath, nodeHash: NodeHash,
nodeBytes: Blob, nodeRlp: var Rlp, nodeBytes: Blob, nodeRlp: var Rlp,
childPath: InteriorPath, childPath: InteriorPath,
@ -167,11 +173,11 @@ proc parseExtensionChild(sp: SyncPeer,
# RLP can be < 32 bytes. Because this is hard to test, let's make < 32 # RLP can be < 32 bytes. Because this is hard to test, let's make < 32
# exit the program for now to see if any appear on Mainnet. # exit the program for now to see if any appear on Mainnet.
doAssert childLen == 32 doAssert childLen == 32
sp.parseTrieNode(childPath, NodeHash(), nodeRlp.toBytes, true, context) sp.parseTrieNode(childPath, NodeHash.new, nodeRlp.toBytes, true, context)
else: else:
childError "Extension node child (RLP element 1) has length > 32 bytes" childError "Extension node child (RLP element 1) has length > 32 bytes"
proc parseExtensionOrLeaf(sp: SyncPeer, proc parseExtensionOrLeaf(sp: SnapPeerBase,
nodePath: InteriorPath, nodeHash: NodeHash, nodePath: InteriorPath, nodeHash: NodeHash,
nodeBytes: Blob, nodeRlp: var Rlp, nodeBytes: Blob, nodeRlp: var Rlp,
fromExtension: bool, fromExtension: bool,
@ -205,11 +211,11 @@ proc parseExtensionOrLeaf(sp: SyncPeer,
# nibble in the first byte must be zero. # nibble in the first byte must be zero.
if (firstByte and (if oddLen == 0: 0xcf else: 0xc0)) != 0: if (firstByte and (if oddLen == 0: 0xcf else: 0xc0)) != 0:
if isLeaf != 0: if isLeaf != 0:
nodeError "Leaf node path suffix (RLP element 0) starts with invalid byte", nodeError "Leaf node path suffix, RLP starts with invalid byte",
invalidByte=[firstByte].toHex invalidByte=[firstByte]
else: else:
nodeError "Extension node prefix (RLP element 0) starts with invalid byte", nodeError "Extension node prefix RLP starts with invalid byte",
invalidByte=[firstByte].toHex invalidByte=[firstByte]
return return
# In the canonical trie encoding, an extension node's child is not allowed to # In the canonical trie encoding, an extension node's child is not allowed to
@ -259,7 +265,7 @@ proc parseExtensionOrLeaf(sp: SyncPeer,
sp.parseExtensionChild(nodePath, nodeHash, nodeBytes, nodeRlp, sp.parseExtensionChild(nodePath, nodeHash, nodeBytes, nodeRlp,
childPath, context) childPath, context)
proc parseBranchNode(sp: SyncPeer, proc parseBranchNode(sp: SnapPeerBase,
nodePath: InteriorPath, nodeHash: NodeHash, nodePath: InteriorPath, nodeHash: NodeHash,
nodeBytes: Blob, nodeRlp: var Rlp, nodeBytes: Blob, nodeRlp: var Rlp,
context: var TrieNodeParseContext context: var TrieNodeParseContext
@ -300,7 +306,7 @@ proc parseBranchNode(sp: SyncPeer,
# RLP can be < 32 bytes. Because this is hard to test, let's make < 32 # RLP can be < 32 bytes. Because this is hard to test, let's make < 32
# exit the program for now to see if any appear on Mainnet. # exit the program for now to see if any appear on Mainnet.
doAssert childLen == 32 doAssert childLen == 32
sp.parseTrieNode(childPath, NodeHash(), nodeRlp.toBytes, false, context) sp.parseTrieNode(childPath, NodeHash.new, nodeRlp.toBytes, false, context)
nodeRlp.skipElem() nodeRlp.skipElem()
else: else:
childError "Branch node child (RLP element i in 0..15) has length > 32 bytes", i childError "Branch node child (RLP element i in 0..15) has length > 32 bytes", i
@ -333,7 +339,7 @@ proc parseBranchNode(sp: SyncPeer,
branches=branchCount, minBranches=2 branches=branchCount, minBranches=2
return return
proc parseTrieNode*(sp: SyncPeer, proc parseTrieNode*(sp: SnapPeerBase,
nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob, nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob,
fromExtension: bool, context: var TrieNodeParseContext fromExtension: bool, context: var TrieNodeParseContext
) {.raises: [Defect, RlpError].} = ) {.raises: [Defect, RlpError].} =
@ -369,7 +375,7 @@ proc parseTrieNode*(sp: SyncPeer,
## root node of a trie, otherwise it is the value stored in `childQueue` ## root node of a trie, otherwise it is the value stored in `childQueue`
## from parsing the parent node. ## from parsing the parent node.
## ##
## - The `sp: SyncPeer` is like the hash, only used for diagnostics. When ## - The `sp: SnapPeerBase` is like the hash, only used for diagnostics. When
## there is invalid data, it's useful to show where we got it from. ## there is invalid data, it's useful to show where we got it from.
## ##
## - Some limited recursion is possible while parsing, because of how < 32 ## - Some limited recursion is possible while parsing, because of how < 32
@ -433,7 +439,7 @@ proc parseTrieNode*(sp: SyncPeer,
listLen=nodeListLen listLen=nodeListLen
return return
proc parseTrieNodeError*(sp: SyncPeer, nodePath: InteriorPath, proc parseTrieNodeError*(sp: SnapPeerBase, nodePath: InteriorPath,
nodeHash: NodeHash, nodeBytes: Blob, nodeHash: NodeHash, nodeBytes: Blob,
context: var TrieNodeParseContext, context: var TrieNodeParseContext,
exception: ref RlpError) = exception: ref RlpError) =

View File

@ -1,318 +0,0 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Shared types, data structures and shared utilities used by the eth1
## network sync processes.
import
std/options,
stint, stew/byteutils, chronicles, chronos,
eth/[common/eth_types, p2p]
type
  SnapSync* = ref object of RootObj
    ## Shared state among all peers of a syncing node.
    syncPeers*: seq[SyncPeer]          # All peer workers currently attached.
    sharedFetch: SharedFetchState      # Exported via templates.

  SyncPeer* = ref object
    ## Peer state tracking.
    ns*: SnapSync                      # Back-reference to the shared state.
    peer*: Peer                        # p2pProtocol(eth65).
    stopped*: bool                     # Peer no longer in use (presumably set
                                       # on disconnect) -- TODO confirm.
    pendingGetBlockHeaders*:bool       # A GetBlockHeaders request is in flight.
    stats*: SyncPeerStats              # Event counters, see below.

    # Peer canonical chain head ("best block") search state.
    syncMode*: SyncPeerMode
    bestBlockNumber*: BlockNumber
    bestBlockHash*: BlockHash
    huntLow*: BlockNumber              # Recent highest known present block.
    huntHigh*: BlockNumber             # Recent lowest known absent block.
    huntStep*: typeof(BlocksRequest.skip)

    # State root to fetch state for.
    # This changes during sync and is slightly different for each peer.
    syncStateRoot*: Option[TrieHash]

    nodeDataRequests: NodeDataRequestQueue # Exported via templates.
    fetch: FetchState                      # Exported via templates.
    startedFetch*: bool                # State fetching has begun for this peer.
    stopThisState*: bool               # Stop fetching for the current state root.

  SyncPeerMode* = enum
    ## The current state of tracking the peer's canonical chain head.
    ## `bestBlockNumber` is only valid when this is `SyncLocked`.
    SyncLocked
    SyncOnlyHash
    SyncHuntForward
    SyncHuntBackward
    SyncHuntRange
    SyncHuntRangeFinal

  SyncPeerStats = object
    ## Statistics counters for events associated with this peer.
    ## These may be used to recognise errors and select good peers.
    ok*: SyncPeerStatsOk
    minor*: SyncPeerStatsMinor
    major*: SyncPeerStatsMajor

  SyncPeerStatsOk = object
    ## Counters for normal, successful events.
    reorgDetected*: Stat
    getBlockHeaders*: Stat
    getNodeData*: Stat

  SyncPeerStatsMinor = object
    ## Counters for recoverable annoyances.
    timeoutBlockHeaders*: Stat
    unexpectedBlockHash*: Stat

  SyncPeerStatsMajor = object
    ## Counters for serious peer misbehaviour or failures.
    networkErrors*: Stat
    excessBlockHeaders*: Stat
    wrongBlockHeader*: Stat

  Stat = distinct int
    # Distinct counter type; only `inc` is borrowed below.

  BlockHash* = Hash256
    ## Hash of a block, goes with `BlockNumber`.

  TxHash* = Hash256
    ## Hash of a transaction.

  TrieHash* = Hash256
    ## Hash of a trie root: accounts, storage, receipts or transactions.

  NodeHash* = Hash256
    ## Hash of a trie node or other blob carried over `eth.NodeData`:
    ## account trie nodes, storage trie nodes, contract code.

  InteriorPath* = object
    ## Path to an interior node in an Ethereum hexary trie. This is a sequence
    ## of 0 to 64 hex digits. 0 digits means the root node, and 64 digits
    ## means a leaf node whose path hasn't been converted to `LeafPath` yet.
    bytes: array[32, byte]             # Access with `path.digit(i)` instead.
    numDigits: byte                    # Access with `path.depth` instead.

  LeafPath* = object
    ## Path to a leaf in an Ethereum hexary trie. Individually, each leaf path
    ## is a hash, but rather than being the hash of the contents, it's the hash
    ## of the item's address. Collectively, these hashes have some 256-bit
    ## numerical properties: ordering, intervals and meaningful difference.
    number: UInt256

  # Use `import snap/get_nodedata` to access the real type's methods.
  NodeDataRequestQueue {.inheritable, pure.} = ref object

  # Use `import snap/pie/trie_fetch` to access the real type's methods.
  SharedFetchState {.inheritable, pure.} = ref object

  # Use `import snap/pie/trie_fetch` to access the real type's methods.
  FetchState {.inheritable, pure.} = ref object

# Increment a `Stat` counter, borrowed from the underlying `int`.
proc inc(stat: var Stat) {.borrow.}

# Accessor templates for the opaque fields above. The real (derived) types
# live in other modules; these expose the base references for them to cast.
template nodeDataRequestsBase*(sp: SyncPeer): auto =
  sp.nodeDataRequests
template `nodeDataRequests=`*(sp: SyncPeer, value: auto) =
  sp.nodeDataRequests = value

template sharedFetchBase*(sp: SyncPeer): auto =
  sp.ns.sharedFetch
template `sharedFetch=`*(sp: SyncPeer, value: auto) =
  sp.ns.sharedFetch = value

template fetchBase*(sp: SyncPeer): auto =
  sp.fetch
template `fetch=`*(sp: SyncPeer, value: auto) =
  sp.fetch = value
## `InteriorPath` methods.

# Maximum number of hex digits in a path: 32 bytes = 64 nibbles.
template maxDepth*(_: InteriorPath | typedesc[InteriorPath]): int = 64

template rootInteriorPath*(): InteriorPath =
  # Initialised to empty sequence.
  InteriorPath()

# Identity conversion, so generic code can call `toInteriorPath` uniformly.
# (`interiorpath` and `interiorPath` match via Nim's style insensitivity.)
template toInteriorPath*(interiorpath: InteriorPath): InteriorPath =
  interiorPath

# Convert a leaf path back into a full-depth (64 digit) interior path.
template toInteriorPath*(leafPath: LeafPath): InteriorPath =
  doAssert sizeof(leafPath.number.toBytesBE) * 2 == InteriorPath.maxDepth
  doAssert sizeof(leafPath.number.toBytesBE) == sizeof(InteriorPath().bytes)
  InteriorPath(bytes: leafPath.number.toBytesBE,
               numDigits: InteriorPath.maxDepth)

# Number of hex digits currently in the path (0 = root node).
template depth*(path: InteriorPath): int =
  path.numDigits.int
proc digit*(path: InteriorPath, index: int): int =
  ## Return the hex digit (nibble) at position `index` of `path`.
  ## Even indices occupy the high nibble of the packed byte, odd indices
  ## the low nibble.
  doAssert index >= 0 and index < path.numDigits.int
  let packed = path.bytes[index shr 1]
  if (index and 1) == 0:
    int(packed shr 4)
  else:
    int(packed and 0x0f)
proc add*(path: var InteriorPath, digit: byte) =
  ## Append one hex digit (the low nibble of `digit`) to the path.
  doAssert path.numDigits < InteriorPath.maxDepth
  inc path.numDigits
  if (path.numDigits and 1) != 0:
    # Odd count after increment: the new digit starts a fresh byte and
    # occupies its high nibble.
    path.bytes[path.numDigits shr 1] = (digit shl 4)
  else:
    # Even count: fill the low nibble of the byte begun by the previous digit.
    path.bytes[(path.numDigits shr 1) - 1] += (digit and 0x0f)

proc addPair*(path: var InteriorPath, digitPair: byte) =
  ## Append two hex digits (one whole byte, high nibble first) to the path.
  doAssert path.numDigits < InteriorPath.maxDepth - 1
  path.numDigits += 2
  if (path.numDigits and 1) == 0:
    # Path was byte-aligned: the pair lands in a single fresh byte.
    path.bytes[(path.numDigits shr 1) - 1] = digitPair
  else:
    # Path was mid-byte: split the pair across two bytes.
    path.bytes[(path.numDigits shr 1) - 1] += (digitPair shr 4)
    path.bytes[path.numDigits shr 1] = (digitPair shl 4)

proc pop*(path: var InteriorPath) =
  ## Remove the last hex digit, zeroing the vacated nibble so that unused
  ## storage stays zero-padded (the byte-wise comparisons rely on this).
  doAssert path.numDigits >= 1
  dec path.numDigits
  path.bytes[path.numDigits shr 1] =
    if (path.numDigits and 1) == 0: 0.byte
    else: path.bytes[path.numDigits shr 1] and 0xf0
proc `==`*(path1, path2: InteriorPath): bool =
  ## Equality: same depth and same digit sequence.
  ##
  ## The depth check matters: unused storage is zero-padded, so comparing
  ## bytes alone would make a path equal to its own zero-extension --
  ## e.g. the root path (0 digits) would equal the one-digit path "0".
  if path1.numDigits != path2.numDigits:
    return false
  # Zero padding past the used digits makes a byte-wise scan over the
  # occupied prefix sufficient.
  for i in 0 ..< (path1.numDigits.int + 1) shr 1:
    if path1.bytes[i] != path2.bytes[i]:
      return false
  return true
proc `<=`*(path1, path2: InteriorPath): bool =
  ## Lexicographic ordering over the digit sequences.
  # Paths are zero-padded to the end of the array, so a byte-wise scan works.
  for i in 0 ..< (max(path1.numDigits, path2.numDigits).int + 1) shr 1:
    if path1.bytes[i] != path2.bytes[i]:
      return path1.bytes[i] <= path2.bytes[i]
  # All compared bytes match: use depth as a tiebreaker so that a path and
  # its zero-extension (e.g. root vs "0") are strictly ordered rather than
  # indistinguishable, keeping the order total over distinct digit sequences.
  return path1.numDigits <= path2.numDigits

proc cmp*(path1, path2: InteriorPath): int =
  ## Three-way comparison, `cmp`-style: negative, zero or positive.
  # Paths are zero-padded to the end of the array, so a byte-wise scan works.
  for i in 0 ..< (max(path1.numDigits, path2.numDigits).int + 1) shr 1:
    if path1.bytes[i] != path2.bytes[i]:
      return path1.bytes[i].int - path2.bytes[i].int
  # Same tiebreaker as `<=`: shorter (zero-extended) path sorts first.
  return path1.numDigits.int - path2.numDigits.int

# Derived comparison operators, expressed via `==` and `<=`.
template `!=`*(path1, path2: InteriorPath): auto = not(path1 == path2)
template `<`*(path1, path2: InteriorPath): auto = not(path2 <= path1)
template `>=`*(path1, path2: InteriorPath): auto = path2 <= path1
template `>`*(path1, path2: InteriorPath): auto = not(path1 <= path2)
## `LeafPath` methods.

# Smallest and largest possible leaf paths.
template low*(_: LeafPath | type LeafPath): auto =
  LeafPath(number: low(UInt256))
template high*(_: LeafPath | type LeafPath): auto =
  LeafPath(number: high(UInt256))

# Byte width of a leaf path (32, the size of the UInt256 representation).
const leafPathBytes = sizeof(LeafPath().number.toBytesBE)

# Identity conversion, so generic code can call `toLeafPath` uniformly.
template toLeafPath*(leafPath: LeafPath): LeafPath =
  leafPath

# Convert a full-depth (64 digit) interior path into a leaf path.
template toLeafPath*(interiorPath: InteriorPath): LeafPath =
  doAssert interiorPath.numDigits == InteriorPath.maxDepth
  doAssert sizeof(interiorPath.bytes) * 2 == InteriorPath.maxDepth
  doAssert sizeof(interiorPath.bytes) == leafPathBytes
  LeafPath(number: UInt256.fromBytesBE(interiorPath.bytes))

# Convert a raw 32-byte big-endian array into a leaf path.
template toLeafPath*(bytes: array[leafPathBytes, byte]): LeafPath =
  doAssert sizeof(bytes) == leafPathBytes
  LeafPath(number: UInt256.fromBytesBE(bytes))

# Big-endian byte representation of the leaf path.
template toBytes*(leafPath: LeafPath): array[leafPathBytes, byte] =
  doAssert sizeof(LeafPath().number.toBytesBE) == leafPathBytes
  leafPath.number.toBytesBE

# Note, `{.borrow.}` didn't work for these symbols (with Nim 1.2.12) when we
# defined `LeafPath = distinct UInt256`. The `==` didn't match any symbol to
# borrow from, and the auto-generated `<` failed to compile, with a peculiar
# type mismatch error.
template `==`*(path1, path2: LeafPath): auto = path1.number == path2.number
template `!=`*(path1, path2: LeafPath): auto = path1.number != path2.number
template `<`*(path1, path2: LeafPath): auto = path1.number < path2.number
template `<=`*(path1, path2: LeafPath): auto = path1.number <= path2.number
template `>`*(path1, path2: LeafPath): auto = path1.number > path2.number
template `>=`*(path1, path2: LeafPath): auto = path1.number >= path2.number
template cmp*(path1, path2: LeafPath): auto = cmp(path1.number, path2.number)

# 256-bit interval arithmetic over leaf paths: difference of two paths, and
# offsetting a path by a step.
template `-`*(path1, path2: LeafPath): UInt256 =
  path1.number - path2.number
template `+`*(base: LeafPath, step: Uint256 | SomeInteger): LeafPath =
  LeafPath(number: base.number + step)
template `-`*(base: LeafPath, step: Uint256 | SomeInteger): LeafPath =
  LeafPath(number: base.number - step)
## String output functions.

# Render a sync peer via its underlying p2p peer.
template `$`*(sp: SyncPeer): string = $sp.peer

# Hashes and blobs render as plain hex.
template `$`*(hash: Hash256): string = hash.data.toHex
template `$`*(blob: Blob): string = blob.toHex

template `$`*(hashOrNum: HashOrNum): string =
  # It's always obvious which one from the visible length of the string.
  if hashOrNum.isHash: $hashOrNum.hash
  else: $hashOrNum.number
proc toHex*(path: InteriorPath, withEllipsis = true): string =
  ## Hex digit string of the path, e.g. `"41f6..."`.
  ##
  ## With `withEllipsis` (the default), `"..."` is appended to show that the
  ## path is a prefix -- but only when the result still fits within
  ## `maxDepth` characters. The previous `min(digits + 3, 64)` sizing let
  ## the digit loop overwrite part of the ellipsis for 62 or 63 digit paths,
  ## producing a mangled `".."` or `"."` suffix; such paths now render
  ## without an ellipsis instead.
  const hexChars = "0123456789abcdef"
  let digits = path.numDigits.int
  if withEllipsis and digits + 3 <= InteriorPath.maxDepth:
    result = newString(digits + 3)
    result[^3] = '.'
    result[^2] = '.'
    result[^1] = '.'
  else:
    result = newString(digits)
  for i in 0 ..< digits:
    result[i] = hexChars[path.digit(i)]

template `$`*(path: InteriorPath): string = path.toHex
proc pathRange*(path1, path2: InteriorPath): string =
  ## Compact "start-end" rendering of an interior path interval, without
  ## ellipses on either endpoint.
  result = path1.toHex(false)
  result.add '-'
  result.add path2.toHex(false)
# Fixed-width (64 character) hex rendering of a leaf path.
template toHex*(path: LeafPath): string = path.number.toBytesBE.toHex
template `$`*(path: LeafPath): string = path.toHex

proc pathRange*(path1, path2: LeafPath): string =
  ## Compact "start-end" rendering of a leaf path interval.
  path1.toHex & '-' & path2.toHex

# Re-export common helpers so importers get the hex/`$` machinery too.
export Blob, Hash256, toHex

# The files and lines clutter more useful details when sync tracing is enabled.
publicLogScope: chroniclesLineNumbers=false
# Use `safeSetTimer` consistently, with a `ref T` argument if including one.
type
  # Timer callback receiving a typed, GC-tracked object reference.
  SafeCallbackFunc*[T] = proc (objectRef: ref T) {.gcsafe, raises: [Defect].}
  # Timer callback taking no arguments.
  SafeCallbackFuncVoid* = proc () {.gcsafe, raises: [Defect].}

proc safeSetTimer*[T](at: Moment, cb: SafeCallbackFunc[T],
                      objectRef: ref T = nil): TimerCallback =
  ## Like `setTimer` but takes a typed `ref T` argument, which is passed to the
  ## callback function correctly typed. Stores the `ref` in a closure to avoid
  ## garbage collection memory corruption issues that occur when the `setTimer`
  ## pointer argument is used.
  # `udata` is deliberately ignored; `objectRef` travels via the closure,
  # which keeps it alive for the GC until the timer fires.
  proc chronosTimerSafeCb(udata: pointer) = cb(objectRef)
  return setTimer(at, chronosTimerSafeCb)
proc safeSetTimer*(at: Moment, cb: SafeCallbackFuncVoid): TimerCallback =
  ## Like `setTimer` but takes no pointer argument. The callback function
  ## takes no arguments.
  ##
  ## Fix: dropped the spurious `[T]` generic parameter of the previous
  ## version. `T` appeared nowhere in the signature, so it could never be
  ## inferred and a plain `safeSetTimer(at, cb)` call could not instantiate
  ## this overload.
  # The inner proc adapts the argument-less callback to the `pointer`-taking
  # chronos callback shape; the pointer is ignored.
  proc chronosTimerSafeCb(udata: pointer) = cb()
  return setTimer(at, chronosTimerSafeCb)
# Shadow chronos' three-argument `setTimer` with a compile-time error so the
# unsafe raw-pointer form cannot be called from this codebase.
proc setTimer*(at: Moment, cb: CallbackFunc, udata: pointer): TimerCallback
  {.error: "Do not use setTimer with a `pointer` type argument".}
  ## `setTimer` with a non-nil pointer argument is dangerous because
  ## the pointed-to object is often freed or garbage collected before the
  ## timer callback runs. Call `setTimer` with a `ref` argument instead.

proc setTimer*(at: Moment, cb: CallbackFunc): TimerCallback =
  ## Safe two-argument form: forwards to chronos with an explicit `nil`
  ## user-data pointer.
  chronos.setTimer(at, cb, nil)

View File

@ -14,6 +14,8 @@ import
eth/common/eth_types, eth/common/eth_types,
stew/byteutils stew/byteutils
{.push raises: [Defect].}
const const
tracePackets* = true tracePackets* = true
## Whether to `trace` log each sync network message. ## Whether to `trace` log each sync network message.
@ -40,6 +42,8 @@ template traceNetworkError*(msg: static[string], args: varargs[untyped]) =
if traceNetworkErrors: trace `msg`, `args` if traceNetworkErrors: trace `msg`, `args`
template tracePacketError*(msg: static[string], args: varargs[untyped]) = template tracePacketError*(msg: static[string], args: varargs[untyped]) =
if tracePacketErrors: trace `msg`, `args` if tracePacketErrors: trace `msg`, `args`
template traceIndividualNode*(msg: static[string], args: varargs[untyped]) =
if traceIndividualNodes: trace `msg`, `args`
func toHex*(hash: Hash256): string = func toHex*(hash: Hash256): string =
## Shortcut for buteutils.toHex(hash.data) ## Shortcut for buteutils.toHex(hash.data)
@ -51,4 +55,15 @@ func traceStep*(request: BlocksRequest): string =
return str & $(request.skip + 1) return str & $(request.skip + 1)
return static($(high(typeof(request.skip)).u256 + 1)) return static($(high(typeof(request.skip)).u256 + 1))
proc `$`*(hash: Hash256): string =
hash.data.toHex
proc `$`*(blob: Blob): string =
blob.toHex
proc `$`*(hashOrNum: HashOrNum): string =
# It's always obvious which one from the visible length of the string.
if hashOrNum.isHash: $hashOrNum.hash
else: $hashOrNum.number
# End # End