## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/sequtils
import std/sets
import std/options
import std/algorithm
import std/sugar

import pkg/chronos
import pkg/libp2p/[cid, switch, multihash, multicodec]
import pkg/metrics
import pkg/stint
import pkg/questionable

import ../../stores/blockstore
import ../../blocktype
import ../../utils
import ../../utils/exceptions
import ../../utils/trackedfutures
import ../../merkletree
import ../../logutils
import ../../manifest

import ../protobuf/blockexc
import ../protobuf/presence

import ../network
import ../peers

import ./payments
import ./discovery
import ./advertiser
import ./pendingblocks

export peers, pendingblocks, payments, discovery

logScope:
  topics = "codex blockexcengine"

declareCounter(codex_block_exchange_want_have_lists_sent, "codex blockexchange wantHave lists sent")
declareCounter(codex_block_exchange_want_have_lists_received, "codex blockexchange wantHave lists received")
declareCounter(codex_block_exchange_want_block_lists_sent, "codex blockexchange wantBlock lists sent")
declareCounter(codex_block_exchange_want_block_lists_received, "codex blockexchange wantBlock lists received")
declareCounter(codex_block_exchange_blocks_sent, "codex blockexchange blocks sent")
declareCounter(codex_block_exchange_blocks_received, "codex blockexchange blocks received")

const
  DefaultMaxPeersPerRequest* = 10
  DefaultTaskQueueSize = 100
  DefaultConcurrentTasks = 10
  # DefaultMaxRetries = 3
  # DefaultConcurrentDiscRequests = 10
  # DefaultConcurrentAdvertRequests = 10
  # DefaultDiscoveryTimeout = 1.minutes
  # DefaultMaxQueriedBlocksCache = 1000
  # DefaultMinPeersPerBlock = 3

type
  TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}
  TaskScheduler* = proc(task: BlockExcPeerCtx): bool {.gcsafe.}

  BlockExcEngine* = ref object of RootObj
    localStore*: BlockStore                       # Local block store for this instance
    network*: BlockExcNetwork                     # Network interface
    peers*: PeerCtxStore                          # Peers we're currently actively exchanging with
    taskQueue*: AsyncHeapQueue[BlockExcPeerCtx]   # Peers we're currently processing tasks for
    concurrentTasks: int                          # Number of concurrent peers we're serving at any given time
    trackedFutures: TrackedFutures                # Tracks futures of blockexc tasks
    blockexcRunning: bool                         # Indicates if the blockexc task is running
    pendingBlocks*: PendingBlocksManager          # Blocks we're awaiting to be resolved
    peersPerRequest: int                          # Max number of peers to request from
    wallet*: WalletRef                            # Nitro wallet for micropayments
    pricing*: ?Pricing                            # Optional bandwidth pricing
    blockFetchTimeout*: Duration                  # Timeout for fetching blocks over the network
    discovery*: DiscoveryEngine
    advertiser*: Advertiser

  Pricing* = object
    address*: EthAddress
    price*: UInt256

# attach task scheduler to engine
proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe.} =
  # Returns false when the peer context could neither be pushed onto nor
  # updated in the task queue (i.e. the queue is full).
  b.taskQueue.pushOrUpdateNoWait(task).isOk()

proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).}

proc start*(b: BlockExcEngine) {.async.} =
  ## Start the blockexc task
  ##

  await b.discovery.start()
  await b.advertiser.start()

  trace "Blockexc starting with concurrent tasks", tasks = b.concurrentTasks
  if b.blockexcRunning:
    warn "Starting blockexc twice"
    return

  b.blockexcRunning = true
  for i in 0..<b.concurrentTasks:
    let fut = b.blockexcTaskRunner()
    b.trackedFutures.track(fut)
    asyncSpawn fut

proc stop*(b: BlockExcEngine) {.async.} =
  ## Stop the blockexc task
  ##

  await b.discovery.stop()
  await b.advertiser.stop()

  trace "NetworkStore stop"
  if not b.blockexcRunning:
    warn "Stopping blockexc without starting it"
    return

  b.blockexcRunning = false
  await b.trackedFutures.cancelTracked()

  trace "NetworkStore stopped"

proc sendWantHave(
  b: BlockExcEngine,
  addresses: seq[BlockAddress],
  excluded: seq[BlockExcPeerCtx],
  peers: seq[BlockExcPeerCtx]): Future[void] {.async.} =
  for p in peers:
    if p notin excluded:
      let toAsk = addresses.filterIt(it notin p.peerHave)
      trace "Sending wantHave request", toAsk, peer = p.id
      await b.network.request.sendWantList(
        p.id,
        toAsk,
        wantType = WantType.WantHave)

proc sendWantBlock(
  b: BlockExcEngine,
  addresses: seq[BlockAddress],
  blockPeer: BlockExcPeerCtx): Future[void] {.async.} =
  trace "Sending wantBlock request to", addresses, peer = blockPeer.id
  await b.network.request.sendWantList(
    blockPeer.id,
    addresses,
    wantType = WantType.WantBlock) # we want this remote to send us a block

proc monitorBlockHandle(
  b: BlockExcEngine,
  handle: Future[Block],
  address: BlockAddress,
  peerId: PeerId) {.async.} =

  try:
    discard await handle
  except CancelledError as exc:
    trace "Block handle cancelled", address, peerId
  except CatchableError as exc:
    warn "Error block handle, disconnecting peer", address, exc = exc.msg, peerId

    # TODO: really, this is just a quick and dirty way of
    # preventing hitting the same "bad" peer every time, however,
    # we might as well discover this on our next iteration, so
    # it doesn't mean that we're never talking to this peer again.
    # TODO: we need a lot more work around peer selection and
    # prioritization

    # drop unresponsive peer
    await b.network.switch.disconnect(peerId)
    b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])

proc requestBlock*(
  b: BlockExcEngine,
  address: BlockAddress,
): Future[?!Block] {.async.} =
  let blockFuture = b.pendingBlocks.getWantHandle(address, b.blockFetchTimeout)

  if not b.pendingBlocks.isInFlight(address):
    let peers = b.peers.selectCheapest(address)
    if peers.len == 0:
      b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])

    let maybePeer =
      if peers.len > 0:
        peers[hash(address) mod peers.len].some
      elif b.peers.len > 0:
        toSeq(b.peers)[hash(address) mod b.peers.len].some
      else:
        BlockExcPeerCtx.none

    if peer =? maybePeer:
      asyncSpawn b.monitorBlockHandle(blockFuture, address, peer.id)
      b.pendingBlocks.setInFlight(address)
      # TODO: Send more block addresses if at all sensible.
      await b.sendWantBlock(@[address], peer)
      codex_block_exchange_want_block_lists_sent.inc()
      await b.sendWantHave(@[address], @[peer], toSeq(b.peers))
      codex_block_exchange_want_have_lists_sent.inc()

  # Don't let timeouts bubble up. We can't be too broad here or we break
  # cancellations.
  try:
    success await blockFuture
  except AsyncTimeoutError as err:
    failure err

proc requestBlock*(
  b: BlockExcEngine,
  cid: Cid
): Future[?!Block] =
  b.requestBlock(BlockAddress.init(cid))

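# Illustrative call site (a sketch only; the calling context is assumed and is
# not part of this module). Callers typically unwrap the ?!Block result with
# the questionable/results operators, e.g.:
#
#   without blk =? await engine.requestBlock(cid), err:
#     trace "Unable to fetch block", cid, err = err.msg
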
proc blockPresenceHandler*(
  b: BlockExcEngine,
  peer: PeerId,
  blocks: seq[BlockPresence]) {.async.} =
  let
    peerCtx = b.peers.get(peer)
    wantList = toSeq(b.pendingBlocks.wantList)

  if peerCtx.isNil:
    return

  for blk in blocks:
    if presence =? Presence.init(blk):
      peerCtx.setPresence(presence)

  let
    peerHave = peerCtx.peerHave
    dontWantCids = peerHave.filterIt(
      it notin wantList
    )

  if dontWantCids.len > 0:
    peerCtx.cleanPresence(dontWantCids)

  let
    wantCids = wantList.filterIt(
      it in peerHave
    )

  if wantCids.len > 0:
    trace "Peer has blocks in our wantList", peer, wantCount = wantCids.len
    await b.sendWantBlock(wantCids, peerCtx)

  # if none of the connected peers report our wants in their have list,
  # fire up discovery
  b.discovery.queueFindBlocksReq(
    toSeq(b.pendingBlocks.wantListCids)
    .filter do(cid: Cid) -> bool:
      not b.peers.anyIt( cid in it.peerHaveCids ))

proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
  let
    cids = blocksDelivery.mapIt( it.blk.cid )

  # schedule any new peers to provide blocks to
  for p in b.peers:
    for c in cids: # for each cid
      # schedule a peer if it wants at least one cid
      # and we have it in our local store
      if c in p.peerWantsCids:
        if await (c in b.localStore):
          if b.scheduleTask(p):
            trace "Task scheduled for peer", peer = p.id
          else:
            warn "Unable to schedule task for peer", peer = p.id

          break # do next peer

proc cancelBlocks(b: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} =
  ## Tells neighboring peers that we're no longer interested in a block.
  trace "Sending block request cancellations to peers", addrs = addrs.len

  let failed = (await allFinished(
    b.peers.mapIt(
      b.network.request.sendWantCancellations(
        peer = it.id,
        addresses = addrs))))
    .filterIt(it.failed)

  if failed.len > 0:
    warn "Failed to send block request cancellations to peers", peers = failed.len

proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
  b.pendingBlocks.resolve(blocksDelivery)
  await b.scheduleTasks(blocksDelivery)
  await b.cancelBlocks(blocksDelivery.mapIt(it.address))

proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} =
  await b.resolveBlocks(
    blocks.mapIt(
      BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)
  )))

proc payForBlocks(engine: BlockExcEngine,
                  peer: BlockExcPeerCtx,
                  blocksDelivery: seq[BlockDelivery]) {.async.} =
  let
    sendPayment = engine.network.request.sendPayment
    price = peer.price(blocksDelivery.mapIt(it.address))

  if payment =? engine.wallet.pay(peer, price):
    trace "Sending payment for blocks", price, len = blocksDelivery.len
    await sendPayment(peer.id, payment)

proc validateBlockDelivery(
  b: BlockExcEngine,
  bd: BlockDelivery): ?!void =
  if bd.address notin b.pendingBlocks:
    return failure("Received block is not currently a pending block")

  if bd.address.leaf:
    without proof =? bd.proof:
      return failure("Missing proof")

    if proof.index != bd.address.index:
      return failure("Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index)

    without leaf =? bd.blk.cid.mhash.mapFailure, err:
      return failure("Unable to get mhash from cid for block, nested err: " & err.msg)

    without treeRoot =? bd.address.treeCid.mhash.mapFailure, err:
      return failure("Unable to get mhash from treeCid for block, nested err: " & err.msg)

    if err =? proof.verify(leaf, treeRoot).errorOption:
      return failure("Unable to verify proof for block, nested err: " & err.msg)

  else: # not leaf
    if bd.address.cid != bd.blk.cid:
      return failure("Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid)

  return success()

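# Note: the validation above is purely local. A leaf delivery must carry a
# Merkle proof whose index matches the requested leaf and which verifies
# against the multihash of the delivery's treeCid; a non-leaf delivery only
# has to hash to the requested cid. Deliveries that fail validation are
# dropped by the handler below.
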
proc blocksDeliveryHandler*(
  b: BlockExcEngine,
  peer: PeerId,
  blocksDelivery: seq[BlockDelivery]) {.async.} =
  trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt($it.address)).join(",")

  var validatedBlocksDelivery: seq[BlockDelivery]
  for bd in blocksDelivery:
    logScope:
      peer = peer
      address = bd.address

    if err =? b.validateBlockDelivery(bd).errorOption:
      warn "Block validation failed", msg = err.msg
      continue

    if err =? (await b.localStore.putBlock(bd.blk)).errorOption:
      error "Unable to store block", err = err.msg
      continue

    if bd.address.leaf:
      without proof =? bd.proof:
        error "Proof expected for a leaf block delivery"
        continue
      if err =? (await b.localStore.putCidAndProof(
          bd.address.treeCid,
          bd.address.index,
          bd.blk.cid,
          proof)).errorOption:

        error "Unable to store proof and cid for a block"
        continue

    validatedBlocksDelivery.add(bd)

  await b.resolveBlocks(validatedBlocksDelivery)
  codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)

  let
    peerCtx = b.peers.get(peer)

  if peerCtx != nil:
    await b.payForBlocks(peerCtx, blocksDelivery)
    ## shouldn't we remove them from the want-list instead of this:
    peerCtx.cleanPresence(blocksDelivery.mapIt( it.address ))

proc wantListHandler*(
  b: BlockExcEngine,
  peer: PeerId,
  wantList: WantList) {.async.} =
  let
    peerCtx = b.peers.get(peer)
  if isNil(peerCtx):
    return

  var
    presence: seq[BlockPresence]

  for e in wantList.entries:
    let
      idx = peerCtx.peerWants.findIt(it.address == e.address)

    logScope:
      peer = peerCtx.id
      address = e.address
      wantType = $e.wantType

    if idx < 0: # new entry
      let
        have = await e.address in b.localStore
        price = @(
          b.pricing.get(Pricing(price: 0.u256))
          .price.toBytesBE)

      if e.wantType == WantType.WantHave:
        codex_block_exchange_want_have_lists_received.inc()

      if not have and e.sendDontHave:
        presence.add(
          BlockPresence(
            address: e.address,
            `type`: BlockPresenceType.DontHave,
            price: price))
      elif have and e.wantType == WantType.WantHave:
        presence.add(
          BlockPresence(
            address: e.address,
            `type`: BlockPresenceType.Have,
            price: price))
      elif e.wantType == WantType.WantBlock:
        peerCtx.peerWants.add(e)
        codex_block_exchange_want_block_lists_received.inc()
    else:
      # peer doesn't want this block anymore
      if e.cancel:
        peerCtx.peerWants.del(idx)
      else:
        # peer might want to ask for the same cid with
        # different want params
        peerCtx.peerWants[idx] = e # update entry

  if presence.len > 0:
    trace "Sending presence to remote", items = presence.mapIt($it).join(",")
    await b.network.request.sendPresence(peer, presence)

  if not b.scheduleTask(peerCtx):
    warn "Unable to schedule task for peer", peer

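# In short: WantHave entries are answered immediately with BlockPresence
# (Have/DontHave) messages carrying our configured price, while WantBlock
# entries are queued on the peer context and served asynchronously by
# taskHandler via the task scheduled above.
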
proc accountHandler*(
  engine: BlockExcEngine,
  peer: PeerId,
  account: Account) {.async.} =
  let
    context = engine.peers.get(peer)
  if context.isNil:
    return

  context.account = account.some

proc paymentHandler*(
  engine: BlockExcEngine,
  peer: PeerId,
  payment: SignedState) {.async.} =
  trace "Handling payments", peer

  without context =? engine.peers.get(peer).option and
          account =? context.account:
    trace "No context or account for peer", peer
    return

  if channel =? context.paymentChannel:
    let
      sender = account.address
    discard engine.wallet.acceptPayment(channel, Asset, sender, payment)
  else:
    context.paymentChannel = engine.wallet.acceptChannel(payment).option

proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
  ## Perform initial setup, such as want
  ## list exchange
  ##

  trace "Setting up peer", peer

  if peer notin b.peers:
    trace "Setting up new peer", peer
    b.peers.add(BlockExcPeerCtx(
      id: peer
    ))
    trace "Added peer", peers = b.peers.len

  # broadcast our want list, the other peer will do the same
  if b.pendingBlocks.wantListLen > 0:
    trace "Sending our want list to a peer", peer
    let cids = toSeq(b.pendingBlocks.wantList)
    await b.network.request.sendWantList(
      peer, cids, full = true)

  if address =? b.pricing.?address:
    await b.network.request.sendAccount(peer, Account(address: address))

proc dropPeer*(b: BlockExcEngine, peer: PeerId) =
  ## Cleanup disconnected peer
  ##

  trace "Dropping peer", peer

  # drop the peer from the peers table
  b.peers.remove(peer)

proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
  # Send the peer the blocks it wants,
  # if they are present in our local store

  # TODO: There should be all sorts of accounting of
  # bytes sent/received here

  var
    wantsBlocks = task.peerWants.filterIt(
      it.wantType == WantType.WantBlock and not it.inFlight
    )

  proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) =
    for peerWant in task.peerWants.mitems:
      if peerWant.address in addresses:
        peerWant.inFlight = inFlight

  if wantsBlocks.len > 0:
    # Mark wants as in-flight.
    let wantAddresses = wantsBlocks.mapIt(it.address)
    updateInFlight(wantAddresses, true)
    wantsBlocks.sort(SortOrder.Descending)

    proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
      if e.address.leaf:
        (await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
          (blkAndProof: (Block, CodexProof)) =>
            BlockDelivery(address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some)
        )
      else:
        (await b.localStore.getBlock(e.address)).map(
          (blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
        )

    let
      blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
      blocksDelivery = blocksDeliveryFut
        .filterIt(it.completed and it.read.isOk)
        .mapIt(it.read.get)

    # All the wants that failed local lookup must be set to not-in-flight again.
    let
      successAddresses = blocksDelivery.mapIt(it.address)
      failedAddresses = wantAddresses.filterIt(it notin successAddresses)
    updateInFlight(failedAddresses, false)

    if blocksDelivery.len > 0:
      trace "Sending blocks to peer", peer = task.id, blocks = (blocksDelivery.mapIt($it.address)).join(",")
      await b.network.request.sendBlocksDelivery(
        task.id,
        blocksDelivery
      )

      codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)

    task.peerWants.keepItIf(it.address notin successAddresses)

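# Note: localLookup resolves a leaf address as (treeCid, index) and ships the
# block together with its CodexProof, while a non-leaf address is fetched by
# cid and delivered without a proof; only wants that were actually found
# locally are removed from the peer's want list.
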
proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).} =
  ## process tasks
  ##

  trace "Starting blockexc task runner"
  while b.blockexcRunning:
    try:
      let
        peerCtx = await b.taskQueue.pop()

      await b.taskHandler(peerCtx)
    except CancelledError:
      break # do not propagate as blockexcTaskRunner was asyncSpawned
    except CatchableError as e:
      error "error running block exchange task", error = e.msgDetail

  info "Exiting blockexc task runner"

proc new*(
    T: type BlockExcEngine,
    localStore: BlockStore,
    wallet: WalletRef,
    network: BlockExcNetwork,
    discovery: DiscoveryEngine,
    advertiser: Advertiser,
    peerStore: PeerCtxStore,
    pendingBlocks: PendingBlocksManager,
    concurrentTasks = DefaultConcurrentTasks,
    peersPerRequest = DefaultMaxPeersPerRequest,
    blockFetchTimeout = DefaultBlockTimeout,
): BlockExcEngine =
  ## Create new block exchange engine instance
  ##

  let
    engine = BlockExcEngine(
      localStore: localStore,
      peers: peerStore,
      pendingBlocks: pendingBlocks,
      peersPerRequest: peersPerRequest,
      network: network,
      wallet: wallet,
      concurrentTasks: concurrentTasks,
      trackedFutures: TrackedFutures.new(),
      taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
      discovery: discovery,
      advertiser: advertiser,
      blockFetchTimeout: blockFetchTimeout)

  proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
    if event.kind == PeerEventKind.Joined:
      await engine.setupPeer(peerId)
    else:
      engine.dropPeer(peerId)

  if not isNil(network.switch):
    network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
    network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)

  proc blockWantListHandler(
    peer: PeerId,
    wantList: WantList): Future[void] {.gcsafe.} =
    engine.wantListHandler(peer, wantList)

  proc blockPresenceHandler(
    peer: PeerId,
    presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
    engine.blockPresenceHandler(peer, presence)

  proc blocksDeliveryHandler(
    peer: PeerId,
    blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
    engine.blocksDeliveryHandler(peer, blocksDelivery)

  proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} =
    engine.accountHandler(peer, account)

  proc paymentHandler(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
    engine.paymentHandler(peer, payment)

  network.handlers = BlockExcHandlers(
    onWantList: blockWantListHandler,
    onBlocksDelivery: blocksDeliveryHandler,
    onPresence: blockPresenceHandler,
    onAccount: accountHandler,
    onPayment: paymentHandler)

  return engine
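
# Illustrative wiring (a sketch only; construction of the arguments is assumed
# to happen elsewhere in the node setup code and is not shown here):
#
#   let engine = BlockExcEngine.new(
#     localStore, wallet, network, discovery, advertiser,
#     peerStore, pendingBlocks)
#   await engine.start()
#   # ... exchange blocks ...
#   await engine.stop()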