# Fluffy
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

## Implementation of the Portal wire protocol as specified at:
## https://github.com/ethereum/portal-network-specs/blob/master/portal-wire-protocol.md

{.push raises: [].}

import
  std/[sequtils, sets, algorithm, tables],
  stew/[byteutils, leb128, endians2],
  results,
  chronicles,
  chronos,
  nimcrypto/hash,
  bearssl,
  ssz_serialization,
  metrics,
  faststreams,
  minilru,
  eth/rlp,
  eth/p2p/discoveryv5/[protocol, node, enr, routing_table, random2, nodes_verification],
  "."/[portal_stream, portal_protocol_config],
  ./messages

export messages, routing_table, protocol

declareCounter portal_message_requests_incoming,
  "Portal wire protocol incoming message requests",
  labels = ["protocol_id", "message_type"]

declareCounter portal_message_decoding_failures,
  "Portal wire protocol message decoding failures", labels = ["protocol_id"]

declareCounter portal_message_requests_outgoing,
  "Portal wire protocol outgoing message requests",
  labels = ["protocol_id", "message_type"]

declareCounter portal_message_response_incoming,
  "Portal wire protocol incoming message responses",
  labels = ["protocol_id", "message_type"]

const requestBuckets = [1.0, 3.0, 5.0, 7.0, 9.0, Inf]
declareHistogram portal_lookup_node_requests,
  "Portal wire protocol amount of requests per node lookup",
  labels = ["protocol_id"],
  buckets = requestBuckets

declareHistogram portal_lookup_content_requests,
  "Portal wire protocol amount of requests per content lookup",
  labels = ["protocol_id"],
  buckets = requestBuckets

declareCounter portal_lookup_content_failures,
  "Portal wire protocol content lookup failures", labels = ["protocol_id"]

const contentKeysBuckets = [0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, Inf]
declareHistogram portal_content_keys_offered,
  "Portal wire protocol amount of content keys per offer message sent",
  labels = ["protocol_id"],
  buckets = contentKeysBuckets

declareHistogram portal_content_keys_accepted,
  "Portal wire protocol amount of content keys per accept message received",
  labels = ["protocol_id"],
  buckets = contentKeysBuckets

declareCounter portal_gossip_offers_successful,
  "Portal wire protocol successful content offers from neighborhood gossip",
  labels = ["protocol_id"]

declareCounter portal_gossip_offers_failed,
  "Portal wire protocol failed content offers from neighborhood gossip",
  labels = ["protocol_id"]

declareCounter portal_gossip_with_lookup,
  "Portal wire protocol neighborhood gossip that required a node lookup",
  labels = ["protocol_id"]

declareCounter portal_gossip_without_lookup,
  "Portal wire protocol neighborhood gossip that did not require a node lookup",
  labels = ["protocol_id"]

declareCounter portal_content_cache_hits,
  "Portal wire protocol local content lookups that hit the cache",
  labels = ["protocol_id"]

declareCounter portal_content_cache_misses,
  "Portal wire protocol local content lookups that don't hit the cache",
  labels = ["protocol_id"]

declareCounter portal_poke_offers,
  "Portal wire protocol offers through poke mechanism", labels = ["protocol_id"]

# Note: These metrics are to get some idea on how many ENRs are sent on average.
# Relevant issue: https://github.com/ethereum/portal-network-specs/issues/136
const enrsBuckets = [0.0, 1.0, 3.0, 5.0, 8.0, 9.0, Inf]
declareHistogram portal_nodes_enrs_packed,
  "Portal wire protocol amount of enrs packed in a nodes message",
  labels = ["protocol_id"],
  buckets = enrsBuckets

# This one will currently hit the max numbers because all neighbours are sent,
# not only the ones closer to the content.
declareHistogram portal_content_enrs_packed,
  "Portal wire protocol amount of enrs packed in a content message",
  labels = ["protocol_id"],
  buckets = enrsBuckets

const distanceBuckets = [
  float64 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253,
  254, 255, 256,
]
declareHistogram portal_find_content_log_distance,
  "Portal wire protocol logarithmic distance of requested content",
  labels = ["protocol_id"],
  buckets = distanceBuckets

declareHistogram portal_offer_log_distance,
  "Portal wire protocol logarithmic distance of offered content",
  labels = ["protocol_id"],
  buckets = distanceBuckets

logScope:
  topics = "portal_wire"

const
  alpha = 3 ## Kademlia concurrency factor
  enrsResultLimit* = 32 ## Maximum amount of ENRs in the total Nodes messages
  ## that will be processed
  refreshInterval = 5.minutes ## Interval of launching a random query to
  ## refresh the routing table.
  revalidateMax = 4000 ## Revalidation of a peer is done between 0 and this
  ## value in milliseconds
  initialLookups = 1 ## Amount of lookups done when populating the routing table

  # This is the number of concurrent offers per Portal wire protocol that is running.
  # Using the `offerQueue` allows for limiting the amount of offers sent and
  # thus how many streams can be started.
  # TODO:
  # More thought needs to go into this as it is currently on a per network
  # basis. Keep it simple like that? Or limit it better at the stream transport
  # level? In the latter case, this might still need to be checked/blocked at
  # the very start of sending the offer, because blocking/waiting too long
  # between the received accept message and actually starting the stream and
  # sending data could give issues due to timeouts on the other side.
  # And then there are still limits to be applied also for FindContent and the
  # incoming directions.
  concurrentOffers = 50

type
  ToContentIdHandler* =
    proc(contentKey: ContentKeyByteList): results.Opt[ContentId] {.raises: [], gcsafe.}

  DbGetHandler* = proc(
    contentKey: ContentKeyByteList, contentId: ContentId
  ): results.Opt[seq[byte]] {.raises: [], gcsafe.}

  DbStoreHandler* = proc(
    contentKey: ContentKeyByteList, contentId: ContentId, content: seq[byte]
  ) {.raises: [], gcsafe.}

  DbRadiusHandler* = proc(): UInt256 {.raises: [], gcsafe.}

  PortalProtocolId* = array[2, byte]

  RadiusCache* = LRUCache[NodeId, UInt256]

  ContentCache = LRUCache[ContentId, seq[byte]]

  ContentKV* = object
    contentKey*: ContentKeyByteList
    content*: seq[byte]

  OfferRequestType = enum
    Direct
    Database

  OfferRequest = object
    dst: Node
    case kind: OfferRequestType
    of Direct:
      contentList: List[ContentKV, contentKeysLimit]
    of Database:
      contentKeys: ContentKeysList

  PortalProtocol* = ref object of TalkProtocol
    protocolId*: PortalProtocolId
    routingTable*: RoutingTable
    baseProtocol*: protocol.Protocol
    toContentId*: ToContentIdHandler
    contentCache: ContentCache
    dbGet*: DbGetHandler
    dbPut*: DbStoreHandler
    dataRadius*: DbRadiusHandler
    bootstrapRecords*: seq[Record]
    lastLookup: chronos.Moment
    refreshLoop: Future[void]
    revalidateLoop: Future[void]
    stream*: PortalStream
    radiusCache: RadiusCache
    offerQueue: AsyncQueue[OfferRequest]
    offerWorkers: seq[Future[void]]
    pingTimings: Table[NodeId, chronos.Moment]
    config*: PortalProtocolConfig

  PortalResult*[T] = Result[T, string]

  FoundContentKind* = enum
    Nodes
    Content

  FoundContent* = object
    src*: Node
    case kind*: FoundContentKind
    of Content:
      content*: seq[byte]
      utpTransfer*: bool
    of Nodes:
      nodes*: seq[Node]

  ContentLookupResult* = object
    content*: seq[byte]
    utpTransfer*: bool
    # List of nodes that do not have the requested content, even though the
    # content falls within their radius
    nodesInterestedInContent*: seq[Node]

  TraceResponse* = object
    durationMs*: int64
    respondedWith*: seq[NodeId]

  NodeMetadata* = object
    enr*: Record
    distance*: UInt256

  TraceObject* = object
    origin*: NodeId
    targetId: UInt256
    receivedFrom*: Opt[NodeId]
    responses*: Table[string, TraceResponse]
    metadata*: Table[string, NodeMetadata]
    cancelled*: seq[NodeId]
    startedAtMs*: int64

  TraceContentLookupResult* = object
    content*: Opt[seq[byte]]
    utpTransfer*: bool
    trace*: TraceObject

func init*(T: type ContentKV, contentKey: ContentKeyByteList, content: seq[byte]): T =
  ContentKV(contentKey: contentKey, content: content)

func init*(
    T: type ContentLookupResult,
    content: seq[byte],
    utpTransfer: bool,
    nodesInterestedInContent: seq[Node],
): T =
  ContentLookupResult(
    content: content,
    utpTransfer: utpTransfer,
    nodesInterestedInContent: nodesInterestedInContent,
  )

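# Derive the 2-byte protocol id used on the discv5 talk protocol: the first
# byte is the 0x50 ("P") Portal prefix, the second byte selects the network
# (mainnet vs angelfood range) and the subnetwork.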
func getProtocolId*(
    network: PortalNetwork, subnetwork: PortalSubnetwork
): PortalProtocolId =
  const portalPrefix = byte(0x50)

  case network
  of PortalNetwork.none, PortalNetwork.mainnet:
    case subnetwork
    of PortalSubnetwork.state:
      [portalPrefix, 0x0A]
    of PortalSubnetwork.history:
      [portalPrefix, 0x0B]
    of PortalSubnetwork.beacon:
      [portalPrefix, 0x0C]
    of PortalSubnetwork.transactionIndex:
      [portalPrefix, 0x0D]
    of PortalSubnetwork.verkleState:
      [portalPrefix, 0x0E]
    of PortalSubnetwork.transactionGossip:
      [portalPrefix, 0x0F]
  of PortalNetwork.angelfood:
    case subnetwork
    of PortalSubnetwork.state:
      [portalPrefix, 0x4A]
    of PortalSubnetwork.history:
      [portalPrefix, 0x4B]
    of PortalSubnetwork.beacon:
      [portalPrefix, 0x4C]
    of PortalSubnetwork.transactionIndex:
      [portalPrefix, 0x4D]
    of PortalSubnetwork.verkleState:
      [portalPrefix, 0x4E]
    of PortalSubnetwork.transactionGossip:
      [portalPrefix, 0x4F]

func `$`(id: PortalProtocolId): string =
  id.toHex()

proc addNode*(p: PortalProtocol, node: Node): NodeStatus =
  p.routingTable.addNode(node)

proc addNode*(p: PortalProtocol, r: Record): bool =
  p.addNode(Node.fromRecord(r)) == Added

func getNode*(p: PortalProtocol, id: NodeId): Opt[Node] =
  p.routingTable.getNode(id)

func localNode*(p: PortalProtocol): Node =
  p.baseProtocol.localNode

func neighbours*(p: PortalProtocol, id: NodeId, seenOnly = false): seq[Node] =
  p.routingTable.neighbours(id = id, seenOnly = seenOnly)

func distance(p: PortalProtocol, a, b: NodeId): UInt256 =
  p.routingTable.distance(a, b)

func logDistance(p: PortalProtocol, a, b: NodeId): uint16 =
  p.routingTable.logDistance(a, b)

func inRange(
    p: PortalProtocol, nodeId: NodeId, nodeRadius: UInt256, contentId: ContentId
): bool =
  let distance = p.distance(nodeId, contentId)
  distance <= nodeRadius

proc inRange*(p: PortalProtocol, contentId: ContentId): bool =
  p.inRange(p.localNode.id, p.dataRadius(), contentId)

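# Pack ENRs into the SSZ list until the maxSize budget is reached, accounting
# for the per-ENR offset overhead; ENRs that no longer fit are dropped.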
func truncateEnrs(
    nodes: seq[Node], maxSize: int, enrOverhead: int
): List[ByteList[2048], 32] =
  var enrs: List[ByteList[2048], 32]
  var totalSize = 0
  for n in nodes:
    let enr = ByteList[2048].init(n.record.raw)
    if totalSize + enr.len() + enrOverhead <= maxSize:
      let res = enrs.add(enr)
      # With max payload of discv5 and the sizes of ENRs this should not occur.
      doAssert(res, "32 limit will not be reached")
      totalSize = totalSize + enr.len() + enrOverhead
    else:
      break

  enrs

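# Handle an incoming PING: cache the peer's advertised radius from the custom
# payload and reply with a PONG carrying the local ENR sequence number and
# this node's own radius.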
proc handlePing(p: PortalProtocol, ping: PingMessage, srcId: NodeId): seq[byte] =
  # TODO: This should become custom per Portal Network
  # TODO: Need to think about the effect of a malicious actor sending lots of
  # pings from different nodes to clear the LRU.
  let customPayloadDecoded =
    try:
      SSZ.decode(ping.customPayload.asSeq(), CustomPayload)
    except SerializationError:
      # invalid custom payload, send empty back
      return @[]
  p.radiusCache.put(srcId, customPayloadDecoded.dataRadius)

  let customPayload = CustomPayload(dataRadius: p.dataRadius())
  let p = PongMessage(
    enrSeq: p.localNode.record.seqNum,
    customPayload: ByteList[2048](SSZ.encode(customPayload)),
  )

  encodeMessage(p)

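# Handle an incoming FINDNODES: an empty distance list gets an empty reply,
# distance 0 returns our own record, and other distances return the seen nodes
# at those log distances, truncated to fit the discv5 packet size limit.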
proc handleFindNodes(p: PortalProtocol, fn: FindNodesMessage): seq[byte] =
  if fn.distances.len == 0:
    let enrs = List[ByteList[2048], 32](@[])
    encodeMessage(NodesMessage(total: 1, enrs: enrs))
  elif fn.distances.contains(0):
    # A request for our own record.
    let enr = ByteList[2048](rlp.encode(p.localNode.record))
    encodeMessage(NodesMessage(total: 1, enrs: List[ByteList[2048], 32](@[enr])))
  else:
    let distances = fn.distances.asSeq()
    if distances.all(
      proc(x: uint16): bool =
        return x <= 256
    ):
      let nodes = p.routingTable.neighboursAtDistances(distances, seenOnly = true)

      # TODO: The total amount of messages is fixed to 1 for now, else we would
      # need to either move the send of the talkresp messages here, or allow for
      # returning multiple messages.
      # In the long run, it might just be better to use a stream in these cases?
      # Size calculation is done to truncate the ENR results in order to not go
      # over the discv5 packet size limits. ENRs are sorted so the closest nodes
      # will still be passed.
      const
        nodesOverhead = 1 + 1 + 4 # msg id + total + container offset
        maxPayloadSize = maxDiscv5TalkRespPayload - nodesOverhead
        enrOverhead = 4 # per added ENR, 4 bytes offset overhead

      let enrs = truncateEnrs(nodes, maxPayloadSize, enrOverhead)
      portal_nodes_enrs_packed.observe(enrs.len().int64, labelValues = [$p.protocolId])

      encodeMessage(NodesMessage(total: 1, enrs: enrs))
    else:
      # invalid request, send empty back
      let enrs = List[ByteList[2048], 32](@[])
      encodeMessage(NodesMessage(total: 1, enrs: enrs))

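# Handle an incoming FINDCONTENT: return the content directly if it is stored
# locally and small enough, a uTP connection id when it requires a stream
# transfer, or the closest known nodes to the content id otherwise.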
proc handleFindContent(
    p: PortalProtocol, fc: FindContentMessage, srcId: NodeId
): seq[byte] =
  const
    contentOverhead = 1 + 1 # msg id + SSZ Union selector
    maxPayloadSize = maxDiscv5TalkRespPayload - contentOverhead
    enrOverhead = 4 # per added ENR, 4 bytes offset overhead

  let contentId = p.toContentId(fc.contentKey).valueOr:
    # Return empty response when content key validation fails
    # TODO: Better would be to return no message at all? Needs changes on
    # discv5 layer.
    return @[]

  let logDistance = p.logDistance(contentId, p.localNode.id)
  portal_find_content_log_distance.observe(
    int64(logDistance), labelValues = [$p.protocolId]
  )

  # Check first if content is in range, as this is a cheaper operation
  if p.inRange(contentId):
    let contentResult = p.dbGet(fc.contentKey, contentId)
    if contentResult.isOk():
      let content = contentResult.get()
      if content.len <= maxPayloadSize:
        return encodeMessage(
          ContentMessage(
            contentMessageType: contentType, content: ByteList[2048](content)
          )
        )
      else:
        let connectionId = p.stream.addContentRequest(srcId, content)

        return encodeMessage(
          ContentMessage(
            contentMessageType: connectionIdType, connectionId: connectionId
          )
        )

  # Node does not have the content, or content is not even in radius,
  # send closest neighbours to the requested content id.
  let
    closestNodes = p.routingTable.neighbours(NodeId(contentId), seenOnly = true)
    enrs = truncateEnrs(closestNodes, maxPayloadSize, enrOverhead)
  portal_content_enrs_packed.observe(enrs.len().int64, labelValues = [$p.protocolId])

  encodeMessage(ContentMessage(contentMessageType: enrsType, enrs: enrs))

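# Handle an incoming OFFER: build the accept bitlist by marking the offered
# keys that are within radius and not yet stored locally, and hand out a uTP
# connection id when at least one key is accepted.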
proc handleOffer(p: PortalProtocol, o: OfferMessage, srcId: NodeId): seq[byte] =
  # Early return when our contentQueue is full. This means there is a backlog
  # of content to process and potentially gossip around. Don't accept more
  # data in this case.
  if p.stream.contentQueue.full():
    return encodeMessage(
      AcceptMessage(
        connectionId: Bytes2([byte 0x00, 0x00]),
        contentKeys: ContentKeysBitList.init(o.contentKeys.len),
      )
    )

  var contentKeysBitList = ContentKeysBitList.init(o.contentKeys.len)
  var contentKeys = ContentKeysList.init(@[])
  # TODO: Do we need some protection against a peer offering lots (64x) of
  # content that fits our Radius but is actually bogus?
  # Additional TODO, but more of a specification clarification: What if we don't
  # want any of the content? Reply with empty bitlist and a connectionId of
  # all zeroes but don't actually allow an uTP connection?
  for i, contentKey in o.contentKeys:
    let contentIdResult = p.toContentId(contentKey)
    if contentIdResult.isOk():
      let contentId = contentIdResult.get()

      let logDistance = p.logDistance(contentId, p.localNode.id)
      portal_offer_log_distance.observe(
        int64(logDistance), labelValues = [$p.protocolId]
      )

      if p.inRange(contentId):
        if p.dbGet(contentKey, contentId).isErr:
          contentKeysBitList.setBit(i)
          discard contentKeys.add(contentKey)
    else:
      # Return empty response when content key validation fails
      return @[]

  let connectionId =
    if contentKeysBitList.countOnes() != 0:
      p.stream.addContentOffer(srcId, contentKeys)
    else:
      # When the node does not accept any of the content offered, reply with an
      # all zeroes bitlist and connectionId.
      # Note: What to do in this scenario is not defined in the Portal spec.
      Bytes2([byte 0x00, 0x00])

  encodeMessage(
    AcceptMessage(connectionId: connectionId, contentKeys: contentKeysBitList)
  )

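# discv5 talkreq handler: decode the incoming Portal wire message, try to add
# the sender to the Portal routing table, and dispatch request messages to the
# handlers above. Response message kinds are invalid here and answered with an
# empty payload.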
proc messageHandler(
    protocol: TalkProtocol,
    request: seq[byte],
    srcId: NodeId,
    srcUdpAddress: Address,
    nodeOpt: Opt[Node],
): seq[byte] =
  doAssert(protocol of PortalProtocol)

  logScope:
    protocolId = p.protocolId

  let p = PortalProtocol(protocol)

  let decoded = decodeMessage(request)
  if decoded.isOk():
    let message = decoded.get()
    trace "Received message request", srcId, srcUdpAddress, kind = message.kind
    # Received a proper Portal message, check first if an ENR is provided by
    # the discovery v5 layer and add it to the portal network routing table.
    # If not provided through the handshake, try to get it from the discovery v5
    # routing table.
    # When the node would be eligible for the portal network routing table, it
    # is possible that it exists in the base discv5 routing table as the same
    # node ids are used. It is not certain at all however as more nodes might
    # exist on the base layer, and it will also depend on the distance,
    # order of lookups, etc.
    # Note: As third measure, could run a findNodes request with distance 0.
    if nodeOpt.isSome():
      let node = nodeOpt.value()
      let status = p.addNode(node)
      trace "Adding new node to routing table after incoming request", status, node
    else:
      let nodeOpt = p.baseProtocol.getNode(srcId)
      if nodeOpt.isSome():
        let node = nodeOpt.value()
        let status = p.addNode(node)
        trace "Adding new node to routing table after incoming request", status, node

    portal_message_requests_incoming.inc(labelValues = [$p.protocolId, $message.kind])

    case message.kind
    of MessageKind.ping:
      p.handlePing(message.ping, srcId)
    of MessageKind.findNodes:
      p.handleFindNodes(message.findNodes)
    of MessageKind.findContent:
      p.handleFindContent(message.findContent, srcId)
    of MessageKind.offer:
      p.handleOffer(message.offer, srcId)
    else:
      # This would mean that a Portal wire response message is being sent over a
      # discv5 talkreq message.
      debug "Invalid Portal wire message type over talkreq", kind = message.kind
      @[]
  else:
    portal_message_decoding_failures.inc(labelValues = [$p.protocolId])
    debug "Packet decoding error", error = decoded.error, srcId, srcUdpAddress
    @[]

proc new*(
    T: type PortalProtocol,
    baseProtocol: protocol.Protocol,
    protocolId: PortalProtocolId,
    toContentId: ToContentIdHandler,
    dbGet: DbGetHandler,
    dbPut: DbStoreHandler,
    dbRadius: DbRadiusHandler,
    stream: PortalStream,
    bootstrapRecords: openArray[Record] = [],
    distanceCalculator: DistanceCalculator = XorDistanceCalculator,
    config: PortalProtocolConfig = defaultPortalProtocolConfig,
): T =
  let proto = PortalProtocol(
    protocolHandler: messageHandler,
    protocolId: protocolId,
    routingTable: RoutingTable.init(
      baseProtocol.localNode, config.bitsPerHop, config.tableIpLimits, baseProtocol.rng,
      distanceCalculator,
    ),
    baseProtocol: baseProtocol,
    toContentId: toContentId,
    contentCache:
      ContentCache.init(if config.disableContentCache: 0 else: config.contentCacheSize),
    dbGet: dbGet,
    dbPut: dbPut,
    dataRadius: dbRadius,
    bootstrapRecords: @bootstrapRecords,
    stream: stream,
    radiusCache: RadiusCache.init(256),
    offerQueue: newAsyncQueue[OfferRequest](concurrentOffers),
    pingTimings: Table[NodeId, chronos.Moment](),
    config: config,
  )

  proto.baseProtocol.registerTalkProtocol(@(proto.protocolId), proto).expect(
    "Only one protocol should have this id"
  )

  proto

# Sends the discv5 talkreq message with provided Portal message, awaits and
# validates the proper response, and updates the Portal Network routing table.
proc reqResponse[Request: SomeMessage, Response: SomeMessage](
    p: PortalProtocol, dst: Node, request: Request
): Future[PortalResult[Response]] {.async: (raises: [CancelledError]).} =
  logScope:
    protocolId = p.protocolId

  trace "Send message request", dstId = dst.id, kind = messageKind(Request)
  portal_message_requests_outgoing.inc(
    labelValues = [$p.protocolId, $messageKind(Request)]
  )

  let talkresp =
    await talkReq(p.baseProtocol, dst, @(p.protocolId), encodeMessage(request))

  # Note: Failure of `decodeMessage` might also simply mean that the peer is
  # not supporting the specific talk protocol, as according to specification
  # an empty response needs to be sent in that case.
  # See: https://github.com/ethereum/devp2p/blob/master/discv5/discv5-wire.md#talkreq-request-0x05

  let messageResponse = talkresp
    .mapErr(
      proc(x: cstring): string =
        $x
    )
    .flatMap(
      proc(x: seq[byte]): Result[Message, string] =
        decodeMessage(x)
    )
    .flatMap(
      proc(m: Message): Result[Response, string] =
        getInnerMessage[Response](m)
    )

  if messageResponse.isOk():
    trace "Received message response",
      srcId = dst.id, srcAddress = dst.address, kind = messageKind(Response)
    portal_message_response_incoming.inc(
      labelValues = [$p.protocolId, $messageKind(Response)]
    )

    p.routingTable.setJustSeen(dst)
  else:
    debug "Error receiving message response",
      error = messageResponse.error, srcId = dst.id, srcAddress = dst.address
    p.pingTimings.del(dst.id)
    p.routingTable.replaceNode(dst)

  return messageResponse

proc pingImpl*(
    p: PortalProtocol, dst: Node
): Future[PortalResult[PongMessage]] {.async: (raises: [CancelledError]).} =
  let customPayload = CustomPayload(dataRadius: p.dataRadius())
  let ping = PingMessage(
    enrSeq: p.localNode.record.seqNum,
    customPayload: ByteList[2048](SSZ.encode(customPayload)),
  )

  return await reqResponse[PingMessage, PongMessage](p, dst, ping)

proc findNodesImpl*(
    p: PortalProtocol, dst: Node, distances: List[uint16, 256]
): Future[PortalResult[NodesMessage]] {.async: (raises: [CancelledError]).} =
  let fn = FindNodesMessage(distances: distances)

  # TODO Add nodes validation
  return await reqResponse[FindNodesMessage, NodesMessage](p, dst, fn)

proc findContentImpl*(
    p: PortalProtocol, dst: Node, contentKey: ContentKeyByteList
): Future[PortalResult[ContentMessage]] {.async: (raises: [CancelledError]).} =
  let fc = FindContentMessage(contentKey: contentKey)

  return await reqResponse[FindContentMessage, ContentMessage](p, dst, fc)

proc offerImpl*(
    p: PortalProtocol, dst: Node, contentKeys: ContentKeysList
): Future[PortalResult[AcceptMessage]] {.async: (raises: [CancelledError]).} =
  let offer = OfferMessage(contentKeys: contentKeys)

  return await reqResponse[OfferMessage, AcceptMessage](p, dst, offer)

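# Decode a list of raw SSZ byte lists into ENR records, failing on the first
# ENR that cannot be deserialized.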
proc recordsFromBytes*(
    rawRecords: List[ByteList[2048], 32]
): PortalResult[seq[Record]] =
  var records: seq[Record]
  for r in rawRecords.asSeq():
    let record = enr.Record.fromBytes(r.asSeq()).valueOr:
      # If any of the ENRs is invalid, fail immediately. This is similar to what
      # is done on the discovery v5 layer.
      return err("Deserialization of an ENR failed")

    records.add(record)

  ok(records)

proc ping*(
    p: PortalProtocol, dst: Node
): Future[PortalResult[PongMessage]] {.async: (raises: [CancelledError]).} =
  let pongResponse = await p.pingImpl(dst)

  if pongResponse.isOk():
    # Update last time we pinged this node
    p.pingTimings[dst.id] = now(chronos.Moment)

    let pong = pongResponse.get()
    # TODO: This should become custom per Portal Network
    let customPayloadDecoded =
      try:
        SSZ.decode(pong.customPayload.asSeq(), CustomPayload)
      except SerializationError:
        # invalid custom payload
        return err("Pong message contains invalid custom payload")

    p.radiusCache.put(dst.id, customPayloadDecoded.dataRadius)

  return pongResponse

proc findNodes*(
    p: PortalProtocol, dst: Node, distances: seq[uint16]
): Future[PortalResult[seq[Node]]] {.async: (raises: [CancelledError]).} =
  let nodesMessage = await p.findNodesImpl(dst, List[uint16, 256](distances))
  if nodesMessage.isOk():
    let records = recordsFromBytes(nodesMessage.get().enrs)
    if records.isOk():
      # TODO: distance function is wrong here for state, fix + tests
      return ok(verifyNodesRecords(records.get(), dst, enrsResultLimit, distances))
    else:
      return err(records.error)
  else:
    return err(nodesMessage.error)

proc findContent*(
    p: PortalProtocol, dst: Node, contentKey: ContentKeyByteList
): Future[PortalResult[FoundContent]] {.async: (raises: [CancelledError]).} =
  logScope:
    node = dst
    contentKey

  let contentMessageResponse = await p.findContentImpl(dst, contentKey)

  if contentMessageResponse.isOk():
    let m = contentMessageResponse.get()
    case m.contentMessageType
    of connectionIdType:
      let nodeAddress = NodeAddress.init(dst)
      if nodeAddress.isNone():
        # It should not happen as we are already after the successful
        # talkreq/talkresp cycle
        error "Trying to connect to node with unknown address", id = dst.id
        return err("Trying to connect to node with unknown address")

      # uTP protocol uses BE for all values in the header, incl. connection id
      let socket = (
        await p.stream.connectTo(
          nodeAddress.unsafeGet(), uint16.fromBytesBE(m.connectionId)
        )
      ).valueOr:
        debug "uTP connection error for find content", error
        return err("Error connecting uTP socket")

      try:
        # Read all bytes from the socket
        # This will either end with a FIN, or because the read action times out.
        # A FIN does not necessarily mean that the data read is complete.
        # Further validation is required, using a length prefix here might be
        # beneficial for this.
        let readFut = socket.read()

        readFut.cancelCallback = proc(udate: pointer) {.gcsafe.} =
          debug "Socket read cancelled", socketKey = socket.socketKey
          # In case this `findContent` gets cancelled while reading the data,
          # send a FIN and clean up the socket.
          socket.close()

        if await readFut.withTimeout(p.stream.contentReadTimeout):
          let content = await readFut
          # socket received remote FIN and drained whole buffer, it can be
          # safely destroyed without notifying remote
          debug "Socket read fully", socketKey = socket.socketKey
          socket.destroy()
          return ok(
            FoundContent(src: dst, kind: Content, content: content, utpTransfer: true)
          )
        else:
          debug "Socket read time-out", socketKey = socket.socketKey
          # Note: This might look a bit strange, but not doing a socket.close()
          # here as this is already done internally. utp_socket `checkTimeouts`
          # already does a socket.destroy() on timeout. Might want to change the
          # API on this later though.
          return err("Reading data from socket timed out, content request failed")
      except CancelledError as exc:
        # even though we already installed cancelCallback on readFut, it is worth
        # catching CancelledError in case that withTimeout throws CancelledError
        # but readFut has already finished.
        debug "Socket read cancelled", socketKey = socket.socketKey

        socket.close()
        raise exc
    of contentType:
      return ok(
        FoundContent(
          src: dst, kind: Content, content: m.content.asSeq(), utpTransfer: false
        )
      )
    of enrsType:
      let records = recordsFromBytes(m.enrs)
      if records.isOk():
        let verifiedNodes = verifyNodesRecords(records.get(), dst, enrsResultLimit)

        return ok(FoundContent(src: dst, kind: Nodes, nodes: verifiedNodes))
      else:
        return err("Content message returned invalid ENRs")
  else:
    warn "FindContent failed due to find content request failure",
      error = contentMessageResponse.error

    return err("No content response")

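# Extract the list of content keys from an offer request, regardless of
# whether it carries the content directly or only references the database.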
proc getContentKeys(o: OfferRequest): ContentKeysList =
  case o.kind
  of Direct:
    var contentKeys: ContentKeysList
    for info in o.contentList:
      discard contentKeys.add(info.contentKey)
    return contentKeys
  of Database:
    return o.contentKeys

func getMaxOfferedContentKeys*(protocolIdLen: uint32, maxKeySize: uint32): int =
  ## Calculates how many ContentKeys will fit in one offer message which
  ## will be small enough to fit into the discv5 limit.
  ## This is necessary as contentKeysLimit (64) is sometimes too big, and even
  ## half of this can be too much to fit into discv5 limits.

  let maxTalkReqPayload = maxDiscv5PacketSize - getTalkReqOverhead(int(protocolIdLen))
  # To calculate how many bytes `n` content keys of size `maxKeySize` will take,
  # we can use the following equation:
  # bytes = (n * (maxKeySize + perContentKeyOverhead)) + offerMessageOverhead
  # To calculate the maximal number of keys that fit in the given space, this
  # can be transformed to:
  # n = trunc((bytes - offerMessageOverhead) / (maxKeySize + perContentKeyOverhead))
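  # Illustrative example with hypothetical numbers: for maxTalkReqPayload = 1100
  # and maxKeySize = 64, this yields (1100 - 5) div (64 + 4) = 16 content keys.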
  return ((maxTalkReqPayload - 5) div (int(maxKeySize) + 4))

proc offer(
    p: PortalProtocol, o: OfferRequest
): Future[PortalResult[ContentKeysBitList]] {.async: (raises: [CancelledError]).} =
  ## Offer triggers an offer-accept interaction with one peer.
  ## The whole flow has two phases:
  ## 1. Come to an agreement on what content to transfer, by using offer and
  ## accept messages.
  ## 2. Open a uTP stream from content provider to content receiver and transfer
  ## the agreed content.
  ## There are two types of possible offer requests:
  ## Direct - when the caller provides the content to transfer. This way, content
  ## is guaranteed to be transferred as it stays in memory until the whole
  ## transfer is completed.
  ## Database - when the caller provides keys of content to be transferred. This
  ## way content is provided from the database just before it is transferred
  ## through the uTP socket. This is useful when there is a lot of content to be
  ## transferred to many peers, and keeping it all in memory could exhaust node
  ## resources. The main drawback is that content may be deleted from the node
  ## database by the cleanup process before it is transferred, so this way does
  ## not guarantee content transfer.
  let contentKeys = getContentKeys(o)

  logScope:
    node = o.dst
    contentKeys

  debug "Offering content"

  portal_content_keys_offered.observe(
    contentKeys.len().int64, labelValues = [$p.protocolId]
  )

  let acceptMessageResponse = await p.offerImpl(o.dst, contentKeys)

  if acceptMessageResponse.isOk():
    let m = acceptMessageResponse.get()

    let contentKeysLen =
      case o.kind
      of Direct:
        o.contentList.len()
      of Database:
        o.contentKeys.len()

    if m.contentKeys.len() != contentKeysLen:
      # TODO:
      # When there is such system, the peer should get scored negatively here.
      error "Accepted content key bitlist has invalid size"
      return err("Accepted content key bitlist has invalid size")

    let acceptedKeysAmount = m.contentKeys.countOnes()
    portal_content_keys_accepted.observe(
      acceptedKeysAmount.int64, labelValues = [$p.protocolId]
    )
    if acceptedKeysAmount == 0:
      debug "No content accepted"
      # Don't open an uTP stream if no content was requested
      return ok(m.contentKeys)

    let nodeAddress = NodeAddress.init(o.dst)
    if nodeAddress.isNone():
      # It should not happen as we are already after the successful
      # talkreq/talkresp cycle
      error "Trying to connect to node with unknown address", id = o.dst.id
      return err("Trying to connect to node with unknown address")

    let socket = (
      await p.stream.connectTo(
        nodeAddress.unsafeGet(), uint16.fromBytesBE(m.connectionId)
      )
    ).valueOr:
      debug "uTP connection error for offer content", error
      return err("Error connecting uTP socket")

    template lenu32(x: untyped): untyped =
      uint32(len(x))

    case o.kind
    of Direct:
      for i, b in m.contentKeys:
        if b:
          let content = o.contentList[i].content
          # TODO: stop using faststreams for this
          var output = memoryOutput()
          try:
            output.write(toBytes(content.lenu32, Leb128).toOpenArray())
            output.write(content)
          except IOError as e:
            # This should not happen in case of in-memory streams
            raiseAssert e.msg

          let dataWritten = (await socket.write(output.getOutput)).valueOr:
            debug "Error writing requested data", error
            # No point in trying to continue writing data
            socket.close()
            return err("Error writing requested data")

          trace "Offered content item sent", dataWritten = dataWritten
    of Database:
      for i, b in m.contentKeys:
        if b:
          let
            contentKey = o.contentKeys[i]
            contentIdResult = p.toContentId(contentKey)
          if contentIdResult.isOk():
            let
              contentId = contentIdResult.get()
              contentResult = p.dbGet(contentKey, contentId)

            var output = memoryOutput()
            if contentResult.isOk():
              let content = contentResult.get()
              try:
                output.write(toBytes(content.lenu32, Leb128).toOpenArray())
                output.write(content)
              except IOError as e:
                # This should not happen in case of in-memory streams
                raiseAssert e.msg
            else:
              try:
                # When data turns out missing, add a 0 size varint
                output.write(toBytes(0'u8, Leb128).toOpenArray())
              except IOError as e:
                raiseAssert e.msg

            let dataWritten = (await socket.write(output.getOutput)).valueOr:
              debug "Error writing requested data", error
              # No point in trying to continue writing data
              socket.close()
              return err("Error writing requested data")

            trace "Offered content item sent", dataWritten = dataWritten
    await socket.closeWait()
    debug "Content successfully offered"

    return ok(m.contentKeys)
  else:
    warn "Offer failed due to accept request failure",
      error = acceptMessageResponse.error
    return err("No accept response")

proc offer*(
    p: PortalProtocol, dst: Node, contentKeys: ContentKeysList
): Future[PortalResult[ContentKeysBitList]] {.async: (raises: [CancelledError]).} =
  let req = OfferRequest(dst: dst, kind: Database, contentKeys: contentKeys)
  return await p.offer(req)

proc offer*(
    p: PortalProtocol, dst: Node, content: seq[ContentKV]
): Future[PortalResult[ContentKeysBitList]] {.async: (raises: [CancelledError]).} =
  if len(content) > contentKeysLimit:
    return err("Cannot offer more than 64 content items")
  if len(content) == 0:
    return err("Cannot offer empty content list")

  let contentList = List[ContentKV, contentKeysLimit].init(content)
  let req = OfferRequest(dst: dst, kind: Direct, contentList: contentList)
  return await p.offer(req)

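# Worker task that keeps draining the offer queue, sending one offer at a time
# and tracking the gossip offer metrics.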
proc offerWorker(p: PortalProtocol) {.async: (raises: [CancelledError]).} =
  while true:
    let req = await p.offerQueue.popFirst()

    let res = await p.offer(req)
    if res.isOk():
      portal_gossip_offers_successful.inc(labelValues = [$p.protocolId])
    else:
      portal_gossip_offers_failed.inc(labelValues = [$p.protocolId])

proc offerQueueEmpty*(p: PortalProtocol): bool =
  p.offerQueue.empty()

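# Single lookup step: request the lookup distances from one node and add any
# returned nodes to the routing table.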
proc lookupWorker(
    p: PortalProtocol, dst: Node, target: NodeId
): Future[seq[Node]] {.async: (raises: [CancelledError]).} =
  let distances = lookupDistances(target, dst.id)
  let nodesMessage = await p.findNodes(dst, distances)
  if nodesMessage.isOk():
    let nodes = nodesMessage.get()
    # Attempt to add all nodes discovered
    for n in nodes:
      discard p.addNode(n)
    return nodes
  else:
    return @[]

proc lookup*(
|
|
|
|
p: PortalProtocol, target: NodeId
|
|
|
|
): Future[seq[Node]] {.async: (raises: [CancelledError]).} =
|
2021-07-30 19:19:03 +00:00
|
|
|
## Perform a lookup for the given target, return the closest n nodes to the
|
|
|
|
## target. Maximum value for n is `BUCKET_SIZE`.
|
|
|
|
# `closestNodes` holds the k closest nodes to target found, sorted by distance
|
|
|
|
# Unvalidated nodes are used for requests as a form of validation.
|
2024-02-28 17:31:45 +00:00
|
|
|
var closestNodes = p.routingTable.neighbours(target, BUCKET_SIZE, seenOnly = false)
|
2021-07-30 19:19:03 +00:00
|
|
|
|
2024-06-10 09:05:30 +00:00
|
|
|
var asked, seen = HashSet[NodeId]()
|
2023-11-23 21:20:23 +00:00
|
|
|
asked.incl(p.localNode.id) # No need to ask our own node
|
|
|
|
seen.incl(p.localNode.id) # No need to discover our own node
|
2021-07-30 19:19:03 +00:00
|
|
|
for node in closestNodes:
|
|
|
|
seen.incl(node.id)
|
|
|
|
|
2024-06-14 12:21:30 +00:00
|
|
|
var pendingQueries = newSeqOfCap[Future[seq[Node]].Raising([CancelledError])](alpha)
|
2022-03-23 15:32:59 +00:00
|
|
|
var requestAmount = 0'i64
|
2021-07-30 19:19:03 +00:00
|
|
|
|
|
|
|
while true:
|
|
|
|
var i = 0
|
|
|
|
# Doing `alpha` amount of requests at once as long as closer non queried
|
|
|
|
# nodes are discovered.
|
2021-12-08 10:54:22 +00:00
|
|
|
while i < closestNodes.len and pendingQueries.len < alpha:
|
2021-07-30 19:19:03 +00:00
|
|
|
let n = closestNodes[i]
|
|
|
|
if not asked.containsOrIncl(n.id):
|
|
|
|
pendingQueries.add(p.lookupWorker(n, target))
|
2022-03-23 15:32:59 +00:00
|
|
|
requestAmount.inc()
|
2021-07-30 19:19:03 +00:00
|
|
|
inc i
|
|
|
|
|
|
|
|
trace "Pending lookup queries", total = pendingQueries.len
|
|
|
|
|
|
|
|
if pendingQueries.len == 0:
|
|
|
|
break
|
|
|
|
|
2024-06-14 12:21:30 +00:00
|
|
|
let query =
|
|
|
|
try:
|
|
|
|
await one(pendingQueries)
|
|
|
|
except ValueError:
|
|
|
|
raiseAssert("pendingQueries should not have been empty")
|
|
|
|
|
2021-07-30 19:19:03 +00:00
|
|
|
trace "Got lookup query response"
|
|
|
|
|
|
|
|
let index = pendingQueries.find(query)
|
|
|
|
if index != -1:
|
|
|
|
pendingQueries.del(index)
|
|
|
|
else:
|
|
|
|
error "Resulting query should have been in the pending queries"
|
|
|
|
|
2024-06-14 12:21:30 +00:00
|
|
|
let nodes = await query
|
2021-07-30 19:19:03 +00:00
|
|
|
# TODO: Remove node on timed-out query?
|
|
|
|
for n in nodes:
|
|
|
|
if not seen.containsOrIncl(n.id):
|
|
|
|
# If it wasn't seen before, insert node while remaining sorted
|
2024-02-28 17:31:45 +00:00
|
|
|
closestNodes.insert(
|
|
|
|
n,
|
|
|
|
closestNodes.lowerBound(
|
|
|
|
n,
|
|
|
|
proc(x: Node, n: Node): int =
|
2024-07-22 12:22:45 +00:00
|
|
|
cmp(p.distance(x.id, target), p.distance(n.id, target)),
|
2024-02-28 17:31:45 +00:00
|
|
|
),
|
|
|
|
)
|
2021-07-30 19:19:03 +00:00
|
|
|
|
|
|
|
if closestNodes.len > BUCKET_SIZE:
|
|
|
|
closestNodes.del(closestNodes.high())
|
|
|
|
|
2024-02-28 17:31:45 +00:00
|
|
|
portal_lookup_node_requests.observe(requestAmount, labelValues = [$p.protocolId])
|
2021-07-30 19:19:03 +00:00
|
|
|
p.lastLookup = now(chronos.Moment)
|
|
|
|
return closestNodes
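
# Illustrative sketch, not part of the module: running a recursive node lookup
# towards an arbitrary target id, e.g. to find peers close to some content id.
# `someTargetId` is a hypothetical NodeId value.
#
#   let closest = await p.lookup(someTargetId)
#   trace "Lookup finished", found = closest.len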

proc triggerPoke*(
    p: PortalProtocol,
    nodes: seq[Node],
    contentKey: ContentKeyByteList,
    content: seq[byte],
) =
  ## Triggers asynchronous offer-accept interaction to provided nodes.
  ## Provided content should be in range of provided nodes.
  # In order to properly test gossip mechanisms (e.g. in Portal Hive),
  # we need the option to turn off the POKE functionality as it influences
  # how data moves around the network.
  if p.config.disablePoke:
    return

  for node in nodes:
    if not p.offerQueue.full():
      try:
        let
          contentKV = ContentKV(contentKey: contentKey, content: content)
          list = List[ContentKV, contentKeysLimit].init(@[contentKV])
          req = OfferRequest(dst: node, kind: Direct, contentList: list)
        p.offerQueue.putNoWait(req)
        portal_poke_offers.inc(labelValues = [$p.protocolId])
      except AsyncQueueFullError as e:
        # Should not occur as full() check is done.
        raiseAssert(e.msg)
    else:
      # Offer queue is full, do not start more offer-accept interactions
      return
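
# Illustrative sketch, assumption only: poke is typically fed with the nodes
# that a content lookup reported as interested in the content but missing it,
# so that they receive it via the offer queue processed by `offerWorker`.
# `interestedNodes`, `key` and `value` are hypothetical values.
#
#   p.triggerPoke(interestedNodes, key, value)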

# TODO ContentLookup and Lookup look almost exactly the same, also lookups in other
# networks will probably be very similar. Extract lookup function to separate module
# and make it more generic
proc contentLookup*(
    p: PortalProtocol, target: ContentKeyByteList, targetId: UInt256
): Future[Opt[ContentLookupResult]] {.async: (raises: [CancelledError]).} =
  ## Perform a content lookup for the given target content key and id.
  ## Returns the content if it is found, together with the closest nodes that
  ## should be interested in the content but did not have it.
  # `closestNodes` holds the k closest nodes to target found, sorted by distance
  # Unvalidated nodes are used for requests as a form of validation.
  var closestNodes = p.routingTable.neighbours(targetId, BUCKET_SIZE, seenOnly = false)
  # Shuffling the order of the nodes in order to not always hit the same node
  # first for the same request.
  p.baseProtocol.rng[].shuffle(closestNodes)

  var asked, seen = HashSet[NodeId]()
  asked.incl(p.localNode.id) # No need to ask our own node
  seen.incl(p.localNode.id) # No need to discover our own node
  for node in closestNodes:
    seen.incl(node.id)

  var pendingQueries =
    newSeqOfCap[Future[PortalResult[FoundContent]].Raising([CancelledError])](alpha)
  var requestAmount = 0'i64

  var nodesWithoutContent: seq[Node] = newSeq[Node]()

  while true:
    var i = 0
    # Doing `alpha` amount of requests at once as long as closer non queried
    # nodes are discovered.
    while i < closestNodes.len and pendingQueries.len < alpha:
      let n = closestNodes[i]
      if not asked.containsOrIncl(n.id):
        pendingQueries.add(p.findContent(n, target))
        requestAmount.inc()
      inc i

    trace "Pending lookup queries", total = pendingQueries.len

    if pendingQueries.len == 0:
      break

    let query =
      try:
        await one(pendingQueries)
      except ValueError:
        raiseAssert("pendingQueries should not have been empty")

    trace "Got lookup query response"

    let index = pendingQueries.find(query)
    if index != -1:
      pendingQueries.del(index)
    else:
      error "Resulting query should have been in the pending queries"

    let contentResult = await query
    if contentResult.isOk():
      let content = contentResult.get()

      case content.kind
      of Nodes:
        let maybeRadius = p.radiusCache.get(content.src.id)
        if maybeRadius.isSome() and
            p.inRange(content.src.id, maybeRadius.unsafeGet(), targetId):
          # Only return nodes which may be interested in content.
          # No need to check for duplicates in nodesWithoutContent
          # as requests are never made two times to the same node.
          nodesWithoutContent.add(content.src)

        for n in content.nodes:
          if not seen.containsOrIncl(n.id):
            discard p.addNode(n)
            # If it wasn't seen before, insert node while remaining sorted
            closestNodes.insert(
              n,
              closestNodes.lowerBound(
                n,
                proc(x: Node, n: Node): int =
                  cmp(p.distance(x.id, targetId), p.distance(n.id, targetId)),
              ),
            )

            if closestNodes.len > BUCKET_SIZE:
              closestNodes.del(closestNodes.high())
      of Content:
        # cancel any pending queries as the content has been found
        for f in pendingQueries:
          f.cancelSoon()
        portal_lookup_content_requests.observe(
          requestAmount, labelValues = [$p.protocolId]
        )
        return Opt.some(
          ContentLookupResult.init(
            content.content, content.utpTransfer, nodesWithoutContent
          )
        )
    else:
      # TODO: Should we do something with the node that failed to respond to
      # our query?
      discard

  portal_lookup_content_failures.inc(labelValues = [$p.protocolId])
  return Opt.none(ContentLookupResult)
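
# Illustrative sketch, assumption only: looking up content for a hypothetical
# content key `key`, deriving the content id through the network's
# `toContentId` handler.
#
#   let contentId = p.toContentId(key).valueOr:
#     return
#   let lookupRes = (await p.contentLookup(key, contentId)).valueOr:
#     return # content was not found on the network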

proc traceContentLookup*(
    p: PortalProtocol, target: ContentKeyByteList, targetId: UInt256
): Future[TraceContentLookupResult] {.async: (raises: [CancelledError]).} =
  ## Same as `contentLookup`, but additionally records tracing data about the
  ## lookup: per-node responses, node metadata and timings.
  # `closestNodes` holds the k closest nodes to target found, sorted by distance
  # Unvalidated nodes are used for requests as a form of validation.
  var closestNodes = p.routingTable.neighbours(targetId, BUCKET_SIZE, seenOnly = false)
  # Shuffling the order of the nodes in order to not always hit the same node
  # first for the same request.
  p.baseProtocol.rng[].shuffle(closestNodes)

  let ts = now(chronos.Moment)
  var responses = Table[string, TraceResponse]()
  var metadata = Table[string, NodeMetadata]()

  var asked, seen = HashSet[NodeId]()
  asked.incl(p.localNode.id) # No need to ask our own node
  seen.incl(p.localNode.id) # No need to discover our own node
  for node in closestNodes:
    seen.incl(node.id)

  # Local node should be part of the responses
  responses["0x" & $p.localNode.id] =
    TraceResponse(durationMs: 0, respondedWith: seen.toSeq())

  metadata["0x" & $p.localNode.id] = NodeMetadata(
    enr: p.localNode.record, distance: p.distance(p.localNode.id, targetId)
  )

  # We should also have metadata for all the closest nodes
  # in order to be able to show cancelled requests
  for cn in closestNodes:
    metadata["0x" & $cn.id] =
      NodeMetadata(enr: cn.record, distance: p.distance(cn.id, targetId))

  var pendingQueries =
    newSeqOfCap[Future[PortalResult[FoundContent]].Raising([CancelledError])](alpha)
  var pendingNodes = newSeq[Node]()
  var requestAmount = 0'i64

  var nodesWithoutContent: seq[Node] = newSeq[Node]()

  while true:
    var i = 0
    # Doing `alpha` amount of requests at once as long as closer non queried
    # nodes are discovered.
    while i < closestNodes.len and pendingQueries.len < alpha:
      let n = closestNodes[i]
      if not asked.containsOrIncl(n.id):
        pendingQueries.add(p.findContent(n, target))
        pendingNodes.add(n)
        requestAmount.inc()
      inc i

    trace "Pending lookup queries", total = pendingQueries.len

    if pendingQueries.len == 0:
      break

    let query =
      try:
        await one(pendingQueries)
      except ValueError:
        raiseAssert("pendingQueries should not have been empty")
    trace "Got lookup query response"

    let index = pendingQueries.find(query)
    if index != -1:
      pendingQueries.del(index)
      pendingNodes.del(index)
    else:
      error "Resulting query should have been in the pending queries"

    let contentResult = await query

    if contentResult.isOk():
      let content = contentResult.get()

      case content.kind
      of Nodes:
        let duration = chronos.milliseconds(now(chronos.Moment) - ts)

        let maybeRadius = p.radiusCache.get(content.src.id)
        if maybeRadius.isSome() and
            p.inRange(content.src.id, maybeRadius.unsafeGet(), targetId):
          # Only return nodes which may be interested in content.
          # No need to check for duplicates in nodesWithoutContent
          # as requests are never made two times to the same node.
          nodesWithoutContent.add(content.src)

        var respondedWith = newSeq[NodeId]()

        for n in content.nodes:
          let dist = p.distance(n.id, targetId)

          metadata["0x" & $n.id] = NodeMetadata(enr: n.record, distance: dist)
          respondedWith.add(n.id)

          if not seen.containsOrIncl(n.id):
            discard p.addNode(n)
            # If it wasn't seen before, insert node while remaining sorted
            closestNodes.insert(
              n,
              closestNodes.lowerBound(
                n,
                proc(x: Node, n: Node): int =
                  cmp(p.distance(x.id, targetId), dist),
              ),
            )

            if closestNodes.len > BUCKET_SIZE:
              closestNodes.del(closestNodes.high())

        let distance = p.distance(content.src.id, targetId)

        responses["0x" & $content.src.id] =
          TraceResponse(durationMs: duration, respondedWith: respondedWith)

        metadata["0x" & $content.src.id] =
          NodeMetadata(enr: content.src.record, distance: distance)
      of Content:
        let duration = chronos.milliseconds(now(chronos.Moment) - ts)

        # cancel any pending queries as the content has been found
        for f in pendingQueries:
          f.cancelSoon()
        portal_lookup_content_requests.observe(
          requestAmount, labelValues = [$p.protocolId]
        )

        let distance = p.distance(content.src.id, targetId)

        responses["0x" & $content.src.id] =
          TraceResponse(durationMs: duration, respondedWith: newSeq[NodeId]())

        metadata["0x" & $content.src.id] =
          NodeMetadata(enr: content.src.record, distance: distance)

        var pendingNodeIds = newSeq[NodeId]()

        for pn in pendingNodes:
          pendingNodeIds.add(pn.id)
          metadata["0x" & $pn.id] =
            NodeMetadata(enr: pn.record, distance: p.distance(pn.id, targetId))

        return TraceContentLookupResult(
          content: Opt.some(content.content),
          utpTransfer: content.utpTransfer,
          trace: TraceObject(
            origin: p.localNode.id,
            targetId: targetId,
            receivedFrom: Opt.some(content.src.id),
            responses: responses,
            metadata: metadata,
            cancelled: pendingNodeIds,
            startedAtMs: chronos.epochNanoSeconds(ts) div 1_000_000,
              # nanoseconds to milliseconds
          ),
        )
    else:
      # TODO: Should we do something with the node that failed to respond to
      # our query?
      discard

  portal_lookup_content_failures.inc(labelValues = [$p.protocolId])
  return TraceContentLookupResult(
    content: Opt.none(seq[byte]),
    utpTransfer: false,
    trace: TraceObject(
      origin: p.localNode.id,
      targetId: targetId,
      receivedFrom: Opt.none(NodeId),
      responses: responses,
      metadata: metadata,
      cancelled: newSeq[NodeId](),
      startedAtMs: chronos.epochNanoSeconds(ts) div 1_000_000,
        # nanoseconds to milliseconds
    ),
  )
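
# Note: `traceContentLookup` mirrors `contentLookup` but additionally collects
# per-node responses, node metadata and timings into a `TraceObject`, which
# makes it suitable for debugging and trace-style RPC endpoints.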

proc query*(
    p: PortalProtocol, target: NodeId, k = BUCKET_SIZE
): Future[seq[Node]] {.async: (raises: [CancelledError]).} =
  ## Query k nodes for the given target, returns all nodes found, including the
  ## nodes queried.
  ##
  ## This will take k nodes from the routing table closest to target and
  ## query them for nodes closest to target. If there are less than k nodes in
  ## the routing table, nodes returned by the first queries will be used.
  var queryBuffer = p.routingTable.neighbours(target, k, seenOnly = false)

  var asked, seen = HashSet[NodeId]()
  asked.incl(p.localNode.id) # No need to ask our own node
  seen.incl(p.localNode.id) # No need to discover our own node
  for node in queryBuffer:
    seen.incl(node.id)

  var pendingQueries = newSeqOfCap[Future[seq[Node]].Raising([CancelledError])](alpha)

  while true:
    var i = 0
    while i < min(queryBuffer.len, k) and pendingQueries.len < alpha:
      let n = queryBuffer[i]
      if not asked.containsOrIncl(n.id):
        pendingQueries.add(p.lookupWorker(n, target))
      inc i

    trace "Pending lookup queries", total = pendingQueries.len

    if pendingQueries.len == 0:
      break

    let query =
      try:
        await one(pendingQueries)
      except ValueError:
        raiseAssert("pendingQueries should not have been empty")
    trace "Got lookup query response"

    let index = pendingQueries.find(query)
    if index != -1:
      pendingQueries.del(index)
    else:
      error "Resulting query should have been in the pending queries"

    let nodes = await query
    # TODO: Remove node on timed-out query?
    for n in nodes:
      if not seen.containsOrIncl(n.id):
        queryBuffer.add(n)

  p.lastLookup = now(chronos.Moment)
  return queryBuffer

proc queryRandom*(
    p: PortalProtocol
): Future[seq[Node]] {.async: (raw: true, raises: [CancelledError]).} =
  ## Perform a query for a random target, return all nodes discovered.
  p.query(NodeId.random(p.baseProtocol.rng[]))
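
# Illustrative sketch, not part of the module: `queryRandom` is what the
# refresh logic further below relies on to keep the routing table populated; it
# can also be triggered manually, e.g. from a debug interface.
#
#   let discovered = await p.queryRandom()
#   trace "Random query finished", discovered = discovered.len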

proc getNClosestNodesWithRadius*(
    p: PortalProtocol, targetId: NodeId, n: int, seenOnly: bool = false
): seq[(Node, UInt256)] =
  let closestLocalNodes =
    p.routingTable.neighbours(targetId, k = n, seenOnly = seenOnly)

  var nodesWithRadiuses: seq[(Node, UInt256)]
  for node in closestLocalNodes:
    let radius = p.radiusCache.get(node.id)
    if radius.isSome():
      nodesWithRadiuses.add((node, radius.unsafeGet()))
  return nodesWithRadiuses

proc neighborhoodGossip*(
    p: PortalProtocol,
    srcNodeId: Opt[NodeId],
    contentKeys: ContentKeysList,
    content: seq[seq[byte]],
): Future[int] {.async: (raises: [CancelledError]).} =
  ## Run neighborhood gossip for provided content.
  ## Returns the number of peers to which content was attempted to be gossiped.
  if content.len() == 0:
    return 0

  var contentList = List[ContentKV, contentKeysLimit].init(@[])
  for i, contentItem in content:
    let contentKV = ContentKV(contentKey: contentKeys[i], content: contentItem)
    discard contentList.add(contentKV)

  # Just taking the first content item as target id.
  # TODO: come up with something better?
  let contentId = p.toContentId(contentList[0].contentKey).valueOr:
    return 0

  # For selecting the closest nodes to whom to gossip the content a mixed
  # approach is taken:
  # 1. Select the closest neighbours in the routing table
  # 2. Check if the radius is known for these nodes and whether they are
  # in range of the content to be offered.
  # 3. If more than n (= maxGossipNodes) nodes are in range, offer these nodes
  # the content (maxed out at n).
  # 4. If less than n nodes are in range, do a node lookup, and offer the nodes
  # returned from the lookup the content (maxed out at n).
  #
  # This should give a better success rate and avoid the data stalling in its
  # propagation, compared to only looking at nodes in the own routing table,
  # while at the same time avoiding unnecessary node lookups.
  # It might still cause issues in data getting propagated in a wider id range.

  let closestLocalNodes =
    p.routingTable.neighbours(NodeId(contentId), k = 16, seenOnly = true)

  var gossipNodes: seq[Node]
  for node in closestLocalNodes:
    let radius = p.radiusCache.get(node.id)
    if radius.isSome():
      if p.inRange(node.id, radius.unsafeGet(), contentId):
        if srcNodeId.isNone:
          gossipNodes.add(node)
        elif node.id != srcNodeId.get():
          gossipNodes.add(node)

  if gossipNodes.len >= p.config.maxGossipNodes: # use local nodes for gossip
    portal_gossip_without_lookup.inc(labelValues = [$p.protocolId])
    let numberOfGossipedNodes = min(gossipNodes.len, p.config.maxGossipNodes)
    for node in gossipNodes[0 ..< numberOfGossipedNodes]:
      let req = OfferRequest(dst: node, kind: Direct, contentList: contentList)
      await p.offerQueue.addLast(req)
    return numberOfGossipedNodes
  else: # use looked up nodes for gossip
    portal_gossip_with_lookup.inc(labelValues = [$p.protocolId])
    let closestNodes = await p.lookup(NodeId(contentId))
    let numberOfGossipedNodes = min(closestNodes.len, p.config.maxGossipNodes)
    for node in closestNodes[0 ..< numberOfGossipedNodes]:
      # Note: opportunistically not checking if the radius of the node is known
      # and thus if the node is in radius with the content. Reason is, these
      # should really be the closest nodes in the DHT, and thus are most likely
      # going to be in range of the requested content.
      let req = OfferRequest(dst: node, kind: Direct, contentList: contentList)
      await p.offerQueue.addLast(req)
    return numberOfGossipedNodes
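
# Illustrative sketch, assumption only: gossiping a single validated key/value
# pair onward, e.g. after it was received through an offer. `srcNodeId`, `key`
# and `value` are hypothetical; `contentKeys` and `content` must have the same
# length and ordering.
#
#   let keys = ContentKeysList.init(@[key])
#   let peerCount = await p.neighborhoodGossip(srcNodeId, keys, @[value])
#   trace "Gossiped content", peerCount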

proc neighborhoodGossipDiscardPeers*(
    p: PortalProtocol,
    srcNodeId: Opt[NodeId],
    contentKeys: ContentKeysList,
    content: seq[seq[byte]],
): Future[void] {.async: (raises: [CancelledError]).} =
  discard await p.neighborhoodGossip(srcNodeId, contentKeys, content)

proc randomGossip*(
    p: PortalProtocol,
    srcNodeId: Opt[NodeId],
    contentKeys: ContentKeysList,
    content: seq[seq[byte]],
): Future[int] {.async: (raises: [CancelledError]).} =
  ## Run random gossip for provided content.
  ## Returns the number of peers to which content was attempted to be gossiped.
  if content.len() == 0:
    return 0

  var contentList = List[ContentKV, contentKeysLimit].init(@[])
  for i, contentItem in content:
    let contentKV = ContentKV(contentKey: contentKeys[i], content: contentItem)
    discard contentList.add(contentKV)

  let nodes = p.routingTable.randomNodes(p.config.maxGossipNodes)

  for node in nodes[0 ..< nodes.len()]:
    let req = OfferRequest(dst: node, kind: Direct, contentList: contentList)
    await p.offerQueue.addLast(req)
  return nodes.len()

proc randomGossipDiscardPeers*(
    p: PortalProtocol,
    srcNodeId: Opt[NodeId],
    contentKeys: ContentKeysList,
    content: seq[seq[byte]],
): Future[void] {.async: (raises: [CancelledError]).} =
  discard await p.randomGossip(srcNodeId, contentKeys, content)

proc storeContent*(
    p: PortalProtocol,
    contentKey: ContentKeyByteList,
    contentId: ContentId,
    content: seq[byte],
    cacheContent = false,
): bool {.discardable.} =
  if cacheContent and not p.config.disableContentCache:
    # We cache content regardless of whether it is in our radius or not
    p.contentCache.put(contentId, content)

  # Always re-check that the key is still in the node range to make sure only
  # content in range is stored.
  if p.inRange(contentId):
    doAssert(p.dbPut != nil)
    p.dbPut(contentKey, contentId, content)
    true
  else:
    false

proc getLocalContent*(
    p: PortalProtocol, contentKey: ContentKeyByteList, contentId: ContentId
): Opt[seq[byte]] =
  # The cache can contain content that is not in our radius
  let maybeContent = p.contentCache.get(contentId)
  if maybeContent.isSome():
    portal_content_cache_hits.inc(labelValues = [$p.protocolId])
    return maybeContent

  portal_content_cache_misses.inc(labelValues = [$p.protocolId])

  # Check first if content is in range, as this is a cheaper operation
  # than the database lookup.
  if p.inRange(contentId):
    doAssert(p.dbGet != nil)
    p.dbGet(contentKey, contentId)
  else:
    Opt.none(seq[byte])
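
# Illustrative sketch, assumption only: storing validated content and reading
# it back. `key`, `contentId` and `value` are hypothetical; the database write
# only happens when `contentId` is within the node's radius, while the cache
# (when enabled) is filled regardless.
#
#   if p.storeContent(key, contentId, value, cacheContent = true):
#     doAssert p.getLocalContent(key, contentId).isSome()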

proc seedTable*(p: PortalProtocol) =
  ## Seed the table with specifically provided Portal bootstrap nodes. These are
  ## nodes that must support the wire protocol for the specific content network.
  # Note: We allow replacing the bootstrap nodes in the routing table as it is
  # possible that some of these are not supporting the specific portal network.
  # Other note: One could also pick nodes from the discv5 routing table to
  # bootstrap the portal networks, however it would require a flag in the ENR to
  # be added and there might be none in the routing table due to low amount of
  # Portal nodes versus other nodes.
  logScope:
    protocolId = p.protocolId

  for record in p.bootstrapRecords:
    if p.addNode(record):
      debug "Added bootstrap node", uri = toURI(record), protocolId = p.protocolId
    else:
      error "Bootstrap node could not be added",
        uri = toURI(record), protocolId = p.protocolId

proc populateTable(p: PortalProtocol) {.async: (raises: [CancelledError]).} =
  ## Do a set of initial lookups to quickly populate the table.
  # start with a self target query (neighbour nodes)
  logScope:
    protocolId = p.protocolId

  let selfQuery = await p.query(p.localNode.id)
  trace "Discovered nodes in self target query", nodes = selfQuery.len

  for i in 0 ..< initialLookups:
    let randomQuery = await p.queryRandom()
    trace "Discovered nodes in random target query", nodes = randomQuery.len

  debug "Total nodes in routing table after populate", total = p.routingTable.len()

proc revalidateNode*(p: PortalProtocol, n: Node) {.async: (raises: [CancelledError]).} =
  let pong = await p.ping(n)

  if pong.isOk():
    let res = pong.get()
    if res.enrSeq > n.record.seqNum:
      # Request new ENR
      let nodesMessage = await p.findNodes(n, @[0'u16])
      if nodesMessage.isOk():
        let nodes = nodesMessage.get()
        if nodes.len > 0: # Normally a node should only return 1 record actually
          discard p.addNode(nodes[0])

proc getNodeForRevalidation(p: PortalProtocol): Opt[Node] =
  let node = p.routingTable.nodeToRevalidate()
  if node.isNil:
    # This should not occur except for when the RT is empty
    return Opt.none(Node)

  let now = now(chronos.Moment)
  let timestamp = p.pingTimings.getOrDefault(node.id, Moment.init(0'i64, Second))

  if (timestamp + revalidationTimeout) < now:
    Opt.some(node)
  else:
    Opt.none(Node)

proc revalidateLoop(p: PortalProtocol) {.async: (raises: []).} =
  ## Loop which revalidates the nodes in the routing table by sending the ping
  ## message.
  try:
    while true:
      await sleepAsync(milliseconds(p.baseProtocol.rng[].rand(revalidateMax)))
      let n = getNodeForRevalidation(p)
      if n.isSome:
        asyncSpawn p.revalidateNode(n.get())
  except CancelledError:
    trace "revalidateLoop canceled"

proc refreshLoop(p: PortalProtocol) {.async: (raises: []).} =
  ## Loop that refreshes the routing table by starting a random query in case
  ## no queries were done since `refreshInterval` or more.
  ## It also refreshes the majority address voted for via pong responses.
  logScope:
    protocolId = p.protocolId

  try:
    while true:
      # TODO: It would be nicer and more secure if this was event based and/or
      # steered from the routing table.
      while p.routingTable.len() == 0:
        p.seedTable()
        await p.populateTable()
        await sleepAsync(5.seconds)

      let currentTime = now(chronos.Moment)
      if currentTime > (p.lastLookup + refreshInterval):
        let randomQuery = await p.queryRandom()
        trace "Discovered nodes in random target query", nodes = randomQuery.len
        debug "Total nodes in routing table", total = p.routingTable.len()

      await sleepAsync(refreshInterval)
  except CancelledError:
    trace "refreshLoop canceled"

proc start*(p: PortalProtocol) =
  p.refreshLoop = refreshLoop(p)
  p.revalidateLoop = revalidateLoop(p)

  for i in 0 ..< concurrentOffers:
    p.offerWorkers.add(offerWorker(p))

proc stop*(p: PortalProtocol) {.async: (raises: []).} =
  var futures: seq[Future[void]]

  if not p.revalidateLoop.isNil():
    futures.add(p.revalidateLoop.cancelAndWait())
  if not p.refreshLoop.isNil():
    futures.add(p.refreshLoop.cancelAndWait())

  for worker in p.offerWorkers:
    futures.add(worker.cancelAndWait())

  await noCancel(allFutures(futures))

  p.revalidateLoop = nil
  p.refreshLoop = nil
  p.offerWorkers = @[]
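
# Illustrative lifecycle sketch, assumption only: a content network is expected
# to start the protocol once its stream and database handlers are wired up, and
# to stop it again on shutdown.
#
#   p.start() # spawns the refresh, revalidation and offer worker loops
#   # ... node is running ...
#   await p.stop() # cancels the loops and workers and waits for them to finish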

proc resolve*(
    p: PortalProtocol, id: NodeId
): Future[Opt[Node]] {.async: (raises: [CancelledError]).} =
  ## Resolve a `Node` based on provided `NodeId`.
  ##
  ## This will first look in the own routing table. If the node is known, it
  ## will try to contact it for newer information. If the node is not known or
  ## it does not reply, a lookup is done to see if it can find a (newer) record
  ## of the node on the network.
  if id == p.localNode.id:
    return Opt.some(p.localNode)

  let node = p.getNode(id)
  if node.isSome():
    let nodesMessage = await p.findNodes(node.get(), @[0'u16])
    # TODO: Handle failures better. E.g. stop on different failures than timeout
    if nodesMessage.isOk() and nodesMessage[].len > 0:
      return Opt.some(nodesMessage[][0])

  let discovered = await p.lookup(id)
  for n in discovered:
    if n.id == id:
      if node.isSome() and node.get().record.seqNum >= n.record.seqNum:
        return node
      else:
        return Opt.some(n)

  return node
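
# Illustrative sketch, assumption only: resolving an ENR for a node id coming
# from an external source such as an RPC call. `nodeId` is a hypothetical value.
#
#   let nodeOpt = await p.resolve(nodeId)
#   if nodeOpt.isSome():
#     trace "Resolved node", record = toURI(nodeOpt.get().record)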

proc resolveWithRadius*(
    p: PortalProtocol, id: NodeId
): Future[Opt[(Node, UInt256)]] {.async: (raises: [CancelledError]).} =
  ## Resolve a `Node` based on provided `NodeId`, also trying to establish the
  ## known radius of the found node.
  ##
  ## This will first look in the own routing table. If the node is known, it
  ## will try to contact it for newer information. If the node is not known or
  ## it does not reply, a lookup is done to see if it can find a (newer) record
  ## of the node on the network.
  ##
  ## If the node is found, its radius is first checked in the radius cache; if
  ## the radius is not known, the node is pinged to establish its current radius.
  let n = await p.resolve(id)
  if n.isNone():
    return Opt.none((Node, UInt256))

  let node = n.unsafeGet()

  let r = p.radiusCache.get(id)
  if r.isSome():
    return Opt.some((node, r.unsafeGet()))

  let pongResult = await p.ping(node)
  if pongResult.isOk():
    let maybeRadius = p.radiusCache.get(id)
    # After successful ping radius should already be in cache, but for the
    # unlikely case that it is not, check it just to be sure.
    # TODO: refactor ping to return node radius.
    if maybeRadius.isNone():
      return Opt.none((Node, UInt256))
    else:
      return Opt.some((node, maybeRadius.unsafeGet()))
  else:
    return Opt.none((Node, UInt256))