# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/sets,
  chronos,
  stew/[byteutils, leb128, endians2],
  chronicles,
  eth/utp/utp_discv5_protocol,
  # even though utp_discv5_protocol exports this, the import is still needed,
  # perhaps because of the protocol.Protocol type usage?
  eth/p2p/discoveryv5/protocol,
  ./messages

export utp_discv5_protocol

logScope:
  topics = "portal_stream"

const
  utpProtocolId = "utp".toBytes()
  defaultConnectionTimeout = 15.seconds
  defaultContentReadTimeout = 60.seconds

  # The TalkReq message is used as transport for uTP. It is assumed here that
  # Portal protocol messages were exchanged before sending uTP over discv5 data.
  # This means that a session is established and that the discv5 messages sent
  # are discv5 ordinary message packets, for which the calculation below applies.
  talkReqOverhead = getTalkReqOverhead(utpProtocolId)
  utpHeaderOverhead = 20
  maxUtpPayloadSize = maxDiscv5PacketSize - talkReqOverhead - utpHeaderOverhead
  maxPendingTransfersPerPeer = 128
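
# For reference: discv5 packets are capped at 1280 bytes (`maxDiscv5PacketSize`),
# so `maxUtpPayloadSize` is what remains of a single ordinary discv5 message
# after subtracting the TALKREQ envelope overhead and the 20-byte uTP header,
# so that every uTP data packet fits in one discv5 TALKREQ message.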

type
  ConnectionId* = uint16

  ContentRequest = object
    nodeId: NodeId
    contentId: ContentId
    content: seq[byte]
    timeout: Moment

  ContentOffer = object
    nodeId: NodeId
    contentIds: seq[ContentId]
    contentKeys: ContentKeysList
    timeout: Moment
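
  # A `ContentRequest` tracks content that this node has agreed to send: the
  # peer connects over uTP and `writeContentRequest` writes `content` to the
  # socket. A `ContentOffer` tracks content this node has agreed to receive:
  # the peer connects and `readContentOffer` reads the accepted content values.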

  PortalStream* = ref object
    transport: UtpDiscv5Protocol
    # TODO:
    # Decide on what's the better collection to use and set some limits in them
    # on how many uTP transfers are allowed to happen concurrently.
    # Either set some limit, and drop whatever comes next. Unsure how to
    # communicate that with the peer however. Or have some more async waiting
    # until a spot becomes free, like with an AsyncQueue. Although the latter
    # probably can not be used here directly. This system however does need
    # some agreement on timeout values of how long a uTP socket may be
    # "listening" before it times out because of inactivity.
    # Or, depending on the direction, it might also depend on the timeout
    # values of the discovery v5 talkresp message.
    # TODO: Should the content key also be stored to be able to validate the
    # received data?
    contentRequests: TableRef[ConnectionId, ContentRequest]
    contentOffers: TableRef[ConnectionId, ContentOffer]
    connectionTimeout: Duration
    contentReadTimeout*: Duration
    rng: ref HmacDrbgContext
    pendingTransfers: TableRef[NodeId, HashSet[ContentId]]
    contentQueue*: AsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])]

  StreamManager* = ref object
    transport: UtpDiscv5Protocol
    streams: seq[PortalStream]
    rng: ref HmacDrbgContext

proc canAddPendingTransfer(
    transfers: TableRef[NodeId, HashSet[ContentId]],
    nodeId: NodeId,
    contentId: ContentId,
    limit: int,
): bool =
  if not transfers.contains(nodeId):
    return true

  try:
    let contentIds = transfers[nodeId]
    if (contentIds.len() < limit) and not contentIds.contains(contentId):
      return true
    else:
      debug "Pending transfer limit reached for peer", nodeId, contentId
      return false
  except KeyError as e:
    raiseAssert(e.msg)

proc addPendingTransfer(
    transfers: TableRef[NodeId, HashSet[ContentId]],
    nodeId: NodeId,
    contentId: ContentId,
) =
  if transfers.contains(nodeId):
    try:
      transfers[nodeId].incl(contentId)
    except KeyError as e:
      raiseAssert(e.msg)
  else:
    var contentIds = initHashSet[ContentId]()
    contentIds.incl(contentId)
    transfers[nodeId] = contentIds

proc removePendingTransfer(
    transfers: TableRef[NodeId, HashSet[ContentId]],
    nodeId: NodeId,
    contentId: ContentId,
) =
  doAssert transfers.contains(nodeId)

  try:
    transfers[nodeId].excl(contentId)

    if transfers[nodeId].len() == 0:
      transfers.del(nodeId)
  except KeyError as e:
    raiseAssert(e.msg)

template canAddPendingTransfer*(
    stream: PortalStream, nodeId: NodeId, contentId: ContentId
): bool =
  stream.pendingTransfers.canAddPendingTransfer(
    nodeId, contentId, maxPendingTransfersPerPeer
  )

template addPendingTransfer*(
    stream: PortalStream, nodeId: NodeId, contentId: ContentId
) =
  addPendingTransfer(stream.pendingTransfers, nodeId, contentId)

template removePendingTransfer*(
    stream: PortalStream, nodeId: NodeId, contentId: ContentId
) =
  removePendingTransfer(stream.pendingTransfers, nodeId, contentId)
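
# A minimal usage sketch (hypothetical caller, not part of this module): a
# Portal sub-network handling an incoming Offer would typically do
#
#   if stream.canAddPendingTransfer(srcId, contentId):
#     stream.addPendingTransfer(srcId, contentId)
#     let connectionId = stream.addContentOffer(srcId, contentKeys, contentIds)
#
# and rely on the pruning procs below, or on the incoming uTP connection being
# handled, to call `removePendingTransfer` again.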

proc pruneAllowedRequestConnections*(stream: PortalStream) =
  # Prune requests that didn't receive a connection request
  # before `connectionTimeout`.
  let now = Moment.now()

  var connectionIdsToPrune = newSeq[ConnectionId]()
  for connectionId, request in stream.contentRequests:
    if request.timeout <= now:
      stream.removePendingTransfer(request.nodeId, request.contentId)
      connectionIdsToPrune.add(connectionId)

  for connectionId in connectionIdsToPrune:
    stream.contentRequests.del(connectionId)

proc pruneAllowedOfferConnections*(stream: PortalStream) =
  # Prune offers that didn't receive a connection request
  # before `connectionTimeout`.
  let now = Moment.now()

  var connectionIdsToPrune = newSeq[ConnectionId]()
  for connectionId, offer in stream.contentOffers:
    if offer.timeout <= now:
      for contentId in offer.contentIds:
        stream.removePendingTransfer(offer.nodeId, contentId)
      connectionIdsToPrune.add(connectionId)

  for connectionId in connectionIdsToPrune:
    stream.contentOffers.del(connectionId)

proc addContentOffer*(
    stream: PortalStream,
    nodeId: NodeId,
    contentKeys: ContentKeysList,
    contentIds: seq[ContentId],
): Bytes2 =
  # TODO: Should we check if `NodeId` & `connectionId` combo already exists?
  # What happens if we get duplicates?
  var connectionId: Bytes2
  stream.rng[].generate(connectionId)

  # uTP protocol uses BE for all values in the header, incl. connection id.
  var id = ConnectionId.fromBytesBE(connectionId)

  # Generate a new id if one already exists, to avoid using a duplicate
  if stream.contentOffers.contains(id):
    stream.rng[].generate(connectionId)
    id = ConnectionId.fromBytesBE(connectionId)

  debug "Register new incoming offer", contentKeys

  let contentOffer = ContentOffer(
    nodeId: nodeId,
    contentIds: contentIds,
    contentKeys: contentKeys,
    timeout: Moment.now() + stream.connectionTimeout,
  )
  stream.contentOffers[id] = contentOffer

  return connectionId
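
# Note: the returned `Bytes2` is the raw connection id as handed to the remote
# peer (e.g. in the Accept message of the Portal wire protocol); the peer is
# expected to initiate the uTP connection using this id, which is how
# `allowedConnection` and `handleIncomingConnection` below match the incoming
# SYN back to the registered offer or request.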

proc addContentRequest*(
    stream: PortalStream, nodeId: NodeId, contentId: ContentId, content: seq[byte]
): Bytes2 =
  # TODO: Should we check if `NodeId` & `connectionId` combo already exists?
  # What happens if we get duplicates?
  var connectionId: Bytes2
  stream.rng[].generate(connectionId)

  # uTP protocol uses BE for all values in the header, incl. connection id.
  var id = ConnectionId.fromBytesBE(connectionId)

  # Generate a new id if one already exists, to avoid using a duplicate
  if stream.contentRequests.contains(id):
    stream.rng[].generate(connectionId)
    id = ConnectionId.fromBytesBE(connectionId)

  let contentRequest = ContentRequest(
    nodeId: nodeId,
    contentId: contentId,
    content: content,
    timeout: Moment.now() + stream.connectionTimeout,
  )
  stream.contentRequests[id] = contentRequest

  return connectionId

proc connectTo*(
    stream: PortalStream, nodeAddress: NodeAddress, connectionId: ConnectionId
): Future[Result[UtpSocket[NodeAddress], string]] {.async: (raises: [CancelledError]).} =
  let connectRes = await stream.transport.connectTo(nodeAddress, connectionId)
  if connectRes.isErr():
    case connectRes.error
    of SocketAlreadyExists:
      # There is already a socket to this nodeAddress with the given connection
      # id. This means that the peer sent a connection id which is already in use.
      err(
        "Socket to " & $nodeAddress & " with connection id " & $connectionId &
          " already exists"
      )
    of ConnectionTimedOut:
      # A time-out here means that a uTP SYN packet was sent 3 times and failed
      # to be acked. This should be enough of an indication that the remote host
      # is not reachable and no new connections are attempted.
      err("uTP connection timeout when connecting to node: " & $nodeAddress)
  else:
    ok(connectRes.value())

proc writeContentRequest(
    socket: UtpSocket[NodeAddress], stream: PortalStream, request: ContentRequest
) {.async: (raises: [CancelledError]).} =
  let dataWritten = await socket.write(request.content)
  if dataWritten.isErr():
    debug "Error writing requested data", error = dataWritten.error

  await socket.closeWait()

proc readVarint(
    socket: UtpSocket[NodeAddress]
): Future[Result[uint32, string]] {.async: (raises: [CancelledError]).} =
  var buffer: array[5, byte]

  for i in 0 ..< len(buffer):
    let dataRead = await socket.read(1)
    if dataRead.len() == 0:
      return err("No data read")

    buffer[i] = dataRead[0]

    let (lenU32, bytesRead) = fromBytes(uint32, buffer.toOpenArray(0, i), Leb128)
    if bytesRead > 0:
      return ok(lenU32)
    elif bytesRead == 0:
      continue
    else:
      return err("Failed to read varint")
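
# Example of the framing read above: each content value on the socket is
# prefixed with its length as an unsigned LEB128 varint, e.g. a 300-byte value
# arrives as the two length bytes 0xAC 0x02 followed by the 300 value bytes.
# Five buffer bytes are enough to hold any encoded uint32 length.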

proc readContentValue(
    socket: UtpSocket[NodeAddress]
): Future[Result[seq[byte], string]] {.async: (raises: [CancelledError]).} =
  let len = (await socket.readVarint()).valueOr:
    return err($error)

  let contentValue = await socket.read(len)
  if contentValue.len() == len.int:
    ok(contentValue)
  else:
    err("Content value length mismatch")

proc readContentOffer(
    socket: UtpSocket[NodeAddress], stream: PortalStream, offer: ContentOffer
) {.async: (raises: [CancelledError]).} =
  # Read the number of content values corresponding to the amount of
  # ContentKeys accepted. This will either end with a FIN, or because the read
  # action times out, or because the number of expected values was read (if
  # this happens and no FIN was received yet, a FIN will be sent from this side).
  # None of this means that the contentValues are valid, further validation is
  # required.
  # The socket will be closed when this call ends.

  # TODO: Currently reading from the socket one value at a time, and validating
  # values at a later time. Uncertain what the best approach is here (mostly
  # from a security PoV), e.g. other options such as reading all content from
  # the socket at once, then processing the individual content values, or
  # reading and validating one at a time.
  let amount = offer.contentKeys.len()

  var contentValues: seq[seq[byte]]
  for i in 0 ..< amount:
    let contentValueFut = socket.readContentValue()
    if await contentValueFut.withTimeout(stream.contentReadTimeout):
      let contentValue = await contentValueFut

      if contentValue.isOk():
        contentValues.add(contentValue.get())
      else:
        # Invalid data, stop reading content, but still process data received
        # so far.
        debug "Reading content value failed, content offer failed",
          contentKeys = offer.contentKeys, error = contentValue.error
        break
    else:
      # Read timed out, stop further reading, but still process data received
      # so far.
      debug "Reading data from socket timed out, content offer failed",
        contentKeys = offer.contentKeys
      break

  if socket.atEof():
    # Destroy the socket instead of closing it, as we already received a FIN.
    # Closing would also send a FIN from our side, see also:
    # https://github.com/status-im/nim-eth/blob/b2dab4be0839c95ca2564df9eacf81995bf57802/eth/utp/utp_socket.nim#L1223
    await socket.destroyWait()
  else:
    # This means the FIN didn't arrive yet, perhaps it got dropped but it might
    # also still be in flight.
    #
    # uTP has one-way FIN + FIN-ACK to destroy the connection. The stream
    # already has the information from the application layer to know that all
    # required data was received. But we do not send a FIN from our side anyhow,
    # as there is probably one from the other side in flight.
    # Sending a FIN from our side turns out not to improve the speed of
    # disconnecting, as other implementations seem to not like the situation
    # of receiving our FIN before our FIN-ACK.
    # We do however put a limited timeout on the receipt of the FIN and destroy
    # the socket otherwise.
    proc delayedDestroy(
        socket: UtpSocket[NodeAddress], delay: Duration
    ) {.async: (raises: [CancelledError]).} =
      await sleepAsync(delay)
      await socket.destroyWait()

    asyncSpawn socket.delayedDestroy(4.seconds)

  # TODO: This could currently create a backlog of content values to be
  # validated as `AcceptConnectionCallback` is `asyncSpawn`'ed and there are no
  # limits on the `contentOffers`. Might move the queue to before the reading
  # of the socket, and let the specific networks handle that.
  await stream.contentQueue.put(
    (Opt.some(offer.nodeId), offer.contentKeys, contentValues)
  )

proc new(
    T: type PortalStream,
    transport: UtpDiscv5Protocol,
    contentQueue: AsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])],
    connectionTimeout: Duration,
    contentReadTimeout: Duration,
    rng: ref HmacDrbgContext,
): T =
  let stream = PortalStream(
    transport: transport,
    contentRequests: newTable[ConnectionId, ContentRequest](),
    contentOffers: newTable[ConnectionId, ContentOffer](),
    connectionTimeout: connectionTimeout,
    contentReadTimeout: contentReadTimeout,
    pendingTransfers: newTable[NodeId, HashSet[ContentId]](),
    contentQueue: contentQueue,
    rng: rng,
  )

  stream

proc allowedConnection(
    stream: PortalStream, address: NodeAddress, connectionId: ConnectionId
): bool =
  if stream.contentRequests.contains(connectionId) and
      stream.contentRequests.getOrDefault(connectionId).nodeId == address.nodeId:
    return true

  if stream.contentOffers.contains(connectionId) and
      stream.contentOffers.getOrDefault(connectionId).nodeId == address.nodeId:
    return true

  return false
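
# In other words: an incoming uTP SYN is only allowed when its connection id
# was previously handed out via `addContentRequest` or `addContentOffer`, and
# only when it comes from the node that the id was handed out to.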

proc handleIncomingConnection(
    server: UtpRouter[NodeAddress], socket: UtpSocket[NodeAddress]
): Future[void] {.async: (raw: true, raises: []).} =
  let manager = getUserData[NodeAddress, StreamManager](server)

  for stream in manager.streams:
    # Note: The connection id of a uTP SYN is different from other packets, it
    # is actually the peer's `send_conn_id`, as opposed to `receive_conn_id`
    # for all other packets.

    if stream.contentRequests.contains(socket.connectionId):
      let request = stream.contentRequests.getOrDefault(socket.connectionId)
      if request.nodeId == socket.remoteAddress.nodeId:
        let fut = socket.writeContentRequest(stream, request)

        stream.removePendingTransfer(request.nodeId, request.contentId)
        stream.contentRequests.del(socket.connectionId)
        return noCancel(fut)

    if stream.contentOffers.contains(socket.connectionId):
      let offer = stream.contentOffers.getOrDefault(socket.connectionId)
      if offer.nodeId == socket.remoteAddress.nodeId:
        let fut = socket.readContentOffer(stream, offer)

        for contentId in offer.contentIds:
          stream.removePendingTransfer(offer.nodeId, contentId)
        stream.contentOffers.del(socket.connectionId)
        return noCancel(fut)

  # TODO: Is there a scenario where this can happen,
  # considering `allowRegisteredIdCallback`? If not, doAssert?
  var fut = newFuture[void]("fluffy.AcceptConnectionCallback")
  fut.complete()
  return fut

proc allowIncomingConnection(
    r: UtpRouter[NodeAddress], remoteAddress: NodeAddress, connectionId: ConnectionId
): bool =
  let manager = getUserData[NodeAddress, StreamManager](r)
  for stream in manager.streams:
    # stream.pruneAllowedConnections()
    if allowedConnection(stream, remoteAddress, connectionId):
      return true

proc new*(T: type StreamManager, d: protocol.Protocol): T =
  let
    socketConfig = SocketConfig.init(
      # Setting this to none means that incoming sockets are in Connected state,
      # which means they can send and receive data.
      incomingSocketReceiveTimeout = none(Duration),
      payloadSize = uint32(maxUtpPayloadSize),
    )
    manager = StreamManager(streams: @[], rng: d.rng)
    utpOverDiscV5Protocol = UtpDiscv5Protocol.new(
      d, utpProtocolId, handleIncomingConnection, manager, allowIncomingConnection,
      socketConfig,
    )

  manager.transport = utpOverDiscV5Protocol

  return manager

proc registerNewStream*(
    m: StreamManager,
    contentQueue: AsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])],
    connectionTimeout = defaultConnectionTimeout,
    contentReadTimeout = defaultContentReadTimeout,
): PortalStream =
  let s = PortalStream.new(
    m.transport, contentQueue, connectionTimeout, contentReadTimeout, m.rng
  )

  m.streams.add(s)

  return s
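
# A minimal wiring sketch (hypothetical caller, names assumed): one
# `StreamManager` is created per discv5 `protocol.Protocol` instance, and each
# Portal sub-network registers its own stream with its own content queue:
#
#   let
#     streamManager = StreamManager.new(discv5)
#     contentQueue =
#       newAsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])](50)
#     stream = streamManager.registerNewStream(contentQueue)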