# Copyright (c) 2021-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/[sugar, deques],
  chronos, chronicles,
  stew/[results, bitops2],
  ./growable_buffer,
  ./packets,
  ./ledbat_congestion_control,
  ./delay_histogram,
  ./utp_utils,
  ./clock_drift_calculator

export
  chronicles

logScope:
  topics = "eth utp utp_socket"

type
  ConnectionState* = enum
    SynSent,
    SynRecv,
    Connected,
    Destroy

  ConnectionDirection = enum
    Outgoing, Incoming

  UtpSocketKey*[A] = object
    remoteAddress*: A
    rcvId*: uint16

  OutgoingPacket = object
    packetBytes: seq[byte]
    transmissions: uint16
    needResend: bool
    payloadLength: uint32
    timeSent: Moment

  AckResult = enum
    PacketAcked, PacketAlreadyAcked, PacketNotSentYet

  # Socket callback to send data to remote peer
  SendCallback*[A] =
    proc (to: A, data: seq[byte]): Future[void] {.gcsafe, raises: []}

  SocketConfig* = object
    # This is configurable (in contrast to reference impl), as with standard 2
    # SYN resends, the default timeout of 3 seconds and the doubling of the
    # timeout with each resend, it means that the initial connection would
    # timeout only after 21s, which seems rather long.
    initialSynTimeout*: Duration

    # Number of resend retries of each data packet, before declaring the
    # connection as failed.
    dataResendsBeforeFailure*: uint16

    # Maximal size of receive buffer in bytes
    optRcvBuffer*: uint32

    # Maximal size of send buffer in bytes
    optSndBuffer*: uint32

    # If set to some(`Duration`), the incoming socket will be initialized in
    # `SynRecv` state and the remote peer will have `Duration` to transfer data
    # to move the socket in `Connected` state.
    # If set to none, the incoming socket will immediately be set to `Connected`
    # state and will be able to transfer data.
    incomingSocketReceiveTimeout*: Option[Duration]

    # Timeout after which the send window will be reset to its minimal value
    # after it dropped to zero,
    # i.e when a packet is received from a peer with `wndSize` set to 0.
    remoteWindowResetTimeout*: Duration

    # Size of reorder buffer calculated as fraction of optRcvBuffer
    maxSizeOfReorderBuffer: uint32

    # Maximal number of payload bytes per data packet. Total packet size will be
    # equal to payloadSize + 20 (size of header of data packet).
    # TODO: for now we enable only static configuration of packet sizes. In the
    # future it would be nice to add an option which enables automatic packet
    # size discovery based on traffic.
    payloadSize*: uint32

    # Maximal number of open uTP connections. When hit, no more incoming
    # connections will be allowed, but it will still be possible to open new
    # outgoing uTP connections.
    maxNumberOfOpenConnections*: int

  WriteErrorType* = enum
    SocketNotWriteable,
    FinSent

  WriteError* = object
    case kind*: WriteErrorType
    of SocketNotWriteable:
      currentState*: ConnectionState
    of FinSent:
      discard

  WriteResult* = Result[int, WriteError]

  WriteRequestType = enum
    Data, Close

  WriteRequest = object
    case kind: WriteRequestType
    of Data:
      data: seq[byte]
      writer: Future[WriteResult]
    of Close:
      discard

  SocketEventType = enum
    NewPacket, CheckTimeouts, CloseReq, WriteReq, ReadReqType

  ReadReq = object
    bytesToRead: int
    bytesAvailable: seq[uint8]
    reader: Future[seq[uint8]]

  ReadResult = enum
    ReadCancelled, ReadFinished, ReadNotFinished, SocketAlreadyFinished

  SocketEvent = object
    case kind: SocketEventType
    of CheckTimeouts:
      discard
    of NewPacket:
      packet: Packet
    of CloseReq:
      discard
    of WriteReq:
      data: seq[byte]
      writer: Future[WriteResult]
    of ReadReqType:
      readReq: ReadReq

  UtpSocket*[A] = ref object
    remoteAddress*: A
    state: ConnectionState
    direction: ConnectionDirection
    socketConfig: SocketConfig

    # Connection id for received packets
    connectionIdRcv*: uint16
    # Connection id for packets we send
    connectionIdSnd*: uint16
    # Sequence number for the next packet to be sent.
    seqNr: uint16
    # All sequence numbers up to this have been correctly acked by us.
    ackNr: uint16

    # Should be completed after successful connection to remote host or after
    # timeout for the first SYN packet.
    connectionFuture: Future[void]

    # The number of packets in the send queue. Packets that haven't
    # been sent yet and packets marked as needing to be resent count.
    # The oldest un-acked packet in the send queue is seq_nr - cur_window_packets
    curWindowPackets: uint16

    # outgoing buffer for all packets that are sent or waiting to be sent
    outBuffer: GrowableCircularBuffer[OutgoingPacket]

    # current number of bytes in send buffer
    outBufferBytes: uint32

    # current number of bytes in flight
    currentWindow: uint32

    # current max window broadcasted by remote peer
    maxRemoteWindow: uint32

    # current max window calculated by ledbat congestion controller
    maxWindow: uint32

    # incoming buffer for out of order packets
    inBuffer: GrowableCircularBuffer[Packet]

    # number of bytes in reorder buffer
    inBufferBytes: uint32

    # Number of packets waiting in reorder buffer
    reorderCount: uint16

    # current retransmit timeout used to calculate rtoTimeout
    retransmitTimeout: Duration

    # calculated round trip time during communication with remote peer
    rtt: Duration
    # calculated round trip time variance
    rttVar: Duration
    # Round trip timeout dynamically updated based on acks received from remote
    # peer
    rto: Duration

    # RTO timeout will happen when currentTime > rtoTimeout
    rtoTimeout: Moment

    # receive buffer
    rcvBuffer: seq[byte]

    # current size of rcv buffer
    offset: int

    # readers waiting for data
    pendingReads: Deque[ReadReq]

    # loop called every 500ms to check for ongoing timeout status
    checkTimeoutsLoop: Future[void]

    # number of consecutive re-transmissions
    retransmitCount: uint32

    # Event which will complete whenever socket gets in destroy state
    closeEvent: AsyncEvent

    # All callbacks to be called whenever socket gets in destroy state
    closeCallbacks: seq[Future[void]]

    # socket is closed for reading
    readShutdown: bool

    # we sent out fin packet
    finSent: bool

    # we requested to close the socket by sending fin packet
    sendFinRequested: bool

    # has our fin been acked
    finAcked: bool

    # have we received remote fin
    gotFin: bool

    # have we reached remote fin packet
    reachedFin: bool

    # sequence number of remote fin packet
    eofPktNr: uint16

    pendingWrites: Deque[WriteRequest]

    eventQueue: AsyncQueue[SocketEvent]

    eventLoop: Future[void]

    # timer which is started when peer max window drops below current packet size
    zeroWindowTimer: Option[Moment]

    # last measured delay between current local timestamp and remote sent
    # timestamp. In microseconds
    replayMicro: uint32

    # indicator if we're in slow-start (exponential growth) phase
    slowStart: bool

    # indicator if we're in fast time out mode i.e we will resend the
    # oldest un-acked packet in case of a newer packet arriving
    fastTimeout: bool

    # Sequence number of the next packet we are allowed to fast-resend. This is
    # necessary to make sure we only fast resend once per packet
    fastResendSeqNr: uint16

    # last time we decreased max window
    lastWindowDecay: Moment

    # counter of duplicate acks
    duplicateAck: uint16

    # the slow-start threshold, in bytes
    slowStartThreshold: uint32

    # history of our delays
    ourHistogram: DelayHistogram

    # history of remote delays
    remoteHistogram: DelayHistogram

    # calculator of drift between local and remote clocks
    driftCalculator: ClockDriftCalculator

    # socket identifier
    socketKey*: UtpSocketKey[A]

    send: SendCallback[A]

  # User driven callback to be called whenever socket is permanently closed i.e
  # reaches destroy state
  SocketCloseCallback* = proc (): void {.gcsafe, raises: [].}

  ConnectionError* = object of CatchableError

  OutgoingConnectionErrorType* = enum
    SocketAlreadyExists, ConnectionTimedOut

  OutgoingConnectionError* = object
    case kind*: OutgoingConnectionErrorType
    of SocketAlreadyExists, ConnectionTimedOut:
      discard

  ConnectionResult*[A] = Result[UtpSocket[A], OutgoingConnectionError]

const
  # Default maximum size of the data packet payload. With this configuration
  # data packets will have 508 bytes (488 + 20 header).
  # 508 bytes of uTP payload can translate into a 576 bytes IP packet i.e
  # 508 bytes + 60 bytes (max IP header) + 8 bytes (UDP header) = 576 bytes.
  # 576 bytes is defined as the minimum reassembly buffer size, i.e the minimum
  # datagram size that any implementation must support.
  # From RFC791: All hosts must be prepared to accept datagrams of up to 576
  # octets (whether they arrive whole or in fragments).
  defaultPayloadSize = 488

  # How often each socket checks its different ongoing timers
  checkTimeoutsLoopInterval = milliseconds(500)

  # Default initial timeout for the first SYN packet
  defaultInitialSynTimeout = milliseconds(3000)

  # Initial timeout to receive the first data packet after receiving the
  # initial SYN packet.
  defaultRcvRetransmitTimeout = milliseconds(10000)

  # Number of times each data packet will be resent before declaring the
  # connection dead. 4 is taken from the reference implementation.
  defaultDataResendsBeforeFailure = 4'u16

  # default size of rcv buffer in bytes
  # rationale from C reference impl:
  # 1 MB of receive buffer (i.e. max bandwidth delay product)
  # means that from a peer with 200 ms RTT, we cannot receive
  # faster than 5 MB/s
  # from a peer with 10 ms RTT, we cannot receive faster than
  # 100 MB/s. This is assumed to be good enough, since bandwidth
  # often is proportional to RTT anyway
  defaultOptRcvBuffer: uint32 = 1024 * 1024

  # rationale from C reference impl:
  # Allow a reception window of at least 3 ack_nrs behind seq_nr
  # A non-SYN packet with an ack_nr difference greater than this is
  # considered suspicious and ignored
  allowedAckWindow*: uint16 = 3

  # Timeout after which the send window will be reset to its minimal value after
  # it dropped lower than our current packet size, i.e when we received a packet
  # from the remote peer with `wndSize` set to a number <= current packet size.
  defaultResetWindowTimeout = seconds(15)

  reorderBufferMaxSize = 1024

  duplicateAcksBeforeResend = 3

  # minimal time before subsequent window decays
  maxWindowDecay = milliseconds(100)

  # Maximal size of reorder buffer as fraction of optRcvBuffer size.
  # The following semantics apply for a rcvBuffer set to 1000 bytes:
  # - if there are already 1000 bytes in rcvBuffer, no more bytes will be
  #   accepted to the reorder buffer
  # - if there are already 500 bytes in the reorder buffer, no more bytes will
  #   be accepted to it, and only 500 bytes can be accepted to rcvBuffer
  # This way there is always space in rcvBuffer to fit new data if the
  # reordering happens.
  maxReorderBufferSize = 0.5

  # Default number of open utp connections
  # - libutp uses 3000
  # - libtorrent uses ~16000
  defaultMaxOpenConnections = 8000

proc init*[A](T: type UtpSocketKey, remoteAddress: A, rcvId: uint16): T =
  UtpSocketKey[A](remoteAddress: remoteAddress, rcvId: rcvId)

proc init(
  T: type OutgoingPacket,
  packetBytes: seq[byte],
  transmissions: uint16,
  needResend: bool,
  payloadLength: uint32,
  timeSent: Moment = getMonoTimestamp().moment): T =
  OutgoingPacket(
    packetBytes: packetBytes,
    transmissions: transmissions,
    needResend: needResend,
    payloadLength: payloadLength,
    timeSent: timeSent
  )

proc init*(
  T: type SocketConfig,
  initialSynTimeout: Duration = defaultInitialSynTimeout,
  dataResendsBeforeFailure: uint16 = defaultDataResendsBeforeFailure,
  optRcvBuffer: uint32 = defaultOptRcvBuffer,
  incomingSocketReceiveTimeout: Option[Duration] = some(defaultRcvRetransmitTimeout),
  remoteWindowResetTimeout: Duration = defaultResetWindowTimeout,
  optSndBuffer: uint32 = defaultOptRcvBuffer,
  payloadSize: uint32 = defaultPayloadSize,
  maxNumberOfOpenConnections: int = defaultMaxOpenConnections
): T =
  # Make sure there is always some payload in data packets, and that packets are
  # not too large. With the 1480 byte boundary, data packets will have at most
  # 1500 bytes, which seems reasonable.
  doAssert(payloadSize > 0 and payloadSize <= 1480,
    "payloadSize should always be positive number <= 1480")
  # TODO make sure optRcvBuffer is nicely divisible by maxReorderBufferSize
  let reorderBufferSize = uint32(maxReorderBufferSize * float64(optRcvBuffer))
  SocketConfig(
    initialSynTimeout: initialSynTimeout,
    dataResendsBeforeFailure: dataResendsBeforeFailure,
    optRcvBuffer: optRcvBuffer,
    optSndBuffer: optSndBuffer,
    incomingSocketReceiveTimeout: incomingSocketReceiveTimeout,
    remoteWindowResetTimeout: remoteWindowResetTimeout,
    maxSizeOfReorderBuffer: reorderBufferSize,
    payloadSize: payloadSize,
    maxNumberOfOpenConnections: maxNumberOfOpenConnections
  )
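
# Usage sketch (illustrative, not part of the module): a caller could tune the
# defaults like below; the concrete values are arbitrary example numbers.
#
#   let cfg = SocketConfig.init(
#     initialSynTimeout = milliseconds(1000),
#     optRcvBuffer = uint32(512 * 1024),
#     payloadSize = uint32(1024)
#   )
#
# Any parameter that is not passed keeps the default defined in the `const`
# section above.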

# number of bytes which will fit in current send window
proc freeWindowBytes(socket: UtpSocket): uint32 =
  let maxSend = min(socket.maxRemoteWindow, socket.maxWindow)
  if (maxSend <= socket.currentWindow):
    return 0
  else:
    return maxSend - socket.currentWindow

proc getRcvWindowSize(socket: UtpSocket): uint32 =
  let currentDataSize = socket.offset
  if currentDataSize > int(socket.socketConfig.optRcvBuffer):
    0'u32
  else:
    socket.socketConfig.optRcvBuffer - uint32(currentDataSize)

proc registerOutgoingPacket(socket: UtpSocket, oPacket: OutgoingPacket) =
  ## Adds packet to outgoing buffer and updates all related fields
  socket.outBuffer.ensureSize(socket.seqNr, socket.curWindowPackets)
  socket.outBuffer.put(socket.seqNr, oPacket)
  socket.outBufferBytes = socket.outBufferBytes + oPacket.payloadLength
  inc socket.seqNr
  inc socket.curWindowPackets

proc sendData(socket: UtpSocket, data: seq[byte]) =
  let f = socket.send(socket.remoteAddress, data)
  f.callback = proc(data: pointer) {.gcsafe.} =
    if f.failed:
      warn "UTP send failed", msg = f.readError.msg

proc sendPacket(socket: UtpSocket, seqNr: uint16) =
  proc setSend(p: var OutgoingPacket): seq[byte] =
    let timestampInfo = getMonoTimestamp()

    if p.transmissions == 0 or p.needResend:
      socket.currentWindow = socket.currentWindow + p.payloadLength

    inc p.transmissions
    p.needResend = false
    p.timeSent = timestampInfo.moment
    # all bytearrays in outgoing buffer should be properly encoded utp packets
    # so it is safe to directly modify fields
    modifyTimeStampAndAckNr(p.packetBytes, timestampInfo.timestamp, socket.ackNr)

    return p.packetBytes

  socket.sendData(setSend(socket.outBuffer[seqNr]))

proc resetSendTimeout(socket: UtpSocket) =
  socket.retransmitTimeout = socket.rto
  socket.rtoTimeout = getMonoTimestamp().moment + socket.retransmitTimeout

proc flushPackets(socket: UtpSocket) =
  let oldestOutgoingPacketSeqNr = socket.seqNr - socket.curWindowPackets
  var i: uint16 = oldestOutgoingPacketSeqNr
  while i != socket.seqNr:
    # sending only packets which were not transmitted yet or need a resend
    let shouldSendPacket = socket.outBuffer.exists(
      i, (p: OutgoingPacket) => (p.transmissions == 0 or p.needResend == true))
    if (shouldSendPacket):
      if (socket.freeWindowBytes() > 0):
        # this is our first sent packet, reset the rto timeout
        if i == oldestOutgoingPacketSeqNr and
            socket.curWindowPackets == 1 and
            socket.outBuffer[i].transmissions == 0:
          socket.resetSendTimeout()

        debug "Flushing packet",
          pkSeqNr = i
        socket.sendPacket(i)
      else:
        debug "Should resend packet during flush but there is no place in send window",
          currentBytesWindow = socket.currentWindow,
          maxRemoteWindow = socket.maxRemoteWindow,
          maxWindow = socket.maxWindow,
          pkSeqNr = i
        # there is no place in the send buffer, stop flushing
        return
    inc i

proc markAllPacketAsLost(s: UtpSocket) =
  var i = 0'u16
  while i < s.curWindowPackets:
    let packetSeqNr = s.seqNr - 1 - i
    if (s.outBuffer.exists(
        packetSeqNr,
        (p: OutgoingPacket) => p.transmissions > 0 and p.needResend == false)):
      debug "Marking packet as lost",
        pkSeqNr = packetSeqNr
      s.outBuffer[packetSeqNr].needResend = true
      let packetPayloadLength = s.outBuffer[packetSeqNr].payloadLength
      doAssert(s.currentWindow >= packetPayloadLength,
        "Window should always be larger than packet length")
      s.currentWindow = s.currentWindow - packetPayloadLength

    inc i

proc isOpened(socket: UtpSocket): bool =
  return (
    socket.state == SynRecv or
    socket.state == SynSent or
    socket.state == Connected
  )

proc shouldDisconnectFromFailedRemote(socket: UtpSocket): bool =
  (socket.state == SynSent and socket.retransmitCount >= 2) or
    (socket.retransmitCount >= socket.socketConfig.dataResendsBeforeFailure)

proc checkTimeouts(socket: UtpSocket) =
  let currentTime = getMonoTimestamp().moment
  # flush all packets which need to be re-sent
  if socket.state != Destroy:
    socket.flushPackets()

  if socket.isOpened():
    let currentPacketSize = socket.getPacketSize()

    if (socket.zeroWindowTimer.isSome() and currentTime > socket.zeroWindowTimer.unsafeGet()):
      if socket.maxRemoteWindow <= currentPacketSize:
        # Reset remote window to the minimal value which will fit at least two packets
        let minimalRemoteWindow = 2 * socket.socketConfig.payloadSize
        socket.maxRemoteWindow = minimalRemoteWindow
        debug "Reset remote window to minimal value",
          minRemote = minimalRemoteWindow
      socket.zeroWindowTimer = none[Moment]()

    if (currentTime > socket.rtoTimeout):
      debug "CheckTimeouts rto timeout",
        socketKey = socket.socketKey,
        state = socket.state,
        maxWindow = socket.maxWindow,
        curWindowPackets = socket.curWindowPackets,
        curWindowBytes = socket.currentWindow

      # TODO add handling of probe time outs. Reference implementation has a mechanism
      # of sending probes to determine mtu size. Probe timeouts do not count to standard
      # timeouts calculations

      # client initiated the connection, but did not send the following data packet in rto
      # time and our socket is configured to start in SynRecv state.
      if (socket.state == SynRecv):
        socket.destroy()
        return

      if socket.shouldDisconnectFromFailedRemote():
        debug "Remote host failed",
          state = socket.state,
          retransmitCount = socket.retransmitCount

        if socket.state == SynSent and (not socket.connectionFuture.finished()):
          socket.connectionFuture.fail(newException(ConnectionError, "Connection to peer timed out"))

        socket.destroy()
        return

      let newTimeout = socket.retransmitTimeout * 2
      socket.retransmitTimeout = newTimeout
      socket.rtoTimeout = currentTime + newTimeout

      # on timeout reset the duplicate ack counter
      socket.duplicateAck = 0

      if (socket.curWindowPackets == 0 and socket.maxWindow > currentPacketSize):
        # there are no packets in flight even though there is place for more than a whole
        # packet, which means the connection is just idling. Reduce the window by 1/3'rd,
        # but not below the size of one packet.
        let oldMaxWindow = socket.maxWindow
        let newMaxWindow = max((oldMaxWindow * 2) div 3, currentPacketSize)

        debug "Decaying max window due to socket idling",
          oldMaxWindow = oldMaxWindow,
          newMaxWindow = newMaxWindow

        socket.maxWindow = newMaxWindow
      elif (socket.maxWindow < currentPacketSize):
        # due to high delay the window has shrunk below the packet size
        # which means that we cannot send more data
        # reset it to fit at least one packet
        debug "Resetting window size to fit at least one packet",
          oldWindowSize = socket.maxWindow,
          newWindowSize = currentPacketSize

        # delay was so high that the window has shrunk below one packet. Reset the window
        # to fit at least one packet and start with slow start
        socket.maxWindow = currentPacketSize
        socket.slowStart = true

      # This will make much more sense when we add handling of selective acks,
      # as then every selectively acked packet resets the timeout timer and removes
      # the packet from the out buffer.
      markAllPacketAsLost(socket)

      let oldestPacketSeqNr = socket.seqNr - socket.curWindowPackets
      # resend the oldest packet if there are some packets in flight and the oldest packet was already sent
      if (socket.curWindowPackets > 0 and socket.outBuffer[oldestPacketSeqNr].transmissions > 0):
        inc socket.retransmitCount
        socket.fastTimeout = true

        debug "Resending oldest packet",
          pkSeqNr = oldestPacketSeqNr,
          retransmitCount = socket.retransmitCount,
          curWindowPackets = socket.curWindowPackets

        # Oldest packet should always be present, so it is safe to call force
        # resend
        socket.sendPacket(oldestPacketSeqNr)

    # TODO add sending keep alives when necessary

proc checkTimeoutsLoop(s: UtpSocket) {.async.} =
  ## Loop that checks timeouts in the socket.
  try:
    while true:
      await sleepAsync(checkTimeoutsLoopInterval)
      s.eventQueue.putNoWait(SocketEvent(kind: CheckTimeouts))
  except CancelledError as exc:
    # the check timeouts loop is the last running future managed by the socket;
    # if it's cancelled we can fire closeEvent
    s.closeEvent.fire()
    trace "checkTimeoutsLoop canceled"
    raise exc

proc startTimeoutLoop(s: UtpSocket) =
  s.checkTimeoutsLoop = checkTimeoutsLoop(s)

proc getPacketSize*(socket: UtpSocket): uint32 =
  socket.socketConfig.payloadSize

proc handleDataWrite(socket: UtpSocket, data: seq[byte]): int =
  let pSize = int(socket.getPacketSize())
  let endIndex = data.high()
  var i = 0
  var bytesWritten = 0

  while i <= endIndex:
    let lastIndex = i + pSize - 1
    let lastOrEnd = min(lastIndex, endIndex)
    let dataSlice = data[i..lastOrEnd]
    let payloadLength = uint32(len(dataSlice))
    if (socket.outBufferBytes + payloadLength <= socket.socketConfig.optSndBuffer):
      let wndSize = socket.getRcvWindowSize()
      let dataPacket =
        dataPacket(
          socket.seqNr,
          socket.connectionIdSnd,
          socket.ackNr,
          wndSize,
          dataSlice,
          socket.replayMicro
        )
      let outgoingPacket = OutgoingPacket.init(
        encodePacket(dataPacket), 0, false, payloadLength)
      socket.registerOutgoingPacket(outgoingPacket)
      bytesWritten = bytesWritten + len(dataSlice)
      # TODO: When flushPackets ended early because the send window was full,
      # it keeps trying here again for each dataSlice. Sounds wasteful?
      socket.flushPackets()
    else:
      debug "No more place in write buffer",
        currentBufferSize = socket.outBufferBytes,
        maxBufferSize = socket.socketConfig.optSndBuffer,
        nextPacketSize = payloadLength
      break

    i = lastOrEnd + 1

  return bytesWritten
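
# Worked example (illustrative): with the default `payloadSize` of 488 bytes,
# writing a 1000 byte `data` sequence through `handleDataWrite` is split into
# three data packets carrying 488, 488 and 24 bytes of payload, provided the
# send buffer and send window have room for them.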

proc handleClose(socket: UtpSocket) =
  let finEncoded =
    encodePacket(
      finPacket(
        socket.seqNr,
        socket.connectionIdSnd,
        socket.ackNr,
        socket.getRcvWindowSize(),
        socket.replayMicro
      )
    )
  socket.finSent = true
  socket.registerOutgoingPacket(OutgoingPacket.init(finEncoded, 0, false, 0))
  socket.flushPackets()

proc isConnected*(socket: UtpSocket): bool =
  socket.state == Connected

proc isClosed*(socket: UtpSocket): bool =
  socket.state == Destroy and socket.closeEvent.isSet()

proc isClosedAndCleanedUpAllResources*(socket: UtpSocket): bool =
  ## Test API to check that all resources are properly cleaned up
  socket.isClosed() and socket.eventLoop.cancelled() and socket.checkTimeoutsLoop.cancelled()

proc destroy*(s: UtpSocket) =
  ## Moves the socket to destroy state and cleans up all resources.
  ## The remote is not notified in any way about the socket's end of life.
  debug "Destroying socket", to = s.socketKey
  s.state = Destroy
  s.eventLoop.cancel()
  # This procedure initiates the cleanup process which goes like:
  # Cancel EventLoop -> Cancel timeoutsLoop -> Fire closeEvent
  # This is necessary due to how the eventLoop works, i.e it has only one await
  # point on `eventQueue.get` which triggers the cancellation exception only when
  # someone tries to run `eventQueue.put`. Without `eventQueue.put`, the eventLoop
  # future shows as cancelled, but the handler for CancelledError is not run

proc destroyWait*(s: UtpSocket) {.async.} =
  ## Moves the socket to destroy state, cleans up all resources and waits for all
  ## registered callbacks to fire.
  ## The remote is not notified in any way about the socket's end of life.
  s.destroy()
  await s.closeEvent.wait()
  await allFutures(s.closeCallbacks)

proc setCloseCallback(s: UtpSocket, cb: SocketCloseCallback) {.async.} =
  ## Set callback which will be called whenever the socket is permanently closed
  try:
    await s.closeEvent.wait()
    cb()
  except CancelledError:
    trace "closeCallback cancelled"

proc registerCloseCallback*(s: UtpSocket, cb: SocketCloseCallback) =
  s.closeCallbacks.add(s.setCloseCallback(cb))
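
# Usage sketch (illustrative, not part of the module): a user of the socket can
# get notified when it reaches the `Destroy` state, e.g.:
#
#   socket.registerCloseCallback(proc () =
#     echo "uTP socket was destroyed")
#
# The callback must match `SocketCloseCallback`, i.e. it may not raise.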

proc updateTimeouts(socket: UtpSocket, timeSent: Moment, currentTime: Moment) =
  ## Update timeouts according to spec:
  ## delta = rtt - packet_rtt
  ## rtt_var += (abs(delta) - rtt_var) / 4;
  ## rtt += (packet_rtt - rtt) / 8;

  let packetRtt = currentTime - timeSent

  if (socket.rtt.isZero):
    socket.rtt = packetRtt
    socket.rttVar = packetRtt div 2
  else:
    let packetRttMicro = packetRtt.microseconds()
    let rttVarMicro = socket.rttVar.microseconds()
    let rttMicro = socket.rtt.microseconds()

    let delta = rttMicro - packetRttMicro

    let newVar = microseconds(rttVarMicro + (abs(delta) - rttVarMicro) div 4)
    let newRtt = socket.rtt - (socket.rtt div 8) + (packetRtt div 8)

    socket.rttVar = newVar
    socket.rtt = newRtt

  # according to spec it should be: timeout = max(rtt + rtt_var * 4, 500)
  # but usually the spec lags behind the implementation so milliseconds(1000) is used
  socket.rto = max(socket.rtt + (socket.rttVar * 4), milliseconds(1000))
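
# Worked example (illustrative numbers): assume the current smoothed values are
# rtt = 100ms and rttVar = 20ms, and a newly acked packet measured
# packet_rtt = 60ms. Then:
#   delta  = 100ms - 60ms = 40ms
#   rttVar = 20ms + (|40ms| - 20ms) div 4 = 25ms
#   rtt    = 100ms - 100ms div 8 + 60ms div 8 = 95ms
#   rto    = max(95ms + 4 * 25ms, 1000ms) = 1000ms
# i.e. with these numbers the 1 second lower bound still dominates the rto.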

proc ackPacket(socket: UtpSocket, seqNr: uint16, currentTime: Moment): AckResult =
  let packetOpt = socket.outBuffer.get(seqNr)
  if packetOpt.isSome():
    let packet = packetOpt.get()

    if packet.transmissions == 0:
      # according to the reference impl it can happen when we get an ack_nr that
      # does not exceed what we have stuffed into the outgoing buffer,
      # but does exceed what we have sent
      # TODO analyze if this case can happen with our impl
      return PacketNotSentYet

    socket.outBuffer.delete(seqNr)

    debug "Acked packet (deleted from outgoing buffer)",
      pkSeqNr = seqNr,
      pkTransmissions = packet.transmissions,
      pkNeedResend = packet.needResend

    # from spec: The rtt and rtt_var is only updated for packets that were sent only once.
    # This avoids problems with figuring out which packet was acked, the first or the second one.
    # It is the standard solution to the retransmission ambiguity problem.
    if packet.transmissions == 1:
      socket.updateTimeouts(packet.timeSent, currentTime)

    socket.retransmitTimeout = socket.rto
    socket.rtoTimeout = currentTime + socket.rto

    # if need_resend is set, this packet has already
    # been considered timed-out, and is not included in
    # the cur_window anymore
    if (not packet.needResend):
      doAssert(socket.currentWindow >= packet.payloadLength, "Window should always be larger than packet length")
      socket.currentWindow = socket.currentWindow - packet.payloadLength

    # we removed the packet from our outgoing buffer
    socket.outBufferBytes = socket.outBufferBytes - packet.payloadLength

    socket.retransmitCount = 0
    PacketAcked
  else:
    debug "Tried to ack packet which was already acked or not sent yet"
    # the packet has already been acked (or not sent)
    PacketAlreadyAcked

proc ackPackets(socket: UtpSocket, nrPacketsToAck: uint16, currentTime: Moment) =
  ## Ack packets in the outgoing buffer based on the ack number in the received packet
  var i = 0
  while i < int(nrPacketsToAck):
    let result = socket.ackPacket(socket.seqNr - socket.curWindowPackets, currentTime)
    case result
    of PacketAcked:
      dec socket.curWindowPackets
    of PacketAlreadyAcked:
      dec socket.curWindowPackets
    of PacketNotSentYet:
      debug "Tried to ack packet which was not sent yet"
      break

    inc i

proc calculateAckedbytes(socket: UtpSocket, nrPacketsToAck: uint16, now: Moment): (uint32, Duration) =
  var i: uint16 = 0
  var ackedBytes: uint32 = 0
  var minRtt: Duration = InfiniteDuration
  while i < nrPacketsToAck:
    let seqNr = socket.seqNr - socket.curWindowPackets + i
    let packetOpt = socket.outBuffer.get(seqNr)
    if (packetOpt.isSome() and packetOpt.unsafeGet().transmissions > 0):
      let packet = packetOpt.unsafeGet()

      ackedBytes = ackedBytes + packet.payloadLength

      # safety check in case the clock is not monotonic
      if packet.timeSent < now:
        minRtt = min(minRtt, now - packet.timeSent)
      else:
        minRtt = min(minRtt, microseconds(50000))

    inc i
  (ackedBytes, minRtt)

proc initializeAckNr(socket: UtpSocket, packetSeqNr: uint16) =
  if (socket.state == SynSent):
    # Different from the uTP spec but in accordance with libutp and libtorrent.
    # When receiving the ACK of a SYN packet, the socket ackNr gets initialized
    # as the packet seqNr - 1. This way, the socket ackNr is set up as one less
    # than the next seqNr of an incoming DATA packet. The seqNr in STATE packets
    # should basically be seen as the seqNr for the next DATA or FIN packet.
    # See also:
    # - libutp: https://github.com/bittorrent/libutp/blob/master/utp_internal.cpp#L1874
    # - libtorrent: https://github.com/arvidn/libtorrent/blob/RC_2_0/src/utp_stream.cpp#L2924
    socket.ackNr = packetSeqNr - 1

proc isAckNrInvalid(socket: UtpSocket, packet: Packet): bool =
  let ackWindow = max(socket.curWindowPackets + allowedAckWindow, allowedAckWindow)
  (
    (packet.header.pType != ST_SYN or socket.state != SynRecv) and
    (
      # the packet ack number must be smaller than our last sent packet i.e
      # the remote should not ack packets from the future
      wrapCompareLess(socket.seqNr - 1, packet.header.ackNr) or
      # the packet ack number should not be too old
      wrapCompareLess(packet.header.ackNr, socket.seqNr - 1 - ackWindow)
    )
  )
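
# Worked example (illustrative): with socket.seqNr = 5, curWindowPackets = 2 and
# allowedAckWindow = 3 the ack window is max(2 + 3, 3) = 5, so (ignoring the
# SYN/SynRecv exception above) any ackNr outside the wrapping range
# [65535 .. 4] (i.e. seqNr - 1 - 5 .. seqNr - 1) is treated as invalid.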

# counts the number of bytes acked by the selective ack header
proc calculateSelectiveAckBytes*(socket: UtpSocket, receivedPackedAckNr: uint16, ext: SelectiveAckExtension): uint32 =
  # we add 2, as the first bit in the mask represents ackNr + 2, because
  # ackNr + 1 (i.e the next expected packet) is considered lost.
  let base = receivedPackedAckNr + 2

  if socket.curWindowPackets == 0:
    return 0

  var ackedBytes = 0'u32

  var bits = (len(ext.acks)) * 8 - 1

  while bits >= 0:
    let v = base + uint16(bits)

    if (socket.seqNr - v - 1) >= socket.curWindowPackets - 1:
      dec bits
      continue

    let maybePacket = socket.outBuffer.get(v)

    if (maybePacket.isNone() or maybePacket.unsafeGet().transmissions == 0):
      dec bits
      continue

    let pkt = maybePacket.unsafeGet()

    if (getBit(ext.acks, bits)):
      ackedBytes = ackedBytes + pkt.payloadLength

    dec bits

  return ackedBytes

# decays maxWindow size by half if the time is right i.e it is at least 100ms
# since the last window decay
proc tryDecayWindow(socket: UtpSocket, now: Moment) =
  if (now - socket.lastWindowDecay >= maxWindowDecay):
    socket.lastWindowDecay = now
    let newMaxWindow = max(uint32(0.5 * float64(socket.maxWindow)), uint32(minWindowSize))

    debug "Decaying maxWindow",
      oldWindow = socket.maxWindow,
      newWindow = newMaxWindow

    socket.maxWindow = newMaxWindow
    socket.slowStart = false
    socket.slowStartThreshold = newMaxWindow

# ack packets (removes them from the outgoing buffer) based on the selective ack extension header
proc selectiveAckPackets(socket: UtpSocket, receivedPackedAckNr: uint16, ext: SelectiveAckExtension, currentTime: Moment): void =
  # we add 2, as the first bit in the mask represents ackNr + 2, because
  # ackNr + 1 (i.e the next expected packet) is considered lost.
  let base = receivedPackedAckNr + 2

  if socket.curWindowPackets == 0:
    return

  var bits = (len(ext.acks)) * 8 - 1

  # number of packets acked by this selective ack, it also works as a duplicate ack
  # counter.
  # from spec: Each packet that is acked in the selective ack message counts as one duplicate ack
  var counter = 0

  # sequence numbers of packets which should be resent
  var resends: seq[uint16] = @[]

  while bits >= 0:
    let v = base + uint16(bits)

    if (socket.seqNr - v - 1) >= socket.curWindowPackets - 1:
      dec bits
      continue

    let bitSet: bool = getBit(ext.acks, bits)

    if bitSet:
      inc counter

    let maybePacket = socket.outBuffer.get(v)

    if (maybePacket.isNone() or maybePacket.unsafeGet().transmissions == 0):
      dec bits
      continue

    let pkt = maybePacket.unsafeGet()

    if bitSet:
      debug "Packet acked by selective ack",
        pkSeqNr = v
      discard socket.ackPacket(v, currentTime)
      dec bits
      continue

    if counter >= duplicateAcksBeforeResend and (v - socket.fastResendSeqNr) <= reorderBufferMaxSize:
      debug "No ack for packet",
        pkAckNr = v,
        dupAckCounter = counter,
        fastResSeqNr = socket.fastResendSeqNr
      resends.add(v)

    dec bits

  let nextExpectedPacketSeqNr = base - 1'u16
  # if we are about to start resending, the first packet should be the first unacked packet
  # i.e base - 1
  if counter >= duplicateAcksBeforeResend and (nextExpectedPacketSeqNr - socket.fastResendSeqNr) <= reorderBufferMaxSize:
    debug "No ack for packet",
      pkAckNr = nextExpectedPacketSeqNr,
      dupAckCounter = counter,
      fastResSeqNr = socket.fastResendSeqNr
    resends.add(nextExpectedPacketSeqNr)

  var i = high(resends)
  var registerLoss: bool = false
  var packetsSent = 0
  while i >= 0:
    let seqNrToResend: uint16 = resends[i]

    let maybePkt = socket.outBuffer.get(seqNrToResend)

    if maybePkt.isNone():
      # packet is no longer in the send buffer, skip any further processing for it
      dec i
      continue

    registerLoss = true
    # it is safe to call as we already checked that the packet is in the send buffer

    socket.sendPacket(seqNrToResend)
    socket.fastResendSeqNr = seqNrToResend + 1

    debug "Resent packet",
      pkSeqNr = seqNrToResend,
      fastResendSeqNr = socket.fastResendSeqNr

    inc packetsSent

    # resend max 4 packets, this is not defined in the spec but the reference impl has
    # that check
    if packetsSent >= 4:
      break

    dec i

  if registerLoss:
    socket.tryDecayWindow(Moment.now())

  socket.duplicateAck = uint16(counter)

# Public mainly for test purposes
# generates a bit mask which indicates which packets are already in the socket
# reorder buffer
# from spec:
# The bitmask has reverse byte order. The first byte represents packets [ack_nr + 2, ack_nr + 2 + 7] in reverse order
# The least significant bit in the byte represents ack_nr + 2, the most significant bit in the byte represents ack_nr + 2 + 7
# The next byte in the mask represents [ack_nr + 2 + 8, ack_nr + 2 + 15] in reverse order, and so on
proc generateSelectiveAckBitMask*(socket: UtpSocket): array[4, byte] =
  let window = min(32, socket.inBuffer.len())
  var arr: array[4, uint8] = [0'u8, 0, 0, 0]
  var i = 0
  while i < window:
    if (socket.inBuffer.get(socket.ackNr + uint16(i) + 2).isSome()):
      setBit(arr, i)
    inc i
  return arr
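
# Worked example (illustrative): assume socket.ackNr = 10 and the reorder buffer
# currently holds packets with seqNr 12 and 14. Bit 0 of the mask corresponds to
# ackNr + 2 = 12 and bit 2 to ackNr + 4 = 14, so the first byte of the returned
# mask is 0b0000_0101 and the remaining bytes are 0.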
|
|
|
|
|
|
|
|
# Generates ack packet based on current state of the socket.
|
2022-01-10 12:49:36 +00:00
|
|
|
proc generateAckPacket*(socket: UtpSocket): Packet =
|
2022-01-04 08:52:38 +00:00
|
|
|
let bitmask =
|
|
|
|
if (socket.reorderCount != 0 and (not socket.reachedFin)):
|
|
|
|
some(socket.generateSelectiveAckBitMask())
|
|
|
|
else:
|
|
|
|
none[array[4, byte]]()
|
|
|
|
|
2022-02-10 07:05:44 +00:00
|
|
|
let bufferSize = socket.getRcvWindowSize()
|
|
|
|
|
2022-01-04 08:52:38 +00:00
|
|
|
ackPacket(
|
|
|
|
socket.seqNr,
|
|
|
|
socket.connectionIdSnd,
|
|
|
|
socket.ackNr,
|
2022-02-10 07:05:44 +00:00
|
|
|
bufferSize,
|
2022-01-04 08:52:38 +00:00
|
|
|
socket.replayMicro,
|
|
|
|
bitmask
|
|
|
|
)
|
|
|
|
|
2022-04-12 19:11:01 +00:00
|
|
|
proc sendAck(socket: UtpSocket) =
|
2023-04-04 15:11:36 +00:00
|
|
|
## Creates and sends ack, based on current socket state. Acks are different
|
|
|
|
## from other packets as we do not track them in outgoing buffer.
|
2022-01-04 08:52:38 +00:00
|
|
|
let ackPacket = socket.generateAckPacket()
|
2022-01-20 12:20:30 +00:00
|
|
|
|
|
|
|
debug "Sending STATE packet",
|
|
|
|
pkSeqNr = ackPacket.header.seqNr,
|
|
|
|
pkAckNr = ackPacket.header.ackNr,
|
|
|
|
gotEACK = ackPacket.eack.isSome()
|
|
|
|
|
2022-01-04 08:52:38 +00:00
|
|
|
socket.sendData(encodePacket(ackPacket))
|
|
|
|
|
2022-03-18 07:13:17 +00:00
|
|
|
|
|
|
|
proc tryfinalizeConnection(socket: UtpSocket, p: Packet) =
|
|
|
|
# To avoid amplification attacks, server socket is in SynRecv state until
|
2022-11-16 16:44:00 +00:00
|
|
|
# it receives first data transfer
|
2022-03-18 07:13:17 +00:00
|
|
|
# https://www.usenix.org/system/files/conference/woot15/woot15-paper-adamsky.pdf
|
|
|
|
# Socket is in SynRecv state only when recv timeout is configured
|
|
|
|
if (socket.state == SynRecv and p.header.pType == ST_DATA):
|
|
|
|
socket.state = Connected
|
|
|
|
|
|
|
|
if (socket.state == SynSent and p.header.pType == ST_STATE):
|
|
|
|
socket.state = Connected
|
|
|
|
socket.ackNr = p.header.seqNr - 1
|
|
|
|
|
|
|
|
debug "Received Syn-Ack finalizing connection",
|
2022-03-18 14:53:51 +00:00
|
|
|
socketAckNr = socket.ackNr
|
2022-03-18 07:13:17 +00:00
|
|
|
|
|
|
|
if (not socket.connectionFuture.finished()):
|
|
|
|
socket.connectionFuture.complete()
|
|
|
|
|
2023-04-04 15:11:36 +00:00
|
|
|
# TODO: at socket level we should handle only FIN/DATA/ACK packets. Refactor to
|
|
|
|
# make it enforceable by type system
|
2022-02-24 17:22:44 +00:00
|
|
|
proc processPacketInternal(socket: UtpSocket, p: Packet) =
|
2022-01-10 12:49:36 +00:00
|
|
|
debug "Process packet",
|
2022-01-07 09:38:19 +00:00
|
|
|
socketKey = socket.socketKey,
|
2022-01-20 08:22:53 +00:00
|
|
|
socketAckNr = socket.ackNr,
|
|
|
|
socketSeqNr = socket.seqNr,
|
2022-01-20 12:20:30 +00:00
|
|
|
windowPackets = socket.curWindowPackets,
|
2022-03-03 21:38:13 +00:00
|
|
|
rcvBufferSize = socket.offset,
|
2022-01-10 12:49:36 +00:00
|
|
|
packetType = p.header.pType,
|
2022-01-07 09:38:19 +00:00
|
|
|
seqNr = p.header.seqNr,
|
|
|
|
ackNr = p.header.ackNr,
|
|
|
|
timestamp = p.header.timestamp,
|
2022-02-10 07:05:44 +00:00
|
|
|
timestampDiff = p.header.timestampDiff,
|
|
|
|
remoteWindow = p.header.wndSize
|
2022-01-07 09:38:19 +00:00
|
|
|
|
2021-12-10 15:28:00 +00:00
|
|
|
let timestampInfo = getMonoTimestamp()
|
2022-01-10 12:49:36 +00:00
|
|
|
|
2021-11-15 10:32:00 +00:00
|
|
|
if socket.isAckNrInvalid(p):
|
2022-01-07 09:38:19 +00:00
|
|
|
debug "Received packet with invalid ack number",
|
|
|
|
ackNr = p.header.ackNr,
|
|
|
|
localSeqNr = socket.seqNr,
|
|
|
|
lastUnacked = socket.seqNr - socket.curWindowPackets
|
|
|
|
|
2021-11-15 10:32:00 +00:00
|
|
|
return
|
|
|
|
|
2021-10-28 09:41:43 +00:00
|
|
|
## Updates socket state based on received packet, and sends ack when necessary.
|
2022-11-16 16:44:00 +00:00
|
|
|
## Should be called in main packet receiving loop
|
2021-10-28 09:41:43 +00:00
|
|
|
let pkSeqNr = p.header.seqNr
|
|
|
|
let pkAckNr = p.header.ackNr
|
2021-11-04 06:38:46 +00:00
|
|
|
|
|
|
|
socket.initializeAckNr(pkSeqNr)
|
|
|
|
|
|
|
|
# number of packets past the expected
|
|
|
|
# ack_nr is the last acked, seq_nr is the
|
2022-11-16 16:44:00 +00:00
|
|
|
# current. Subtracting 1 makes 0 mean "this is the next expected packet"
|
2021-11-04 06:38:46 +00:00
|
|
|
let pastExpected = pkSeqNr - socket.ackNr - 1
|
|
|
|
|
|
|
|
  # acks is the number of packets that were acked by this packet. In the normal
  # case (no selective acks, no losses, no resends) it will usually be equal to 1.
  # We can calculate it here, and not only for ST_STATE packets, as each utp
  # packet carries info about the remote side's last acked packet.
  var acks = pkAckNr - (socket.seqNr - 1 - socket.curWindowPackets)
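  # Illustrative sketch with hypothetical values: if socket.seqNr = 105 and
  # socket.curWindowPackets = 4, the oldest unacked packet has seqNr 101, so an
  # incoming pkAckNr = 101 yields acks = 101 - (105 - 1 - 4) = 1, i.e. exactly
  # one outstanding packet got acked by this packet.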
|
|
|
|
|
|
|
|
  if acks > socket.curWindowPackets:
    # this case happens if we have already received this ack nr
    acks = 0
|
2021-12-20 12:14:50 +00:00
|
|
|
|
2022-01-27 10:07:40 +00:00
|
|
|
# rationale from c reference impl:
|
|
|
|
# if we get the same ack_nr as in the last packet
|
|
|
|
# increase the duplicate_ack counter, otherwise reset
|
|
|
|
# it to 0.
|
|
|
|
# It's important to only count ACKs in ST_STATE packets. Any other
|
|
|
|
# packet (primarily ST_DATA) is likely to have been sent because of the
|
|
|
|
# other end having new outgoing data, not in response to incoming data.
|
|
|
|
# For instance, if we're receiving a steady stream of payload with no
|
|
|
|
  # outgoing data, and we suddenly have a few bytes of payload to send (say,
|
|
|
|
# a bittorrent HAVE message), we're very likely to see 3 duplicate ACKs
|
|
|
|
# immediately after sending our payload packet. This effectively disables
|
|
|
|
# the fast-resend on duplicate-ack logic for bi-directional connections
|
|
|
|
# (except in the case of a selective ACK). This is in line with BSD4.4 TCP
|
|
|
|
# implementation.
|
2022-04-12 19:11:01 +00:00
|
|
|
if socket.curWindowPackets > 0 and
|
|
|
|
pkAckNr == socket.seqNr - socket.curWindowPackets - 1 and
|
2022-01-27 10:07:40 +00:00
|
|
|
p.header.pType == ST_STATE:
|
|
|
|
inc socket.duplicateAck
|
|
|
|
|
2022-11-16 16:44:00 +00:00
|
|
|
debug "Received duplicated ack",
|
2022-01-27 10:07:40 +00:00
|
|
|
pkAckNr = pkAckNr,
|
2022-11-16 16:44:00 +00:00
|
|
|
duplicateAckCounter = socket.duplicateAck
|
2022-01-27 10:07:40 +00:00
|
|
|
else:
|
|
|
|
socket.duplicateAck = 0
|
|
|
|
  # The spec says that when the duplicate ack counter gets larger than
  # duplicateAcksBeforeResend we should re-send the oldest packet. The reference
  # implementation, on the other hand, has this code path commented out with a
  # todo. To stay as close as possible to the reference impl we currently do not
  # resend packets in that case.
|
2022-01-27 10:07:40 +00:00
|
|
|
|
2022-01-20 12:20:30 +00:00
|
|
|
debug "Packet state variables",
|
|
|
|
pastExpected = pastExpected,
|
|
|
|
acks = acks
|
|
|
|
|
2022-11-16 16:44:00 +00:00
|
|
|
# If packet is totally off the mark, short-circuit the processing
|
2021-11-04 06:38:46 +00:00
|
|
|
if pastExpected >= reorderBufferMaxSize:
|
2022-01-20 08:22:53 +00:00
|
|
|
|
|
|
|
    # If `pastExpected` is a really big number (for example: uint16.high) then
    # most probably we are receiving a packet which we have already received.
    # Example: we already received the packet with `seqNr = 10`, so
    # `socket.ackNr = 10`. If we receive this packet once again then
    # `pastExpected = 10 - 10 - 1`, which (due to wrapping) equals 65535.
    # This means that the remote most probably did not receive our ack, so we
    # need to resend it. We do this for the last `reorderBufferMaxSize` packets.
    let isPossibleDuplicatedOldPacket =
      pastExpected >= (int(uint16.high) + 1) - reorderBufferMaxSize
|
|
|
|
|
|
|
|
if (isPossibleDuplicatedOldPacket and p.header.pType != ST_STATE):
|
2022-02-24 17:22:44 +00:00
|
|
|
socket.sendAck()
|
2022-01-20 08:22:53 +00:00
|
|
|
|
2022-01-07 09:38:19 +00:00
|
|
|
debug "Got an invalid packet sequence number, too far off",
|
|
|
|
pastExpected = pastExpected
|
2021-11-04 06:38:46 +00:00
|
|
|
return
|
|
|
|
|
2021-12-10 15:28:00 +00:00
|
|
|
var (ackedBytes, minRtt) = socket.calculateAckedbytes(acks, timestampInfo.moment)
|
2021-12-09 09:52:21 +00:00
|
|
|
|
2022-01-20 12:20:30 +00:00
|
|
|
debug "Bytes acked by classic ack",
|
|
|
|
bytesAcked = ackedBytes
|
2022-04-12 19:11:01 +00:00
|
|
|
|
2022-01-04 08:52:38 +00:00
|
|
|
if (p.eack.isSome()):
|
|
|
|
let selectiveAckedBytes = socket.calculateSelectiveAckBytes(pkAckNr, p.eack.unsafeGet())
|
2022-01-20 12:20:30 +00:00
|
|
|
debug "Bytes acked by selective ack",
|
2022-01-07 09:38:19 +00:00
|
|
|
bytesAcked = selectiveAckedBytes
|
2022-01-04 08:52:38 +00:00
|
|
|
ackedBytes = ackedBytes + selectiveAckedBytes
|
|
|
|
|
2021-12-09 09:52:21 +00:00
|
|
|
let sentTimeRemote = p.header.timestamp
|
2021-12-20 12:14:50 +00:00
|
|
|
|
|
|
|
  # We are using uint32 and not a Duration in order to wrap around in case of
  # sentTimeRemote > receiveTimestamp. This can happen as the local and remote
  # clocks may not be synchronized, or may even use different system clocks.
  # I.e. this number by itself does not tell us anything; it is only fed back
  # to the remote peer with each sent packet.
|
2021-12-20 12:14:50 +00:00
|
|
|
let remoteDelay =
|
2021-12-09 09:52:21 +00:00
|
|
|
if (sentTimeRemote == 0):
|
|
|
|
0'u32
|
|
|
|
else:
|
2021-12-10 15:28:00 +00:00
|
|
|
timestampInfo.timestamp - sentTimeRemote
|
2021-12-09 09:52:21 +00:00
|
|
|
|
|
|
|
socket.replayMicro = remoteDelay
|
|
|
|
|
|
|
|
let prevRemoteDelayBase = socket.remoteHistogram.delayBase
|
|
|
|
|
|
|
|
if (remoteDelay != 0):
|
2021-12-10 15:28:00 +00:00
|
|
|
socket.remoteHistogram.addSample(remoteDelay, timestampInfo.moment)
|
2021-12-09 09:52:21 +00:00
|
|
|
|
|
|
|
  # The remote's new delay base is less than the previous one: shift our delay
  # base in the other direction to take clock skew into account, but by no more
  # than 10ms.
|
2021-12-20 12:14:50 +00:00
|
|
|
if (prevRemoteDelayBase != 0 and
|
|
|
|
wrapCompareLess(socket.remoteHistogram.delayBase, prevRemoteDelayBase) and
|
2021-12-09 09:52:21 +00:00
|
|
|
prevRemoteDelayBase - socket.remoteHistogram.delayBase <= 10000'u32):
|
|
|
|
socket.ourHistogram.shift(prevRemoteDelayBase - socket.remoteHistogram.delayBase)
|
|
|
|
|
|
|
|
let actualDelay = p.header.timestampDiff
|
|
|
|
|
|
|
|
if actualDelay != 0:
|
2021-12-10 15:28:00 +00:00
|
|
|
socket.ourHistogram.addSample(actualDelay, timestampInfo.moment)
|
|
|
|
socket.driftCalculator.addSample(actualDelay, timestampInfo.moment)
|
2021-12-09 09:52:21 +00:00
|
|
|
|
|
|
|
  # adjust base delay if the delay estimate exceeds the rtt
|
|
|
|
if (socket.ourHistogram.getValue() > minRtt):
|
|
|
|
let diff = uint32((socket.ourHistogram.getValue() - minRtt).microseconds())
|
2021-12-20 12:14:50 +00:00
|
|
|
socket.ourHistogram.shift(diff)
|
2021-12-09 09:52:21 +00:00
|
|
|
|
2022-04-04 11:44:32 +00:00
|
|
|
let currentPacketSize = socket.getPacketSize()
|
2022-11-16 16:44:00 +00:00
|
|
|
let (newMaxWindow, newSlowStartThreshold, newSlowStart) =
|
2021-12-09 09:52:21 +00:00
|
|
|
applyCongestionControl(
|
2022-02-24 17:22:44 +00:00
|
|
|
socket.maxWindow,
|
2021-12-09 09:52:21 +00:00
|
|
|
socket.slowStart,
|
2022-11-16 16:44:00 +00:00
|
|
|
socket.slowStartThreshold,
|
2021-12-09 09:52:21 +00:00
|
|
|
socket.socketConfig.optSndBuffer,
|
2022-02-10 07:05:44 +00:00
|
|
|
currentPacketSize,
|
2021-12-09 09:52:21 +00:00
|
|
|
microseconds(actualDelay),
|
|
|
|
ackedBytes,
|
|
|
|
minRtt,
|
|
|
|
socket.ourHistogram.getValue(),
|
|
|
|
socket.driftCalculator.clockDrift
|
|
|
|
)
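  # For intuition, a minimal LEDBAT-style window update (as described in
  # RFC 6817) scales the congestion window by how far the measured queuing
  # delay is from a fixed target. This is only a sketch of the general idea
  # with illustrative names; the actual rules used here live in
  # ledbat_congestion_control.applyCongestionControl:
  #   offTarget  = (targetDelay - queuingDelay) / targetDelay
  #   maxWindow += gain * offTarget * bytesAcked * packetSize / maxWindow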
|
|
|
|
|
|
|
|
# update remote window size and max window
|
2022-02-24 17:22:44 +00:00
|
|
|
socket.maxWindow = newMaxWindow
|
|
|
|
socket.maxRemoteWindow = p.header.wndSize
|
2021-12-09 09:52:21 +00:00
|
|
|
socket.slowStart = newSlowStart
|
2022-11-16 16:44:00 +00:00
|
|
|
socket.slowStartThreshold = newSlowStartThreshold
|
2021-12-09 09:52:21 +00:00
|
|
|
|
2022-01-07 09:38:19 +00:00
|
|
|
debug "Applied ledbat congestion controller",
|
|
|
|
maxWindow = newMaxWindow,
|
|
|
|
remoteWindow = p.header.wndSize,
|
2022-11-16 16:44:00 +00:00
|
|
|
slowStartThreshold = newSlowStartThreshold,
|
2022-01-07 09:38:19 +00:00
|
|
|
slowstart = newSlowStart
|
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
if (socket.zeroWindowTimer.isNone() and socket.maxRemoteWindow <= currentPacketSize):
|
2021-12-02 14:46:18 +00:00
|
|
|
    # When the zeroWindowTimer fires and maxRemoteWindow is still equal to 0,
    # it will be reset to a minimal value.
|
2022-02-10 07:05:44 +00:00
|
|
|
socket.zeroWindowTimer = some(timestampInfo.moment + socket.socketConfig.remoteWindowResetTimeout)
|
2021-12-20 12:14:50 +00:00
|
|
|
|
2022-02-10 07:05:44 +00:00
|
|
|
debug "Remote window size dropped below packet size",
|
2022-01-07 09:38:19 +00:00
|
|
|
currentTime = timestampInfo.moment,
|
2022-02-10 07:05:44 +00:00
|
|
|
resetZeroWindowTime = socket.zeroWindowTimer,
|
|
|
|
currentPacketSize = currentPacketSize
|
2022-01-07 09:38:19 +00:00
|
|
|
|
2022-03-18 07:13:17 +00:00
|
|
|
socket.tryfinalizeConnection(p)
|
|
|
|
|
2021-11-09 14:29:59 +00:00
|
|
|
# socket.curWindowPackets == acks means that this packet acked all remaining packets
|
|
|
|
# including the sent fin packets
|
|
|
|
if (socket.finSent and socket.curWindowPackets == acks):
|
2022-01-07 09:38:19 +00:00
|
|
|
debug "FIN acked, destroying socket"
|
2021-11-09 14:29:59 +00:00
|
|
|
socket.finAcked = true
|
|
|
|
    # This bit of the utp spec is under specified (i.e. there is no
    # specification at all). The reference implementation moves the socket to
    # the destroy state when our fin was acked, and the socket is considered
    # closed for reading and writing. In theory the remote could still write
    # some data on this socket (or even its own fin).
|
|
|
|
socket.destroy()
|
|
|
|
|
2022-01-26 08:49:34 +00:00
|
|
|
# Update fast resend counter to avoid resending old packet twice
|
|
|
|
if wrapCompareLess(socket.fastResendSeqNr, pkAckNr + 1):
|
|
|
|
socket.fastResendSeqNr = pkAckNr + 1
|
|
|
|
|
2021-12-10 15:28:00 +00:00
|
|
|
socket.ackPackets(acks, timestampInfo.moment)
|
2021-12-20 12:14:50 +00:00
|
|
|
|
2022-01-04 08:52:38 +00:00
|
|
|
# packets in front may have been acked by selective ack, decrease window until we hit
|
|
|
|
# a packet that is still waiting to be acked
|
|
|
|
while (socket.curWindowPackets > 0 and socket.outBuffer.get(socket.seqNr - socket.curWindowPackets).isNone()):
|
|
|
|
dec socket.curWindowPackets
|
2022-01-20 12:20:30 +00:00
|
|
|
debug "Packet in front hase been acked by selective ack. Decrese window",
|
|
|
|
windowPackets = socket.curWindowPackets
|
2022-01-04 08:52:38 +00:00
|
|
|
|
2022-01-26 08:49:34 +00:00
|
|
|
# fast timeout
|
|
|
|
if socket.fastTimeout:
|
|
|
|
let oldestOutstandingPktSeqNr = socket.seqNr - socket.curWindowPackets
|
|
|
|
|
|
|
|
debug "Hit fast timeout re-send",
|
|
|
|
curWindowPackets = socket.curWindowPackets,
|
|
|
|
      oldestPkSeqNr = oldestOutstandingPktSeqNr,
|
|
|
|
fastResendSeqNr = socket.fastResendSeqNr
|
|
|
|
|
|
|
|
|
|
|
|
if oldestOutstandingPktSeqNr != socket.fastResendSeqNr:
|
|
|
|
      # fastResendSeqNr does not point to the oldest unacked packet; we probably
      # already resent the packet that timed out. Leave fast timeout mode.
|
|
|
|
socket.fastTimeout = false
|
|
|
|
else:
|
|
|
|
let shouldReSendPacket = socket.outBuffer.exists(oldestOutstandingPktSeqNr, (p: OutgoingPacket) => p.transmissions > 0)
|
|
|
|
if shouldReSendPacket:
|
|
|
|
debug "Packet fast timeout resend",
|
|
|
|
pkSeqNr = oldestOutstandingPktSeqNr
|
|
|
|
|
|
|
|
inc socket.fastResendSeqNr
|
2022-04-12 19:11:01 +00:00
|
|
|
|
2022-01-26 08:49:34 +00:00
|
|
|
        # It is safe to force a resend here, as we already checked the
        # shouldReSendPacket condition.
|
2022-02-24 17:22:44 +00:00
|
|
|
socket.sendPacket(oldestOutstandingPktSeqNr)
|
2022-04-12 19:11:01 +00:00
|
|
|
|
2022-01-04 08:52:38 +00:00
|
|
|
if (p.eack.isSome()):
|
|
|
|
socket.selectiveAckPackets(pkAckNr, p.eack.unsafeGet(), timestampInfo.moment)
|
|
|
|
|
2022-03-18 07:13:17 +00:00
|
|
|
if p.header.pType == ST_DATA or p.header.pType == ST_FIN:
|
|
|
|
if socket.state != Connected:
|
|
|
|
debug "Unexpected packet",
|
|
|
|
socketState = socket.state,
|
|
|
|
packetType = p.header.pType
|
|
|
|
|
|
|
|
      # We have received a user-generated packet (DATA or FIN) while not in the
      # Connected state. Stop processing it.
|
|
|
|
return
|
|
|
|
|
|
|
|
if (p.header.pType == ST_FIN and (not socket.gotFin)):
|
|
|
|
debug "Received FIN packet",
|
|
|
|
eofPktNr = pkSeqNr,
|
|
|
|
curAckNr = socket.ackNr
|
|
|
|
|
|
|
|
socket.gotFin = true
|
|
|
|
socket.eofPktNr = pkSeqNr
|
|
|
|
|
|
|
|
# we got in order packet
|
|
|
|
if (pastExpected == 0 and (not socket.reachedFin)):
|
|
|
|
debug "Received in order packet"
|
|
|
|
let payloadLength = len(p.payload)
|
|
|
|
if (payloadLength > 0 and (not socket.readShutdown)):
|
|
|
|
# we need to sum both rcv buffer and reorder buffer
|
|
|
|
if (uint32(socket.offset) + socket.inBufferBytes + uint32(payloadLength) > socket.socketConfig.optRcvBuffer):
|
|
|
|
          # Even though the packet is in order and passes all the checks, it
          # would overflow our receive buffer. This means that we are receiving
          # data faster than we are reading it. Do not ack this packet and drop
          # the received data.
          debug "Received packet would overflow receive buffer, dropping it",
|
|
|
|
pkSeqNr = p.header.seqNr,
|
|
|
|
bytesReceived = payloadLength,
|
|
|
|
rcvbufferSize = socket.offset,
|
|
|
|
reorderBufferSize = socket.inBufferBytes
|
|
|
|
return
|
|
|
|
|
|
|
|
debug "Received data packet",
|
|
|
|
bytesReceived = payloadLength
|
|
|
|
# we are getting in order data packet, we can flush data directly to the incoming buffer
|
|
|
|
# await upload(addr socket.buffer, unsafeAddr p.payload[0], p.payload.len())
|
|
|
|
moveMem(addr socket.rcvBuffer[socket.offset], unsafeAddr p.payload[0], payloadLength)
|
|
|
|
socket.offset = socket.offset + payloadLength
|
2022-04-12 19:11:01 +00:00
|
|
|
|
2022-03-18 07:13:17 +00:00
|
|
|
# Bytes have been passed to upper layer, we can increase number of last
|
|
|
|
# acked packet
|
|
|
|
inc socket.ackNr
|
|
|
|
|
|
|
|
# check if the following packets are in reorder buffer
|
|
|
|
|
|
|
|
debug "Looking for packets in re-order buffer",
|
|
|
|
reorderCount = socket.reorderCount
|
|
|
|
|
|
|
|
while true:
|
2022-11-16 16:44:00 +00:00
|
|
|
# We are doing this in reorder loop, to handle the case when we already received
|
2022-03-18 07:13:17 +00:00
|
|
|
# fin but there were some gaps before eof
|
|
|
|
# we have reached remote eof, and should not receive more packets from remote
|
|
|
|
if ((not socket.reachedFin) and socket.gotFin and socket.eofPktNr == socket.ackNr):
|
|
|
|
debug "Reached socket EOF"
|
|
|
|
          # In case of reaching eof, it is up to the user of the library what to
          # do with it. With the current implementation, the most appropriate
          # way would be to destroy the socket (as with our implementation we
          # know that the remote is destroying its acked fin), as any other send
          # will either generate a timeout, or the socket will be forcefully
          # closed by a reset.
|
|
|
|
socket.reachedFin = true
|
|
|
|
# this is not necessarily true, but as we have already reached eof we can
|
|
|
|
# ignore following packets
|
|
|
|
socket.reorderCount = 0
|
|
|
|
|
|
|
|
if socket.reorderCount == 0:
|
|
|
|
break
|
|
|
|
|
|
|
|
let nextPacketNum = socket.ackNr + 1
|
|
|
|
|
|
|
|
let maybePacket = socket.inBuffer.get(nextPacketNum)
|
|
|
|
|
|
|
|
if maybePacket.isNone():
|
|
|
|
break
|
|
|
|
|
|
|
|
let packet = maybePacket.unsafeGet()
|
|
|
|
let reorderPacketPayloadLength = len(packet.payload)
|
|
|
|
|
|
|
|
if (reorderPacketPayloadLength > 0 and (not socket.readShutdown)):
|
|
|
|
debug "Got packet from reorder buffer",
|
|
|
|
packetBytes = len(packet.payload),
|
|
|
|
packetSeqNr = packet.header.seqNr,
|
|
|
|
packetAckNr = packet.header.ackNr,
|
|
|
|
socketSeqNr = socket.seqNr,
|
2022-11-16 16:44:00 +00:00
|
|
|
socketAckNr = socket.ackNr,
|
2022-03-18 07:13:17 +00:00
|
|
|
rcvbufferSize = socket.offset,
|
|
|
|
reorderBufferSize = socket.inBufferBytes
|
2022-04-12 19:11:01 +00:00
|
|
|
|
|
|
|
# Rcv buffer and reorder buffer are sized that it is always possible to
|
2022-03-18 07:13:17 +00:00
|
|
|
# move data from reorder buffer to rcv buffer without overflow
|
|
|
|
moveMem(addr socket.rcvBuffer[socket.offset], unsafeAddr packet.payload[0], reorderPacketPayloadLength)
|
|
|
|
socket.offset = socket.offset + reorderPacketPayloadLength
|
|
|
|
|
|
|
|
debug "Deleting packet",
|
|
|
|
seqNr = nextPacketNum
|
|
|
|
|
|
|
|
socket.inBuffer.delete(nextPacketNum)
|
2021-10-28 09:41:43 +00:00
|
|
|
inc socket.ackNr
|
2022-03-18 07:13:17 +00:00
|
|
|
dec socket.reorderCount
|
|
|
|
socket.inBufferBytes = socket.inBufferBytes - uint32(reorderPacketPayloadLength)
|
2021-10-28 09:41:43 +00:00
|
|
|
|
2022-03-18 07:13:17 +00:00
|
|
|
debug "Socket state after processing in order packet",
|
|
|
|
socketKey = socket.socketKey,
|
|
|
|
socketAckNr = socket.ackNr,
|
|
|
|
reorderCount = socket.reorderCount,
|
|
|
|
windowPackets = socket.curWindowPackets
|
|
|
|
|
|
|
|
# TODO for now we just schedule concurrent task with ack sending. It may
|
|
|
|
# need improvement, as with this approach there is no direct control over
|
|
|
|
# how many concurrent tasks there are and how to cancel them when socket
|
|
|
|
# is closed
|
|
|
|
socket.sendAck()
|
2021-11-04 06:38:46 +00:00
|
|
|
|
2022-03-18 07:13:17 +00:00
|
|
|
# we got packet out of order
|
|
|
|
else:
|
|
|
|
debug "Got out of order packet"
|
2022-01-07 09:38:19 +00:00
|
|
|
|
2022-03-18 07:13:17 +00:00
|
|
|
if (socket.gotFin and pkSeqNr > socket.eofPktNr):
|
|
|
|
debug "Got packet past eof",
|
|
|
|
pkSeqNr = pkSeqNr,
|
|
|
|
eofPktNr = socket.eofPktNr
|
2021-11-09 14:29:59 +00:00
|
|
|
|
2022-03-18 07:13:17 +00:00
|
|
|
return
|
|
|
|
|
|
|
|
      # Grow the buffer before checking whether the packet is already there, to
      # avoid looking at an older packet due to indices wrap around.
|
|
|
|
socket.inBuffer.ensureSize(pkSeqNr + 1, pastExpected + 1)
|
|
|
|
|
|
|
|
if (socket.inBuffer.get(pkSeqNr).isSome()):
|
|
|
|
debug "Packet with seqNr already received",
|
|
|
|
seqNr = pkSeqNr
|
|
|
|
else:
|
|
|
|
let payloadLength = uint32(len(p.payload))
|
|
|
|
if (socket.inBufferBytes + payloadLength <= socket.socketConfig.maxSizeOfReorderBuffer and
|
|
|
|
socket.inBufferBytes + uint32(socket.offset) + payloadLength <= socket.socketConfig.optRcvBuffer):
|
2022-04-12 19:11:01 +00:00
|
|
|
|
2022-03-18 07:13:17 +00:00
|
|
|
debug "store packet in reorder buffer",
|
|
|
|
packetBytes = payloadLength,
|
|
|
|
packetSeqNr = p.header.seqNr,
|
|
|
|
packetAckNr = p.header.ackNr,
|
|
|
|
socketSeqNr = socket.seqNr,
|
2022-11-16 16:44:00 +00:00
|
|
|
socketAckNr = socket.ackNr,
|
2022-03-18 07:13:17 +00:00
|
|
|
rcvbufferSize = socket.offset,
|
|
|
|
reorderBufferSize = socket.inBufferBytes
|
|
|
|
|
|
|
|
socket.inBuffer.put(pkSeqNr, p)
|
|
|
|
inc socket.reorderCount
|
|
|
|
socket.inBufferBytes = socket.inBufferBytes + payloadLength
|
|
|
|
debug "added out of order packet to reorder buffer",
|
|
|
|
reorderCount = socket.reorderCount
|
2022-11-16 16:44:00 +00:00
|
|
|
          # We send an ack packet here: as the reorder count is > 0, the eack
          # bitmask will be generated.
|
|
|
|
socket.sendAck()
|
2021-10-28 09:41:43 +00:00
|
|
|
|
2022-04-12 19:11:01 +00:00
|
|
|
proc processPacket*(socket: UtpSocket, p: Packet): Future[void] =
|
2022-02-24 17:22:44 +00:00
|
|
|
socket.eventQueue.put(SocketEvent(kind: NewPacket, packet: p))
|
|
|
|
|
|
|
|
template shiftBuffer(t, c: untyped) =
|
|
|
|
if (t).offset > c:
|
|
|
|
if c > 0:
|
|
|
|
moveMem(addr((t).rcvBuffer[0]), addr((t).rcvBuffer[(c)]), (t).offset - (c))
|
|
|
|
(t).offset = (t).offset - (c)
|
|
|
|
else:
|
|
|
|
(t).offset = 0
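# Illustrative sketch of the template above, with hypothetical values: if
# socket.offset == 5 and a reader consumed 3 bytes, then `socket.shiftBuffer(3)`
# moves rcvBuffer[3..4] to rcvBuffer[0..1] and sets offset to 2; consuming 5
# (or more) bytes simply resets offset to 0.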
|
|
|
|
|
|
|
|
proc onRead(socket: UtpSocket, readReq: var ReadReq): ReadResult =
|
2022-03-03 21:38:13 +00:00
|
|
|
debug "Handling incoming read",
|
|
|
|
rcvBufferSize = socket.offset,
|
|
|
|
reorderBufferSize = socket.inBufferBytes,
|
|
|
|
socketAtEOF = socket.atEof(),
|
|
|
|
readTillEOF = readReq.bytesToRead == 0
|
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
if readReq.reader.finished():
|
|
|
|
return ReadCancelled
|
2022-04-12 19:11:01 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
if socket.atEof():
|
|
|
|
# buffer is already empty and we reached remote fin, just finish read with whatever
|
|
|
|
# was already read
|
|
|
|
readReq.reader.complete(readReq.bytesAvailable)
|
|
|
|
return SocketAlreadyFinished
|
|
|
|
|
|
|
|
if readReq.bytesToRead == 0:
|
|
|
|
    # treat it as a read till eof
|
|
|
|
readReq.bytesAvailable.add(socket.rcvBuffer.toOpenArray(0, socket.offset - 1))
|
|
|
|
socket.shiftBuffer(socket.offset)
|
|
|
|
if (socket.atEof()):
|
2022-03-03 21:38:13 +00:00
|
|
|
|
|
|
|
debug "Read finished",
|
|
|
|
bytesRead = len(readReq.bytesAvailable),
|
2022-11-16 16:44:00 +00:00
|
|
|
socketAtEof = socket.atEof()
|
2022-03-03 21:38:13 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
readReq.reader.complete(readReq.bytesAvailable)
|
|
|
|
return ReadFinished
|
|
|
|
else:
|
2022-03-03 21:38:13 +00:00
|
|
|
debug "Read not finished",
|
|
|
|
bytesRead = len(readReq.bytesAvailable),
|
2022-11-16 16:44:00 +00:00
|
|
|
socketAtEof = socket.atEof()
|
2022-03-03 21:38:13 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
return ReadNotFinished
|
|
|
|
else:
|
|
|
|
let bytesAlreadyRead = len(readReq.bytesAvailable)
|
|
|
|
let bytesLeftToRead = readReq.bytesToRead - bytesAlreadyRead
|
|
|
|
let count = min(socket.offset, bytesLeftToRead)
|
|
|
|
readReq.bytesAvailable.add(socket.rcvBuffer.toOpenArray(0, count - 1))
|
|
|
|
socket.shiftBuffer(count)
|
|
|
|
if (len(readReq.bytesAvailable) == readReq.bytesToRead):
|
2022-03-03 21:38:13 +00:00
|
|
|
debug "Read finished",
|
|
|
|
bytesRead = len(readReq.bytesAvailable),
|
2022-11-16 16:44:00 +00:00
|
|
|
socketAtEof = socket.atEof()
|
2022-03-03 21:38:13 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
readReq.reader.complete(readReq.bytesAvailable)
|
|
|
|
return ReadFinished
|
|
|
|
else:
|
2022-03-03 21:38:13 +00:00
|
|
|
debug "Read not finished",
|
|
|
|
bytesRead = len(readReq.bytesAvailable),
|
2022-11-16 16:44:00 +00:00
|
|
|
socketAtEof = socket.atEof()
|
2022-03-03 21:38:13 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
return ReadNotFinished
|
|
|
|
|
|
|
|
proc eventLoop(socket: UtpSocket) {.async.} =
|
|
|
|
try:
|
|
|
|
while true:
|
2022-11-30 08:34:08 +00:00
|
|
|
let socketEvent = await socket.eventQueue.get()
|
|
|
|
case socketEvent.kind
|
2022-02-24 17:22:44 +00:00
|
|
|
of NewPacket:
|
2022-11-30 08:34:08 +00:00
|
|
|
socket.processPacketInternal(socketEvent.packet)
|
2022-04-12 19:11:01 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
# we processed a packet and rcv buffer size is larger than 0,
|
|
|
|
# check if we can finish some pending readers
|
2022-03-03 21:38:13 +00:00
|
|
|
while socket.pendingReads.len() > 0:
|
2022-02-24 17:22:44 +00:00
|
|
|
let readResult = socket.onRead(socket.pendingReads[0])
|
|
|
|
case readResult
|
|
|
|
of ReadFinished:
|
|
|
|
discard socket.pendingReads.popFirst()
|
|
|
|
of ReadNotFinished:
|
|
|
|
            # there were not enough bytes in the buffer to finish this read
            # request, stop processing further reads
|
2022-02-24 17:22:44 +00:00
|
|
|
break
|
|
|
|
else:
|
2023-04-04 15:11:36 +00:00
|
|
|
            # read was cancelled or the socket is already finished, move on to
            # the next read request
|
2022-02-24 17:22:44 +00:00
|
|
|
discard socket.pendingReads.popFirst()
|
|
|
|
|
|
|
|
        # we processed a packet, so there could be more space in the send buffer
|
|
|
|
while socket.pendingWrites.len() > 0:
|
2022-11-30 08:34:08 +00:00
|
|
|
let pendingWrite = socket.pendingWrites.popFirst()
|
|
|
|
case pendingWrite.kind
|
2022-02-24 17:22:44 +00:00
|
|
|
of Close:
|
|
|
|
socket.handleClose()
|
|
|
|
            # close should be the last packet sent
|
|
|
|
break
|
|
|
|
of Data:
|
2022-04-12 19:11:01 +00:00
|
|
|
            # Check that writing was not cancelled in the meantime. This
            # approach can create partial writes, as part of the data could
            # already have been written with WriteReq.
|
2022-11-30 08:34:08 +00:00
|
|
|
if (not pendingWrite.writer.finished()):
|
|
|
|
let bytesWritten = socket.handleDataWrite(pendingWrite.data)
|
|
|
|
if (bytesWritten == len(pendingWrite.data)):
|
|
|
|
# all bytes were written we can finish external future
|
|
|
|
pendingWrite.writer.complete(
|
2023-04-04 15:11:36 +00:00
|
|
|
WriteResult.ok(bytesWritten)
|
2022-11-30 08:34:08 +00:00
|
|
|
)
|
2022-02-24 17:22:44 +00:00
|
|
|
else:
|
2022-11-30 08:34:08 +00:00
|
|
|
let bytesLeft =
|
|
|
|
pendingWrite.data[bytesWritten..pendingWrite.data.high]
|
2023-04-04 15:11:36 +00:00
|
|
|
# bytes partially written to buffer, schedule rest of data for
|
|
|
|
# later
|
2022-11-30 08:34:08 +00:00
|
|
|
socket.pendingWrites.addFirst(
|
|
|
|
WriteRequest(
|
|
|
|
kind: Data,
|
|
|
|
data: bytesLeft,
|
|
|
|
writer: pendingWrite.writer
|
|
|
|
)
|
|
|
|
)
|
2022-02-24 17:22:44 +00:00
|
|
|
                # there is no more space in the buffer, break from the loop
|
|
|
|
break
|
|
|
|
of CheckTimeouts:
|
|
|
|
discard
|
|
|
|
of CloseReq:
|
|
|
|
if (socket.pendingWrites.len() > 0):
|
|
|
|
# there are still some unfinished writes, waiting to be finished
|
|
|
|
socket.pendingWrites.addLast(WriteRequest(kind: Close))
|
|
|
|
else:
|
|
|
|
socket.handleClose()
|
|
|
|
of WriteReq:
|
|
|
|
        # check that the writer was not cancelled in the meantime
|
2022-11-30 08:34:08 +00:00
|
|
|
if (not socketEvent.writer.finished()):
|
2022-02-24 17:22:44 +00:00
|
|
|
if (socket.pendingWrites.len() > 0):
|
2023-04-04 15:11:36 +00:00
|
|
|
# there are still some unfinished writes, waiting to be finished
|
|
|
|
# schedule this batch for later
|
2022-11-30 08:34:08 +00:00
|
|
|
socket.pendingWrites.addLast(
|
|
|
|
WriteRequest(
|
|
|
|
kind: Data,
|
|
|
|
data: socketEvent.data,
|
|
|
|
writer: socketEvent.writer
|
|
|
|
)
|
|
|
|
)
|
2022-02-24 17:22:44 +00:00
|
|
|
else:
|
2022-11-30 08:34:08 +00:00
|
|
|
let bytesWritten = socket.handleDataWrite(socketEvent.data)
|
|
|
|
if (bytesWritten == len(socketEvent.data)):
|
2022-02-24 17:22:44 +00:00
|
|
|
# all bytes were written we can finish external future
|
2022-11-30 08:34:08 +00:00
|
|
|
socketEvent.writer.complete(
|
2023-04-04 15:11:36 +00:00
|
|
|
WriteResult.ok(bytesWritten)
|
2022-11-30 08:34:08 +00:00
|
|
|
)
|
2022-02-24 17:22:44 +00:00
|
|
|
else:
|
2022-11-30 08:34:08 +00:00
|
|
|
let bytesLeft =
|
|
|
|
socketEvent.data[bytesWritten..socketEvent.data.high]
|
2022-02-24 17:22:44 +00:00
|
|
|
# bytes partially written to buffer, schedule rest of data for later
|
2022-11-30 08:34:08 +00:00
|
|
|
socket.pendingWrites.addLast(
|
|
|
|
WriteRequest(
|
|
|
|
kind: Data,
|
|
|
|
data: bytesLeft,
|
|
|
|
writer: socketEvent.writer
|
|
|
|
)
|
|
|
|
)
|
2022-02-24 17:22:44 +00:00
|
|
|
of ReadReqType:
|
|
|
|
        # check that the reader was not cancelled in the meantime
|
2022-11-30 08:34:08 +00:00
|
|
|
if (not socketEvent.readReq.reader.finished()):
|
2022-02-24 17:22:44 +00:00
|
|
|
if (socket.pendingReads.len() > 0):
|
2023-04-04 15:11:36 +00:00
|
|
|
# there is already pending unfinished read request, schedule this
|
|
|
|
# one for later
|
2022-11-30 08:34:08 +00:00
|
|
|
socket.pendingReads.addLast(socketEvent.readReq)
|
2022-02-24 17:22:44 +00:00
|
|
|
else:
|
2022-11-30 08:34:08 +00:00
|
|
|
var readReq = socketEvent.readReq
|
2022-02-24 17:22:44 +00:00
|
|
|
let readResult = socket.onRead(readReq)
|
|
|
|
case readResult
|
|
|
|
of ReadNotFinished:
|
|
|
|
socket.pendingReads.addLast(readReq)
|
|
|
|
else:
|
2022-04-12 19:11:01 +00:00
|
|
|
              # in any other case we do not need to do anything
|
2022-02-24 17:22:44 +00:00
|
|
|
discard
|
|
|
|
socket.checkTimeouts()
|
2022-03-28 10:35:08 +00:00
|
|
|
except CancelledError as exc:
|
2022-02-24 17:22:44 +00:00
|
|
|
for w in socket.pendingWrites.items():
|
|
|
|
if w.kind == Data and (not w.writer.finished()):
|
2023-04-04 15:11:36 +00:00
|
|
|
let res = WriteResult.err(
|
2022-11-30 08:34:08 +00:00
|
|
|
WriteError(kind: SocketNotWriteable, currentState: socket.state)
|
|
|
|
)
|
2022-02-24 17:22:44 +00:00
|
|
|
w.writer.complete(res)
|
|
|
|
for r in socket.pendingReads.items():
|
|
|
|
# complete every reader with already read bytes
|
2023-04-04 15:11:36 +00:00
|
|
|
# TODO: it may be better to refine read API to return
|
|
|
|
# Future[Result[seq[byte], E]] and return errors for not finished reads
|
2022-02-24 17:22:44 +00:00
|
|
|
if (not r.reader.finished()):
|
|
|
|
r.reader.complete(r.bytesAvailable)
|
|
|
|
socket.pendingWrites.clear()
|
|
|
|
socket.pendingReads.clear()
|
2023-04-04 15:11:36 +00:00
|
|
|
# main eventLoop has been cancelled, try to cancel `checkTimeoutsLoop`
|
2022-03-28 10:35:08 +00:00
|
|
|
socket.checkTimeoutsLoop.cancel()
|
2022-02-24 17:22:44 +00:00
|
|
|
trace "main socket event loop cancelled"
|
2022-03-28 10:35:08 +00:00
|
|
|
raise exc
|
2022-02-24 17:22:44 +00:00
|
|
|
|
|
|
|
proc startEventLoop(s: UtpSocket) =
|
|
|
|
s.eventLoop = eventLoop(s)
|
|
|
|
|
2021-11-09 14:29:59 +00:00
|
|
|
proc atEof*(socket: UtpSocket): bool =
|
2023-04-04 15:11:36 +00:00
|
|
|
# The socket is considered at eof when the remote side sent us a FIN packet
|
|
|
|
# and all packets up to the FIN have been processed.
|
2022-02-24 17:22:44 +00:00
|
|
|
socket.offset == 0 and socket.reachedFin
|
2021-11-09 14:29:59 +00:00
|
|
|
|
|
|
|
proc readingClosed(socket: UtpSocket): bool =
|
|
|
|
socket.atEof() or socket.state == Destroy
|
2021-10-28 09:41:43 +00:00
|
|
|
|
2021-12-02 14:46:18 +00:00
|
|
|
proc close*(socket: UtpSocket) =
|
2023-04-04 15:11:36 +00:00
|
|
|
## Gracefully close the connection (send FIN) if the socket is in the
|
|
|
|
## connected state. Does not wait for the socket to close.
|
2021-11-09 14:29:59 +00:00
|
|
|
if socket.state != Destroy:
|
|
|
|
case socket.state
|
2021-12-02 14:46:18 +00:00
|
|
|
of Connected:
|
2021-11-09 14:29:59 +00:00
|
|
|
socket.readShutdown = true
|
2021-12-02 14:46:18 +00:00
|
|
|
if (not socket.sendFinRequested):
|
|
|
|
try:
|
2022-04-12 19:11:01 +00:00
|
|
|
debug "Sending FIN", dst = socket.socketKey
|
2023-04-04 15:11:36 +00:00
|
|
|
# With this approach, all pending writes will be executed before
|
|
|
|
# sending the FIN packet.
|
2022-02-24 17:22:44 +00:00
|
|
|
socket.eventQueue.putNoWait(SocketEvent(kind: CloseReq))
|
2021-12-02 14:46:18 +00:00
|
|
|
except AsyncQueueFullError as e:
|
2023-04-04 15:11:36 +00:00
|
|
|
# Should not happen as our write queue is unbounded.
|
2021-12-02 14:46:18 +00:00
|
|
|
raiseAssert e.msg
|
|
|
|
|
|
|
|
socket.sendFinRequested = true
|
2021-11-09 14:29:59 +00:00
|
|
|
else:
|
2023-04-04 15:11:36 +00:00
|
|
|
# When connection is not established, sending FIN makes no sense, just
|
|
|
|
# destroy the socket.
|
2021-11-09 14:29:59 +00:00
|
|
|
socket.destroy()
|
|
|
|
|
|
|
|
proc closeWait*(socket: UtpSocket) {.async.} =
|
2023-04-04 15:11:36 +00:00
|
|
|
## Gracefully close the connection (send FIN) if the socket is in the
|
|
|
|
## connected state and wait for the socket to be closed.
|
|
|
|
## Warning: if the FIN packet is lost, then the socket might get closed due to
|
|
|
|
## retransmission failures, which will take some time.
|
|
|
|
## The default is 4 retransmissions with doubling of rto between each
|
|
|
|
## retransmission.
|
2021-12-02 14:46:18 +00:00
|
|
|
socket.close()
|
2021-11-09 14:29:59 +00:00
|
|
|
await socket.closeEvent.wait()
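# Usage sketch (illustrative only, assumes an already connected `socket`):
#   socket.close()            # fire-and-forget: queue the FIN and return
#   await socket.closeWait()  # or: queue the FIN and wait until closeEvent fires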
|
|
|
|
|
2021-12-20 12:14:50 +00:00
|
|
|
proc write*(socket: UtpSocket, data: seq[byte]): Future[WriteResult] =
|
2022-04-12 19:11:01 +00:00
|
|
|
debug "Write data", dst = socket.socketKey, length = len(data)
|
2022-01-07 09:38:19 +00:00
|
|
|
|
2021-12-02 14:46:18 +00:00
|
|
|
let retFuture = newFuture[WriteResult]("UtpSocket.write")
|
|
|
|
|
2021-11-09 14:29:59 +00:00
|
|
|
if (socket.state != Connected):
|
2023-04-04 15:11:36 +00:00
|
|
|
let res = WriteResult.err(WriteError(kind: SocketNotWriteable, currentState: socket.state))
|
2021-12-02 14:46:18 +00:00
|
|
|
retFuture.complete(res)
|
|
|
|
return retFuture
|
2021-12-20 12:14:50 +00:00
|
|
|
|
2021-11-09 14:29:59 +00:00
|
|
|
  # fin should be the last packet received by the remote side, therefore trying
  # to write after sending fin is considered an error
|
2021-12-02 14:46:18 +00:00
|
|
|
if socket.sendFinRequested or socket.finSent:
|
2023-04-04 15:11:36 +00:00
|
|
|
let res = WriteResult.err(WriteError(kind: FinSent))
|
2021-12-02 14:46:18 +00:00
|
|
|
retFuture.complete(res)
|
|
|
|
return retFuture
|
2021-11-09 14:29:59 +00:00
|
|
|
|
2021-10-28 09:41:43 +00:00
|
|
|
var bytesWritten = 0
|
2021-12-20 12:14:50 +00:00
|
|
|
|
2021-10-28 09:41:43 +00:00
|
|
|
if len(data) == 0:
|
2023-04-04 15:11:36 +00:00
|
|
|
let res = WriteResult.ok(bytesWritten)
|
2021-12-02 14:46:18 +00:00
|
|
|
retFuture.complete(res)
|
|
|
|
return retFuture
|
2021-12-20 12:14:50 +00:00
|
|
|
|
2021-12-02 14:46:18 +00:00
|
|
|
try:
|
2023-04-04 15:11:36 +00:00
|
|
|
socket.eventQueue.putNoWait(SocketEvent(
|
|
|
|
kind: WriteReq, data: data, writer: retFuture))
|
2021-12-02 14:46:18 +00:00
|
|
|
except AsyncQueueFullError as e:
|
|
|
|
    # this should not happen as our event queue is unbounded
|
|
|
|
raiseAssert e.msg
|
2021-10-28 09:41:43 +00:00
|
|
|
|
2021-12-02 14:46:18 +00:00
|
|
|
return retFuture
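# Usage sketch (illustrative only, assumes an already connected `socket`; the
# stew/results accessors `isErr`, `error` and `get` are used on WriteResult):
#   let res = await socket.write(@[1'u8, 2, 3])
#   if res.isErr():
#     echo "write failed: ", res.error.kind   # e.g. SocketNotWriteable, FinSent
#   else:
#     echo "bytes accepted into send buffer: ", res.get()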
|
2021-11-09 14:29:59 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
proc read*(socket: UtpSocket, n: Natural): Future[seq[byte]] =
  ## Read ``n`` bytes from socket ``socket``, or less if the socket reaches EOF
  ## before ``n`` bytes are available.
  ##
  ## This procedure allocates a buffer seq[byte] and returns it as the result.
|
2022-02-24 17:22:44 +00:00
|
|
|
let fut = newFuture[seq[uint8]]()
|
2021-10-28 09:41:43 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
if socket.readingClosed():
|
|
|
|
fut.complete(newSeq[uint8]())
|
|
|
|
return fut
|
2021-11-09 14:29:59 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
try:
|
|
|
|
socket.eventQueue.putNoWait(
|
|
|
|
SocketEvent(
|
|
|
|
kind:ReadReqType,
|
|
|
|
readReq: ReadReq(
|
|
|
|
bytesToRead: n,
|
|
|
|
bytesAvailable: newSeq[uint8](),
|
|
|
|
reader: fut))
|
|
|
|
)
|
|
|
|
except AsyncQueueFullError as e:
|
|
|
|
    # should not happen as our event queue is unbounded
|
|
|
|
raiseAssert e.msg
|
2022-01-07 09:38:19 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
return fut
|
2021-11-09 14:29:59 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
proc read*(socket: UtpSocket): Future[seq[byte]] =
  ## Read all bytes from socket ``socket``, i.e. until the remote end closes
  ## the connection (EOF is reached).
  ##
  ## This procedure allocates a buffer seq[byte] and returns it as the result.
|
2022-02-24 17:22:44 +00:00
|
|
|
let fut = newFuture[seq[uint8]]()
|
2021-11-09 14:29:59 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
if socket.readingClosed():
|
|
|
|
fut.complete(newSeq[uint8]())
|
|
|
|
return fut
|
2021-10-28 09:41:43 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
try:
|
|
|
|
socket.eventQueue.putNoWait(
|
|
|
|
SocketEvent(
|
|
|
|
kind:ReadReqType,
|
|
|
|
readReq: ReadReq(
|
|
|
|
bytesToRead: 0,
|
|
|
|
bytesAvailable: newSeq[uint8](),
|
|
|
|
reader: fut))
|
|
|
|
)
|
|
|
|
except AsyncQueueFullError as e:
|
|
|
|
    # should not happen as our event queue is unbounded
|
|
|
|
raiseAssert e.msg
|
2022-01-07 09:38:19 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
return fut
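# Usage sketch (illustrative only, assumes an already connected `socket`):
#   let exactly100 = await socket.read(100)  # completes after 100 bytes (or at EOF)
#   let untilEof   = await socket.read()     # completes only once the remote FIN
#                                            # is processed (socket reaches EOF)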
|
2021-10-28 09:41:43 +00:00
|
|
|
|
2022-11-16 16:44:00 +00:00
|
|
|
# Check how many packets are still in the outgoing buffer, useful for tests or
# debugging.
|
|
|
|
proc numPacketsInOutGoingBuffer*(socket: UtpSocket): int =
|
|
|
|
var num = 0
|
|
|
|
for e in socket.outBuffer.items():
|
|
|
|
if e.isSome():
|
|
|
|
inc num
|
|
|
|
num
|
2021-11-04 06:38:46 +00:00
|
|
|
|
2021-11-24 16:49:13 +00:00
|
|
|
# Check how many payload bytes are still in flight
|
2022-02-24 17:22:44 +00:00
|
|
|
proc numOfBytesInFlight*(socket: UtpSocket): uint32 = socket.currentWindow
|
2021-11-24 16:49:13 +00:00
|
|
|
|
2022-01-20 08:22:53 +00:00
|
|
|
# Check how many bytes are in incoming buffer
|
2022-02-24 17:22:44 +00:00
|
|
|
proc numOfBytesInIncomingBuffer*(socket: UtpSocket): uint32 = uint32(socket.offset)
|
2022-01-20 08:22:53 +00:00
|
|
|
|
2022-11-16 16:44:00 +00:00
|
|
|
# Check how many packets are still in the reorder buffer, useful for tests or
# debugging.
# It raises an assertion error when the number of elements in the buffer does
# not equal the kept counter.
|
2022-11-16 16:44:00 +00:00
|
|
|
proc numPacketsInReorderedBuffer*(socket: UtpSocket): int =
|
2021-11-04 06:38:46 +00:00
|
|
|
var num = 0
|
2021-12-20 12:14:50 +00:00
|
|
|
for e in socket.inBuffer.items():
|
2021-11-04 06:38:46 +00:00
|
|
|
if e.isSome():
|
|
|
|
inc num
|
|
|
|
doAssert(num == int(socket.reorderCount))
|
|
|
|
num
|
2021-11-18 09:05:56 +00:00
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
proc numOfEventsInEventQueue*(socket: UtpSocket): int = len(socket.eventQueue)
|
|
|
|
|
2021-11-18 09:05:56 +00:00
|
|
|
proc connectionId*[A](socket: UtpSocket[A]): uint16 =
  ## The connection id is the id used in the first SYN packet which establishes
  ## the connection, so for the Outgoing side it is actually its rcv_id, and for
  ## the Incoming side it is its snd_id.
|
|
|
|
case socket.direction
|
|
|
|
of Incoming:
|
|
|
|
socket.connectionIdSnd
|
|
|
|
of Outgoing:
|
|
|
|
socket.connectionIdRcv
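# Illustrative example: for an outgoing socket created with rcvConnectionId = n,
# the SYN carries id n and connectionIdSnd becomes n + 1, so connectionId()
# returns n. For the incoming side accepting that SYN, connectionIdSnd = n and
# connectionIdRcv = n + 1, so connectionId() returns n as well: both ends report
# the id that established the connection.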
|
2021-12-09 09:52:21 +00:00
|
|
|
|
|
|
|
# Check the current max window size for this socket
|
|
|
|
proc currentMaxWindowSize*[A](socket: UtpSocket[A]): uint32 =
|
2022-02-24 17:22:44 +00:00
|
|
|
socket.maxWindow
|
|
|
|
|
|
|
|
proc new[A](
|
|
|
|
T: type UtpSocket[A],
|
|
|
|
to: A,
|
|
|
|
snd: SendCallback[A],
|
|
|
|
state: ConnectionState,
|
|
|
|
cfg: SocketConfig,
|
|
|
|
direction: ConnectionDirection,
|
|
|
|
rcvId: uint16,
|
|
|
|
sndId: uint16,
|
|
|
|
initialSeqNr: uint16,
|
|
|
|
initialAckNr: uint16,
|
|
|
|
initialTimeout: Duration
|
|
|
|
): T =
|
|
|
|
let currentTime = getMonoTimestamp().moment
|
2022-04-04 11:44:32 +00:00
|
|
|
|
|
|
|
  # Initial max window size. The reference implementation uses a value which
  # enables one packet to be transferred.
  # We use a value two times higher, as we do not yet have proper mtu estimation
  # and our impl should work over udp and discovery v5 (where proper estimation
  # may be harder as packets already carry a discovery v5 envelope).
  let initMaxWindow = 2 * cfg.payloadSize
|
2022-02-24 17:22:44 +00:00
|
|
|
T(
|
|
|
|
remoteAddress: to,
|
|
|
|
state: state,
|
|
|
|
direction: direction,
|
|
|
|
socketConfig: cfg,
|
|
|
|
connectionIdRcv: rcvId,
|
|
|
|
connectionIdSnd: sndId,
|
|
|
|
seqNr: initialSeqNr,
|
|
|
|
ackNr: initialAckNr,
|
|
|
|
connectionFuture: newFuture[void](),
|
|
|
|
outBuffer: GrowableCircularBuffer[OutgoingPacket].init(),
|
|
|
|
outBufferBytes: 0,
|
|
|
|
currentWindow: 0,
|
|
|
|
# start with 1mb assumption, field will be updated with first received packet
|
|
|
|
maxRemoteWindow: 1024 * 1024,
|
2022-04-04 11:44:32 +00:00
|
|
|
maxWindow: initMaxWindow,
|
2022-02-24 17:22:44 +00:00
|
|
|
inBuffer: GrowableCircularBuffer[Packet].init(),
|
|
|
|
retransmitTimeout: initialTimeout,
|
|
|
|
rtoTimeout: currentTime + initialTimeout,
|
|
|
|
    # Initial timeout values taken from the reference implementation
|
|
|
|
rtt: milliseconds(0),
|
|
|
|
rttVar: milliseconds(800),
|
|
|
|
rto: milliseconds(3000),
|
|
|
|
rcvBuffer: newSeq[uint8](int(cfg.optRcvBuffer)),
|
|
|
|
pendingReads: initDeque[ReadReq](),
|
|
|
|
closeEvent: newAsyncEvent(),
|
|
|
|
closeCallbacks: newSeq[Future[void]](),
|
|
|
|
pendingWrites: initDeque[WriteRequest](),
|
|
|
|
eventQueue: newAsyncQueue[SocketEvent](),
|
|
|
|
zeroWindowTimer: none[Moment](),
|
|
|
|
socketKey: UtpSocketKey.init(to, rcvId),
|
|
|
|
slowStart: true,
|
|
|
|
fastTimeout: false,
|
|
|
|
fastResendSeqNr: initialSeqNr,
|
|
|
|
lastWindowDecay: currentTime - maxWindowDecay,
|
2022-11-16 16:44:00 +00:00
|
|
|
slowStartThreshold: cfg.optSndBuffer,
|
2022-02-24 17:22:44 +00:00
|
|
|
ourHistogram: DelayHistogram.init(currentTime),
|
|
|
|
remoteHistogram: DelayHistogram.init(currentTime),
|
|
|
|
driftCalculator: ClockDriftCalculator.init(currentTime),
|
|
|
|
send: snd
|
|
|
|
)
|
|
|
|
|
|
|
|
proc newOutgoingSocket*[A](
|
|
|
|
to: A,
|
|
|
|
snd: SendCallback[A],
|
|
|
|
cfg: SocketConfig,
|
|
|
|
rcvConnectionId: uint16,
|
2022-06-17 20:45:37 +00:00
|
|
|
rng: var HmacDrbgContext
|
2022-02-24 17:22:44 +00:00
|
|
|
): UtpSocket[A] =
|
|
|
|
let sndConnectionId = rcvConnectionId + 1
|
|
|
|
let initialSeqNr = randUint16(rng)
|
|
|
|
|
|
|
|
UtpSocket[A].new(
|
|
|
|
to,
|
|
|
|
snd,
|
|
|
|
SynSent,
|
|
|
|
cfg,
|
|
|
|
Outgoing,
|
|
|
|
rcvConnectionId,
|
|
|
|
sndConnectionId,
|
|
|
|
initialSeqNr,
|
2022-11-16 16:44:00 +00:00
|
|
|
# Initially ack nr is 0, as we do not know remote initial seqnr
|
2022-02-24 17:22:44 +00:00
|
|
|
0,
|
|
|
|
cfg.initialSynTimeout
|
|
|
|
)
|
|
|
|
|
|
|
|
proc newIncomingSocket*[A](
|
|
|
|
to: A,
|
|
|
|
snd: SendCallback[A],
|
|
|
|
cfg: SocketConfig,
|
|
|
|
connectionId: uint16,
|
|
|
|
ackNr: uint16,
|
2022-06-17 20:45:37 +00:00
|
|
|
rng: var HmacDrbgContext
|
2022-02-24 17:22:44 +00:00
|
|
|
): UtpSocket[A] =
|
|
|
|
let initialSeqNr = randUint16(rng)
|
|
|
|
|
|
|
|
let (initialState, initialTimeout) =
|
|
|
|
if (cfg.incomingSocketReceiveTimeout.isNone()):
|
|
|
|
      # It does not matter what timeout value we put here: the socket will be in
      # connected state without outgoing packets in the buffer, so any timeout
      # hit will just double the rto without any penalties.
      # We cannot use 0 though, as then the timeout would constantly be re-set
      # to 500ms and a lot of useless work would be done.
|
2022-02-24 17:22:44 +00:00
|
|
|
(Connected, defaultInitialSynTimeout)
|
|
|
|
else:
|
|
|
|
let timeout = cfg.incomingSocketReceiveTimeout.unsafeGet()
|
|
|
|
(SynRecv, timeout)
|
|
|
|
|
|
|
|
UtpSocket[A].new(
|
|
|
|
to,
|
|
|
|
snd,
|
|
|
|
initialState,
|
|
|
|
cfg,
|
|
|
|
Incoming,
|
|
|
|
connectionId + 1,
|
|
|
|
connectionId,
|
|
|
|
initialSeqNr,
|
|
|
|
ackNr,
|
|
|
|
initialTimeout
|
|
|
|
)
|
|
|
|
|
2022-04-04 11:44:32 +00:00
|
|
|
proc getSocketConfig*(socket: UtpSocket): SocketConfig =
|
|
|
|
socket.socketConfig
|
|
|
|
|
2022-02-24 17:22:44 +00:00
|
|
|
proc startIncomingSocket*(socket: UtpSocket) =
|
|
|
|
# Make sure ack was flushed before moving forward
|
2022-04-12 19:11:01 +00:00
|
|
|
socket.sendAck()
|
2022-02-24 17:22:44 +00:00
|
|
|
socket.startEventLoop()
|
|
|
|
socket.startTimeoutLoop()
|
|
|
|
|
|
|
|
proc startOutgoingSocket*(socket: UtpSocket): Future[void] =
|
|
|
|
doAssert(socket.state == SynSent)
|
|
|
|
let packet = synPacket(socket.seqNr, socket.connectionIdRcv, socket.getRcvWindowSize())
|
2022-04-12 19:11:01 +00:00
|
|
|
debug "Sending SYN packet",
|
2022-02-24 17:22:44 +00:00
|
|
|
seqNr = packet.header.seqNr,
|
|
|
|
connectionId = packet.header.connectionId
|
|
|
|
  # set the number of transmissions to 1 as the syn packet will be sent just
  # after initialization
|
2022-02-24 17:22:44 +00:00
|
|
|
let outgoingPacket = OutgoingPacket.init(encodePacket(packet), 1, false, 0)
|
|
|
|
socket.registerOutgoingPacket(outgoingPacket)
|
|
|
|
socket.startEventLoop()
|
|
|
|
socket.startTimeoutLoop()
|
|
|
|
socket.sendData(outgoingPacket.packetBytes)
|
|
|
|
return socket.connectionFuture
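# End-to-end usage sketch (illustrative only): the surrounding uTP router is
# expected to wire the `SendCallback` to a real transport (e.g. UDP or
# discovery v5) and to feed incoming datagrams back through `processPacket`.
# Assuming a `cfg: SocketConfig`, an `rng: HmacDrbgContext` and such a `send`
# callback already exist, and using the chronos `TransportAddress` type as the
# address parameter:
#   let socket = newOutgoingSocket[TransportAddress](
#     remoteAddress, send, cfg, rcvConnectionId, rng)
#   await socket.startOutgoingSocket()   # resolves once the SYN is acked
#   discard await socket.write(someBytes)
#   let reply = await socket.read()
#   await socket.closeWait()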
|