fix(dcutr): make the dcutr client inbound and the server outbound (#983)

diegomrsantos 2023-11-17 10:46:35 +01:00 committed by GitHub
parent 08d9c84aca
commit 6791f5e7bb
4 changed files with 23 additions and 33 deletions
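
In short: the dcutr initiator (client) now upgrades its direct dial as an incoming connection, while the receiver (server) upgrades its direct dial as outgoing. A condensed sketch of the two changed call sites, pieced together from the hunks below (all identifiers are taken from the diff; this is not the complete code of either file):

    # initiator side (DcutrClient.startSync): the direct dial is now upgraded as inbound
    var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it],
      forceDial = true, reuseConnection = false, upgradeDir = Direction.In))

    # receiver side (Dcutr server): the direct dial is now upgraded as outbound
    var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it],
      forceDial = true, reuseConnection = false, upgradeDir = Direction.Out))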

View File

@@ -66,7 +66,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
if peerDialableAddrs.len > self.maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
try:
discard await anyCompleted(futs).wait(self.connectTimeout)
debug "Dcutr initiator has directly connected to the remote peer."

View File

@@ -56,7 +56,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
if peerDialableAddrs.len > maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.Out))
try:
discard await anyCompleted(futs).wait(connectTimeout)
debug "Dcutr receiver has directly connected to the remote peer."

View File

@@ -57,14 +57,15 @@ suite "Dcutr":
for t in behindNATSwitch.transports:
t.networkReachability = NetworkReachability.NotReachable
expect CatchableError:
# we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
# in two connection attempts instead of one. This dial is going to fail because the dcutr client is acting as the
# tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case.
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkExpiring:
# we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
# in two connection attempts instead of one. The server dial is going to fail because it is acting as the
# tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case, but the client
# dial will succeed.
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@@ -83,8 +84,8 @@
body
checkExpiring:
# no connection will be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@@ -142,13 +143,16 @@
for t in behindNATSwitch.transports:
t.networkReachability = NetworkReachability.NotReachable
expect CatchableError:
# we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
# in two connection attempts instead of one. This dial is going to fail because the dcutr client is acting as the
# tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case.
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkExpiring:
# we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
# in two connection attempts instead of one. The server dial is going to fail, but the client dial will succeed.
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())

View File

@@ -193,31 +193,17 @@ suite "Hole Punching":
await privatePeerSwitch2.connect(privatePeerSwitch1.peerInfo.peerId, (await privatePeerRelayAddr1))
privatePeerSwitch2.connectStub = rcvConnectStub
checkExpiring:
# we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
# in two connection attempts instead of one. The server dial is going to fail because it is acting as the
# tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case, but the client
# dial will succeed.
privatePeerSwitch1.connManager.connCount(privatePeerSwitch2.peerInfo.peerId) == 1 and
not isRelayed(privatePeerSwitch1.connManager.selectMuxer(privatePeerSwitch2.peerInfo.peerId).connection)
# wait for hole punching to finish in the background
await sleepAsync(600.millis)
await allFuturesThrowing(
privatePeerSwitch1.stop(), privatePeerSwitch2.stop(), switchRelay.stop(),
switchAux.stop(), switchAux2.stop(), switchAux3.stop(), switchAux4.stop())
asyncTest "Hole punching when peers addresses are private":
proc connectStub(self: SwitchStub,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
self.connectStub = nil # this stub should be called only once
await sleepAsync(100.millis) # avoid simultaneous dialing that causes address in use error
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
await holePunchingTest(nil, connectStub, NotReachable)
await holePunchingTest(nil, nil, NotReachable)
asyncTest "Hole punching when there is an error during unilateral direct connection":
asyncTest "Hole punching when peers addresses are private and there is an error in the initiator side":
proc connectStub(self: SwitchStub,
peerId: PeerId,