Integrate dns resolving (#615)
* integrate dns
* give hostname to transport dial
* add hostname test
* switched to websock master
* Add dnsaddr dial test w multiple transports
parent e787fc35a6
commit c92125a1a4

@@ -16,7 +16,7 @@ requires "nim >= 1.2.0",
          "metrics",
          "secp256k1",
          "stew#head",
-         "https://github.com/status-im/nim-websock"
+         "websock"
 
 proc runTest(filename: string, verify: bool = true, sign: bool = true,
              moreoptions: string = "") =

@@ -20,6 +20,7 @@ import dial,
        connmanager,
        stream/connection,
        transports/transport,
+       nameresolving/nameresolver,
        errors
 
 export dial, errors

@@ -41,6 +42,7 @@ type
     connManager: ConnManager
     dialLock: Table[PeerID, AsyncLock]
     transports: seq[Transport]
+    nameResolver: NameResolver
 
 proc dialAndUpgrade(
   self: Dialer,

@@ -49,58 +51,63 @@ proc dialAndUpgrade(
   Future[Connection] {.async.} =
   debug "Dialing peer", peerId
 
-  # Avoid "cannot be captured as it would violate memory safety" errors in Nim-1.4.x.
-  var
-    transport: Transport
-    address: MultiAddress
+  for address in addrs:      # for each address
+    let
+      hostname = address.getHostname()
+      resolvedAddresses =
+        if isNil(self.nameResolver): @[address]
+        else: await self.nameResolver.resolveMAddress(address)
 
-  for t in self.transports: # for each transport
-    transport = t
-    for a in addrs:      # for each address
-      address = a
-      if t.handles(a):   # check if it can dial it
-        trace "Dialing address", address = $a, peerId
-        let dialed = try:
-            libp2p_total_dial_attempts.inc()
-            # await a connection slot when the total
-            # connection count is equal to `maxConns`
-            await self.connManager.trackOutgoingConn(
-              () => transport.dial(address)
-            )
-          except TooManyConnectionsError as exc:
-            trace "Connection limit reached!"
-            raise exc
-          except CancelledError as exc:
-            debug "Dialing canceled", msg = exc.msg, peerId
-            raise exc
-          except CatchableError as exc:
-            debug "Dialing failed", msg = exc.msg, peerId
-            libp2p_failed_dials.inc()
-            continue # Try the next address
+    for a in resolvedAddresses:      # for each resolved address
+      for transport in self.transports: # for each transport
+        if transport.handles(a):   # check if it can dial it
+          trace "Dialing address", address = $a, peerId, hostname
+          let dialed = try:
+              libp2p_total_dial_attempts.inc()
+              # await a connection slot when the total
+              # connection count is equal to `maxConns`
+              #
+              # Need to copy to avoid "cannot be captured" errors in Nim-1.4.x.
+              let
+                transportCopy = transport
+                addressCopy = a
+              await self.connManager.trackOutgoingConn(
+                () => transportCopy.dial(hostname, addressCopy)
+              )
+            except TooManyConnectionsError as exc:
+              trace "Connection limit reached!"
+              raise exc
+            except CancelledError as exc:
+              debug "Dialing canceled", msg = exc.msg, peerId
+              raise exc
+            except CatchableError as exc:
+              debug "Dialing failed", msg = exc.msg, peerId
+              libp2p_failed_dials.inc()
+              continue # Try the next address
 
-        # make sure to assign the peer to the connection
-        dialed.peerId = peerId
+          # make sure to assign the peer to the connection
+          dialed.peerId = peerId
 
-        # also keep track of the connection's bottom unsafe transport direction
-        # required by gossipsub scoring
-        dialed.transportDir = Direction.Out
+          # also keep track of the connection's bottom unsafe transport direction
+          # required by gossipsub scoring
+          dialed.transportDir = Direction.Out
 
-        libp2p_successful_dials.inc()
+          libp2p_successful_dials.inc()
 
-        let conn = try:
-            await transport.upgradeOutgoing(dialed)
-          except CatchableError as exc:
-            # If we failed to establish the connection through one transport,
-            # we won't succeeded through another - no use in trying again
-            await dialed.close()
-            debug "Upgrade failed", msg = exc.msg, peerId
-            if exc isnot CancelledError:
-              libp2p_failed_upgrades_outgoing.inc()
-            raise exc
+          let conn = try:
+              await transport.upgradeOutgoing(dialed)
+            except CatchableError as exc:
+              # If we failed to establish the connection through one transport,
+              # we won't succeeded through another - no use in trying again
+              await dialed.close()
+              debug "Upgrade failed", msg = exc.msg, peerId
+              if exc isnot CancelledError:
+                libp2p_failed_upgrades_outgoing.inc()
+              raise exc
 
-        doAssert not isNil(conn), "connection died after upgradeOutgoing"
-        debug "Dial successful", conn, peerId = conn.peerId
-        return conn
+          doAssert not isNil(conn), "connection died after upgradeOutgoing"
+          debug "Dial successful", conn, peerId = conn.peerId
+          return conn
 
 proc internalConnect(
   self: Dialer,

@@ -234,9 +241,11 @@ proc new*(
   localPeerId: PeerId,
   connManager: ConnManager,
   transports: seq[Transport],
-  ms: MultistreamSelect): Dialer =
+  ms: MultistreamSelect,
+  nameResolver: NameResolver = nil): Dialer =
 
   T(localPeerId: localPeerId,
     connManager: connManager,
     transports: transports,
-    ms: ms)
+    ms: ms,
+    nameResolver: nameResolver)

@@ -40,12 +40,10 @@ method resolveIp*(
 
   doAssert(false, "Not implemented!")
 
-proc getHostname(ma: MultiAddress): string =
-  var dnsbuf = newSeq[byte](256)
-
-  let dnsLen = ma[0].get().protoArgument(dnsbuf).get()
-  dnsbuf.setLen(dnsLen)
-  return string.fromBytes(dnsbuf)
+proc getHostname*(ma: MultiAddress): string =
+  let firstPart = ($ma[0].get()).split('/')
+  if firstPart.len > 1: firstPart[2]
+  else: ""
 
 proc resolveDnsAddress(
   self: NameResolver,

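`getHostname` is now exported so the dialer can pull the DNS name out of a multiaddress before handing it to a transport. A minimal usage sketch, not part of the commit (the import paths assume libp2p installed as a package):

```nim
# Illustrative only.
import libp2p/multiaddress
import libp2p/nameresolving/nameresolver

let dnsMa = MultiAddress.init("/dns4/localhost/tcp/4001").tryGet()
doAssert dnsMa.getHostname() == "localhost"   # "/dns4/localhost" -> "localhost"

# For a non-DNS first component, the protocol argument comes back unchanged:
let ipMa = MultiAddress.init("/ip4/127.0.0.1/tcp/4001").tryGet()
doAssert ipMa.getHostname() == "127.0.0.1"
```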
@@ -122,27 +120,26 @@ proc resolveDnsAddr(
   return result
 
 
-proc resolveMAddresses*(
+proc resolveMAddress*(
   self: NameResolver,
-  addrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
+  address: MultiAddress): Future[seq[MultiAddress]] {.async.} =
   var res = initOrderedSet[MultiAddress]()
 
-  for address in addrs:
-    if not DNS.matchPartial(address):
-      res.incl(address)
-    else:
-      let code = address[0].get().protoCode().get()
-      let seq = case code:
-        of multiCodec("dns"):
-          await self.resolveDnsAddress(address)
-        of multiCodec("dns4"):
-          await self.resolveDnsAddress(address, Domain.AF_INET)
-        of multiCodec("dns6"):
-          await self.resolveDnsAddress(address, Domain.AF_INET6)
-        of multiCodec("dnsaddr"):
-          await self.resolveDnsAddr(address)
-        else:
-          @[address]
-      for ad in seq:
-        res.incl(ad)
+  if not DNS.matchPartial(address):
+    res.incl(address)
+  else:
+    let code = address[0].get().protoCode().get()
+    let seq = case code:
+      of multiCodec("dns"):
+        await self.resolveDnsAddress(address)
+      of multiCodec("dns4"):
+        await self.resolveDnsAddress(address, Domain.AF_INET)
+      of multiCodec("dns6"):
+        await self.resolveDnsAddress(address, Domain.AF_INET6)
+      of multiCodec("dnsaddr"):
+        await self.resolveDnsAddr(address)
+      else:
+        @[address]
+    for ad in seq:
+      res.incl(ad)
 
   return res.toSeq

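The batch `resolveMAddresses` becomes a single-address `resolveMAddress`, matching how the dialer now resolves one address at a time: a `/dns*` or `/dnsaddr` component expands into zero or more concrete addresses, anything else passes through untouched. A small sketch using the `MockResolver` test helper that appears later in this diff (illustrative only):

```nim
# Illustrative sketch, not part of the commit.
import chronos
import libp2p/multiaddress
import libp2p/nameresolving/mockresolver

proc example() {.async.} =
  let resolver = MockResolver.new()
  resolver.ipResponses[("localhost", false)] = @["127.0.0.1"]   # A record
  resolver.ipResponses[("localhost", true)] = @["::1"]          # AAAA record

  let resolved = await resolver.resolveMAddress(
    MultiAddress.init("/dns4/localhost/tcp/4001").tryGet())
  # /dns4 only asks for IPv4, so this yields /ip4/127.0.0.1/tcp/4001
  echo resolved

waitFor example()
```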
@@ -273,7 +273,7 @@ proc newSwitch*(peerInfo: PeerInfo,
     transports: transports,
     connManager: connManager,
     peerStore: PeerStore.new(),
-    dialer: Dialer.new(peerInfo.peerId, connManager, transports, ms),
+    dialer: Dialer.new(peerInfo.peerId, connManager, transports, ms, nameResolver),
     nameResolver: nameResolver)
 
   switch.connManager.peerStore = switch.peerStore

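The resolver configured on the switch is now also handed down to the `Dialer`, so dns/dnsaddr addresses are resolved on outgoing dials. A condensed, illustrative sketch of the wiring, based on the tests added later in this diff (imports are a best guess at the package layout):

```nim
# Condensed from the test changes below; illustrative wiring only.
import libp2p/[builders, multiaddress, crypto/crypto,
               transports/transport, transports/wstransport,
               upgrademngrs/upgrade, nameresolving/mockresolver]

let resolver = MockResolver.new()
resolver.ipResponses[("localhost", false)] = @["127.0.0.1"]

# Convenience constructor: the resolver reaches both Switch and Dialer.
let sw1 = newStandardSwitch(nameResolver = resolver)

# Builder form, as used for the WebSocket switch in the dnsaddr test below:
let sw2 = SwitchBuilder.new()
  .withAddress(MultiAddress.init("/ip4/127.0.0.1/tcp/0/ws").tryGet())
  .withRng(crypto.newRng())
  .withMplex()
  .withTransport(proc (upgr: Upgrade): Transport = WsTransport.new(upgr))
  .withNameResolver(resolver)
  .withNoise()
  .build()
```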
@@ -199,6 +199,7 @@ method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
 
 method dial*(
   self: TcpTransport,
+  hostname: string,
   address: MultiAddress): Future[Connection] {.async, gcsafe.} =
   ## dial a peer
   ##

@@ -60,12 +60,18 @@ method accept*(self: Transport): Future[Connection]
 
 method dial*(
   self: Transport,
+  hostname: string,
   address: MultiAddress): Future[Connection] {.base, gcsafe.} =
   ## dial a peer
   ##
 
   doAssert(false, "Not implemented!")
 
+proc dial*(
+  self: Transport,
+  address: MultiAddress): Future[Connection] {.gcsafe.} =
+  self.dial("", address)
+
 method upgradeIncoming*(
   self: Transport,
   conn: Connection): Future[void] {.base, gcsafe.} =

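Transport implementations now receive the hostname alongside the already-resolved address (the TCP transport ignores it, the WebSocket transport forwards it for TLS), while the one-argument `dial` forwards an empty hostname for existing call sites. A hypothetical third-party transport would override the two-argument method; a minimal sketch, with `MyTransport` being an invented name:

```nim
# Hypothetical transport, for illustration only.
import chronos
import libp2p/[multiaddress, stream/connection, transports/transport]

type MyTransport* = ref object of Transport

method dial*(
  self: MyTransport,
  hostname: string,              # "" when the dialer had no DNS name to offer
  address: MultiAddress): Future[Connection] {.async, gcsafe.} =
  ## `hostname` carries the name from the original /dns*, /dnsaddr component;
  ## `address` is already resolved to a concrete /ip4 or /ip6 address.
  raiseAssert "left unimplemented in this sketch"
```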
@@ -207,6 +207,7 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
 
 method dial*(
   self: WsTransport,
+  hostname: string,
   address: MultiAddress): Future[Connection] {.async, gcsafe.} =
   ## dial a peer
   ##

@@ -219,6 +220,7 @@ method dial*(
       address.initTAddress().tryGet(),
       "",
       secure = secure,
+      hostName = hostname,
       flags = self.tlsFlags)
 
   return await self.connHandler(transp, Direction.Out)

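The hostname is passed through to WebSock's `connect` as `hostName`, which is what lets the TLS layer verify the server certificate against the dialed name; the "Hostname verification" test at the end of this diff exercises exactly that. A condensed sketch of that behaviour, where `secureWsTransport` and `serverMa` are placeholders for a started wss transport and its listening address:

```nim
# Condensed illustration only; see the test at the end of this diff.
import chronos
import libp2p/[multiaddress, transports/wstransport]

proc hostnameCheck(secureWsTransport: WsTransport,
                   serverMa: MultiAddress) {.async.} =
  # A name matching the server certificate is accepted...
  let conn = await secureWsTransport.dial("ws.test", serverMa)
  await conn.close()

  # ...while a mismatching name makes the TLS handshake fail.
  try:
    discard await secureWsTransport.dial("ws.wronghostname", serverMa)
    doAssert false, "dial should have failed hostname verification"
  except CatchableError:
    discard
```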
@@ -59,7 +59,7 @@ suite "Name resolving":
   var resolver {.threadvar.}: MockResolver
 
   proc testOne(input: string, output: seq[Multiaddress]): bool =
-    let resolved = waitFor resolver.resolveMAddresses(@[Multiaddress.init(input).tryGet()])
+    let resolved = waitFor resolver.resolveMAddress(Multiaddress.init(input).tryGet())
    if resolved != output:
      echo "Expected ", output
      echo "Got ", resolved

@@ -90,18 +90,6 @@ suite "Name resolving":
 
     check testOne("/ip6/::1/tcp/0", "/ip6/::1/tcp/0")
 
-  asyncTest "test multiple resolve":
-    resolver.ipResponses[("localhost", false)] = @["127.0.0.1"]
-    resolver.ipResponses[("localhost", true)] = @["::1"]
-
-    let resolved = waitFor resolver.resolveMAddresses(@[
-      Multiaddress.init("/dns/localhost/udp/0").tryGet(),
-      Multiaddress.init("/dns4/localhost/udp/0").tryGet(),
-      Multiaddress.init("/dns6/localhost/udp/0").tryGet(),
-    ])
-
-    check resolved == @[Multiaddress.init("/ip4/127.0.0.1/udp/0").tryGet(), Multiaddress.init("/ip6/::1/udp/0").tryGet()]
-
   asyncTest "dnsaddr recursive test":
     resolver.txtResponses["_dnsaddr.bootstrap.libp2p.io"] = @[
       "dnsaddr=/dnsaddr/sjc-1.bootstrap.libp2p.io/tcp/4001/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",

@@ -21,7 +21,8 @@ import ../libp2p/[errors,
                   nameresolving/nameresolver,
                   nameresolving/mockresolver,
                   stream/chronosstream,
-                  transports/tcptransport]
+                  transports/tcptransport,
+                  transports/wstransport]
 import ./helpers
 
 const

@@ -463,7 +464,7 @@ suite "Switch":
 
     let switch1 = newStandardSwitch()
 
-    let rng = newRng()
+    let rng = crypto.newRng()
     # use same private keys to emulate two connection from same peer
     let privKey = PrivateKey.random(rng[]).tryGet()
     let switch2 = newStandardSwitch(

@@ -530,7 +531,7 @@ suite "Switch":
   asyncTest "e2e should allow dropping peer from connection events":
     var awaiters: seq[Future[void]]
 
-    let rng = newRng()
+    let rng = crypto.newRng()
     # use same private keys to emulate two connection from same peer
     let
       privateKey = PrivateKey.random(rng[]).tryGet()

@@ -573,7 +574,7 @@ suite "Switch":
   asyncTest "e2e should allow dropping multiple connections for peer from connection events":
     var awaiters: seq[Future[void]]
 
-    let rng = newRng()
+    let rng = crypto.newRng()
     # use same private keys to emulate two connection from same peer
     let
       privateKey = PrivateKey.random(rng[]).tryGet()

@@ -901,5 +902,79 @@ suite "Switch":
-      switch1.peerStore.addressBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.addrs.toHashSet()
-      switch2.peerStore.addressBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.addrs.toHashSet()
+      switch1.peerStore.addressBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.addrs.toHashSet()
+      switch2.peerStore.addressBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.addrs.toHashSet()
+      switch1.peerStore.protoBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.protocols.toHashSet()
+      switch2.peerStore.protoBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.protocols.toHashSet()
+
+  asyncTest "e2e dial dns4 address":
+    var awaiters: seq[Future[void]]
+    let resolver = MockResolver.new()
+    resolver.ipResponses[("localhost", false)] = @["127.0.0.1"]
+    resolver.ipResponses[("localhost", true)] = @["::1"]
+
+    let
+      srcSwitch = newStandardSwitch(nameResolver = resolver)
+      destSwitch = newStandardSwitch()
+
+    awaiters.add(await destSwitch.start())
+    awaiters.add(await srcSwitch.start())
+    await allFuturesThrowing(awaiters)
+
+    let testAddr = MultiAddress.init("/dns4/localhost/").tryGet() &
+                   destSwitch.peerInfo.addrs[0][1].tryGet()
+
+    await srcSwitch.connect(destSwitch.peerInfo.peerId, @[testAddr])
+    check srcSwitch.isConnected(destSwitch.peerInfo.peerId)
+
+    await destSwitch.stop()
+    await srcSwitch.stop()
+
+  asyncTest "e2e dial dnsaddr with multiple transports":
+    var awaiters: seq[Future[void]]
+    let resolver = MockResolver.new()
+
+    let
+      wsAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0/ws").tryGet()
+      tcpAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()
+
+      srcTcpSwitch = newStandardSwitch(nameResolver = resolver)
+      srcWsSwitch =
+        SwitchBuilder.new()
+          .withAddress(wsAddress)
+          .withRng(crypto.newRng())
+          .withMplex()
+          .withTransport(proc (upgr: Upgrade): Transport = WsTransport.new(upgr))
+          .withNameResolver(resolver)
+          .withNoise()
+          .build()
+
+      destSwitch =
+        SwitchBuilder.new()
+          .withAddresses(@[tcpAddress, wsAddress])
+          .withRng(crypto.newRng())
+          .withMplex()
+          .withTransport(proc (upgr: Upgrade): Transport = WsTransport.new(upgr))
+          .withTcpTransport()
+          .withNoise()
+          .build()
+
+    awaiters.add(await destSwitch.start())
+    awaiters.add(await srcTcpSwitch.start())
+    awaiters.add(await srcWsSwitch.start())
+    await allFuturesThrowing(awaiters)
+
+    resolver.txtResponses["_dnsaddr.test.io"] = @[
+      "dnsaddr=/ip4/127.0.0.1" & $destSwitch.peerInfo.addrs[1][1].tryGet() & "/ws",
+      "dnsaddr=/ip4/127.0.0.1" & $destSwitch.peerInfo.addrs[0][1].tryGet()
+    ]
+
+    let testAddr = MultiAddress.init("/dnsaddr/test.io/").tryGet()
+
+    await srcTcpSwitch.connect(destSwitch.peerInfo.peerId, @[testAddr])
+    check srcTcpSwitch.isConnected(destSwitch.peerInfo.peerId)
+
+    await srcWsSwitch.connect(destSwitch.peerInfo.peerId, @[testAddr])
+    check srcWsSwitch.isConnected(destSwitch.peerInfo.peerId)
+
+    await destSwitch.stop()
+    await srcWsSwitch.stop()
+    await srcTcpSwitch.stop()

@@ -15,56 +15,39 @@ import ./helpers, ./commontransport
 const
   SecureKey* = """
 -----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCdNv0SX02aeZ4/
-Yc+p/Kwd5UVOHlpmK7/TVC/kcjFbdoUuKNn8pnX/fyhgSKpUYut+te7YRiZhqlaL
-EZKjfy8GBZwXZnJCevFkTvGTTebXXExLIsLGfJqKeLAdFCQkX8wV3jV1DT5JLV+D
-5+HWaiiBr38gsl4ZbfyedTF40JvzokCmcdlx9bpzX1j/b84L/zSwUyyEcgp5G28F
-Jh5TnxAeDHJpOVjr8XMb/xoNqiDF6NwF96hvOZC14mZ1TxxW5bUzXprsy0l52pmh
-dN3Crz11+t2h519hRKHxT6/l5pTx/+dApXiP6hMV04CQJNnas3NyRxTDR9dNel+3
-+wD7/PRTAgMBAAECggEBAJuXPEbegxMKog7gYoE9S6oaqchySc0sJyCjBPL2ANsg
-JRZV38cnh0hhNDh2MfxqGd7Bd6wbYQjvZ88iiRm+WW+ARcby4MnimtxHNNYwFvG0
-qt0BffqqftfkMYfV0x8coAJUdFtvy+DoQstsxhlJ3uTaJtrZLD/GlmjMWzXSX0Vy
-FXiLDO7/LoSjsjaf4e4aLofIyLJS3H1T+5cr/d2mdpRzkeWkxShODsK4cRLOlZ5I
-pz4Wm2770DTbiYph8ixl/CnmYn6T7V0F5VYujALknipUBeQY4e/A9vrQ/pvqJV+W
-JjFUne6Rxg/lJjh8vNJp2bK1ZbzpwmZLaZIoEz8t/qECgYEAzvCCA48uQPaurSQ3
-cvHDhcVwYmEaH8MW8aIW/5l8XJK60GsUHPFhEsfD/ObI5PJJ9aOqgabpRHkvD4ZY
-a8QJBxCy6UeogUeKvGks8VQ34SZXLimmgrL9Mlljv0v9PloEkVYbztYyX4GVO0ov
-3oH+hKO+/MclzNDyeXZx3Vv4K+UCgYEAwnyb7tqp7fRqm/8EymIZV5pa0p6h609p
-EhCBi9ii6d/ewEjsBhs7bPDBO4PO9ylvOvryYZH1hVbQja2anOCBjO8dAHRHWM86
-964TFriywBQkYxp6dsB8nUjLBDza2xAM3m+OGi9/ATuhEAe5sXp/fZL3tkfSaOXI
-A7Gzro+kS9cCgYEAtKScSfEeBlWQa9H2mV9UN5z/mtF61YkeqTW+b8cTGVh4vWEL
-wKww+gzqGAV6Duk2CLijKeSDMmO64gl7fC83VjSMiTklbhz+jbQeKFhFI0Sty71N
-/j+y6NXBTgdOfLRl0lzhj2/JrzdWBtie6tR9UloCaXSKmb04PTFY+kvDWsUCgYBR
-krJUnKJpi/qrM2tu93Zpp/QwIxkG+We4i/PKFDNApQVo4S0d4o4qQ1DJBZ/pSxe8
-RUUkZ3PzWVZgFlCjPAcadbBUYHEMbt7sw7Z98ToIFmqspo53AIVD8yQzwtKIz1KW
-eXPAx+sdOUV008ivCBIxOVNswPMfzED4S7Bxpw3iQQKBgGJhct2nBsgu0l2/wzh9
-tpKbalW1RllgptNQzjuBEZMTvPF0L+7BE09/exKtt4N9s3yAzi8o6Qo7RHX5djVc
-SNgafV4jj7jt2Ilh6KOy9dshtLoEkS1NmiqfVe2go2auXZdyGm+I2yzKWdKGDO0J
-diTtYf1sA0PgNXdSyDC03TZl
+MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAP0yH7F7FtGunC91
+IPkU+u8B4gdxiwYW0J3PrixtB1Xz3e4dfjwQqhIJlG6BxQ4myCxmSPjxP/eOOYp+
+8/+A9nikbnc7H3OV8fNJhsSmPu8j8W2FsNVJzJnUQaE2yimrFR8NnvQ4MKvMBSgb
+lHTLbP1aAFp+K6KPPE7pkRMUdlqFAgMBAAECgYBl0eli4yALFI/kmdK3uBMtWHGA
+Es4YlcYxIFpnrTS9AQPnhN7F4uGxvT5+rhsDlN780+lWixXxRLWpF2KiBkeW8ayT
+kPeWvpSy6z+4LXw633ZLfCO1r6brpqSXNWxA0q7IgzYQEfMpnkaQrE3PVP5xkmTT
+k159ev138J23VfNgRQJBAP768qHOCnplKIe69SVUWlsQ5nnnybDBMq2YVATbombz
+KD57iufzBgND1clIEEuC6PK2C5gzTk4HZQioJ/juOFcCQQD+NVlb1HLoK7rHXZFO
+Tg3O+bwRZdo67J4pt//ijF7tLlZU/q6Kp9wHrXe1yhRV+Tow0BzBVHkc5eUM0/n7
+cOqDAkAedrECb/GEig17mfSsDxX0h2Jh8jWArrR1VRvEsNEIZ8jJHk2MRNbVEQe7
+0qZPv0ZBqUpdVtPmMq/5hs2vyhZlAkEA1cZ1fCUf8KD9tLS6AnjfYeRgRN07dXwQ
+0hKbTKAxIBJspZN7orzg60/0sNrc2SP6zJvm4qowI54tTelhexMNEwJBAOZz72xn
+EFUXKYQBbetiejnBBzFYmdA/QKmZ7kbQfDBOwG9wDPFmvnNSvSZws/bP1zcM95rq
+NABr5ec1FxuJa/8=
 -----END PRIVATE KEY-----
 """
 
   SecureCert* = """
 -----BEGIN CERTIFICATE-----
-MIIDazCCAlOgAwIBAgIUe9fr78Dz9PedQ5Sq0uluMWQhX9wwDQYJKoZIhvcNAQEL
-BQAwRTELMAkGA1UEBhMCSU4xEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
-GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMTAzMTcwOTMzMzZaFw0zMTAz
-MTUwOTMzMzZaMEUxCzAJBgNVBAYTAklOMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
-HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
-AQUAA4IBDwAwggEKAoIBAQCdNv0SX02aeZ4/Yc+p/Kwd5UVOHlpmK7/TVC/kcjFb
-doUuKNn8pnX/fyhgSKpUYut+te7YRiZhqlaLEZKjfy8GBZwXZnJCevFkTvGTTebX
-XExLIsLGfJqKeLAdFCQkX8wV3jV1DT5JLV+D5+HWaiiBr38gsl4ZbfyedTF40Jvz
-okCmcdlx9bpzX1j/b84L/zSwUyyEcgp5G28FJh5TnxAeDHJpOVjr8XMb/xoNqiDF
-6NwF96hvOZC14mZ1TxxW5bUzXprsy0l52pmhdN3Crz11+t2h519hRKHxT6/l5pTx
-/+dApXiP6hMV04CQJNnas3NyRxTDR9dNel+3+wD7/PRTAgMBAAGjUzBRMB0GA1Ud
-DgQWBBRkSY1AkGUpVNxG5fYocfgFODtQmTAfBgNVHSMEGDAWgBRkSY1AkGUpVNxG
-5fYocfgFODtQmTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBt
-D71VH7F8GOQXITFXCrHwEq1Fx3ScuSnL04NJrXw/e9huzLVQOchAYp/EIn4x2utN
-S31dt94wvi/IysOVbR1LatYNF5kKgGj2Wc6DH0PswBMk8R1G8QMeCz+hCjf1VDHe
-AAW1x2q20rJAvUrT6cRBQqeiMzQj0OaJbvfnd2hu0/d0DFkcuGVgBa2zlbG5rbdU
-Jnq7MQfSaZHd0uBgiKkS+Zw6XaYfWfByCAGSnUqRdOChiJ2stFVLvu+9oQ+PJjJt
-Er1u9bKTUyeuYpqXr2BP9dqphwu8R4NFVUg6DIRpMFMsybaL7KAd4hD22RXCvc0m
-uLu7KODi+eW62MHqs4N2
+MIICjDCCAfWgAwIBAgIURjeiJmkNbBVktqXvnXh44DKx364wDQYJKoZIhvcNAQEL
+BQAwVzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEQMA4GA1UEAwwHd3MudGVzdDAgFw0y
+MTA5MTQxMTU2NTZaGA8yMDgyMDgzMDExNTY1NlowVzELMAkGA1UEBhMCQVUxEzAR
+BgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5
+IEx0ZDEQMA4GA1UEAwwHd3MudGVzdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
+gYEA/TIfsXsW0a6cL3Ug+RT67wHiB3GLBhbQnc+uLG0HVfPd7h1+PBCqEgmUboHF
+DibILGZI+PE/9445in7z/4D2eKRudzsfc5Xx80mGxKY+7yPxbYWw1UnMmdRBoTbK
+KasVHw2e9Dgwq8wFKBuUdMts/VoAWn4roo88TumRExR2WoUCAwEAAaNTMFEwHQYD
+VR0OBBYEFHaV2ief8/Que1wxcZ8ACfdW7NUNMB8GA1UdIwQYMBaAFHaV2ief8/Qu
+e1wxcZ8ACfdW7NUNMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADgYEA
+XvDtaDLShrjS9huhRVoEdUtoBdhonmFpV3HXqRs7NdTuUWooXiph9a66GVSIfUCR
+iEaNOKF6OM0n7GLSDIrBeIWAxL9Ra/dFFwCxl+9wxg8yyzEJDBkAhXkrfp2b4Sx6
+wdK6xU2VOAxI0GUzwzjcyNl7RDFA3ayFaGl+9+oppWM=
 -----END CERTIFICATE-----
 """

@@ -86,3 +69,30 @@ suite "WebSocket transport":
         TLSCertificate.init(SecureCert),
         {TLSFlags.NoVerifyHost, TLSFlags.NoVerifyServerName}),
       "/ip4/0.0.0.0/tcp/0/wss")
+
+  asyncTest "Hostname verification":
+    let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0/wss").tryGet()
+    let transport1 = WsTransport.new(Upgrade(), TLSPrivateKey.init(SecureKey), TLSCertificate.init(SecureCert), {TLSFlags.NoVerifyHost})
+
+    await transport1.start(ma)
+    proc acceptHandler() {.async, gcsafe.} =
+      while true:
+        let conn = await transport1.accept()
+        if not isNil(conn):
+          await conn.close()
+
+    let handlerWait = acceptHandler()
+
+    # ws.test is in certificate
+    let conn = await transport1.dial("ws.test", transport1.ma)
+
+    await conn.close()
+
+    try:
+      let conn = await transport1.dial("ws.wronghostname", transport1.ma)
+      check false
+    except CatchableError as exc:
+      check true
+
+    await handlerWait.cancelAndWait()
+    await transport1.stop()