From 24c15157781faa8aca23e047a115c55768337162 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 10 Apr 2026 18:26:27 +0400 Subject: [PATCH 01/32] Add autonat conf --- storage/conf.nim | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/storage/conf.nim b/storage/conf.nim index d24fd3ea..406615e7 100644 --- a/storage/conf.nim +++ b/storage/conf.nim @@ -52,6 +52,8 @@ export DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval, DefaultBlockRetries +const DefaultNatScheduleInterval* = 5.minutes + type ThreadCount* = distinct Natural proc `==`*(a, b: ThreadCount): bool {.borrow.} @@ -286,6 +288,31 @@ type desc: "Logs to file", defaultValue: string.none, name: "log-file", hidden .}: Option[string] + natScheduleInterval* {. + desc: "Interval between AutoNAT reachability checks", + defaultValue: DefaultNatScheduleInterval, + defaultValueDesc: $DefaultNatScheduleInterval, + name: "nat-schedule-interval" + .}: Duration + + natNumPeersToAsk* {. + desc: "Number of peers to ask per AutoNAT round", + defaultValue: 3, + name: "nat-num-peers-to-ask" + .}: int + + natMaxQueueSize* {. + desc: "Number of past AutoNAT results kept to calculate confidence", + defaultValue: 3, + name: "nat-max-queue-size" + .}: int + + natMinConfidence* {. 
+ desc: "Minimum confidence threshold to confirm reachability", + defaultValue: 0.7, + name: "nat-min-confidence" + .}: float + func defaultAddress*(conf: StorageConf): IpAddress = result = static parseIpAddress("127.0.0.1") From 8110abcb9da7e552407252a6e43891411afd0336 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 10 Apr 2026 18:27:21 +0400 Subject: [PATCH 02/32] Better support for ivp6 and more tests --- storage/nat.nim | 7 ++- storage/utils/natutils.nim | 80 ++++++++++++++++++---------------- tests/storage/testnat.nim | 45 ++++++++++++++----- tests/storage/testnatutils.nim | 67 ++++++++++++++++++++++++++++ 4 files changed, 151 insertions(+), 48 deletions(-) create mode 100644 tests/storage/testnatutils.nim diff --git a/storage/nat.nim b/storage/nat.nim index 60dedc49..f11d9786 100644 --- a/storage/nat.nim +++ b/storage/nat.nim @@ -137,7 +137,12 @@ proc getRoutePrefSrc(bindIp: IpAddress): (Option[IpAddress], PrefSrcStatus) = let bindAddress = initTAddress(bindIp, Port(0)) if bindAddress.isAnyLocal(): - let ip = getRouteIpv4() + let ip = + if bindIp.family == IpAddressFamily.IPv6: + getRouteIpv6() + else: + getRouteIpv4() + if ip.isErr(): # No route was found, log error and continue without IP. 
error "No routable IP address found, check your network connection", diff --git a/storage/utils/natutils.nim b/storage/utils/natutils.nim index 45ad7589..37228236 100644 --- a/storage/utils/natutils.nim +++ b/storage/utils/natutils.nim @@ -1,6 +1,6 @@ {.push raises: [].} -import std/[net, tables, hashes], pkg/results, chronos, chronicles +import std/[net, tables, hashes, options], pkg/results, chronos, chronicles import pkg/libp2p @@ -10,32 +10,6 @@ type NatStrategy* = enum NatPmp NatNone -type IpLimits* = object - limit*: uint - ips: Table[IpAddress, uint] - -func hash*(ip: IpAddress): Hash = - case ip.family - of IpAddressFamily.IPv6: - hash(ip.address_v6) - of IpAddressFamily.IPv4: - hash(ip.address_v4) - -func inc*(ipLimits: var IpLimits, ip: IpAddress): bool = - let val = ipLimits.ips.getOrDefault(ip, 0) - if val < ipLimits.limit: - ipLimits.ips[ip] = val + 1 - true - else: - false - -func dec*(ipLimits: var IpLimits, ip: IpAddress) = - let val = ipLimits.ips.getOrDefault(ip, 0) - if val == 1: - ipLimits.ips.del(ip) - elif val > 1: - ipLimits.ips[ip] = val - 1 - func isGlobalUnicast*(address: TransportAddress): bool = if address.isGlobal() and address.isUnicast(): true else: false @@ -43,18 +17,11 @@ func isGlobalUnicast*(address: IpAddress): bool = let a = initTAddress(address, Port(0)) a.isGlobalUnicast() -proc getRouteIpv4*(): Result[IpAddress, cstring] = - # Avoiding Exception with initTAddress and can't make it work with static. - # Note: `publicAddress` is only used an "example" IP to find the best route, - # no data is send over the network to this IP! 
- let - publicAddress = TransportAddress( - family: AddressFamily.IPv4, address_v4: [1'u8, 1, 1, 1], port: Port(0) - ) - route = getBestRoute(publicAddress) +proc getRoute(publicAddress: TransportAddress): Result[IpAddress, cstring] = + let route = getBestRoute(publicAddress) if route.source.isUnspecified(): - err("No best ipv4 route found") + err("No best route found") else: let ip = try: @@ -64,3 +31,42 @@ proc getRouteIpv4*(): Result[IpAddress, cstring] = error "Address conversion error", exception = e.name, msg = e.msg return err("Invalid IP address") ok(ip) + +proc getRouteIpv4*(): Result[IpAddress, cstring] = + # Avoiding Exception with initTAddress and can't make it work with static. + # Note: `publicAddress` is only used an "example" IP to find the best route, + # no data is send over the network to this IP! + let publicAddress = TransportAddress( + family: AddressFamily.IPv4, address_v4: [1'u8, 1, 1, 1], port: Port(0) + ) + + return getRoute(publicAddress) + +proc getRouteIpv6*(): Result[IpAddress, cstring] = + # Note: `googleDnsIpv6` is only used as an "example" IP to find the best route, + # no data is sent over the network to this IP! + const googleDnsIpv6 = TransportAddress( + family: AddressFamily.IPv6, + # 2001:4860:4860::8888 + address_v6: [32'u8, 1, 72, 96, 72, 96, 0, 0, 0, 0, 0, 0, 0, 0, 136, 136], + port: Port(0), + ) + + return getRoute(googleDnsIpv6) + +# If bindIp is a anyLocal address (0.0.0.0 or ::), +# the function will find the best ip address. +# Otherwise, it will just return the ip as it is. 
+proc getBestLocalAddress*(bindIp: IpAddress): Option[IpAddress] = + let bindAddress = initTAddress(bindIp, Port(0)) + if bindAddress.isAnyLocal(): + let ip = + if bindIp.family == IpAddressFamily.IPv6: + getRouteIpv6() + else: + getRouteIpv4() + if ip.isOk(): + return some(ip.get()) + return none(IpAddress) + else: + return some(bindIp) diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index 21faa156..eb93c1ff 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -21,16 +21,18 @@ suite "NAT Address Tests": # Expected results let - expectedDiscoveryAddrs = @[ - MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), - ] - expectedlibp2pAddrs = @[ - MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), - ] + expectedDiscoveryAddrs = + @[ + MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), + MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), + MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), + ] + expectedlibp2pAddrs = + @[ + MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), + MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), + MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), + ] #ipv6Addr = MultiAddress.init("/ip6/::1/tcp/5000").expect("valid multiaddr") addrs = @[localAddr, anyAddr, publicAddr] @@ -41,3 +43,26 @@ suite "NAT Address Tests": # Verify results check(discoveryAddrs == expectedDiscoveryAddrs) check(libp2pAddrs == expectedlibp2pAddrs) + +suite "setupAddress": + test "public bind IP with NatNone returns bind IP": + let + bindIp = parseIpAddress("8.8.8.8") + natConfig = NatConfig(hasExtIp: 
false, nat: NatStrategy.NatNone) + (ip, tcpPort, udpPort) = + setupAddress(natConfig, bindIp, Port(5000), Port(5001), "test") + + check ip == some(bindIp) + check tcpPort == some(Port(5000)) + check udpPort == some(Port(5001)) + + test "private bind IP with NatNone returns no IP": + let + bindIp = parseIpAddress("192.168.1.1") + natConfig = NatConfig(hasExtIp: false, nat: NatStrategy.NatNone) + (ip, tcpPort, udpPort) = + setupAddress(natConfig, bindIp, Port(5000), Port(5001), "test") + + check ip == none(IpAddress) + check tcpPort == some(Port(5000)) + check udpPort == some(Port(5001)) diff --git a/tests/storage/testnatutils.nim b/tests/storage/testnatutils.nim new file mode 100644 index 00000000..fcc48561 --- /dev/null +++ b/tests/storage/testnatutils.nim @@ -0,0 +1,67 @@ +import std/[unittest, net, options] +import pkg/chronos +import ../../storage/utils/natutils + +suite "isGlobalUnicast": + test "localhost IPv4 is not global unicast": + check not isGlobalUnicast(parseIpAddress("127.0.0.1")) + + test "unspecified IPv4 is not global unicast": + check not isGlobalUnicast(parseIpAddress("0.0.0.0")) + + test "link-local IPv4 is not global unicast": + check not isGlobalUnicast(parseIpAddress("169.254.1.1")) + + test "private IPv4 is not global unicast": + check not isGlobalUnicast(parseIpAddress("10.0.0.1")) + + test "public IPv4 is global unicast": + check isGlobalUnicast(parseIpAddress("8.8.8.8")) + + test "localhost IPv6 is not global unicast": + check not isGlobalUnicast(parseIpAddress("::1")) + + test "unspecified IPv6 is not global unicast": + check not isGlobalUnicast(parseIpAddress("::")) + + test "link-local IPv6 is not global unicast": + check not isGlobalUnicast(parseIpAddress("fe80::1")) + + test "private IPv6 is not global unicast": + check not isGlobalUnicast(parseIpAddress("fc00::1")) + + test "public IPv6 is global unicast": + check isGlobalUnicast(parseIpAddress("2606:4700::1")) + +suite "getRoute": + test "getRouteIpv4 returns a valid IPv4": + let 
res = getRouteIpv4() + + check res.isOk + check res.get().family == IpAddressFamily.IPv4 + + test "getRouteIpv6 returns a valid IPv6": + let res = getRouteIpv6() + # If the machine does not have a global route because + # it is not configured for IPv6, the test will fail + # because it didn't find the best route. In that case, + # we can just skip the test, because it is not a problem + # with the test itself but the machine configuration. + if res.isErr: + check res.error == "No best route found" + else: + check res.get().family == IpAddressFamily.IPv6 + +suite "getBestLocalAddress": + test "specific IPv4 is returned as it is": + let ip = parseIpAddress("192.168.1.1") + check getBestLocalAddress(ip) == some(ip) + + test "specific IPv6 is returned as it is": + let ip = parseIpAddress("2606:4700::1") + check getBestLocalAddress(ip) == some(ip) + + test "0.0.0.0 resolves to a local IPv4": + let res = getBestLocalAddress(parseIpAddress("0.0.0.0")) + check res.isSome + check res.get().family == IpAddressFamily.IPv4 From 4d9eb9c34a73bf9b94d8aba9d179358fedbbf60a Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 10 Apr 2026 18:28:21 +0400 Subject: [PATCH 03/32] Autonat integration and reachability info in debug api --- storage/rest/api.nim | 12 ++++++++-- storage/storage.nim | 55 ++++++++++++++++++++++++++++++++++++++------ 2 files changed, 58 insertions(+), 9 deletions(-) diff --git a/storage/rest/api.nim b/storage/rest/api.nim index 865591fc..af4b009e 100644 --- a/storage/rest/api.nim +++ b/storage/rest/api.nim @@ -23,6 +23,7 @@ import pkg/confutils import pkg/libp2p import pkg/libp2p/routing_record +import pkg/libp2p/protocols/connectivity/autonat/service import pkg/codexdht/discv5/spr as spr import ../logutils @@ -557,7 +558,12 @@ proc initNodeApi(node: StorageNodeRef, conf: StorageConf, router: var RestRouter return RestApiResponse.error(Http500, "Unknown error dialling peer", headers = headers) -proc initDebugApi(node: StorageNodeRef, conf: StorageConf, router: var 
RestRouter) = +proc initDebugApi( + node: StorageNodeRef, + conf: StorageConf, + autonat: AutonatService, + router: var RestRouter, +) = let allowedOrigin = router.allowedOrigin router.api(MethodGet, "/api/storage/v1/debug/info") do() -> RestApiResponse: @@ -577,6 +583,7 @@ proc initDebugApi(node: StorageNodeRef, conf: StorageConf, router: var RestRoute "announceAddresses": node.discovery.announceAddrs, "table": table, "storage": {"version": $storageVersion, "revision": $storageRevision}, + "nat": {"reachability": $autonat.networkReachability}, } # return pretty json for human readability @@ -637,12 +644,13 @@ proc initRestApi*( node: StorageNodeRef, conf: StorageConf, repoStore: RepoStore, + autonat: AutonatService, corsAllowedOrigin: ?string, ): RestRouter = var router = RestRouter.init(validate, corsAllowedOrigin) initDataApi(node, repoStore, router) initNodeApi(node, conf, router) - initDebugApi(node, conf, router) + initDebugApi(node, conf, autonat, router) return router diff --git a/storage/storage.nim b/storage/storage.nim index f332f530..0b3f08e7 100644 --- a/storage/storage.nim +++ b/storage/storage.nim @@ -17,6 +17,7 @@ import pkg/chronos import pkg/taskpools import pkg/presto import pkg/libp2p +import pkg/libp2p/protocols/connectivity/autonat/[service, client] import pkg/confutils import pkg/confutils/defs import pkg/stew/io2 @@ -51,6 +52,7 @@ type repoStore: RepoStore maintenance: BlockMaintainer taskpool: Taskpool + autonatService*: AutonatService isStarted: bool StoragePrivateKey* = libp2p.PrivateKey # alias @@ -76,9 +78,25 @@ proc start*(s: StorageServer) {.async.} = await s.storageNode.switch.start() - let (announceAddrs, discoveryAddrs) = nattedAddress( - s.config.nat, s.storageNode.switch.peerInfo.addrs, s.config.discoveryPort - ) + let announceIp = + if s.config.nat.hasExtIp: + some(s.config.nat.extIp) + else: + getBestLocalAddress(s.config.listenIp) + + if announceIp.isNone: + # We should have an IP, even at private IP + raise 
newException(StorageError, "Unable to determine an IP address to announce") + + # Remap switch addresses to the resolved IP (replaces 0.0.0.0 or :: with the actual address), + # keeping unique entries only. + let announceAddrs = s.storageNode.switch.peerInfo.addrs + .mapIt(it.remapAddr(ip = announceIp, port = none(Port))) + .deduplicate() + let discoveryAddrs = + @[getMultiAddrWithIPAndUDPPort(announceIp.get, s.config.discoveryPort)] + s.storageNode.discovery.updateDhtRecord(discoveryAddrs) + s.storageNode.discovery.updateAnnounceRecord(announceAddrs) var hasPublicAddr = false for announceAddr in announceAddrs: @@ -90,9 +108,6 @@ proc start*(s: StorageServer) {.async.} = if not hasPublicAddr: warn "Unable to determine a public IP address. This node will only be reachable on a private network." - s.storageNode.discovery.updateAnnounceRecord(announceAddrs) - s.storageNode.discovery.updateDhtRecord(discoveryAddrs) - await s.storageNode.start() if s.restServer != nil: @@ -171,6 +186,16 @@ proc new*( ## create StorageServer including setting up datastore, repostore, etc let listenMultiAddr = getMultiAddrWithIpAndTcpPort(config.listenIp, config.listenPort) + let autonatService = AutonatService.new( + autonatClient = AutonatClient.new(), + rng = random.Rng.instance(), + scheduleInterval = Opt.some(config.natScheduleInterval), + askNewConnectedPeers = true, + numPeersToAsk = config.natNumPeersToAsk, + maxQueueSize = config.natMaxQueueSize, + minConfidence = config.natMinConfidence, + ) + let switch = SwitchBuilder .new() .withPrivateKey(privateKey) @@ -182,6 +207,8 @@ proc new*( .withAgentVersion(config.agentString) .withSignedPeerRecord(true) .withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay}) + .withAutonat() + .withServices(@[Service(autonatService)]) .build() var @@ -290,7 +317,9 @@ proc new*( if config.apiBindAddress.isSome: restServer = RestServerRef .new( - storageNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin), + 
storageNode.initRestApi( + config, repoStore, autonatService, config.apiCorsAllowedOrigin + ), initTAddress(config.apiBindAddress.get(), config.apiPort), bufferSize = (1024 * 64), maxRequestBodySize = int.high, @@ -300,6 +329,17 @@ proc new*( switch.mount(network) switch.mount(manifestProto) + autonatService.statusAndConfidenceHandler( + proc( + networkReachability: NetworkReachability, confidence: Opt[float] + ) {.async: (raises: [CancelledError]).} = + if networkReachability == NotReachable: + let (announceAddrs, discoveryAddrs) = + nattedAddress(config.nat, switch.peerInfo.addrs, config.discoveryPort) + discovery.updateAnnounceRecord(announceAddrs) + discovery.updateDhtRecord(discoveryAddrs) + ) + StorageServer( config: config, storageNode: storageNode, @@ -308,4 +348,5 @@ proc new*( maintenance: maintenance, taskPool: taskPool, logFile: logFile, + autonatService: autonatService, ) From 0f3e9f1e75fce330ba340eaf6e299a61fef4dd86 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 10 Apr 2026 18:28:37 +0400 Subject: [PATCH 04/32] Autonat tests --- tests/integration/1_minute/testnat.nim | 44 ++++++++++++++++++++++++++ tests/integration/storageclient.nim | 43 ++++++++++++++++++------- tests/integration/storageconfig.nim | 41 ++++++++++++++++++++++++ 3 files changed, 117 insertions(+), 11 deletions(-) create mode 100644 tests/integration/1_minute/testnat.nim diff --git a/tests/integration/1_minute/testnat.nim b/tests/integration/1_minute/testnat.nim new file mode 100644 index 00000000..d7b9288f --- /dev/null +++ b/tests/integration/1_minute/testnat.nim @@ -0,0 +1,44 @@ +import std/json +import std/options +import std/sequtils +import pkg/chronos +import pkg/questionable/results + +import ../multinodes +import ../storageclient +import ../storageconfig + +multinodesuite "AutoNAT integration": + let natConfig = NodeConfigs( + clients: StorageConfigs + .init(nodes = 2) + .withNatNumPeersToAsk(1) + .withNatMinConfidence(0.5) + .withNatScheduleInterval(10.seconds) + 
.withNatMaxQueueSize(1) + .withLogFile() + .withLogLevel("DEBUG").some + ) + + # Reminder: multinodesuite setup the first node as bootstrap node + test "node is reachable when using bootstrap node on same network", natConfig: + let node1 = clients()[0] + let node2 = clients()[1] + + # Temporary + # DHT exposes only UDP information + # So we force temporary by connection the 2 node together + # to update the autonat reachability + let info = await node2.client.info() + + check not info.isErr + + await node1.client.connectPeer( + info.get()["id"].getStr(), info.get()["addrs"].getElems().mapIt(it.getStr()) + ) + + check eventuallySafe( + (await node1.client.natReachability()).get() == "Reachable", + timeout = 30_000, + pollInterval = 500, + ) diff --git a/tests/integration/storageclient.nim b/tests/integration/storageclient.nim index ec990bb9..c6ef5be0 100644 --- a/tests/integration/storageclient.nim +++ b/tests/integration/storageclient.nim @@ -1,4 +1,5 @@ import std/strutils +import std/sequtils from pkg/libp2p import Cid, `$`, init import pkg/questionable/results @@ -32,17 +33,17 @@ proc request( async: (raw: true, raises: [CancelledError, HttpError]) .} = HttpClientRequestRef - .new( - self.session, - url, - httpMethod, - version = HttpVersion11, - flags = {}, - maxResponseHeadersSize = HttpMaxHeadersSize, - headers = headers, - body = body.toOpenArrayByte(0, len(body) - 1), - ).get - .send() + .new( + self.session, + url, + httpMethod, + version = HttpVersion11, + flags = {}, + maxResponseHeadersSize = HttpMaxHeadersSize, + headers = headers, + body = body.toOpenArrayByte(0, len(body) - 1), + ).get + .send() proc post*( self: StorageClient, @@ -260,3 +261,23 @@ proc hasBlockRaw*( .} = let url = client.baseurl & "/data/" & cid & "/exists" return client.get(url) + +proc connectPeer*( + client: StorageClient, peerId: string, addrs: seq[string] +): Future[void] {.async: (raises: [CancelledError, HttpError]).} = + var url = client.baseurl & "/connect/" & peerId + if 
addrs.len > 0: + url &= "?" & addrs.mapIt("addrs=" & it).join("&") + let response = await client.get(url) + assert response.status == 200 + +proc natReachability*( + client: StorageClient +): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} = + let info = await client.info() + if info.isErr: + return failure "Failed to get node info" + try: + return info.get()["nat"]["reachability"].getStr().success + except KeyError as e: + return failure e.msg diff --git a/tests/integration/storageconfig.nim b/tests/integration/storageconfig.nim index 4aeb6d60..240d44a2 100644 --- a/tests/integration/storageconfig.nim +++ b/tests/integration/storageconfig.nim @@ -5,6 +5,7 @@ import std/strutils import std/sugar import std/tables from pkg/chronicles import LogLevel +import pkg/chronos import pkg/storage/conf import pkg/storage/units import pkg/confutils @@ -280,3 +281,43 @@ proc withStorageQuota*( for config in startConfig.configs.mitems: config.addCliOption("--storage-quota", $quota) return startConfig + +proc withListenIp*( + self: StorageConfigs, ip: string +): StorageConfigs {.raises: [StorageConfigError].} = + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--listen-ip", ip) + return startConfig + +proc withNatNumPeersToAsk*( + self: StorageConfigs, numPeersToAsk: int +): StorageConfigs {.raises: [StorageConfigError].} = + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--nat-num-peers-to-ask", $numPeersToAsk) + return startConfig + +proc withNatMaxQueueSize*( + self: StorageConfigs, maxQueueSize: int +): StorageConfigs {.raises: [StorageConfigError].} = + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--nat-max-queue-size", $maxQueueSize) + return startConfig + +proc withNatMinConfidence*( + self: StorageConfigs, minConfidence: float +): StorageConfigs {.raises: [StorageConfigError].} = + var startConfig = self + for config in 
startConfig.configs.mitems: + config.addCliOption("--nat-min-confidence", $minConfidence) + return startConfig + +proc withNatScheduleInterval*( + self: StorageConfigs, scheduleInterval: Duration +): StorageConfigs {.raises: [StorageConfigError].} = + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--nat-schedule-interval", $scheduleInterval) + return startConfig From f21299d080b52eadda92e7fdb52bba51ee7b6eda Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 10 Apr 2026 19:43:50 +0400 Subject: [PATCH 05/32] Add reachability to debug info in lib --- .claude/settings.local.json | 23 ++ .../requests/node_debug_request.nim | 2 + nat-hackmd.md | 235 ++++++++++++++++++ 3 files changed, 260 insertions(+) create mode 100644 .claude/settings.local.json create mode 100644 nat-hackmd.md diff --git a/.claude/settings.local.json b/.claude/settings.local.json new file mode 100644 index 00000000..4494ff72 --- /dev/null +++ b/.claude/settings.local.json @@ -0,0 +1,23 @@ +{ + "permissions": { + "allow": [ + "Bash(grep -rn \"testnat\" /home/arnaud/Work/logos/logos-storage-nim/*.nims)", + "Bash(grep:*)", + "Bash(find:*)", + "Bash(curl -s http://127.0.0.1:8080/api/storage/v1/debug/info)", + "Bash(ls /home/arnaud/Work/logos/logos-storage-nim/tests/*.cfg /home/arnaud/Work/logos/logos-storage-nim/tests/*.nim)", + "Bash(nph --version)", + "Bash(make print-nph-path:*)", + "Bash(vendor/nimbus-build-system/vendor/Nim/bin/nph:*)", + "Bash(nimble build:*)", + "Bash(nim --version)", + "Bash(nimble --version)", + "Bash(/home/arnaud/.nimble/bin/nim --version)", + "Bash(make -n build-nph)", + "Bash(make build-nph:*)", + "Bash(/home/arnaud/.nimble/bin/nim c:*)", + "Bash(NIMFLAGS=\"--skipParentCfg\" nimble build)", + "Bash(NIMFLAGS=\"--skipParentCfg\" nimble build --verbose)" + ] + } +} diff --git a/library/storage_thread_requests/requests/node_debug_request.nim b/library/storage_thread_requests/requests/node_debug_request.nim index 8bf3106c..fe9bd389 100644 
--- a/library/storage_thread_requests/requests/node_debug_request.nim +++ b/library/storage_thread_requests/requests/node_debug_request.nim @@ -9,6 +9,7 @@ import std/[options] import chronos import chronicles import codexdht/discv5/spr +import pkg/libp2p/protocols/connectivity/autonat/service import ../../alloc import ../../../storage/conf import ../../../storage/rest/json @@ -59,6 +60,7 @@ proc getDebug( if node.discovery.dhtRecord.isSome: node.discovery.dhtRecord.get.toURI else: "", "announceAddresses": node.discovery.announceAddrs, "table": table, + "nat": {"reachability": $storage[].autonatService.networkReachability}, } return ok($json) diff --git a/nat-hackmd.md b/nat-hackmd.md new file mode 100644 index 00000000..570d21f4 --- /dev/null +++ b/nat-hackmd.md @@ -0,0 +1,235 @@ +# NAT Traversal + +## Context + +A logos-storage-nim node needs to tell other peers how to reach it. This is harder than it sounds: most nodes are behind a NAT or a firewall. From the outside, the node looks unreachable. + +UPnP / NAT-PMP is already implemented in `nat.nim`. This document describes how we build on top of it using libp2p's AutoNAT to implement a full NAT traversal strategy. + +--- + +## Overview + +```mermaid +flowchart TD + A[Startup] --> B[Step 1: Collect address candidates] + B --> C[Step 2: AutoNAT] + C --> D{Result?} + D -- Reachable --> E[DHT server mode and announce direct address] + D -- Not Reachable --> F[Step 3: UPnP / NAT-PMP] + F --> G{Mapping OK?} + G -- Yes --> E + G -- No --> H[Step 4: AutoRelayService] + H --> E2[DHT client mode and announce relay address] + E --> I[AutoNAT every 5min] + E2 --> I + I --> D + F -- UPnp recheck --> F +``` + +--- + +## Step 1: Collecting address candidates + +At startup, the node builds a list of IP / port pairs it could announce to other peers. + +### IP addresses + +If `--listen-ip` is set to a specific address, only that address is used. 
Otherwise the node scans its network interfaces: + +| `--listen-ip` | What gets collected | +| --- | --- | +| `0.0.0.0` (default) | all local IPv4 addresses | +| `::` | all local IPv4 and IPv6 addresses | + +### Port + +The TCP port comes from `--listen-port`. If not set, a random free port is picked at startup. + +--- + +### Initial announcement + +At startup, the node resolves its initial addresses from the routing table and announces them. If `--nat:extip` is set, the static external IP is announced directly instead. + +No UPnP or NAT-PMP is attempted at this stage. That happens later in Step 3 if AutoNAT reports the node is not reachable. + +The node starts in DHT client mode. AutoNAT (Step 2) runs in the background to check if the node is reachable from the outside, and switches to server mode if confirmed. + +This ensures connectivity from the start, whether on a local or public network. If an address turns out to be unreachable, AutoNAT will detect it and update the announced addresses accordingly. + +--- + +### IPv6 specifics + +- Do not run UPnP or NAT-PMP on IPv6 addresses: they are directly routable, no port mapping needed. A node with a stable global IPv6 address can skip AutoNAT, UPnP, and relay entirely for that address. +- Some IPv6 addresses are temporary and change over time. If we announce one and it changes, the node becomes unreachable before the DHT records expire. We must only announce the stable address. +- chronos has no support for distinguishing stable addresses from temporary ones. chronos needs to be updated to expose address stability flags. 
+ +--- + +### Initial DHT mode + +The DHT has two modes: + +- **Server mode**: the node appears in the routing tables of other nodes and answers their discovery requests ([`handleFindNode`](https://github.com/logos-storage/logos-storage-nim-dht/blob/6c7de036224724b064dcaa6b1d898be1c6d03242/codexdht/private/eth/p2p/discoveryv5/protocol.nim#L338), [`handlePing`](https://github.com/logos-storage/logos-storage-nim-dht/blob/6c7de036224724b064dcaa6b1d898be1c6d03242/codexdht/private/eth/p2p/discoveryv5/protocol.nim#L330)). Other nodes use it to route their own lookups. +- **Client mode**: the node can query the DHT and publish provider records (telling the network "I have this content at this address"), but it ignores inbound routing requests and does not appear in routing tables. A node that is not directly reachable must stay in client mode: if it were in server mode, other nodes would try to contact it, get timeouts, and that would degrade the DHT for everyone. + +The node starts in client mode. It switches to server mode only after AutoNAT confirms it is reachable (Step 2). + +--- + +## Step 2: AutoNAT + +After collecting and filtering addresses (Step 1), the node needs to check if it is actually reachable from the outside. It does this using AutoNAT: it asks a few connected peers to try to connect back to it. Bootstrap nodes are the first peers available for this, as they are dialed at startup. + +**Note:** AutoNAT tests TCP reachability via the libp2p switch, we infer UDP reachability from it. + +### How it works + +The node asks a few peers: "try to connect to me at this address". Each peer attempts the connection and reports success or failure. The results are collected and a confidence score is calculated. When confidence crosses a threshold, the node is marked `Reachable` or `NotReachable`. 
+ +### Reference implementation + +logos-delivery already has a minimal setup at [`waku/discovery/autonat_service.nim`](https://github.com/logos-messaging/logos-delivery/blob/0b86093247da92060c503544d39e5d0a23922c15/waku/discovery/autonat_service.nim): + +```nim +AutonatService.new( + autonatClient = AutonatClient.new(), + rng = rng, + scheduleInterval = Opt.some(30.seconds), # logos-storage uses 5min + askNewConnectedPeers = true, # triggers a check on first bootstrap connection, avoids waiting 5min at startup + numPeersToAsk = 3, + maxQueueSize = 3, + minConfidence = 0.7, +) +``` + +### Parameters + +| Parameter | logos-delivery | logos-storage | libp2p default | Notes | +| --- | --- | --- | --- | --- | +| `numPeersToAsk` | 3 | 3 | 5 | how many peers are asked per round | +| `maxQueueSize` | 3 | 3 | 10 | how many past results are kept to calculate confidence | +| `minConfidence` | 0.7 | 0.7 | 0.3 | fraction of successful answers needed to confirm a state | +| `scheduleInterval` | 30s | 5min | none | reachability changes rarely in a storage network | +| `askNewConnectedPeers` | false | true | true | triggers a check on first bootstrap connection | + +logos-delivery uses a higher confidence threshold (0.7 vs 0.3) and a smaller history window (3 vs 10): fewer samples but a stricter bar to confirm reachability. + +libp2p default values are available [here](https://github.com/vacp2p/nim-libp2p/blob/e82080f7b1aa61c6d35fa5311b873f41eff4bb52/libp2p/protocols/connectivity/autonat/service.nim#L60-L64). + +## Step 3: UPnP / NAT-PMP (fallback) + +If AutoNAT says the node is not reachable, we try to ask the router to open a port using UPnP or NAT-PMP already implemented. 
+ +### What already exists + +The implementation is already in `nat.nim`: +- [`getExternalIP()`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L67): tries UPnP first, then NAT-PMP, returns the external IP +- [`redirectPorts()`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L305): creates the port mapping on the router +- [`nattedAddress()`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L400): calls both and returns the updated addresses to announce + +### The problem + +[`nattedAddress()` is called unconditionally at startup](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/storage.nim#L79), before AutoNAT has run. It should only be called when AutoNAT returns `NotReachable`. + +### What needs to change + +Move the `nattedAddress()` call out of `start()` and into the AutoNAT `statusAndConfidenceHandler`: + +```nim +of NotReachable: + let (announceAddrs, discoveryAddrs) = nattedAddress( + config.nat, switch.peerInfo.addrs, config.discoveryPort + ) + discovery.updateAnnounceRecord(announceAddrs) + discovery.updateDhtRecord(discoveryAddrs) +``` + +`nattedAddress()` opens a port on the router and starts [`repeatPortMapping`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L231) in the background to renew it every [`20 min`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L28) (routers forget the mapping on reboot for UPnP, or after [`1 hour`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L29) for NAT-PMP). It only needs to be called once, if it fails, the node falls back to relay immediately. 
+ +### States + +The `statusAndConfidenceHandler` needs to track what has already been tried to avoid re-running UPnP on every AutoNAT cycle: + +- `Unknown` + - `NotReachable`: try UPnP + - `Reachable`: switch DHT to server +- `UPnP` + - `NotReachable`: switch DHT to client, start relay + - `Reachable`: switch DHT to server + - background: nat.nim renews the mapping every 20 min and fires `onMappingRestored` on restoration +- `Relay` + - `NotReachable`: do nothing + - if `hasExtIp: true`: start a background task that periodically asks a peer to dial the static IP. On success, fires the same `onMappingRestored` callback + +When applying a state transition: always update announce records first, then change DHT mode, then start or stop the relay. If you set server mode before publishing the new addresses, peers will try to contact you before your records are up to date. + +**Note:** nim-libp2p's AutoNAT [filters out relay addresses](https://github.com/vacp2p/nim-libp2p/blob/e82080f7b1aa61c6d35fa5311b873f41eff4bb52/libp2p/protocols/connectivity/autonat/server.nim#L122-L126) before attempting dial-back. A node behind relay will always get `NotReachable` from AutoNAT. + +## Step 4: Relay and hole punching + +We do not use `HPService` because it starts the relay immediately on `NotReachable`, bypassing the UPnP step. Instead we wire `AutonatService` and `AutoRelayService` directly and control the relay from our own `statusAndConfidenceHandler`. + +A node in relay mode can receive inbound connections via the relay, so other peers can download data from it. The relay server acts as a middleman: the node connects to it, reserves a slot, and gets a relay address of the form `/ip4//tcp//p2p//p2p-circuit/p2p/`. It publishes this address in its DHT provider records so peers looking for its content can find and connect to it. It stays in DHT client mode (see Step 1): it can publish provider records but cannot act as a routing hop. 
+ +Bootstrap nodes serve as the initial relay servers. `AutoRelayService` finds relay candidates among connected peers, so bootstrap nodes are the first ones used. + +### Setup + +```nim +let relayClient = RelayClient.new() +let autoRelayService = AutoRelayService.new(2, relayClient, onReservation, rng) + +let switch = SwitchBuilder + .new() + ... + .withServices(@[Service(autonatService)]) + .build() +``` + +The `onReservation` callback updates DHT addresses when relay reservations change: + +```nim +proc onReservation(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} = + discovery.updateAnnounceRecord(addresses) + discovery.updateDhtRecord(addresses) +``` + +`updateAnnounceRecord` updates the libp2p peer record (multiaddr, used for content routing). `updateDhtRecord` updates the discv5 node record (SPR, used for peer discovery). Both replace the current set of addresses, they do not accumulate. + +### Mapping restored callback + +`repeatPortMapping` runs in the background even in `Relay` state, this is how we detect when the mapping comes back. It needs a callback added so we can stop the relay and update addresses when it fires. The external IP may have changed after a router reboot, so we always update regardless. + +```nim +proc onMappingRestored(addrs: seq[MultiAddress]) = + if natState == Relay: + await autoRelayService.stop(switch) + natState = UPnP + # addrs may differ from previously announced ones (e.g. router reboot changed external IP) + discovery.updateAnnounceRecord(addrs) + discovery.updateDhtRecord(addrs) +``` + +### Hole punching + +When a peer connects through a relay, libp2p automatically tries to establish a direct connection using dcutr (a protocol for hole punching). If it works, that specific connection bypasses the relay: lower latency and less load on the relay server. This does not change the node's reachability: new peers still need to go through the relay first. 
+ +## Step 5: Periodic Re-evaluation + +`AutonatService` re-runs every 5 minutes. On each cycle it asks `numPeersToAsk` peers to dial back. If confidence crosses `minConfidence`, `statusAndConfidenceHandler` fires and our handler updates DHT mode, relay state, and announced addresses. The `onMappingRestored` callback from `nat.nim` can also fire independently between cycles. + +## What needs to be implemented + +- DHT client/server mode in `discovery.nim`: the node currently starts as a DHT server unconditionally +- Move `nattedAddress()` call out of `start()` and into the AutoNAT `statusAndConfidenceHandler` +- Expose a `onRestored` callback in `nat.nim`'s `repeatPortMapping` +- For `hasExtIp: true` (manual port forwarding): add a background task that periodically asks a peer to dial the static IP directly. If it succeeds, fire the same `onMappingRestored` callback. Without this, a node in `Relay` state with a restored manual port forwarding has no way to recover automatically. +- `statusAndConfidenceHandler` with `natState` tracking +- chronos needs to be updated to expose IPv6 address stability flags to distinguish stable SLAAC addresses from temporary ones + +## Open questions + +- [ ] Mobile nodes: UDP is often blocked on cellular networks. discv5 is UDP-only. How do we support mobile participation? 
From cb67eb1d6087a5a80712707d964818c15522e33c Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 13 Apr 2026 10:06:19 +0400 Subject: [PATCH 06/32] Fix format --- tests/integration/storageclient.nim | 22 +++++++++++----------- tests/storage/testnat.nim | 22 ++++++++++------------ 2 files changed, 21 insertions(+), 23 deletions(-) diff --git a/tests/integration/storageclient.nim b/tests/integration/storageclient.nim index c6ef5be0..62d72917 100644 --- a/tests/integration/storageclient.nim +++ b/tests/integration/storageclient.nim @@ -33,17 +33,17 @@ proc request( async: (raw: true, raises: [CancelledError, HttpError]) .} = HttpClientRequestRef - .new( - self.session, - url, - httpMethod, - version = HttpVersion11, - flags = {}, - maxResponseHeadersSize = HttpMaxHeadersSize, - headers = headers, - body = body.toOpenArrayByte(0, len(body) - 1), - ).get - .send() + .new( + self.session, + url, + httpMethod, + version = HttpVersion11, + flags = {}, + maxResponseHeadersSize = HttpMaxHeadersSize, + headers = headers, + body = body.toOpenArrayByte(0, len(body) - 1), + ).get + .send() proc post*( self: StorageClient, diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index eb93c1ff..08d87c9c 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -21,18 +21,16 @@ suite "NAT Address Tests": # Expected results let - expectedDiscoveryAddrs = - @[ - MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), - ] - expectedlibp2pAddrs = - @[ - MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), - ] + expectedDiscoveryAddrs = @[ + MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), + 
MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), + MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), + ] + expectedlibp2pAddrs = @[ + MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), + MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), + MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), + ] #ipv6Addr = MultiAddress.init("/ip6/::1/tcp/5000").expect("valid multiaddr") addrs = @[localAddr, anyAddr, publicAddr] From e98abb26ead302d5d30cbc8280e46288ba4960fe Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 13 Apr 2026 11:04:21 +0400 Subject: [PATCH 07/32] Debug macos error --- storage/utils/natutils.nim | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/utils/natutils.nim b/storage/utils/natutils.nim index 37228236..6a7984d3 100644 --- a/storage/utils/natutils.nim +++ b/storage/utils/natutils.nim @@ -28,6 +28,7 @@ proc getRoute(publicAddress: TransportAddress): Result[IpAddress, cstring] = route.source.address() except ValueError as e: # This should not occur really. 
+ echo "Address conversion error: ", e.name, " ", e.msg error "Address conversion error", exception = e.name, msg = e.msg return err("Invalid IP address") ok(ip) From 0bbe60980292447fe48959f66924e4fbe7d236b6 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 13 Apr 2026 11:22:14 +0400 Subject: [PATCH 08/32] Fix no route detection when AddressFamily is non --- storage/utils/natutils.nim | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/storage/utils/natutils.nim b/storage/utils/natutils.nim index 6a7984d3..fa63a9cc 100644 --- a/storage/utils/natutils.nim +++ b/storage/utils/natutils.nim @@ -20,7 +20,7 @@ func isGlobalUnicast*(address: IpAddress): bool = proc getRoute(publicAddress: TransportAddress): Result[IpAddress, cstring] = let route = getBestRoute(publicAddress) - if route.source.isUnspecified(): + if route.source == AddressFamily.None or route.source.isUnspecified(): err("No best route found") else: let ip = @@ -28,7 +28,6 @@ proc getRoute(publicAddress: TransportAddress): Result[IpAddress, cstring] = route.source.address() except ValueError as e: # This should not occur really. 
- echo "Address conversion error: ", e.name, " ", e.msg error "Address conversion error", exception = e.name, msg = e.msg return err("Invalid IP address") ok(ip) From dfb5b6537fbf7caa3081914e417f90fabc4c46ca Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 13 Apr 2026 11:38:23 +0400 Subject: [PATCH 09/32] Fix route family condition --- storage/utils/natutils.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/utils/natutils.nim b/storage/utils/natutils.nim index fa63a9cc..04447d26 100644 --- a/storage/utils/natutils.nim +++ b/storage/utils/natutils.nim @@ -20,7 +20,7 @@ func isGlobalUnicast*(address: IpAddress): bool = proc getRoute(publicAddress: TransportAddress): Result[IpAddress, cstring] = let route = getBestRoute(publicAddress) - if route.source == AddressFamily.None or route.source.isUnspecified(): + if route.source.family == AddressFamily.None or route.source.isUnspecified(): err("No best route found") else: let ip = From 8e72d007c55d1627a452e86e933a85504689f63d Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 13 Apr 2026 17:46:42 +0400 Subject: [PATCH 10/32] Fix testnat import --- tests/storage/testnat.nim | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index 08d87c9c..71155540 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -5,6 +5,7 @@ import pkg/results import ../../storage/nat import ../../storage/utils +import ../../storage/utils/natutils suite "NAT Address Tests": test "nattedAddress with local addresses": From a3d76cb91156550897a6a0f4bc31c89fb9f21949 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 15 Apr 2026 09:07:14 +0400 Subject: [PATCH 11/32] Remove doc --- nat-hackmd.md | 235 -------------------------------------------------- 1 file changed, 235 deletions(-) delete mode 100644 nat-hackmd.md diff --git a/nat-hackmd.md b/nat-hackmd.md deleted file mode 100644 index 570d21f4..00000000 --- a/nat-hackmd.md +++ /dev/null @@ -1,235 +0,0 @@ -# 
NAT Traversal - -## Context - -A logos-storage-nim node needs to tell other peers how to reach it. This is harder than it sounds: most nodes are behind a NAT or a firewall. From the outside, the node looks unreachable. - -UPnP / NAT-PMP is already implemented in `nat.nim`. This document describes how we build on top of it using libp2p's AutoNAT to implement a full NAT traversal strategy. - ---- - -## Overview - -```mermaid -flowchart TD - A[Startup] --> B[Step 1: Collect address candidates] - B --> C[Step 2: AutoNAT] - C --> D{Result?} - D -- Reachable --> E[DHT server mode and announce direct address] - D -- Not Reachable --> F[Step 3: UPnP / NAT-PMP] - F --> G{Mapping OK?} - G -- Yes --> E - G -- No --> H[Step 4: AutoRelayService] - H --> E2[DHT client mode and announce relay address] - E --> I[AutoNAT every 5min] - E2 --> I - I --> D - F -- UPnp recheck --> F -``` - ---- - -## Step 1: Collecting address candidates - -At startup, the node builds a list of IP / port pairs it could announce to other peers. - -### IP addresses - -If `--listen-ip` is set to a specific address, only that address is used. Otherwise the node scans its network interfaces: - -| `--listen-ip` | What gets collected | -| --- | --- | -| `0.0.0.0` (default) | all local IPv4 addresses | -| `::` | all local IPv4 and IPv6 addresses | - -### Port - -The TCP port comes from `--listen-port`. If not set, a random free port is picked at startup. - ---- - -### Initial announcement - -At startup, the node resolves its initial addresses from the routing table and announces them. If `--nat:extip` is set, the static external IP is announced directly instead. - -No UPnP or NAT-PMP is attempted at this stage. That happens later in Step 3 if AutoNAT reports the node is not reachable. - -The node starts in DHT client mode. AutoNAT (Step 2) runs in the background to check if the node is reachable from the outside, and switches to server mode if confirmed. 
- -This ensures connectivity from the start, whether on a local or public network. If an address turns out to be unreachable, AutoNAT will detect it and update the announced addresses accordingly. - ---- - -### IPv6 specifics - -- Do not run UPnP or NAT-PMP on IPv6 addresses: they are directly routable, no port mapping needed. A node with a stable global IPv6 address can skip AutoNAT, UPnP, and relay entirely for that address. -- Some IPv6 addresses are temporary and change over time. If we announce one and it changes, the node becomes unreachable before the DHT records expire. We must only announce the stable address. -- chronos has no support for distinguishing stable addresses from temporary ones. chronos needs to be updated to expose address stability flags. - ---- - -### Initial DHT mode - -The DHT has two modes: - -- **Server mode**: the node appears in the routing tables of other nodes and answers their discovery requests ([`handleFindNode`](https://github.com/logos-storage/logos-storage-nim-dht/blob/6c7de036224724b064dcaa6b1d898be1c6d03242/codexdht/private/eth/p2p/discoveryv5/protocol.nim#L338), [`handlePing`](https://github.com/logos-storage/logos-storage-nim-dht/blob/6c7de036224724b064dcaa6b1d898be1c6d03242/codexdht/private/eth/p2p/discoveryv5/protocol.nim#L330)). Other nodes use it to route their own lookups. -- **Client mode**: the node can query the DHT and publish provider records (telling the network "I have this content at this address"), but it ignores inbound routing requests and does not appear in routing tables. A node that is not directly reachable must stay in client mode: if it were in server mode, other nodes would try to contact it, get timeouts, and that would degrade the DHT for everyone. - -The node starts in client mode. It switches to server mode only after AutoNAT confirms it is reachable (Step 2). 
- ---- - -## Step 2: AutoNAT - -After collecting and filtering addresses (Step 1), the node needs to check if it is actually reachable from the outside. It does this using AutoNAT: it asks a few connected peers to try to connect back to it. Bootstrap nodes are the first peers available for this, as they are dialed at startup. - -**Note:** AutoNAT tests TCP reachability via the libp2p switch, we infer UDP reachability from it. - -### How it works - -The node asks a few peers: "try to connect to me at this address". Each peer attempts the connection and reports success or failure. The results are collected and a confidence score is calculated. When confidence crosses a threshold, the node is marked `Reachable` or `NotReachable`. - -### Reference implementation - -logos-delivery already has a minimal setup at [`waku/discovery/autonat_service.nim`](https://github.com/logos-messaging/logos-delivery/blob/0b86093247da92060c503544d39e5d0a23922c15/waku/discovery/autonat_service.nim): - -```nim -AutonatService.new( - autonatClient = AutonatClient.new(), - rng = rng, - scheduleInterval = Opt.some(30.seconds), # logos-storage uses 5min - askNewConnectedPeers = true, # triggers a check on first bootstrap connection, avoids waiting 5min at startup - numPeersToAsk = 3, - maxQueueSize = 3, - minConfidence = 0.7, -) -``` - -### Parameters - -| Parameter | logos-delivery | logos-storage | libp2p default | Notes | -| --- | --- | --- | --- | --- | -| `numPeersToAsk` | 3 | 3 | 5 | how many peers are asked per round | -| `maxQueueSize` | 3 | 3 | 10 | how many past results are kept to calculate confidence | -| `minConfidence` | 0.7 | 0.7 | 0.3 | fraction of successful answers needed to confirm a state | -| `scheduleInterval` | 30s | 5min | none | reachability changes rarely in a storage network | -| `askNewConnectedPeers` | false | true | true | triggers a check on first bootstrap connection | - -logos-delivery uses a higher confidence threshold (0.7 vs 0.3) and a smaller history window 
(3 vs 10): fewer samples but a stricter bar to confirm reachability. - -libp2p default values are available [here](https://github.com/vacp2p/nim-libp2p/blob/e82080f7b1aa61c6d35fa5311b873f41eff4bb52/libp2p/protocols/connectivity/autonat/service.nim#L60-L64). - -## Step 3: UPnP / NAT-PMP (fallback) - -If AutoNAT says the node is not reachable, we try to ask the router to open a port using UPnP or NAT-PMP already implemented. - -### What already exists - -The implementation is already in `nat.nim`: -- [`getExternalIP()`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L67): tries UPnP first, then NAT-PMP, returns the external IP -- [`redirectPorts()`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L305): creates the port mapping on the router -- [`nattedAddress()`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L400): calls both and returns the updated addresses to announce - -### The problem - -[`nattedAddress()` is called unconditionally at startup](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/storage.nim#L79), before AutoNAT has run. It should only be called when AutoNAT returns `NotReachable`. 
- -### What needs to change - -Move the `nattedAddress()` call out of `start()` and into the AutoNAT `statusAndConfidenceHandler`: - -```nim -of NotReachable: - let (announceAddrs, discoveryAddrs) = nattedAddress( - config.nat, switch.peerInfo.addrs, config.discoveryPort - ) - discovery.updateAnnounceRecord(announceAddrs) - discovery.updateDhtRecord(discoveryAddrs) -``` - -`nattedAddress()` opens a port on the router and starts [`repeatPortMapping`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L231) in the background to renew it every [`20 min`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L28) (routers forget the mapping on reboot for UPnP, or after [`1 hour`](https://github.com/logos-storage/logos-storage-nim/blob/48f2508b07e51a222070ada72c254927da9c5806/storage/nat.nim#L29) for NAT-PMP). It only needs to be called once, if it fails, the node falls back to relay immediately. - -### States - -The `statusAndConfidenceHandler` needs to track what has already been tried to avoid re-running UPnP on every AutoNAT cycle: - -- `Unknown` - - `NotReachable`: try UPnP - - `Reachable`: switch DHT to server -- `UPnP` - - `NotReachable`: switch DHT to client, start relay - - `Reachable`: switch DHT to server - - background: nat.nim renews the mapping every 20 min and fires `onMappingRestored` on restoration -- `Relay` - - `NotReachable`: do nothing - - if `hasExtIp: true`: start a background task that periodically asks a peer to dial the static IP. On success, fires the same `onMappingRestored` callback - -When applying a state transition: always update announce records first, then change DHT mode, then start or stop the relay. If you set server mode before publishing the new addresses, peers will try to contact you before your records are up to date. 
- -**Note:** nim-libp2p's AutoNAT [filters out relay addresses](https://github.com/vacp2p/nim-libp2p/blob/e82080f7b1aa61c6d35fa5311b873f41eff4bb52/libp2p/protocols/connectivity/autonat/server.nim#L122-L126) before attempting dial-back. A node behind relay will always get `NotReachable` from AutoNAT. - -## Step 4: Relay and hole punching - -We do not use `HPService` because it starts the relay immediately on `NotReachable`, bypassing the UPnP step. Instead we wire `AutonatService` and `AutoRelayService` directly and control the relay from our own `statusAndConfidenceHandler`. - -A node in relay mode can receive inbound connections via the relay, so other peers can download data from it. The relay server acts as a middleman: the node connects to it, reserves a slot, and gets a relay address of the form `/ip4//tcp//p2p//p2p-circuit/p2p/`. It publishes this address in its DHT provider records so peers looking for its content can find and connect to it. It stays in DHT client mode (see Step 1): it can publish provider records but cannot act as a routing hop. - -Bootstrap nodes serve as the initial relay servers. `AutoRelayService` finds relay candidates among connected peers, so bootstrap nodes are the first ones used. - -### Setup - -```nim -let relayClient = RelayClient.new() -let autoRelayService = AutoRelayService.new(2, relayClient, onReservation, rng) - -let switch = SwitchBuilder - .new() - ... - .withServices(@[Service(autonatService)]) - .build() -``` - -The `onReservation` callback updates DHT addresses when relay reservations change: - -```nim -proc onReservation(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} = - discovery.updateAnnounceRecord(addresses) - discovery.updateDhtRecord(addresses) -``` - -`updateAnnounceRecord` updates the libp2p peer record (multiaddr, used for content routing). `updateDhtRecord` updates the discv5 node record (SPR, used for peer discovery). Both replace the current set of addresses, they do not accumulate. 
- -### Mapping restored callback - -`repeatPortMapping` runs in the background even in `Relay` state, this is how we detect when the mapping comes back. It needs a callback added so we can stop the relay and update addresses when it fires. The external IP may have changed after a router reboot, so we always update regardless. - -```nim -proc onMappingRestored(addrs: seq[MultiAddress]) = - if natState == Relay: - await autoRelayService.stop(switch) - natState = UPnP - # addrs may differ from previously announced ones (e.g. router reboot changed external IP) - discovery.updateAnnounceRecord(addrs) - discovery.updateDhtRecord(addrs) -``` - -### Hole punching - -When a peer connects through a relay, libp2p automatically tries to establish a direct connection using dcutr (a protocol for hole punching). If it works, that specific connection bypasses the relay: lower latency and less load on the relay server. This does not change the node's reachability: new peers still need to go through the relay first. - -## Step 5: Periodic Re-evaluation - -`AutonatService` re-runs every 5 minutes. On each cycle it asks `numPeersToAsk` peers to dial back. If confidence crosses `minConfidence`, `statusAndConfidenceHandler` fires and our handler updates DHT mode, relay state, and announced addresses. The `onMappingRestored` callback from `nat.nim` can also fire independently between cycles. - -## What needs to be implemented - -- DHT client/server mode in `discovery.nim`: the node currently starts as a DHT server unconditionally -- Move `nattedAddress()` call out of `start()` and into the AutoNAT `statusAndConfidenceHandler` -- Expose a `onRestored` callback in `nat.nim`'s `repeatPortMapping` -- For `hasExtIp: true` (manual port forwarding): add a background task that periodically asks a peer to dial the static IP directly. If it succeeds, fire the same `onMappingRestored` callback. 
Without this, a node in `Relay` state with a restored manual port forwarding has no way to recover automatically. -- `statusAndConfidenceHandler` with `natState` tracking -- chronos needs to be updated to expose IPv6 address stability flags to distinguish stable SLAAC addresses from temporary ones - -## Open questions - -- [ ] Mobile nodes: UDP is often blocked on cellular networks. discv5 is UDP-only. How do we support mobile participation? From 654b90973ce10ced99a6a2e919e87b78c76de6ab Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 15 Apr 2026 09:07:42 +0400 Subject: [PATCH 12/32] Remove local settings --- .claude/settings.local.json | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100644 .claude/settings.local.json diff --git a/.claude/settings.local.json b/.claude/settings.local.json deleted file mode 100644 index 4494ff72..00000000 --- a/.claude/settings.local.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "permissions": { - "allow": [ - "Bash(grep -rn \"testnat\" /home/arnaud/Work/logos/logos-storage-nim/*.nims)", - "Bash(grep:*)", - "Bash(find:*)", - "Bash(curl -s http://127.0.0.1:8080/api/storage/v1/debug/info)", - "Bash(ls /home/arnaud/Work/logos/logos-storage-nim/tests/*.cfg /home/arnaud/Work/logos/logos-storage-nim/tests/*.nim)", - "Bash(nph --version)", - "Bash(make print-nph-path:*)", - "Bash(vendor/nimbus-build-system/vendor/Nim/bin/nph:*)", - "Bash(nimble build:*)", - "Bash(nim --version)", - "Bash(nimble --version)", - "Bash(/home/arnaud/.nimble/bin/nim --version)", - "Bash(make -n build-nph)", - "Bash(make build-nph:*)", - "Bash(/home/arnaud/.nimble/bin/nim c:*)", - "Bash(NIMFLAGS=\"--skipParentCfg\" nimble build)", - "Bash(NIMFLAGS=\"--skipParentCfg\" nimble build --verbose)" - ] - } -} From e349568833025b217bb4c493ebc93231ac127372 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 15 Apr 2026 09:17:45 +0400 Subject: [PATCH 13/32] Add bootstrap nodes connect --- storage/storage.nim | 13 +++++++++++-- 
tests/integration/1_minute/testnat.nim | 19 ++++--------------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/storage/storage.nim b/storage/storage.nim index 0b3f08e7..eb8a3fe7 100644 --- a/storage/storage.nim +++ b/storage/storage.nim @@ -39,6 +39,7 @@ import ./namespaces import ./storagetypes import ./logutils import ./nat +import ./utils/natutils logScope: topics = "storage node" @@ -95,7 +96,7 @@ proc start*(s: StorageServer) {.async.} = .deduplicate() let discoveryAddrs = @[getMultiAddrWithIPAndUDPPort(announceIp.get, s.config.discoveryPort)] - s.storageNode.discovery.updateDhtRecord(discoveryAddrs) + s.storageNode.discovery.updateDhtRecord(announceAddrs & discoveryAddrs) s.storageNode.discovery.updateAnnounceRecord(announceAddrs) var hasPublicAddr = false @@ -110,6 +111,14 @@ proc start*(s: StorageServer) {.async.} = await s.storageNode.start() + for spr in s.config.bootstrapNodes: + try: + let addrs = spr.data.addresses.mapIt(it.address) + await s.storageNode.switch.connect(spr.data.peerId, addrs) + except CatchableError as e: + warn "Cannot connect to bootstrap node", error = e.msg + discard + if s.restServer != nil: s.restServer.start() @@ -337,7 +346,7 @@ proc new*( let (announceAddrs, discoveryAddrs) = nattedAddress(config.nat, switch.peerInfo.addrs, config.discoveryPort) discovery.updateAnnounceRecord(announceAddrs) - discovery.updateDhtRecord(discoveryAddrs) + discovery.updateDhtRecord(announceAddrs & discoveryAddrs) ) StorageServer( diff --git a/tests/integration/1_minute/testnat.nim b/tests/integration/1_minute/testnat.nim index d7b9288f..4aa13bce 100644 --- a/tests/integration/1_minute/testnat.nim +++ b/tests/integration/1_minute/testnat.nim @@ -16,8 +16,9 @@ multinodesuite "AutoNAT integration": .withNatMinConfidence(0.5) .withNatScheduleInterval(10.seconds) .withNatMaxQueueSize(1) - .withLogFile() - .withLogLevel("DEBUG").some + # .withLogFile() + # .withLogLevel("DEBUG") + .some ) # Reminder: multinodesuite setup the first node 
as bootstrap node @@ -25,20 +26,8 @@ multinodesuite "AutoNAT integration": let node1 = clients()[0] let node2 = clients()[1] - # Temporary - # DHT exposes only UDP information - # So we force temporary by connection the 2 node together - # to update the autonat reachability - let info = await node2.client.info() - - check not info.isErr - - await node1.client.connectPeer( - info.get()["id"].getStr(), info.get()["addrs"].getElems().mapIt(it.getStr()) - ) - check eventuallySafe( - (await node1.client.natReachability()).get() == "Reachable", + (await node2.client.natReachability()).get() == "Reachable", timeout = 30_000, pollInterval = 500, ) From 42ced0189318964d93d284592982fae982d2c3fd Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 15 Apr 2026 10:11:37 +0400 Subject: [PATCH 14/32] Move to autonatv2 --- storage/rest/api.nim | 6 +++--- storage/storage.nim | 28 +++++++++++++++++----------- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/storage/rest/api.nim b/storage/rest/api.nim index af4b009e..6c54cc8b 100644 --- a/storage/rest/api.nim +++ b/storage/rest/api.nim @@ -23,7 +23,7 @@ import pkg/confutils import pkg/libp2p import pkg/libp2p/routing_record -import pkg/libp2p/protocols/connectivity/autonat/service +import pkg/libp2p/protocols/connectivity/autonatv2/service import pkg/codexdht/discv5/spr as spr import ../logutils @@ -561,7 +561,7 @@ proc initNodeApi(node: StorageNodeRef, conf: StorageConf, router: var RestRouter proc initDebugApi( node: StorageNodeRef, conf: StorageConf, - autonat: AutonatService, + autonat: AutonatV2Service, router: var RestRouter, ) = let allowedOrigin = router.allowedOrigin @@ -644,7 +644,7 @@ proc initRestApi*( node: StorageNodeRef, conf: StorageConf, repoStore: RepoStore, - autonat: AutonatService, + autonat: AutonatV2Service, corsAllowedOrigin: ?string, ): RestRouter = var router = RestRouter.init(validate, corsAllowedOrigin) diff --git a/storage/storage.nim b/storage/storage.nim index eb8a3fe7..ca099ef9 100644 --- 
a/storage/storage.nim +++ b/storage/storage.nim @@ -17,7 +17,7 @@ import pkg/chronos import pkg/taskpools import pkg/presto import pkg/libp2p -import pkg/libp2p/protocols/connectivity/autonat/[service, client] +import pkg/libp2p/protocols/connectivity/autonatv2/[service, client] import pkg/confutils import pkg/confutils/defs import pkg/stew/io2 @@ -53,7 +53,7 @@ type repoStore: RepoStore maintenance: BlockMaintainer taskpool: Taskpool - autonatService*: AutonatService + autonatService*: AutonatV2Service isStarted: bool StoragePrivateKey* = libp2p.PrivateKey # alias @@ -195,14 +195,17 @@ proc new*( ## create StorageServer including setting up datastore, repostore, etc let listenMultiAddr = getMultiAddrWithIpAndTcpPort(config.listenIp, config.listenPort) - let autonatService = AutonatService.new( - autonatClient = AutonatClient.new(), + let autonatClient = AutonatV2Client.new(random.Rng.instance()) + let autonatService = AutonatV2Service.new( rng = random.Rng.instance(), - scheduleInterval = Opt.some(config.natScheduleInterval), - askNewConnectedPeers = true, - numPeersToAsk = config.natNumPeersToAsk, - maxQueueSize = config.natMaxQueueSize, - minConfidence = config.natMinConfidence, + client = autonatClient, + config = AutonatV2ServiceConfig.new( + scheduleInterval = Opt.some(config.natScheduleInterval), + askNewConnectedPeers = true, + numPeersToAsk = config.natNumPeersToAsk, + maxQueueSize = config.natMaxQueueSize, + minConfidence = config.natMinConfidence, + ), ) let switch = SwitchBuilder @@ -216,10 +219,13 @@ proc new*( .withAgentVersion(config.agentString) .withSignedPeerRecord(true) .withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay}) - .withAutonat() + .withAutonatV2Server() .withServices(@[Service(autonatService)]) .build() + autonatClient.setup(switch) + switch.mount(autonatClient) + var cache: CacheStore = nil taskPool: Taskpool @@ -338,7 +344,7 @@ proc new*( switch.mount(network) switch.mount(manifestProto) - 
autonatService.statusAndConfidenceHandler( + autonatService.setStatusAndConfidenceHandler( proc( networkReachability: NetworkReachability, confidence: Opt[float] ) {.async: (raises: [CancelledError]).} = From 4a453c53e0d06af33d15ff27070d89e2a91fd03d Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 15 Apr 2026 10:25:36 +0400 Subject: [PATCH 15/32] Add abstraction for reachable nodes --- storage/nat.nim | 6 ++++++ storage/storage.nim | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/storage/nat.nim b/storage/nat.nim index f11d9786..0b83729e 100644 --- a/storage/nat.nim +++ b/storage/nat.nim @@ -401,6 +401,12 @@ proc setupAddress*( of NatStrategy.NatUpnp, NatStrategy.NatPmp: return setupNat(natConfig.nat, tcpPort, udpPort, clientId) +proc findReachableNodes*(bootstrapNodes: seq[SignedPeerRecord]): seq[SignedPeerRecord] = + ## Returns the list of nodes known to be directly reachable. + ## Currently returns bootstrap nodes. In the future, any network participant + ## confirmed reachable by AutoNAT could be included. 
+ bootstrapNodes + proc nattedAddress*( natConfig: NatConfig, addrs: seq[MultiAddress], udpPort: Port ): tuple[libp2p, discovery: seq[MultiAddress]] = diff --git a/storage/storage.nim b/storage/storage.nim index ca099ef9..7c51fde0 100644 --- a/storage/storage.nim +++ b/storage/storage.nim @@ -111,7 +111,7 @@ proc start*(s: StorageServer) {.async.} = await s.storageNode.start() - for spr in s.config.bootstrapNodes: + for spr in findReachableNodes(s.config.bootstrapNodes): try: let addrs = spr.data.addresses.mapIt(it.address) await s.storageNode.switch.connect(spr.data.peerId, addrs) From 77662d2d68315608461677d5a45d00e6d7c69f5f Mon Sep 17 00:00:00 2001 From: Arnaud Date: Tue, 21 Apr 2026 17:17:25 +0400 Subject: [PATCH 16/32] Introduce first tests for the async machine --- storage/nat.nim | 40 +++++++++++++++++++++++++++++++++++++++ storage/storage.nim | 10 +++++----- tests/storage/testnat.nim | 27 ++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 5 deletions(-) diff --git a/storage/nat.nim b/storage/nat.nim index 0b83729e..ad06fba3 100644 --- a/storage/nat.nim +++ b/storage/nat.nim @@ -17,10 +17,12 @@ import import pkg/chronos import pkg/chronicles import pkg/libp2p +import pkg/libp2p/protocols/connectivity/autonatv2/service import ./utils import ./utils/natutils import ./utils/addrutils +import ./discovery const UPNP_TIMEOUT = 200 # ms @@ -61,6 +63,16 @@ type PrefSrcStatus = enum BindAddressIsPublic BindAddressIsPrivate +type NatMapper* = ref object of RootObj + +method mapNatAddresses*( + m: NatMapper, addrs: seq[MultiAddress], discoveryPort: Port +): tuple[libp2p, discovery: seq[MultiAddress]] {.base, gcsafe, raises: [].} = + raiseAssert "mapNatAddresses not implemented" + +type DefaultNatMapper* = ref object of NatMapper + natConfig*: NatConfig + ## Also does threadvar initialisation. ## Must be called before redirectPorts() in each thread. 
proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress] = @@ -437,3 +449,31 @@ proc nattedAddress*( # Invalid multiaddress format - return as is it (newAddrs, discoveryAddrs) + +method mapNatAddresses*( + m: DefaultNatMapper, addrs: seq[MultiAddress], discoveryPort: Port +): tuple[libp2p, discovery: seq[MultiAddress]] {.gcsafe, raises: [].} = + nattedAddress(m.natConfig, addrs, discoveryPort) + +proc handleNatStatus*( + networkReachability: NetworkReachability, + confidence: Opt[float], + mapper: NatMapper, + listenAddrs: seq[MultiAddress], + discoveryPort: Port, + discovery: Discovery, +) {.async: (raises: [CancelledError]).} = + debug "AutoNAT status", reachability = networkReachability, confidence + + case networkReachability + of Reachable: + # TODO: switch DHT to server mode, stop relay if running + discard + of NotReachable: + let (announceAddrs, discoveryAddrs) = + mapper.mapNatAddresses(listenAddrs, discoveryPort) + discovery.updateAnnounceRecord(announceAddrs) + discovery.updateDhtRecord(announceAddrs & discoveryAddrs) + of Unknown: + # Nothing to do here, not enough confidence score result + discard diff --git a/storage/storage.nim b/storage/storage.nim index 7c51fde0..c760e7dd 100644 --- a/storage/storage.nim +++ b/storage/storage.nim @@ -344,15 +344,15 @@ proc new*( switch.mount(network) switch.mount(manifestProto) + let natMapper = DefaultNatMapper(natConfig: config.nat) autonatService.setStatusAndConfidenceHandler( proc( networkReachability: NetworkReachability, confidence: Opt[float] ) {.async: (raises: [CancelledError]).} = - if networkReachability == NotReachable: - let (announceAddrs, discoveryAddrs) = - nattedAddress(config.nat, switch.peerInfo.addrs, config.discoveryPort) - discovery.updateAnnounceRecord(announceAddrs) - discovery.updateDhtRecord(announceAddrs & discoveryAddrs) + await handleNatStatus( + networkReachability, confidence, natMapper, switch.peerInfo.addrs, + config.discoveryPort, discovery, + ) ) 
StorageServer( diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index 71155540..0acb0725 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -1,12 +1,24 @@ import std/[unittest, net] import pkg/chronos +import pkg/libp2p import pkg/libp2p/[multiaddress, multihash, multicodec] +import pkg/libp2p/protocols/connectivity/autonatv2/service import pkg/results import ../../storage/nat +import ../../storage/discovery +import ../../storage/rng import ../../storage/utils import ../../storage/utils/natutils +type MockNatMapper = ref object of NatMapper + mapped: tuple[libp2p, discovery: seq[MultiAddress]] + +method mapNatAddresses*( + m: MockNatMapper, addrs: seq[MultiAddress], discoveryPort: Port +): tuple[libp2p, discovery: seq[MultiAddress]] {.raises: [].} = + m.mapped + suite "NAT Address Tests": test "nattedAddress with local addresses": # Setup test data @@ -65,3 +77,18 @@ suite "setupAddress": check ip == none(IpAddress) check tcpPort == some(Port(5000)) check udpPort == some(Port(5001)) + +suite "handleNatStatus": + let key = PrivateKey.random(Rng.instance[]).get() + + test "NotReachable updates announce addresses": + let disc = Discovery.new(key, announceAddrs = @[]) + let announceAddr = MultiAddress.init("/ip4/1.2.3.4/tcp/8080").expect("valid") + let discAddr = MultiAddress.init("/ip4/1.2.3.4/udp/8090").expect("valid") + let mapper = MockNatMapper(mapped: (@[announceAddr], @[discAddr])) + + waitFor handleNatStatus( + NotReachable, Opt.none(float), mapper, @[], Port(8090), disc + ) + + check disc.announceAddrs == @[announceAddr] From bc05f8154d135fab7cedd68c035129aa8112efdc Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 22 Apr 2026 12:19:26 +0400 Subject: [PATCH 17/32] Introduce max relay config --- storage/conf.nim | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/storage/conf.nim b/storage/conf.nim index 406615e7..af9e871e 100644 --- a/storage/conf.nim +++ b/storage/conf.nim @@ -313,6 +313,12 @@ type name: 
"nat-min-confidence" .}: float + natMaxRelays* {. + desc: "Maximum number of relay servers to reserve slots on simultaneously", + defaultValue: 2, + name: "nat-max-relays" + .}: int + func defaultAddress*(conf: StorageConf): IpAddress = result = static parseIpAddress("127.0.0.1") From e48ca913a9db228f27ffbacbf24c3abef14882f4 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 22 Apr 2026 12:21:43 +0400 Subject: [PATCH 18/32] Add relay integration and tests --- storage/nat.nim | 80 +++++++++++++++++++++------ storage/storage.nim | 24 +++++++-- tests/storage/testnat.nim | 110 +++++++++++++++++++++++++++++++++----- 3 files changed, 183 insertions(+), 31 deletions(-) diff --git a/storage/nat.nim b/storage/nat.nim index ad06fba3..eccb193a 100644 --- a/storage/nat.nim +++ b/storage/nat.nim @@ -18,6 +18,7 @@ import pkg/chronos import pkg/chronicles import pkg/libp2p import pkg/libp2p/protocols/connectivity/autonatv2/service +import pkg/libp2p/services/autorelayservice import ./utils import ./utils/natutils @@ -66,12 +67,18 @@ type PrefSrcStatus = enum type NatMapper* = ref object of RootObj method mapNatAddresses*( - m: NatMapper, addrs: seq[MultiAddress], discoveryPort: Port + m: NatMapper, addrs: seq[MultiAddress] ): tuple[libp2p, discovery: seq[MultiAddress]] {.base, gcsafe, raises: [].} = raiseAssert "mapNatAddresses not implemented" +method getReachableAddresses*( + m: NatMapper, addrs: seq[MultiAddress] +): tuple[libp2p, discovery: seq[MultiAddress]] {.base, gcsafe, raises: [].} = + raiseAssert "getReachableAddresses not implemented" + type DefaultNatMapper* = ref object of NatMapper natConfig*: NatConfig + discoveryPort*: Port ## Also does threadvar initialisation. ## Must be called before redirectPorts() in each thread. 
@@ -451,29 +458,70 @@ proc nattedAddress*(
 (newAddrs, discoveryAddrs)
 
 method mapNatAddresses*(
-  m: DefaultNatMapper, addrs: seq[MultiAddress], discoveryPort: Port
+  m: DefaultNatMapper, addrs: seq[MultiAddress]
 ): tuple[libp2p, discovery: seq[MultiAddress]] {.gcsafe, raises: [].} =
-  nattedAddress(m.natConfig, addrs, discoveryPort)
+  nattedAddress(m.natConfig, addrs, m.discoveryPort)
+
+method getReachableAddresses*(
+  m: DefaultNatMapper, addrs: seq[MultiAddress]
+): tuple[libp2p, discovery: seq[MultiAddress]] {.gcsafe, raises: [].} =
+  let ip =
+    if m.natConfig.hasExtIp:
+      some(m.natConfig.extIp)
+    else:
+      let (routeIp, _) = getRoutePrefSrc(static parseIpAddress("0.0.0.0"))
+      routeIp
+  if ip.isNone:
+    return (@[], @[])
+  let announceAddrs =
+    addrs.mapIt(it.remapAddr(ip = ip, port = none(Port))).deduplicate()
+  (announceAddrs, @[getMultiAddrWithIPAndUDPPort(ip.get, m.discoveryPort)])
+
+proc hasPublicIp*(addrs: seq[MultiAddress]): bool =
+  for addr in addrs:
+    let (ip, _) = getAddressAndPort(addr)
+    if ip.isSome and isGlobalUnicast(ip.get):
+      return true
 
 proc handleNatStatus*(
   networkReachability: NetworkReachability,
-  confidence: Opt[float],
   mapper: NatMapper,
-  listenAddrs: seq[MultiAddress],
-  discoveryPort: Port,
   discovery: Discovery,
+  switch: Switch,
+  autoRelayService: AutoRelayService,
 ) {.async: (raises: [CancelledError]).} =
-  debug "AutoNAT status", reachability = networkReachability, confidence
-
   case networkReachability
-  of Reachable:
-    # TODO: switch DHT to server mode, stop relay if running
-    discard
-  of NotReachable:
-    let (announceAddrs, discoveryAddrs) =
-      mapper.mapNatAddresses(listenAddrs, discoveryPort)
-    discovery.updateAnnounceRecord(announceAddrs)
-    discovery.updateDhtRecord(announceAddrs & discoveryAddrs)
   of Unknown:
     # Nothing to do here, not enough confidence score result
     discard
+  of Reachable:
+    # For UPnP, if the mapping was a success,
+    # the autorelay service has been stopped
+    # and the address was already announced
+    if 
autoRelayService.isRunning:
+      if not await autoRelayService.stop(switch):
+        debug "AutoRelayService stop method returned false"
+
+    let (announceAddrs, discoveryAddrs) =
+      mapper.getReachableAddresses(switch.peerInfo.addrs)
+    discovery.updateAnnounceRecord(announceAddrs)
+    discovery.updateDhtRecord(announceAddrs & discoveryAddrs)
+    # TODO: switch DHT to server mode
+  of NotReachable:
+    let (announceAddrs, discoveryAddrs) = mapper.mapNatAddresses(switch.peerInfo.addrs)
+
+    # With a UPnP / NatPmP successful mapping,
+    # we suppose that having a public IP makes it Reachable.
+    # If not, the state will be updated in the next Autonat iteration.
+    # TODO: Do we need to manually call dialMe to make sure we are Reachable ?
+    if hasPublicIp(announceAddrs):
+      discovery.updateAnnounceRecord(announceAddrs)
+      discovery.updateDhtRecord(announceAddrs & discoveryAddrs)
+
+      if autoRelayService.isRunning:
+        if not await autoRelayService.stop(switch):
+          debug "AutoRelayService stop method returned false"
+    else:
+      if not autoRelayService.isRunning:
+        if not await autoRelayService.setup(switch):
+          debug "AutoRelayService setup method returned false"
diff --git a/storage/storage.nim b/storage/storage.nim
index c760e7dd..ff9d97b3 100644
--- a/storage/storage.nim
+++ b/storage/storage.nim
@@ -18,6 +18,8 @@ import pkg/taskpools
 import pkg/presto
 import pkg/libp2p
 import pkg/libp2p/protocols/connectivity/autonatv2/[service, client]
+import pkg/libp2p/protocols/connectivity/relay/client as relayClientModule
+import pkg/libp2p/services/autorelayservice
 import pkg/confutils
 import pkg/confutils/defs
 import pkg/stew/io2
@@ -54,6 +56,7 @@ type
     maintenance: BlockMaintainer
     taskpool: Taskpool
     autonatService*: AutonatV2Service
+    autoRelayService: AutoRelayService
     isStarted: bool
 
   StoragePrivateKey* = libp2p.PrivateKey # alias
@@ -195,6 +198,8 @@ proc new*(
   ## create StorageServer including setting up datastore, repostore, etc
   let listenMultiAddr = getMultiAddrWithIpAndTcpPort(config.listenIp,
config.listenPort) + let relayClient = relayClientModule.RelayClient.new() + let autonatClient = AutonatV2Client.new(random.Rng.instance()) let autonatService = AutonatV2Service.new( rng = random.Rng.instance(), @@ -220,6 +225,7 @@ proc new*( .withSignedPeerRecord(true) .withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay}) .withAutonatV2Server() + .withCircuitRelay(relayClient) .withServices(@[Service(autonatService)]) .build() @@ -327,6 +333,16 @@ proc new*( taskPool = taskPool, ) + autoRelayService = AutoRelayService.new( + maxNumRelays = config.natMaxRelays, + client = relayClient, + onReservation = proc(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} = + debug "Relay reservation updated", addresses + discovery.updateAnnounceRecord(addresses) + discovery.updateDhtRecord(addresses), + rng = random.Rng.instance(), + ) + var restServer: RestServerRef = nil if config.apiBindAddress.isSome: @@ -344,14 +360,15 @@ proc new*( switch.mount(network) switch.mount(manifestProto) - let natMapper = DefaultNatMapper(natConfig: config.nat) + let natMapper = + DefaultNatMapper(natConfig: config.nat, discoveryPort: config.discoveryPort) autonatService.setStatusAndConfidenceHandler( proc( networkReachability: NetworkReachability, confidence: Opt[float] ) {.async: (raises: [CancelledError]).} = + debug "AutoNAT status", reachability = networkReachability, confidence await handleNatStatus( - networkReachability, confidence, natMapper, switch.peerInfo.addrs, - config.discoveryPort, discovery, + networkReachability, natMapper, discovery, switch, autoRelayService ) ) @@ -364,4 +381,5 @@ proc new*( taskPool: taskPool, logFile: logFile, autonatService: autonatService, + autoRelayService: autoRelayService, ) diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index 0acb0725..3f772ee2 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -1,10 +1,15 @@ -import std/[unittest, net] +import std/net import pkg/chronos -import pkg/libp2p 
import pkg/libp2p/[multiaddress, multihash, multicodec] -import pkg/libp2p/protocols/connectivity/autonatv2/service +import pkg/libp2p/protocols/connectivity/autonatv2/service except setup +import pkg/libp2p/protocols/connectivity/autonatv2/types +import pkg/libp2p/protocols/connectivity/relay/client as relayClientModule +import pkg/libp2p/services/autorelayservice except setup + import pkg/results +import ./helpers +import ../asynctest import ../../storage/nat import ../../storage/discovery import ../../storage/rng @@ -15,7 +20,12 @@ type MockNatMapper = ref object of NatMapper mapped: tuple[libp2p, discovery: seq[MultiAddress]] method mapNatAddresses*( - m: MockNatMapper, addrs: seq[MultiAddress], discoveryPort: Port + m: MockNatMapper, addrs: seq[MultiAddress] +): tuple[libp2p, discovery: seq[MultiAddress]] {.raises: [].} = + m.mapped + +method getReachableAddresses*( + m: MockNatMapper, addrs: seq[MultiAddress] ): tuple[libp2p, discovery: seq[MultiAddress]] {.raises: [].} = m.mapped @@ -44,7 +54,6 @@ suite "NAT Address Tests": MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), ] - #ipv6Addr = MultiAddress.init("/ip6/::1/tcp/5000").expect("valid multiaddr") addrs = @[localAddr, anyAddr, publicAddr] @@ -78,17 +87,94 @@ suite "setupAddress": check tcpPort == some(Port(5000)) check udpPort == some(Port(5001)) -suite "handleNatStatus": - let key = PrivateKey.random(Rng.instance[]).get() +suite "getReachableAddresses": + test "returns remapped addresses when extIp is configured": + let + natConfig = NatConfig(hasExtIp: true, extIp: parseIpAddress("1.2.3.4")) + mapper = DefaultNatMapper(natConfig: natConfig, discoveryPort: Port(8090)) + listenAddr = MultiAddress.init("/ip4/0.0.0.0/tcp/5000").expect("valid") - test "NotReachable updates announce addresses": - let disc = Discovery.new(key, announceAddrs = @[]) + let (libp2pAddrs, discAddrs) = 
mapper.getReachableAddresses(@[listenAddr]) + + check libp2pAddrs == @[MultiAddress.init("/ip4/1.2.3.4/tcp/5000").expect("valid")] + check discAddrs == @[MultiAddress.init("/ip4/1.2.3.4/udp/8090").expect("valid")] + +suite "hasPublicIp": + test "hasPublicIp returns true when the address is public": + let ma = MultiAddress.init("/ip4/8.8.8.8/tcp/8080").expect("valid") + check hasPublicIp(@[ma]) + + test "hasPublicIp returns false when the address is private": + let ma = MultiAddress.init("/ip4/192.168.1.1/tcp/8080").expect("valid") + check not hasPublicIp(@[ma]) + + test "hasPublicIp returns false when the address is empty": + check not hasPublicIp(@[]) + +asyncchecksuite "handleNatStatus": + var sw: Switch + var key: PrivateKey + var disc: Discovery + let autoRelay = + AutoRelayService.new(1, relayClientModule.RelayClient.new(), nil, Rng.instance()) + + setup: + key = PrivateKey.random(Rng.instance[]).get() + disc = Discovery.new(key, announceAddrs = @[]) + sw = newStandardSwitch() + await sw.start() + + teardown: + await sw.stop() + + if autoRelay.isRunning: + discard await autoRelay.stop(sw) + + test "handleNatStatus announces address when the node is not Reachable and the UPnP succeed with public ip": let announceAddr = MultiAddress.init("/ip4/1.2.3.4/tcp/8080").expect("valid") let discAddr = MultiAddress.init("/ip4/1.2.3.4/udp/8090").expect("valid") let mapper = MockNatMapper(mapped: (@[announceAddr], @[discAddr])) - waitFor handleNatStatus( - NotReachable, Opt.none(float), mapper, @[], Port(8090), disc - ) + await handleNatStatus(NotReachable, mapper, disc, sw, autoRelay) check disc.announceAddrs == @[announceAddr] + check not autoRelay.isRunning + + # test "handleNatStatus does not announce address when the node is not Reachable and the UPnP succeed with private ip": + # let privateAddr = MultiAddress.init("/ip4/192.168.1.1/tcp/8080").expect("valid") + # let mapper = MockNatMapper(mapped: (@[privateAddr], @[])) + + # await handleNatStatus( + # NotReachable, 
mapper, disc, sw, autoRelay + # ) + + # check disc.announceAddrs == @[] + # check not autoRelay.isRunning + + test "handleNatStatus starts autoRelay when node is not Reachable and UPnP failed": + let mapper = MockNatMapper(mapped: (@[], @[])) + + await handleNatStatus(NotReachable, mapper, disc, sw, autoRelay) + + check autoRelay.isRunning + # The addresses will be announced in the onReservation callback + # after a node accepted a Relay reservation. + + test "handleNatStatus does not announce address when node is Reachable and relay is not running": + let mapper = MockNatMapper(mapped: (@[], @[])) + + await handleNatStatus(Reachable, mapper, disc, sw, autoRelay) + + check disc.announceAddrs == newSeq[MultiAddress]() + check not autoRelay.isRunning + + test "handleNatStatus stops relay and announces address when node is Reachable and relay is running": + let announceAddr = MultiAddress.init("/ip4/1.2.3.4/tcp/8080").expect("valid") + let discAddr = MultiAddress.init("/ip4/1.2.3.4/udp/8090").expect("valid") + let mapper = MockNatMapper(mapped: (@[announceAddr], @[discAddr])) + + discard await autorelayservice.setup(autoRelay, sw) + await handleNatStatus(Reachable, mapper, disc, sw, autoRelay) + + check not autoRelay.isRunning + check disc.announceAddrs == @[announceAddr] From 4a4941a593abad3cf193926f3dd0cd32fb7a7628 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 22 Apr 2026 19:53:39 +0400 Subject: [PATCH 19/32] Add relay config --- storage/conf.nim | 6 ++++++ storage/storage.nim | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/storage/conf.nim b/storage/conf.nim index af9e871e..80c48b6f 100644 --- a/storage/conf.nim +++ b/storage/conf.nim @@ -319,6 +319,12 @@ type name: "nat-max-relays" .}: int + relay* {. 
+      desc: "Enable circuit relay server (hop) - use on publicly reachable nodes only",
+      defaultValue: false,
+      name: "relay"
+    .}: bool
+
   func defaultAddress*(conf: StorageConf): IpAddress =
     result = static parseIpAddress("127.0.0.1")
 
diff --git a/storage/storage.nim b/storage/storage.nim
index ff9d97b3..86441170 100644
--- a/storage/storage.nim
+++ b/storage/storage.nim
@@ -198,7 +198,7 @@ proc new*(
   ## create StorageServer including setting up datastore, repostore, etc
   let listenMultiAddr = getMultiAddrWithIpAndTcpPort(config.listenIp, config.listenPort)
 
-  let relayClient = relayClientModule.RelayClient.new()
+  let relayClient = relayClientModule.RelayClient.new(canHop = config.relay)
 
   let autonatClient = AutonatV2Client.new(random.Rng.instance())
   let autonatService = AutonatV2Service.new(

From 95d7f6882f991cf93aa5b693be3b5ff97238521c Mon Sep 17 00:00:00 2001
From: Arnaud
Date: Tue, 5 May 2026 10:48:55 +0400
Subject: [PATCH 20/32] Remove nat none strategy

---
 storage/conf.nim | 4 +---
 storage/nat.nim | 14 +-------------
 storage/utils/natutils.nim | 1 -
 tests/integration/multinodes.nim | 1 -
 tests/storage/helpers/nodeutils.nim | 2 +-
 tests/storage/testnat.nim | 23 -----------------------
 6 files changed, 3 insertions(+), 42 deletions(-)

diff --git a/storage/conf.nim b/storage/conf.nim
index 80c48b6f..b5738a0f 100644
--- a/storage/conf.nim
+++ b/storage/conf.nim
@@ -157,7 +157,7 @@ type
     nat* {.
       desc:
         "Specify method to use for determining public address. " &
-        "Must be one of: any, none, upnp, pmp, extip:. " &
+        "Must be one of: any, upnp, pmp, extip:. 
" & "If connecting to peers on a local network only, use 'none'.", defaultValue: defaultNatConfig(), defaultValueDesc: "any", @@ -407,8 +407,6 @@ func parse*(T: type NatConfig, p: string): Result[NatConfig, string] = case p.toLowerAscii of "any": return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)) - of "none": - return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatNone)) of "upnp": return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)) of "pmp": diff --git a/storage/nat.nim b/storage/nat.nim index eccb193a..84ecef1d 100644 --- a/storage/nat.nim +++ b/storage/nat.nim @@ -48,7 +48,7 @@ type NatConfig* = object var upnp {.threadvar.}: Miniupnp npmp {.threadvar.}: NatPmp - strategy = NatStrategy.NatNone + strategy = NatStrategy.NatAny natClosed: Atomic[bool] extIp: Option[IpAddress] activeMappings: seq[PortMappings] @@ -405,18 +405,6 @@ proc setupAddress*( return (prefSrcIp, some(tcpPort), some(udpPort)) of PrefSrcIsPrivate, BindAddressIsPrivate: return setupNat(natConfig.nat, tcpPort, udpPort, clientId) - of NatStrategy.NatNone: - let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp) - - case prefSrcStatus - of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic: - return (prefSrcIp, some(tcpPort), some(udpPort)) - of PrefSrcIsPrivate: - error "No public IP address found. Should not use --nat:none option" - return (none(IpAddress), some(tcpPort), some(udpPort)) - of BindAddressIsPrivate: - error "Bind IP is not a public IP address. 
Should not use --nat:none option" - return (none(IpAddress), some(tcpPort), some(udpPort)) of NatStrategy.NatUpnp, NatStrategy.NatPmp: return setupNat(natConfig.nat, tcpPort, udpPort, clientId) diff --git a/storage/utils/natutils.nim b/storage/utils/natutils.nim index 04447d26..2a129793 100644 --- a/storage/utils/natutils.nim +++ b/storage/utils/natutils.nim @@ -8,7 +8,6 @@ type NatStrategy* = enum NatAny NatUpnp NatPmp - NatNone func isGlobalUnicast*(address: TransportAddress): bool = if address.isGlobal() and address.isUnicast(): true else: false diff --git a/tests/integration/multinodes.nim b/tests/integration/multinodes.nim index 9d4153bd..034ee657 100644 --- a/tests/integration/multinodes.nim +++ b/tests/integration/multinodes.nim @@ -131,7 +131,6 @@ template multinodesuite*(suiteName: string, body: untyped) = config.addCliOption("--bootstrap-node", bootstrapNode) config.addCliOption("--data-dir", datadir) - config.addCliOption("--nat", "none") except StorageConfigError as e: raiseMultiNodeSuiteError "invalid cli option, error: " & e.msg diff --git a/tests/storage/helpers/nodeutils.nim b/tests/storage/helpers/nodeutils.nim index e1951b83..087d86e8 100644 --- a/tests/storage/helpers/nodeutils.nim +++ b/tests/storage/helpers/nodeutils.nim @@ -219,7 +219,7 @@ proc generateNodes*( if config.enableBootstrap: waitFor switch.peerInfo.update() let (announceAddrs, discoveryAddrs) = nattedAddress( - NatConfig(hasExtIp: false, nat: NatNone), + NatConfig(hasExtIp: false, nat: NatAny), switch.peerInfo.addrs, bindPort.Port, ) diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index 3f772ee2..ee9ed27e 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -64,29 +64,6 @@ suite "NAT Address Tests": check(discoveryAddrs == expectedDiscoveryAddrs) check(libp2pAddrs == expectedlibp2pAddrs) -suite "setupAddress": - test "public bind IP with NatNone returns bind IP": - let - bindIp = parseIpAddress("8.8.8.8") - natConfig = NatConfig(hasExtIp: 
false, nat: NatStrategy.NatNone) - (ip, tcpPort, udpPort) = - setupAddress(natConfig, bindIp, Port(5000), Port(5001), "test") - - check ip == some(bindIp) - check tcpPort == some(Port(5000)) - check udpPort == some(Port(5001)) - - test "private bind IP with NatNone returns no IP": - let - bindIp = parseIpAddress("192.168.1.1") - natConfig = NatConfig(hasExtIp: false, nat: NatStrategy.NatNone) - (ip, tcpPort, udpPort) = - setupAddress(natConfig, bindIp, Port(5000), Port(5001), "test") - - check ip == none(IpAddress) - check tcpPort == some(Port(5000)) - check udpPort == some(Port(5001)) - suite "getReachableAddresses": test "returns remapped addresses when extIp is configured": let From 86708b545feefc8873b85f6ab48b0589c6bc7083 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Tue, 5 May 2026 10:55:11 +0400 Subject: [PATCH 21/32] Rename nat any to nat auto --- storage/conf.nim | 11 +++++------ storage/nat.nim | 8 ++++---- storage/utils/natutils.nim | 2 +- tests/storage/helpers/nodeutils.nim | 2 +- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/storage/conf.nim b/storage/conf.nim index b5738a0f..487dac94 100644 --- a/storage/conf.nim +++ b/storage/conf.nim @@ -157,10 +157,9 @@ type nat* {. desc: "Specify method to use for determining public address. " & - "Must be one of: any, upnp, pmp, extip:. 
" & - "If connecting to peers on a local network only, use 'none'.", + "Must be one of: auto, upnp, pmp, extip:.", defaultValue: defaultNatConfig(), - defaultValueDesc: "any", + defaultValueDesc: "auto", name: "nat" .}: NatConfig @@ -329,7 +328,7 @@ func defaultAddress*(conf: StorageConf): IpAddress = result = static parseIpAddress("127.0.0.1") func defaultNatConfig*(): NatConfig = - result = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) + result = NatConfig(hasExtIp: false, nat: NatStrategy.NatAuto) proc getStorageVersion(): string = let tag = strip(staticExec("git describe --tags --abbrev=0")) @@ -405,8 +404,8 @@ proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = func parse*(T: type NatConfig, p: string): Result[NatConfig, string] = case p.toLowerAscii - of "any": - return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)) + of "auto": + return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAuto)) of "upnp": return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)) of "pmp": diff --git a/storage/nat.nim b/storage/nat.nim index 84ecef1d..ee9ce622 100644 --- a/storage/nat.nim +++ b/storage/nat.nim @@ -48,7 +48,7 @@ type NatConfig* = object var upnp {.threadvar.}: Miniupnp npmp {.threadvar.}: NatPmp - strategy = NatStrategy.NatAny + strategy = NatStrategy.NatAuto natClosed: Atomic[bool] extIp: Option[IpAddress] activeMappings: seq[PortMappings] @@ -85,7 +85,7 @@ type DefaultNatMapper* = ref object of NatMapper proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress] = var externalIP: IpAddress - if natStrategy == NatStrategy.NatAny or natStrategy == NatStrategy.NatUpnp: + if natStrategy == NatStrategy.NatAuto or natStrategy == NatStrategy.NatUpnp: if upnp == nil: upnp = newMiniupnp() @@ -127,7 +127,7 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress] error "parseIpAddress() exception", err = e.msg return - if natStrategy == NatStrategy.NatAny or natStrategy == NatStrategy.NatPmp: + 
if natStrategy == NatStrategy.NatAuto or natStrategy == NatStrategy.NatPmp: if npmp == nil: npmp = newNatPmp() let nres = npmp.init() @@ -397,7 +397,7 @@ proc setupAddress*( return (some(natConfig.extIp), some(tcpPort), some(udpPort)) case natConfig.nat - of NatStrategy.NatAny: + of NatStrategy.NatAuto: let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp) case prefSrcStatus diff --git a/storage/utils/natutils.nim b/storage/utils/natutils.nim index 2a129793..04cc57f9 100644 --- a/storage/utils/natutils.nim +++ b/storage/utils/natutils.nim @@ -5,7 +5,7 @@ import std/[net, tables, hashes, options], pkg/results, chronos, chronicles import pkg/libp2p type NatStrategy* = enum - NatAny + NatAuto NatUpnp NatPmp diff --git a/tests/storage/helpers/nodeutils.nim b/tests/storage/helpers/nodeutils.nim index 087d86e8..a891db23 100644 --- a/tests/storage/helpers/nodeutils.nim +++ b/tests/storage/helpers/nodeutils.nim @@ -219,7 +219,7 @@ proc generateNodes*( if config.enableBootstrap: waitFor switch.peerInfo.update() let (announceAddrs, discoveryAddrs) = nattedAddress( - NatConfig(hasExtIp: false, nat: NatAny), + NatConfig(hasExtIp: false, nat: NatAuto), switch.peerInfo.addrs, bindPort.Port, ) From 7a6b4993b7ddf9d5e759d1c96b8e02ef6050ee31 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Tue, 5 May 2026 12:13:09 +0400 Subject: [PATCH 22/32] Update libp2p (and chronos) to include the dial address in AutoNat callback --- .gitmodules | 3 + storage/blockexchange/network/network.nim | 7 +- storage/manifest/coders.nim | 2 + storage/storage.nim | 4 +- tests/storage/blockexchange/testnetwork.nim | 71 +++++++++++---------- vendor/nim-chronos | 2 +- vendor/nim-libp2p | 2 +- vendor/nim-lsquic | 1 + 8 files changed, 52 insertions(+), 40 deletions(-) create mode 160000 vendor/nim-lsquic diff --git a/.gitmodules b/.gitmodules index e6214d80..ec152ae6 100644 --- a/.gitmodules +++ b/.gitmodules @@ -196,3 +196,6 @@ [submodule "vendor/nim-merkletree"] path = vendor/nim-merkletree url = 
https://github.com/logos-storage/nim-merkletree +[submodule "vendor/nim-lsquic"] + path = vendor/nim-lsquic + url = https://github.com/vacp2p/nim-lsquic diff --git a/storage/blockexchange/network/network.nim b/storage/blockexchange/network/network.nim index 5f53fe20..4a08f203 100644 --- a/storage/blockexchange/network/network.nim +++ b/storage/blockexchange/network/network.nim @@ -13,7 +13,7 @@ import std/sequtils import pkg/chronos import pkg/libp2p -import pkg/libp2p/utils/semaphore +import pkg/chronos/asyncsync import pkg/questionable import pkg/questionable/results @@ -113,7 +113,10 @@ proc send*( except CatchableError as err: error "Error sending message", peer = id, msg = err.msg finally: - b.inflightSema.release() + try: + b.inflightSema.release() + except AsyncSemaphoreError as err: + error "Failed to release semaphore", msg = err.msg proc handleWantList( b: BlockExcNetwork, peer: NetworkPeer, list: WantList diff --git a/storage/manifest/coders.nim b/storage/manifest/coders.nim index 80a9879e..19664e64 100644 --- a/storage/manifest/coders.nim +++ b/storage/manifest/coders.nim @@ -14,6 +14,8 @@ import times {.push raises: [].} import std/tables +import std/sequtils +import std/options import pkg/libp2p import pkg/questionable diff --git a/storage/storage.nim b/storage/storage.nim index 86441170..31bb9d8a 100644 --- a/storage/storage.nim +++ b/storage/storage.nim @@ -364,7 +364,9 @@ proc new*( DefaultNatMapper(natConfig: config.nat, discoveryPort: config.discoveryPort) autonatService.setStatusAndConfidenceHandler( proc( - networkReachability: NetworkReachability, confidence: Opt[float] + networkReachability: NetworkReachability, + confidence: Opt[float], + addrs: Opt[MultiAddress], ) {.async: (raises: [CancelledError]).} = debug "AutoNAT status", reachability = networkReachability, confidence await handleNatStatus( diff --git a/tests/storage/blockexchange/testnetwork.nim b/tests/storage/blockexchange/testnetwork.nim index dd035a44..c03cf7db 100644 --- 
a/tests/storage/blockexchange/testnetwork.nim +++ b/tests/storage/blockexchange/testnetwork.nim @@ -174,38 +174,39 @@ asyncchecksuite "Network - Senders": await done.wait(500.millis) -asyncchecksuite "Network - Test Limits": - var - switch1, switch2: Switch - network1, network2: BlockExcNetwork - done: Future[void] - - setup: - done = newFuture[void]() - switch1 = newStandardSwitch() - switch2 = newStandardSwitch() - - network1 = BlockExcNetwork.new(switch = switch1, maxInflight = 0) - switch1.mount(network1) - - network2 = BlockExcNetwork.new(switch = switch2) - switch2.mount(network2) - - await switch1.start() - await switch2.start() - - await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) - - teardown: - await allFuturesThrowing(switch1.stop(), switch2.stop()) - - test "Concurrent Sends": - network2.handlers.onPresence = proc( - peer: PeerId, presence: seq[BlockPresence] - ): Future[void] {.async: (raises: []).} = - check false - - let fut = network1.send(switch2.peerInfo.peerId, Message()) - - await sleepAsync(100.millis) - check not fut.finished +# TODO: AsyncSemaphore in chronos requires size > 0, maxInflight = 0 no longer valid +# asyncchecksuite "Network - Test Limits": +# var +# switch1, switch2: Switch +# network1, network2: BlockExcNetwork +# done: Future[void] +# +# setup: +# done = newFuture[void]() +# switch1 = newStandardSwitch() +# switch2 = newStandardSwitch() +# +# network1 = BlockExcNetwork.new(switch = switch1, maxInflight = 0) +# switch1.mount(network1) +# +# network2 = BlockExcNetwork.new(switch = switch2) +# switch2.mount(network2) +# +# await switch1.start() +# await switch2.start() +# +# await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) +# +# teardown: +# await allFuturesThrowing(switch1.stop(), switch2.stop()) +# +# test "Concurrent Sends": +# network2.handlers.onPresence = proc( +# peer: PeerId, presence: seq[BlockPresence] +# ): Future[void] {.async: (raises: []).} = +# check false +# +# let fut = 
network1.send(switch2.peerInfo.peerId, Message()) +# +# await sleepAsync(100.millis) +# check not fut.finished diff --git a/vendor/nim-chronos b/vendor/nim-chronos index 785fcf4d..45f43a9a 160000 --- a/vendor/nim-chronos +++ b/vendor/nim-chronos @@ -1 +1 @@ -Subproject commit 785fcf4ddec1101a3df1f044d6331504d7ab95c6 +Subproject commit 45f43a9ad8bd8bcf5903b42f365c1c879bd54240 diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index e82080f7..425e7248 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit e82080f7b1aa61c6d35fa5311b873f41eff4bb52 +Subproject commit 425e72487ea4bd5766b5a0590a05039406f46a2f diff --git a/vendor/nim-lsquic b/vendor/nim-lsquic new file mode 160000 index 00000000..a776eced --- /dev/null +++ b/vendor/nim-lsquic @@ -0,0 +1 @@ +Subproject commit a776eced48d1f3c630d8f3a8a3e976171dd1f9c1 From 80ea4c71ccdc4e3ed85ef58cfdfa3198eb8e03bc Mon Sep 17 00:00:00 2001 From: Chrysostomos Nanakos Date: Tue, 5 May 2026 12:07:29 +0300 Subject: [PATCH 23/32] fix: rewrite Network - Test Limits for chronos AsyncSemaphore Signed-off-by: Chrysostomos Nanakos --- tests/storage/blockexchange/testnetwork.nim | 76 +++++++++++---------- 1 file changed, 40 insertions(+), 36 deletions(-) diff --git a/tests/storage/blockexchange/testnetwork.nim b/tests/storage/blockexchange/testnetwork.nim index c03cf7db..638f6735 100644 --- a/tests/storage/blockexchange/testnetwork.nim +++ b/tests/storage/blockexchange/testnetwork.nim @@ -1,3 +1,4 @@ +import std/importutils import std/[sequtils, tables] import pkg/chronos @@ -12,6 +13,8 @@ import ../../asynctest import ../examples import ../helpers +privateAccess(BlockExcNetwork) + asyncchecksuite "Network - Handlers": let rng = Rng.instance() @@ -174,39 +177,40 @@ asyncchecksuite "Network - Senders": await done.wait(500.millis) -# TODO: AsyncSemaphore in chronos requires size > 0, maxInflight = 0 no longer valid -# asyncchecksuite "Network - Test Limits": -# var -# switch1, switch2: Switch -# network1, 
network2: BlockExcNetwork -# done: Future[void] -# -# setup: -# done = newFuture[void]() -# switch1 = newStandardSwitch() -# switch2 = newStandardSwitch() -# -# network1 = BlockExcNetwork.new(switch = switch1, maxInflight = 0) -# switch1.mount(network1) -# -# network2 = BlockExcNetwork.new(switch = switch2) -# switch2.mount(network2) -# -# await switch1.start() -# await switch2.start() -# -# await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) -# -# teardown: -# await allFuturesThrowing(switch1.stop(), switch2.stop()) -# -# test "Concurrent Sends": -# network2.handlers.onPresence = proc( -# peer: PeerId, presence: seq[BlockPresence] -# ): Future[void] {.async: (raises: []).} = -# check false -# -# let fut = network1.send(switch2.peerInfo.peerId, Message()) -# -# await sleepAsync(100.millis) -# check not fut.finished +asyncchecksuite "Network - Test Limits": + var + switch1, switch2: Switch + network1, network2: BlockExcNetwork + done: Future[void] + + setup: + done = newFuture[void]() + switch1 = newStandardSwitch() + switch2 = newStandardSwitch() + + network1 = BlockExcNetwork.new(switch = switch1, maxInflight = 1) + switch1.mount(network1) + + network2 = BlockExcNetwork.new(switch = switch2) + switch2.mount(network2) + + await switch1.start() + await switch2.start() + + await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) + + teardown: + await allFuturesThrowing(switch1.stop(), switch2.stop()) + + test "Concurrent Sends": + network2.handlers.onPresence = proc( + peer: PeerId, presence: seq[BlockPresence] + ): Future[void] {.async: (raises: []).} = + check false + + await network1.inflightSema.acquire() + + let fut = network1.send(switch2.peerInfo.peerId, Message()) + + await sleepAsync(100.millis) + check not fut.finished From 17ef2d3b585f211e06da50020b1ceccc7d80d285 Mon Sep 17 00:00:00 2001 From: Chrysostomos Nanakos Date: Tue, 5 May 2026 12:41:52 +0300 Subject: [PATCH 24/32] fix: don't release inflight semaphore when acquire 
is cancelled Signed-off-by: Chrysostomos Nanakos --- storage/blockexchange/network/network.nim | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/storage/blockexchange/network/network.nim b/storage/blockexchange/network/network.nim index 4a08f203..95870595 100644 --- a/storage/blockexchange/network/network.nim +++ b/storage/blockexchange/network/network.nim @@ -107,16 +107,17 @@ proc send*( let peer = b.peers[id] await b.inflightSema.acquire() - await peer.send(msg) + try: + await peer.send(msg) + finally: + try: + b.inflightSema.release() + except AsyncSemaphoreError as err: + error "Failed to release semaphore", msg = err.msg except CancelledError as error: raise error except CatchableError as err: error "Error sending message", peer = id, msg = err.msg - finally: - try: - b.inflightSema.release() - except AsyncSemaphoreError as err: - error "Failed to release semaphore", msg = err.msg proc handleWantList( b: BlockExcNetwork, peer: NetworkPeer, list: WantList From 00e5c0cfac5b594824b260724629edac8a516dfd Mon Sep 17 00:00:00 2001 From: Arnaud Date: Tue, 5 May 2026 14:58:26 +0400 Subject: [PATCH 25/32] use AutoNat dialback IP instead of router public IP --- storage/nat.nim | 281 +++++++--------------------- storage/storage.nim | 10 +- storage/utils/addrutils.nim | 9 +- tests/storage/helpers/nodeutils.nim | 9 +- tests/storage/testnat.nim | 132 +++++-------- 5 files changed, 133 insertions(+), 308 deletions(-) diff --git a/storage/nat.nim b/storage/nat.nim index ee9ce622..863b1e68 100644 --- a/storage/nat.nim +++ b/storage/nat.nim @@ -9,15 +9,14 @@ {.push raises: [].} import - std/[options, os, times, net, atomics, exitprocs], + std/[options, os, times, atomics, exitprocs], nat_traversal/[miniupnpc, natpmp], - json_serialization/std/net, results import pkg/chronos import pkg/chronicles import pkg/libp2p -import pkg/libp2p/protocols/connectivity/autonatv2/service +import pkg/libp2p/protocols/connectivity/autonat/types import 
pkg/libp2p/services/autorelayservice import ./utils @@ -50,41 +49,25 @@ var npmp {.threadvar.}: NatPmp strategy = NatStrategy.NatAuto natClosed: Atomic[bool] - extIp: Option[IpAddress] activeMappings: seq[PortMappings] natThreads: seq[Thread[PortMappingArgs]] = @[] logScope: topics = "nat" -type PrefSrcStatus = enum - NoRoutingInfo - PrefSrcIsPublic - PrefSrcIsPrivate - BindAddressIsPublic - BindAddressIsPrivate - type NatMapper* = ref object of RootObj -method mapNatAddresses*( - m: NatMapper, addrs: seq[MultiAddress] -): tuple[libp2p, discovery: seq[MultiAddress]] {.base, gcsafe, raises: [].} = - raiseAssert "mapNatAddresses not implemented" - -method getReachableAddresses*( - m: NatMapper, addrs: seq[MultiAddress] -): tuple[libp2p, discovery: seq[MultiAddress]] {.base, gcsafe, raises: [].} = - raiseAssert "getReachableAddresses not implemented" +method mapNatPorts*(m: NatMapper): Option[(Port, Port)] {.base, gcsafe, raises: [].} = + raiseAssert "mapNatPorts not implemented" type DefaultNatMapper* = ref object of NatMapper natConfig*: NatConfig + tcpPort*: Port discoveryPort*: Port -## Also does threadvar initialisation. +## Initialises the UPnP or NAT-PMP threadvar and sets the `strategy` threadvar. ## Must be called before redirectPorts() in each thread. 
-proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress] = - var externalIP: IpAddress - +proc initNatDevice(natStrategy: NatStrategy, quiet = false): bool = if natStrategy == NatStrategy.NatAuto or natStrategy == NatStrategy.NatUpnp: if upnp == nil: upnp = newMiniupnp() @@ -114,18 +97,8 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress] if not quiet: debug "UPnP", msg if canContinue: - let ires = upnp.externalIPAddress() - if ires.isErr: - debug "UPnP", msg = ires.error - else: - # if we got this far, UPnP is working and we don't need to try NAT-PMP - try: - externalIP = parseIpAddress(ires.value) - strategy = NatStrategy.NatUpnp - return some(externalIP) - except ValueError as e: - error "parseIpAddress() exception", err = e.msg - return + strategy = NatStrategy.NatUpnp + return true if natStrategy == NatStrategy.NatAuto or natStrategy == NatStrategy.NatPmp: if npmp == nil: @@ -134,61 +107,10 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress] if nres.isErr: debug "NAT-PMP", msg = nres.error else: - let nires = npmp.externalIPAddress() - if nires.isErr: - debug "NAT-PMP", msg = nires.error - else: - try: - externalIP = parseIpAddress($(nires.value)) - strategy = NatStrategy.NatPmp - return some(externalIP) - except ValueError as e: - error "parseIpAddress() exception", err = e.msg - return + strategy = NatStrategy.NatPmp + return true -# This queries the routing table to get the "preferred source" attribute and -# checks if it's a public IP. If so, then it's our public IP. -# -# Further more, we check if the bind address (user provided, or a "0.0.0.0" -# default) is a public IP. That's a long shot, because code paths involving a -# user-provided bind address are not supposed to get here. 
-proc getRoutePrefSrc(bindIp: IpAddress): (Option[IpAddress], PrefSrcStatus) = - let bindAddress = initTAddress(bindIp, Port(0)) - - if bindAddress.isAnyLocal(): - let ip = - if bindIp.family == IpAddressFamily.IPv6: - getRouteIpv6() - else: - getRouteIpv4() - - if ip.isErr(): - # No route was found, log error and continue without IP. - error "No routable IP address found, check your network connection", - error = ip.error - return (none(IpAddress), NoRoutingInfo) - elif ip.get().isGlobalUnicast(): - return (some(ip.get()), PrefSrcIsPublic) - else: - return (none(IpAddress), PrefSrcIsPrivate) - elif bindAddress.isGlobalUnicast(): - return (some(bindIp), BindAddressIsPublic) - else: - return (none(IpAddress), BindAddressIsPrivate) - -# Try to detect a public IP assigned to this host, before trying NAT traversal. -proc getPublicRoutePrefSrcOrExternalIP*( - natStrategy: NatStrategy, bindIp: IpAddress, quiet = true -): Option[IpAddress] = - let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp) - - case prefSrcStatus - of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic: - return prefSrcIp - of PrefSrcIsPrivate, BindAddressIsPrivate: - let extIp = getExternalIP(natStrategy, quiet) - if extIp.isSome: - return some(extIp.get) + return false proc doPortMapping( strategy: NatStrategy, tcpPort, udpPort: Port, description: string @@ -262,10 +184,8 @@ proc repeatPortMapping(args: PortMappingArgs) {.thread, raises: [ValueError].} = # We can't use copies of Miniupnp and NatPmp objects in this thread, because they share # C pointers with other instances that have already been garbage collected, so - # we use threadvars instead and initialise them again with getExternalIP(), - # even though we don't need the external IP's value. - let ipres = getExternalIP(strategy, quiet = true) - if ipres.isSome: + # we use threadvars instead and initialise them again here. 
+ if initNatDevice(strategy, quiet = true): while natClosed.load() == false: let currTime = now() if currTime >= (lastUpdate + interval): @@ -292,8 +212,7 @@ proc stopNatThreads() {.noconv.} = # In Windows, a new thread is created for the signal handler, so we need to # initialise our threadvars again. - let ipres = getExternalIP(strategy, quiet = true) - if ipres.isSome: + if initNatDevice(strategy, quiet = true): if strategy == NatStrategy.NatUpnp: for entry in activeMappings: for t in [ @@ -358,55 +277,23 @@ proc redirectPorts*( proc setupNat*( natStrategy: NatStrategy, tcpPort, udpPort: Port, clientId: string -): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] = - ## Setup NAT port mapping and get external IP address. - ## If any of this fails, we don't return any IP address but do return the - ## original ports as best effort. +): Option[(Port, Port)] = + ## Setup NAT port mapping. + ## Returns the external (tcpPort, udpPort) if port mapping succeeded, none otherwise. ## TODO: Allow for tcp or udp port mapping to be optional. 
- if extIp.isNone: - extIp = getExternalIP(natStrategy) - if extIp.isSome: - let ip = extIp.get - let extPorts = ( - {.gcsafe.}: - redirectPorts( - strategy, tcpPort = tcpPort, udpPort = udpPort, description = clientId - ) - ) - if extPorts.isSome: - let (extTcpPort, extUdpPort) = extPorts.get() - (ip: some(ip), tcpPort: some(extTcpPort), udpPort: some(extUdpPort)) - else: - warn "UPnP/NAT-PMP available but port forwarding failed" - (ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort)) - else: + if not initNatDevice(natStrategy): warn "UPnP/NAT-PMP not available" - (ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort)) + return none((Port, Port)) -proc setupAddress*( - natConfig: NatConfig, bindIp: IpAddress, tcpPort, udpPort: Port, clientId: string -): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] {.gcsafe.} = - ## Set-up of the external address via any of the ways as configured in - ## `NatConfig`. In case all fails an error is logged and the bind ports are - ## selected also as external ports, as best effort and in hope that the - ## external IP can be figured out by other means at a later stage. - ## TODO: Allow for tcp or udp bind ports to be optional. 
- - if natConfig.hasExtIp: - # any required port redirection must be done by hand - return (some(natConfig.extIp), some(tcpPort), some(udpPort)) - - case natConfig.nat - of NatStrategy.NatAuto: - let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp) - - case prefSrcStatus - of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic: - return (prefSrcIp, some(tcpPort), some(udpPort)) - of PrefSrcIsPrivate, BindAddressIsPrivate: - return setupNat(natConfig.nat, tcpPort, udpPort, clientId) - of NatStrategy.NatUpnp, NatStrategy.NatPmp: - return setupNat(natConfig.nat, tcpPort, udpPort, clientId) + let extPorts = ( + {.gcsafe.}: + redirectPorts( + strategy, tcpPort = tcpPort, udpPort = udpPort, description = clientId + ) + ) + if extPorts.isNone: + warn "UPnP/NAT-PMP available but port forwarding failed" + extPorts proc findReachableNodes*(bootstrapNodes: seq[SignedPeerRecord]): seq[SignedPeerRecord] = ## Returns the list of nodes known to be directly reachable. @@ -414,56 +301,13 @@ proc findReachableNodes*(bootstrapNodes: seq[SignedPeerRecord]): seq[SignedPeerR ## confirmed reachable by AutoNAT could be included. 
bootstrapNodes -proc nattedAddress*( - natConfig: NatConfig, addrs: seq[MultiAddress], udpPort: Port -): tuple[libp2p, discovery: seq[MultiAddress]] = - ## Takes a NAT configuration, sequence of multiaddresses and UDP port and returns: - ## - Modified multiaddresses with NAT-mapped addresses for libp2p - ## - Discovery addresses with NAT-mapped UDP ports +proc nattedPorts*(natConfig: NatConfig, tcpPort, udpPort: Port): Option[(Port, Port)] = + if natConfig.hasExtIp: + return none((Port, Port)) # manual setup, no port mapping needed + setupNat(natConfig.nat, tcpPort, udpPort, "storage") - var discoveryAddrs = newSeq[MultiAddress](0) - let newAddrs = addrs.mapIt: - block: - # Extract IP address and port from the multiaddress - let (ipPart, port) = getAddressAndPort(it) - if ipPart.isSome and port.isSome: - # Try to setup NAT mapping for the address - let (newIP, tcp, udp) = - setupAddress(natConfig, ipPart.get, port.get, udpPort, "storage") - if newIP.isSome: - # NAT mapping successful - add discovery address with mapped UDP port - discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(newIP.get, udp.get)) - # Remap original address with NAT IP and TCP port - it.remapAddr(ip = newIP, port = tcp) - else: - # NAT mapping failed - use original address - echo "Failed to get external IP, using original address", it - discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(ipPart.get, udpPort)) - it - else: - # Invalid multiaddress format - return as is - it - (newAddrs, discoveryAddrs) - -method mapNatAddresses*( - m: DefaultNatMapper, addrs: seq[MultiAddress] -): tuple[libp2p, discovery: seq[MultiAddress]] {.gcsafe, raises: [].} = - nattedAddress(m.natConfig, addrs, m.discoveryPort) - -method getReachableAddresses*( - m: DefaultNatMapper, addrs: seq[MultiAddress] -): tuple[libp2p, discovery: seq[MultiAddress]] {.gcsafe, raises: [].} = - let ip = - if m.natConfig.hasExtIp: - some(m.natConfig.extIp) - else: - let (routeIp, _) = getRoutePrefSrc(static parseIpAddress("0.0.0.0")) - routeIp 
- if ip.isNone: - return (@[], @[]) - let announceAddrs = - addrs.mapIt(it.remapAddr(ip = ip, port = none(Port))).deduplicate() - (announceAddrs, @[getMultiAddrWithIPAndUDPPort(ip.get, m.discoveryPort)]) +method mapNatPorts*(m: DefaultNatMapper): Option[(Port, Port)] {.gcsafe, raises: [].} = + nattedPorts(m.natConfig, m.tcpPort, m.discoveryPort) proc hasPublicIp*(addrs: seq[MultiAddress]): bool = for addr in addrs: @@ -473,6 +317,8 @@ proc hasPublicIp*(addrs: seq[MultiAddress]): bool = proc handleNatStatus*( networkReachability: NetworkReachability, + dialBackAddr: Opt[MultiAddress], + discoveryPort: Port, mapper: NatMapper, discovery: Discovery, switch: Switch, @@ -483,33 +329,42 @@ proc handleNatStatus*( # Nothing to do here, not enough confidence score result discard of Reachable: - # For UPnP, it the mapping was a success, - # the autorelay service has been stopped - # and the address was already announced + if dialBackAddr.isNone: + warn "Got empty dialback address in AutoNat when node is Reachable" + return + if autoRelayService.isRunning: if not await autoRelayService.stop(switch): debug "AutoRelayService stop method returned false" - let (announceAddrs, discoveryAddrs) = - mapper.getReachableAddresses(switch.peerInfo.addrs) - discovery.updateAnnounceRecord(announceAddrs) - discovery.updateDhtRecord(announceAddrs & discoveryAddrs) + let discAddr = + dialBackAddr.get.remapAddr(protocol = some("udp"), port = some(discoveryPort)) + discovery.updateAnnounceRecord(@[dialBackAddr.get]) + discovery.updateDhtRecord(@[dialBackAddr.get, discAddr]) # TODO: switch DHT to server mode of NotReachable: - let (announceAddrs, discoveryAddrs) = mapper.mapNatAddresses(switch.peerInfo.addrs) + var hasPortMapping = false - # With a UPnP / NatPmP successful mapping, - # we suppose that having a public IP make it Reachable. - # If not, the state will be updated in the next Autonat iteration. - # TODO: Do we need to manually call dialMe to make sure we are Reachable ? 
- if hasPublicIp(announceAddrs): - discovery.updateAnnounceRecord(announceAddrs) - discovery.updateDhtRecord(announceAddrs & discoveryAddrs) + if dialBackAddr.isSome: + let maybePorts = mapper.mapNatPorts() - if autoRelayService.isRunning: - if not await autoRelayService.stop(switch): - debug "AutoRelayService stop method returned false" - else: - if not autoRelayService.isRunning: - if not await autoRelayService.setup(switch): - debug "AutoRelayService setup method returned false" + if maybePorts.isSome: + let (tcpPort, udpPort) = maybePorts.get() + let announceAddr = dialBackAddr.get.remapAddr(port = some(tcpPort)) + let discAddr = + dialBackAddr.get.remapAddr(protocol = some("udp"), port = some(udpPort)) + + # TODO: Try a dial me to make sure we are reachable + + if autoRelayService.isRunning: + if not await autoRelayService.stop(switch): + debug "AutoRelayService stop method returned false" + + discovery.updateAnnounceRecord(@[announceAddr]) + discovery.updateDhtRecord(@[announceAddr, discAddr]) + + hasPortMapping = true + + if not hasPortMapping and not autoRelayService.isRunning: + if not await autoRelayService.setup(switch): + debug "AutoRelayService setup method returned false" diff --git a/storage/storage.nim b/storage/storage.nim index 31bb9d8a..5e7ce9f7 100644 --- a/storage/storage.nim +++ b/storage/storage.nim @@ -360,8 +360,11 @@ proc new*( switch.mount(network) switch.mount(manifestProto) - let natMapper = - DefaultNatMapper(natConfig: config.nat, discoveryPort: config.discoveryPort) + let natMapper = DefaultNatMapper( + natConfig: config.nat, + tcpPort: config.listenPort, + discoveryPort: config.discoveryPort, + ) autonatService.setStatusAndConfidenceHandler( proc( networkReachability: NetworkReachability, @@ -370,7 +373,8 @@ proc new*( ) {.async: (raises: [CancelledError]).} = debug "AutoNAT status", reachability = networkReachability, confidence await handleNatStatus( - networkReachability, natMapper, discovery, switch, autoRelayService + 
networkReachability, addrs, config.discoveryPort, natMapper, discovery, switch, + autoRelayService, ) ) diff --git a/storage/utils/addrutils.nim b/storage/utils/addrutils.nim index 31570e06..09891cfa 100644 --- a/storage/utils/addrutils.nim +++ b/storage/utils/addrutils.nim @@ -20,8 +20,9 @@ func remapAddr*( address: MultiAddress, ip: Option[IpAddress] = IpAddress.none, port: Option[Port] = Port.none, + protocol: Option[string] = string.none, ): MultiAddress = - ## Remap addresses to new IP and/or Port + ## Remap addresses to new IP, port, and/or transport protocol (e.g. "tcp" → "udp") ## var parts = ($address).split("/") @@ -32,6 +33,12 @@ func remapAddr*( else: parts[2] + parts[3] = + if protocol.isSome: + protocol.get + else: + parts[3] + parts[4] = if port.isSome: $port.get diff --git a/tests/storage/helpers/nodeutils.nim b/tests/storage/helpers/nodeutils.nim index a891db23..ae8dab9c 100644 --- a/tests/storage/helpers/nodeutils.nim +++ b/tests/storage/helpers/nodeutils.nim @@ -218,13 +218,8 @@ proc generateNodes*( if config.enableBootstrap: waitFor switch.peerInfo.update() - let (announceAddrs, discoveryAddrs) = nattedAddress( - NatConfig(hasExtIp: false, nat: NatAuto), - switch.peerInfo.addrs, - bindPort.Port, - ) - blockDiscovery.updateAnnounceRecord(announceAddrs) - blockDiscovery.updateDhtRecord(discoveryAddrs) + blockDiscovery.updateAnnounceRecord(switch.peerInfo.addrs) + blockDiscovery.updateDhtRecord(switch.peerInfo.addrs) if blockDiscovery.dhtRecord.isSome: bootstrapNodes.add !blockDiscovery.dhtRecord diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index ee9ed27e..70b80572 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -15,66 +15,34 @@ import ../../storage/discovery import ../../storage/rng import ../../storage/utils import ../../storage/utils/natutils +import ../../storage/utils/addrutils type MockNatMapper = ref object of NatMapper - mapped: tuple[libp2p, discovery: seq[MultiAddress]] + mappedPorts: 
Option[(Port, Port)] -method mapNatAddresses*( - m: MockNatMapper, addrs: seq[MultiAddress] -): tuple[libp2p, discovery: seq[MultiAddress]] {.raises: [].} = - m.mapped +method mapNatPorts*(m: MockNatMapper): Option[(Port, Port)] {.raises: [].} = + m.mappedPorts -method getReachableAddresses*( - m: MockNatMapper, addrs: seq[MultiAddress] -): tuple[libp2p, discovery: seq[MultiAddress]] {.raises: [].} = - m.mapped +suite "remapAddr": + test "replaces protocol tcp with udp": + let ma = MultiAddress.init("/ip4/1.2.3.4/tcp/5000").expect("valid") + let remapped = ma.remapAddr(protocol = some("udp"), port = some(Port(9000))) + check remapped == MultiAddress.init("/ip4/1.2.3.4/udp/9000").expect("valid") -suite "NAT Address Tests": - test "nattedAddress with local addresses": - # Setup test data - let - udpPort = Port(1234) - natConfig = NatConfig(hasExtIp: true, extIp: parseIpAddress("8.8.8.8")) + test "replaces only port, keeping protocol": + let ma = MultiAddress.init("/ip4/1.2.3.4/tcp/5000").expect("valid") + let remapped = ma.remapAddr(port = some(Port(9000))) + check remapped == MultiAddress.init("/ip4/1.2.3.4/tcp/9000").expect("valid") - # Create test addresses - localAddr = MultiAddress.init("/ip4/127.0.0.1/tcp/5000").expect("valid multiaddr") - anyAddr = MultiAddress.init("/ip4/0.0.0.0/tcp/5000").expect("valid multiaddr") - publicAddr = - MultiAddress.init("/ip4/192.168.1.1/tcp/5000").expect("valid multiaddr") + test "replaces only ip, keeping protocol and port": + let ma = MultiAddress.init("/ip4/1.2.3.4/tcp/5000").expect("valid") + let remapped = ma.remapAddr(ip = some(parseIpAddress("8.8.8.8"))) + check remapped == MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid") - # Expected results - let - expectedDiscoveryAddrs = @[ - MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"), - ] - 
expectedlibp2pAddrs = @[ - MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), - MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"), - ] - #ipv6Addr = MultiAddress.init("/ip6/::1/tcp/5000").expect("valid multiaddr") - addrs = @[localAddr, anyAddr, publicAddr] - - # Test address remapping - let (libp2pAddrs, discoveryAddrs) = nattedAddress(natConfig, addrs, udpPort) - - # Verify results - check(discoveryAddrs == expectedDiscoveryAddrs) - check(libp2pAddrs == expectedlibp2pAddrs) - -suite "getReachableAddresses": - test "returns remapped addresses when extIp is configured": - let - natConfig = NatConfig(hasExtIp: true, extIp: parseIpAddress("1.2.3.4")) - mapper = DefaultNatMapper(natConfig: natConfig, discoveryPort: Port(8090)) - listenAddr = MultiAddress.init("/ip4/0.0.0.0/tcp/5000").expect("valid") - - let (libp2pAddrs, discAddrs) = mapper.getReachableAddresses(@[listenAddr]) - - check libp2pAddrs == @[MultiAddress.init("/ip4/1.2.3.4/tcp/5000").expect("valid")] - check discAddrs == @[MultiAddress.init("/ip4/1.2.3.4/udp/8090").expect("valid")] +suite "nattedPorts": + test "returns none when extIp is configured (manual setup)": + let natConfig = NatConfig(hasExtIp: true, extIp: parseIpAddress("8.8.8.8")) + check nattedPorts(natConfig, Port(5000), Port(1234)).isNone suite "hasPublicIp": test "hasPublicIp returns true when the address is public": @@ -107,51 +75,47 @@ asyncchecksuite "handleNatStatus": if autoRelay.isRunning: discard await autoRelay.stop(sw) - test "handleNatStatus announces address when the node is not Reachable and the UPnP succeed with public ip": - let announceAddr = MultiAddress.init("/ip4/1.2.3.4/tcp/8080").expect("valid") - let discAddr = MultiAddress.init("/ip4/1.2.3.4/udp/8090").expect("valid") - let mapper = MockNatMapper(mapped: (@[announceAddr], @[discAddr])) + let discoveryPort = Port(8090) - await handleNatStatus(NotReachable, 
mapper, disc, sw, autoRelay) + test "handleNatStatus announces mapped address when NotReachable and UPnP succeeds": + let dialBack = MultiAddress.init("/ip4/1.2.3.4/tcp/8080").expect("valid") + let mapper = MockNatMapper(mappedPorts: some((Port(9000), Port(9001)))) - check disc.announceAddrs == @[announceAddr] + await handleNatStatus( + NotReachable, Opt.some(dialBack), discoveryPort, mapper, disc, sw, autoRelay + ) + + check disc.announceAddrs == + @[MultiAddress.init("/ip4/1.2.3.4/tcp/9000").expect("valid")] check not autoRelay.isRunning - # test "handleNatStatus does not announce address when the node is not Reachable and the UPnP succeed with private ip": - # let privateAddr = MultiAddress.init("/ip4/192.168.1.1/tcp/8080").expect("valid") - # let mapper = MockNatMapper(mapped: (@[privateAddr], @[])) + test "handleNatStatus starts autoRelay when NotReachable and UPnP failed": + let mapper = MockNatMapper(mappedPorts: none((Port, Port))) - # await handleNatStatus( - # NotReachable, mapper, disc, sw, autoRelay - # ) - - # check disc.announceAddrs == @[] - # check not autoRelay.isRunning - - test "handleNatStatus starts autoRelay when node is not Reachable and UPnP failed": - let mapper = MockNatMapper(mapped: (@[], @[])) - - await handleNatStatus(NotReachable, mapper, disc, sw, autoRelay) + await handleNatStatus( + NotReachable, Opt.none(MultiAddress), discoveryPort, mapper, disc, sw, autoRelay + ) check autoRelay.isRunning - # The addresses will be announced in the onReservation callback - # after a node accepted a Relay reservation. 
- test "handleNatStatus does not announce address when node is Reachable and relay is not running": - let mapper = MockNatMapper(mapped: (@[], @[])) + test "handleNatStatus does not announce address when Reachable and no dialBackAddr": + let mapper = MockNatMapper(mappedPorts: none((Port, Port))) - await handleNatStatus(Reachable, mapper, disc, sw, autoRelay) + await handleNatStatus( + Reachable, Opt.none(MultiAddress), discoveryPort, mapper, disc, sw, autoRelay + ) check disc.announceAddrs == newSeq[MultiAddress]() check not autoRelay.isRunning - test "handleNatStatus stops relay and announces address when node is Reachable and relay is running": - let announceAddr = MultiAddress.init("/ip4/1.2.3.4/tcp/8080").expect("valid") - let discAddr = MultiAddress.init("/ip4/1.2.3.4/udp/8090").expect("valid") - let mapper = MockNatMapper(mapped: (@[announceAddr], @[discAddr])) + test "handleNatStatus stops relay and announces dialBackAddr when Reachable": + let dialBack = MultiAddress.init("/ip4/1.2.3.4/tcp/8080").expect("valid") + let mapper = MockNatMapper(mappedPorts: none((Port, Port))) discard await autorelayservice.setup(autoRelay, sw) - await handleNatStatus(Reachable, mapper, disc, sw, autoRelay) + await handleNatStatus( + Reachable, Opt.some(dialBack), discoveryPort, mapper, disc, sw, autoRelay + ) check not autoRelay.isRunning - check disc.announceAddrs == @[announceAddr] + check disc.announceAddrs == @[dialBack] From 62af77926656643282cbdb992b29b052819a6ff5 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Tue, 5 May 2026 21:28:01 +0400 Subject: [PATCH 26/32] Rebase and cleanup --- storage/blockexchange/peers/peercontext.nim | 2 +- storage/manifest/coders.nim | 4 +- storage/nat.nim | 29 ++++---- storage/storage.nim | 34 +++------- storage/stores/networkstore.nim | 2 + storage/utils/addrutils.nim | 35 ---------- storage/utils/natutils.nim | 50 ++++++-------- tests/integration/1_minute/testnat.nim | 1 - tests/integration/storageconfig.nim | 8 +++ 
.../blockexchange/discovery/testdiscovery.nim | 4 +- tests/storage/testnatutils.nim | 68 +------------------ 11 files changed, 56 insertions(+), 181 deletions(-) diff --git a/storage/blockexchange/peers/peercontext.nim b/storage/blockexchange/peers/peercontext.nim index 9e7c80f6..d81a4161 100644 --- a/storage/blockexchange/peers/peercontext.nim +++ b/storage/blockexchange/peers/peercontext.nim @@ -7,7 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import std/math +import std/[math, options] import pkg/libp2p import pkg/chronos diff --git a/storage/manifest/coders.nim b/storage/manifest/coders.nim index 19664e64..a774a96a 100644 --- a/storage/manifest/coders.nim +++ b/storage/manifest/coders.nim @@ -13,9 +13,7 @@ import times {.push raises: [].} -import std/tables -import std/sequtils -import std/options +import std/[tables, options] import pkg/libp2p import pkg/questionable diff --git a/storage/nat.nim b/storage/nat.nim index 863b1e68..9e4e591b 100644 --- a/storage/nat.nim +++ b/storage/nat.nim @@ -16,7 +16,7 @@ import import pkg/chronos import pkg/chronicles import pkg/libp2p -import pkg/libp2p/protocols/connectivity/autonat/types +import pkg/libp2p/protocols/connectivity/autonatv2/service import pkg/libp2p/services/autorelayservice import ./utils @@ -58,7 +58,7 @@ logScope: type NatMapper* = ref object of RootObj method mapNatPorts*(m: NatMapper): Option[(Port, Port)] {.base, gcsafe, raises: [].} = - raiseAssert "mapNatPorts not implemented" + none((Port, Port)) type DefaultNatMapper* = ref object of NatMapper natConfig*: NatConfig @@ -303,18 +303,13 @@ proc findReachableNodes*(bootstrapNodes: seq[SignedPeerRecord]): seq[SignedPeerR proc nattedPorts*(natConfig: NatConfig, tcpPort, udpPort: Port): Option[(Port, Port)] = if natConfig.hasExtIp: - return none((Port, Port)) # manual setup, no port mapping needed - setupNat(natConfig.nat, tcpPort, udpPort, "storage") + return none((Port, Port)) + let clientId = 
"storage" + return setupNat(natConfig.nat, tcpPort, udpPort, clientId) method mapNatPorts*(m: DefaultNatMapper): Option[(Port, Port)] {.gcsafe, raises: [].} = nattedPorts(m.natConfig, m.tcpPort, m.discoveryPort) -proc hasPublicIp*(addrs: seq[MultiAddress]): bool = - for addr in addrs: - let (ip, _) = getAddressAndPort(addr) - if ip.isSome and isGlobalUnicast(ip.get): - return true - proc handleNatStatus*( networkReachability: NetworkReachability, dialBackAddr: Opt[MultiAddress], @@ -340,18 +335,20 @@ proc handleNatStatus*( let discAddr = dialBackAddr.get.remapAddr(protocol = some("udp"), port = some(discoveryPort)) discovery.updateAnnounceRecord(@[dialBackAddr.get]) - discovery.updateDhtRecord(@[dialBackAddr.get, discAddr]) + discovery.updateDhtRecord(@[discAddr]) # TODO: switch DHT to server mode of NotReachable: var hasPortMapping = false - if dialBackAddr.isSome: + if dialBackAddr.isNone: + warn "Got empty dialback address in AutoNat when node is Reachable" + else: let maybePorts = mapper.mapNatPorts() if maybePorts.isSome: let (tcpPort, udpPort) = maybePorts.get() - let announceAddr = dialBackAddr.get.remapAddr(port = some(tcpPort)) - let discAddr = + let announceAddress = dialBackAddr.get.remapAddr(port = some(tcpPort)) + let discoveryAddrs = dialBackAddr.get.remapAddr(protocol = some("udp"), port = some(udpPort)) # TODO: Try a dial me to make sure we are reachable @@ -360,8 +357,8 @@ proc handleNatStatus*( if not await autoRelayService.stop(switch): debug "AutoRelayService stop method returned false" - discovery.updateAnnounceRecord(@[announceAddr]) - discovery.updateDhtRecord(@[announceAddr, discAddr]) + discovery.updateAnnounceRecord(@[announceAddress]) + discovery.updateDhtRecord(@[discoveryAddrs]) hasPortMapping = true diff --git a/storage/storage.nim b/storage/storage.nim index 5e7ce9f7..69b9bfea 100644 --- a/storage/storage.nim +++ b/storage/storage.nim @@ -41,7 +41,6 @@ import ./namespaces import ./storagetypes import ./logutils import ./nat -import 
./utils/natutils logScope: topics = "storage node" @@ -88,29 +87,16 @@ proc start*(s: StorageServer) {.async.} = else: getBestLocalAddress(s.config.listenIp) - if announceIp.isNone: - # We should have an IP, even at private IP - raise newException(StorageError, "Unable to determine an IP address to announce") - - # Remap switch addresses to the resolved IP (replaces 0.0.0.0 or :: with the actual address), - # keeping unique entries only. - let announceAddrs = s.storageNode.switch.peerInfo.addrs - .mapIt(it.remapAddr(ip = announceIp, port = none(Port))) - .deduplicate() - let discoveryAddrs = - @[getMultiAddrWithIPAndUDPPort(announceIp.get, s.config.discoveryPort)] - s.storageNode.discovery.updateDhtRecord(announceAddrs & discoveryAddrs) - s.storageNode.discovery.updateAnnounceRecord(announceAddrs) - - var hasPublicAddr = false - for announceAddr in announceAddrs: - let (maybeIp, _) = getAddressAndPort(announceAddr) - if maybeIp.isSome and maybeIp.get.isGlobalUnicast(): - hasPublicAddr = true - break - - if not hasPublicAddr: - warn "Unable to determine a public IP address. This node will only be reachable on a private network." 
+ if announceIp.isSome: + let ip = announceIp.get + let announceAddrs = s.storageNode.switch.peerInfo.addrs + .mapIt(it.remapAddr(ip = some(ip), port = none(Port))) + .deduplicate() + let discAddr = getMultiAddrWithIPAndUDPPort(ip, s.config.discoveryPort) + s.storageNode.discovery.updateAnnounceRecord(announceAddrs) + s.storageNode.discovery.updateDhtRecord(announceAddrs & @[discAddr]) + else: + warn "Unable to determine a local IP address to announce" await s.storageNode.start() diff --git a/storage/stores/networkstore.nim b/storage/stores/networkstore.nim index 8c781f6b..6767f042 100644 --- a/storage/stores/networkstore.nim +++ b/storage/stores/networkstore.nim @@ -9,6 +9,8 @@ {.push raises: [].} +import std/options + import pkg/chronos import pkg/libp2p import pkg/questionable/results diff --git a/storage/utils/addrutils.nim b/storage/utils/addrutils.nim index 09891cfa..72d80ad3 100644 --- a/storage/utils/addrutils.nim +++ b/storage/utils/addrutils.nim @@ -74,38 +74,3 @@ proc getMultiAddrWithIpAndTcpPort*(ip: IpAddress, port: Port): MultiAddress = return MultiAddress.init(ipFamily & $ip & "/tcp/" & $port).expect( "Failed to construct multiaddress with IP and TCP port" ) - -proc getAddressAndPort*( - ma: MultiAddress -): tuple[ip: Option[IpAddress], port: Option[Port]] = - try: - # Try IPv4 first - let ipv4Result = ma[multiCodec("ip4")] - let ip = - if ipv4Result.isOk: - let ipBytes = ipv4Result.get().protoArgument().expect("Invalid IPv4 format") - let ipArray = [ipBytes[0], ipBytes[1], ipBytes[2], ipBytes[3]] - some(IpAddress(family: IPv4, address_v4: ipArray)) - else: - # Try IPv6 if IPv4 not found - let ipv6Result = ma[multiCodec("ip6")] - if ipv6Result.isOk: - let ipBytes = ipv6Result.get().protoArgument().expect("Invalid IPv6 format") - var ipArray: array[16, byte] - for i in 0 .. 
15: - ipArray[i] = ipBytes[i] - some(IpAddress(family: IPv6, address_v6: ipArray)) - else: - none(IpAddress) - - # Get TCP Port - let portResult = ma[multiCodec("tcp")] - let port = - if portResult.isOk: - let portBytes = portResult.get().protoArgument().expect("Invalid port format") - some(Port(fromBytesBE(uint16, portBytes))) - else: - none(Port) - (ip: ip, port: port) - except Exception: - (ip: none(IpAddress), port: none(Port)) diff --git a/storage/utils/natutils.nim b/storage/utils/natutils.nim index 04cc57f9..b808d915 100644 --- a/storage/utils/natutils.nim +++ b/storage/utils/natutils.nim @@ -1,6 +1,6 @@ {.push raises: [].} -import std/[net, tables, hashes, options], pkg/results, chronos, chronicles +import std/[net, options], pkg/results, chronos, chronicles import pkg/libp2p @@ -9,54 +9,42 @@ type NatStrategy* = enum NatUpnp NatPmp -func isGlobalUnicast*(address: TransportAddress): bool = - if address.isGlobal() and address.isUnicast(): true else: false +proc getRouteIpv4*(): Result[IpAddress, cstring] = + let + publicAddress = TransportAddress( + family: AddressFamily.IPv4, address_v4: [1'u8, 1, 1, 1], port: Port(0) + ) + route = getBestRoute(publicAddress) -func isGlobalUnicast*(address: IpAddress): bool = - let a = initTAddress(address, Port(0)) - a.isGlobalUnicast() - -proc getRoute(publicAddress: TransportAddress): Result[IpAddress, cstring] = - let route = getBestRoute(publicAddress) - - if route.source.family == AddressFamily.None or route.source.isUnspecified(): - err("No best route found") + if route.source.isUnspecified(): + err("No best ipv4 route found") else: let ip = try: route.source.address() except ValueError as e: - # This should not occur really. error "Address conversion error", exception = e.name, msg = e.msg return err("Invalid IP address") ok(ip) -proc getRouteIpv4*(): Result[IpAddress, cstring] = - # Avoiding Exception with initTAddress and can't make it work with static. 
- # Note: `publicAddress` is only used an "example" IP to find the best route, - # no data is send over the network to this IP! - let publicAddress = TransportAddress( - family: AddressFamily.IPv4, address_v4: [1'u8, 1, 1, 1], port: Port(0) - ) - - return getRoute(publicAddress) - proc getRouteIpv6*(): Result[IpAddress, cstring] = - # Note: `googleDnsIpv6` is only used as an "example" IP to find the best route, - # no data is sent over the network to this IP! const googleDnsIpv6 = TransportAddress( family: AddressFamily.IPv6, - # 2001:4860:4860::8888 address_v6: [32'u8, 1, 72, 96, 72, 96, 0, 0, 0, 0, 0, 0, 0, 0, 136, 136], port: Port(0), ) + let route = getBestRoute(googleDnsIpv6) + if route.source.isUnspecified(): + return err("No best ipv6 route found") + try: + ok(route.source.address()) + except ValueError as e: + error "Address conversion error", exception = e.name, msg = e.msg + err("Invalid IP address") - return getRoute(googleDnsIpv6) - -# If bindIp is a anyLocal address (0.0.0.0 or ::), -# the function will find the best ip address. -# Otherwise, it will just return the ip as it is. proc getBestLocalAddress*(bindIp: IpAddress): Option[IpAddress] = + ## If bindIp is anyLocal (0.0.0.0 or ::), finds the best local IP via routing table. + ## Otherwise returns bindIp as-is. 
let bindAddress = initTAddress(bindIp, Port(0)) if bindAddress.isAnyLocal(): let ip = diff --git a/tests/integration/1_minute/testnat.nim b/tests/integration/1_minute/testnat.nim index 4aa13bce..50c0467d 100644 --- a/tests/integration/1_minute/testnat.nim +++ b/tests/integration/1_minute/testnat.nim @@ -1,6 +1,5 @@ import std/json import std/options -import std/sequtils import pkg/chronos import pkg/questionable/results diff --git a/tests/integration/storageconfig.nim b/tests/integration/storageconfig.nim index 240d44a2..691daf34 100644 --- a/tests/integration/storageconfig.nim +++ b/tests/integration/storageconfig.nim @@ -321,3 +321,11 @@ proc withNatScheduleInterval*( for config in startConfig.configs.mitems: config.addCliOption("--nat-schedule-interval", $scheduleInterval) return startConfig + +proc withExtIp*( + self: StorageConfigs, ip = "127.0.0.1" +): StorageConfigs {.raises: [StorageConfigError].} = + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--nat", "extip:" & ip) + return startConfig diff --git a/tests/storage/blockexchange/discovery/testdiscovery.nim b/tests/storage/blockexchange/discovery/testdiscovery.nim index d467361e..471f9e88 100644 --- a/tests/storage/blockexchange/discovery/testdiscovery.nim +++ b/tests/storage/blockexchange/discovery/testdiscovery.nim @@ -1,6 +1,4 @@ -import std/sequtils -import std/sugar -import std/tables +import std/[sequtils, sugar, tables, options] import pkg/chronos diff --git a/tests/storage/testnatutils.nim b/tests/storage/testnatutils.nim index fcc48561..08a6621e 100644 --- a/tests/storage/testnatutils.nim +++ b/tests/storage/testnatutils.nim @@ -1,67 +1 @@ -import std/[unittest, net, options] -import pkg/chronos -import ../../storage/utils/natutils - -suite "isGlobalUnicast": - test "localhost IPv4 is not global unicast": - check not isGlobalUnicast(parseIpAddress("127.0.0.1")) - - test "unspecified IPv4 is not global unicast": - check not 
isGlobalUnicast(parseIpAddress("0.0.0.0")) - - test "link-local IPv4 is not global unicast": - check not isGlobalUnicast(parseIpAddress("169.254.1.1")) - - test "private IPv4 is not global unicast": - check not isGlobalUnicast(parseIpAddress("10.0.0.1")) - - test "public IPv4 is global unicast": - check isGlobalUnicast(parseIpAddress("8.8.8.8")) - - test "localhost IPv6 is not global unicast": - check not isGlobalUnicast(parseIpAddress("::1")) - - test "unspecified IPv6 is not global unicast": - check not isGlobalUnicast(parseIpAddress("::")) - - test "link-local IPv6 is not global unicast": - check not isGlobalUnicast(parseIpAddress("fe80::1")) - - test "private IPv6 is not global unicast": - check not isGlobalUnicast(parseIpAddress("fc00::1")) - - test "public IPv6 is global unicast": - check isGlobalUnicast(parseIpAddress("2606:4700::1")) - -suite "getRoute": - test "getRouteIpv4 returns a valid IPv4": - let res = getRouteIpv4() - - check res.isOk - check res.get().family == IpAddressFamily.IPv4 - - test "getRouteIpv6 returns a valid IPv6": - let res = getRouteIpv6() - # If the machine does not have a global route because - # it is not configured for IPv6, the test will fail - # because it didn't find the best route. In that case, - # we can just skip the test, because it is not a problem - # with the test itself but the machine configuration. 
- if res.isErr: - check res.error == "No best route found" - else: - check res.get().family == IpAddressFamily.IPv6 - -suite "getBestLocalAddress": - test "specific IPv4 is returned as it is": - let ip = parseIpAddress("192.168.1.1") - check getBestLocalAddress(ip) == some(ip) - - test "specific IPv6 is returned as it is": - let ip = parseIpAddress("2606:4700::1") - check getBestLocalAddress(ip) == some(ip) - - test "0.0.0.0 resolves to a local IPv4": - let res = getBestLocalAddress(parseIpAddress("0.0.0.0")) - check res.isSome - check res.get().family == IpAddressFamily.IPv4 +discard From 05882b4d4127dd0486995abab9f142b7cc2a71e4 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 6 May 2026 10:12:39 +0400 Subject: [PATCH 27/32] Update address announcements update --- storage/discovery.nim | 39 +++++++++++++++------ storage/nat.nim | 11 ++---- storage/storage.nim | 35 ++++++++++--------- storage/utils/addrutils.nim | 1 - storage/utils/natutils.nim | 53 ----------------------------- tests/integration/multinodes.nim | 1 + tests/integration/storageconfig.nim | 9 +++++ tests/storage/helpers/nodeutils.nim | 5 +-- tests/storage/testnat.nim | 13 ------- 9 files changed, 60 insertions(+), 107 deletions(-) diff --git a/storage/discovery.nim b/storage/discovery.nim index 73be4ee3..a45fd16d 100644 --- a/storage/discovery.nim +++ b/storage/discovery.nim @@ -22,6 +22,7 @@ import pkg/codexdht/discv5/[routing_table, protocol as discv5] from pkg/nimcrypto import keccak256 import ./rng +import ./utils/addrutils import ./errors import ./logutils @@ -175,29 +176,45 @@ method removeProvider*( warn "Error removing provider", peerId = peerId, exc = exc.msg raiseAssert("Unexpected Exception in removeProvider") +proc updateRecords*( + d: Discovery, announceAddrs: openArray[MultiAddress], discoveryPort: Port +) = + ## Update both provider and DHT records from TCP announce addresses. + ## Discovery (UDP) addresses are derived by remapping announceAddrs to UDP with discoveryPort. 
+ ## Updates the discv5 SPR once with the full set of addresses. + let tcpAddrs = @announceAddrs + let udpAddrs = + tcpAddrs.mapIt(it.remapAddr(protocol = some("udp"), port = some(discoveryPort))) + + debug "Updating addresses", tcpAddrs, udpAddrs + + d.announceAddrs = tcpAddrs + d.providerRecord = SignedPeerRecord + .init(d.key, PeerRecord.init(d.peerId, tcpAddrs)) + .expect("Should construct signed record").some + d.dhtRecord = SignedPeerRecord + .init(d.key, PeerRecord.init(d.peerId, tcpAddrs & udpAddrs)) + .expect("Should construct signed record").some + + if not d.protocol.isNil: + d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR") + proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) = - ## Update providers record - ## - d.announceAddrs = @addrs - info "Updating announce record", addrs = d.announceAddrs d.providerRecord = SignedPeerRecord .init(d.key, PeerRecord.init(d.peerId, d.announceAddrs)) .expect("Should construct signed record").some - if not d.protocol.isNil: d.protocol.updateRecord(d.providerRecord).expect("Should update SPR") -proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) = - ## Update providers record - ## - +proc updateDhtRecord*( + d: Discovery, addrs: openArray[MultiAddress] +) {.deprecated: "use updateRecords instead".} = info "Updating Dht record", addrs = addrs d.dhtRecord = SignedPeerRecord .init(d.key, PeerRecord.init(d.peerId, @addrs)) .expect("Should construct signed record").some - if not d.protocol.isNil: d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR") @@ -248,7 +265,7 @@ proc new*( key: key, peerId: PeerId.init(key).expect("Should construct PeerId"), store: store ) - self.updateAnnounceRecord(announceAddrs) + self.updateRecords(@[], Port(0)) # -------------------------------------------------------------------------- # FIXME disable IP limits temporarily so we can run our workshop. 
Re-enable diff --git a/storage/nat.nim b/storage/nat.nim index 9e4e591b..49977830 100644 --- a/storage/nat.nim +++ b/storage/nat.nim @@ -16,7 +16,6 @@ import import pkg/chronos import pkg/chronicles import pkg/libp2p -import pkg/libp2p/protocols/connectivity/autonatv2/service import pkg/libp2p/services/autorelayservice import ./utils @@ -332,10 +331,7 @@ proc handleNatStatus*( if not await autoRelayService.stop(switch): debug "AutoRelayService stop method returned false" - let discAddr = - dialBackAddr.get.remapAddr(protocol = some("udp"), port = some(discoveryPort)) - discovery.updateAnnounceRecord(@[dialBackAddr.get]) - discovery.updateDhtRecord(@[discAddr]) + discovery.updateRecords(@[dialBackAddr.get], discoveryPort) # TODO: switch DHT to server mode of NotReachable: var hasPortMapping = false @@ -348,8 +344,6 @@ proc handleNatStatus*( if maybePorts.isSome: let (tcpPort, udpPort) = maybePorts.get() let announceAddress = dialBackAddr.get.remapAddr(port = some(tcpPort)) - let discoveryAddrs = - dialBackAddr.get.remapAddr(protocol = some("udp"), port = some(udpPort)) # TODO: Try a dial me to make sure we are reachable @@ -357,8 +351,7 @@ proc handleNatStatus*( if not await autoRelayService.stop(switch): debug "AutoRelayService stop method returned false" - discovery.updateAnnounceRecord(@[announceAddress]) - discovery.updateDhtRecord(@[discoveryAddrs]) + discovery.updateRecords(@[announceAddress], udpPort) hasPortMapping = true diff --git a/storage/storage.nim b/storage/storage.nim index 69b9bfea..2b0150fa 100644 --- a/storage/storage.nim +++ b/storage/storage.nim @@ -36,7 +36,6 @@ import ./blockexchange import ./utils/fileutils import ./discovery import ./utils/addrutils -import ./utils/natutils import ./namespaces import ./storagetypes import ./logutils @@ -81,22 +80,26 @@ proc start*(s: StorageServer) {.async.} = await s.storageNode.switch.start() - let announceIp = + let announceAddrs = if s.config.nat.hasExtIp: - some(s.config.nat.extIp) + # extip means that 
we assume the IP is reachable + # So we just take the first peer addr and remap it with extip to keep the port only + @[ + s.storageNode.switch.peerInfo.addrs[0].remapAddr( + ip = some(s.config.nat.extIp), port = none(Port) + ) + ] else: - getBestLocalAddress(s.config.listenIp) + # If extip is not set, we have 2 choices: + # 1- Announce the peer addrs contains detected addresses on the machine. + # 2- Wait for AutoNat + # The probleme with 1 is that you will certainly announce private addresses + # and if you advertise a CID, you will advertise these private addresses. + # TODO: DHT client mode + #s.storageNode.switch.peerInfo.addrs + @[] - if announceIp.isSome: - let ip = announceIp.get - let announceAddrs = s.storageNode.switch.peerInfo.addrs - .mapIt(it.remapAddr(ip = some(ip), port = none(Port))) - .deduplicate() - let discAddr = getMultiAddrWithIPAndUDPPort(ip, s.config.discoveryPort) - s.storageNode.discovery.updateAnnounceRecord(announceAddrs) - s.storageNode.discovery.updateDhtRecord(announceAddrs & @[discAddr]) - else: - warn "Unable to determine a local IP address to announce" + s.storageNode.discovery.updateRecords(announceAddrs, s.config.discoveryPort) await s.storageNode.start() @@ -324,8 +327,8 @@ proc new*( client = relayClient, onReservation = proc(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} = debug "Relay reservation updated", addresses - discovery.updateAnnounceRecord(addresses) - discovery.updateDhtRecord(addresses), + # relay addresses are for download traffic only, not DHT routing + discovery.updateAnnounceRecord(addresses), rng = random.Rng.instance(), ) diff --git a/storage/utils/addrutils.nim b/storage/utils/addrutils.nim index 72d80ad3..47204889 100644 --- a/storage/utils/addrutils.nim +++ b/storage/utils/addrutils.nim @@ -14,7 +14,6 @@ import std/strutils import std/options import pkg/libp2p -import pkg/stew/endians2 func remapAddr*( address: MultiAddress, diff --git a/storage/utils/natutils.nim b/storage/utils/natutils.nim index 
b808d915..9d05dd58 100644 --- a/storage/utils/natutils.nim +++ b/storage/utils/natutils.nim @@ -1,59 +1,6 @@ {.push raises: [].} -import std/[net, options], pkg/results, chronos, chronicles - -import pkg/libp2p - type NatStrategy* = enum NatAuto NatUpnp NatPmp - -proc getRouteIpv4*(): Result[IpAddress, cstring] = - let - publicAddress = TransportAddress( - family: AddressFamily.IPv4, address_v4: [1'u8, 1, 1, 1], port: Port(0) - ) - route = getBestRoute(publicAddress) - - if route.source.isUnspecified(): - err("No best ipv4 route found") - else: - let ip = - try: - route.source.address() - except ValueError as e: - error "Address conversion error", exception = e.name, msg = e.msg - return err("Invalid IP address") - ok(ip) - -proc getRouteIpv6*(): Result[IpAddress, cstring] = - const googleDnsIpv6 = TransportAddress( - family: AddressFamily.IPv6, - address_v6: [32'u8, 1, 72, 96, 72, 96, 0, 0, 0, 0, 0, 0, 0, 0, 136, 136], - port: Port(0), - ) - let route = getBestRoute(googleDnsIpv6) - if route.source.isUnspecified(): - return err("No best ipv6 route found") - try: - ok(route.source.address()) - except ValueError as e: - error "Address conversion error", exception = e.name, msg = e.msg - err("Invalid IP address") - -proc getBestLocalAddress*(bindIp: IpAddress): Option[IpAddress] = - ## If bindIp is anyLocal (0.0.0.0 or ::), finds the best local IP via routing table. - ## Otherwise returns bindIp as-is. 
- let bindAddress = initTAddress(bindIp, Port(0)) - if bindAddress.isAnyLocal(): - let ip = - if bindIp.family == IpAddressFamily.IPv6: - getRouteIpv6() - else: - getRouteIpv4() - if ip.isOk(): - return some(ip.get()) - return none(IpAddress) - else: - return some(bindIp) diff --git a/tests/integration/multinodes.nim b/tests/integration/multinodes.nim index 034ee657..94b3ede3 100644 --- a/tests/integration/multinodes.nim +++ b/tests/integration/multinodes.nim @@ -213,6 +213,7 @@ template multinodesuite*(suiteName: string, body: untyped) = trace "Setting up test", suite = suiteName, test = currentTestName, nodeConfigs if var clients =? nodeConfigs.clients: failAndTeardownOnError "failed to start client nodes": + clients = clients.withExtIp() for config in clients.configs: let node = await startClientNode(config) running.add RunningNode(role: Role.Client, node: node) diff --git a/tests/integration/storageconfig.nim b/tests/integration/storageconfig.nim index 691daf34..7b218bd3 100644 --- a/tests/integration/storageconfig.nim +++ b/tests/integration/storageconfig.nim @@ -322,6 +322,15 @@ proc withNatScheduleInterval*( config.addCliOption("--nat-schedule-interval", $scheduleInterval) return startConfig +proc withExtIp*( + self: StorageConfigs, idx: int, ip = "127.0.0.1" +): StorageConfigs {.raises: [StorageConfigError].} = + self.checkBounds idx + + var startConfig = self + startConfig.configs[idx].addCliOption("--nat", "extip:" & ip) + return startConfig + proc withExtIp*( self: StorageConfigs, ip = "127.0.0.1" ): StorageConfigs {.raises: [StorageConfigError].} = diff --git a/tests/storage/helpers/nodeutils.nim b/tests/storage/helpers/nodeutils.nim index ae8dab9c..ad06995c 100644 --- a/tests/storage/helpers/nodeutils.nim +++ b/tests/storage/helpers/nodeutils.nim @@ -10,8 +10,6 @@ import pkg/storage/stores import pkg/storage/blocktype as bt import pkg/storage/blockexchange import pkg/storage/systemclock -import pkg/storage/nat -import pkg/storage/utils/natutils import 
pkg/storage/merkletree import pkg/storage/manifest @@ -218,8 +216,7 @@ proc generateNodes*( if config.enableBootstrap: waitFor switch.peerInfo.update() - blockDiscovery.updateAnnounceRecord(switch.peerInfo.addrs) - blockDiscovery.updateDhtRecord(switch.peerInfo.addrs) + blockDiscovery.updateRecords(switch.peerInfo.addrs, bindPort.Port) if blockDiscovery.dhtRecord.isSome: bootstrapNodes.add !blockDiscovery.dhtRecord diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index 70b80572..f74c876f 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -14,7 +14,6 @@ import ../../storage/nat import ../../storage/discovery import ../../storage/rng import ../../storage/utils -import ../../storage/utils/natutils import ../../storage/utils/addrutils type MockNatMapper = ref object of NatMapper @@ -44,18 +43,6 @@ suite "nattedPorts": let natConfig = NatConfig(hasExtIp: true, extIp: parseIpAddress("8.8.8.8")) check nattedPorts(natConfig, Port(5000), Port(1234)).isNone -suite "hasPublicIp": - test "hasPublicIp returns true when the address is public": - let ma = MultiAddress.init("/ip4/8.8.8.8/tcp/8080").expect("valid") - check hasPublicIp(@[ma]) - - test "hasPublicIp returns false when the address is private": - let ma = MultiAddress.init("/ip4/192.168.1.1/tcp/8080").expect("valid") - check not hasPublicIp(@[ma]) - - test "hasPublicIp returns false when the address is empty": - check not hasPublicIp(@[]) - asyncchecksuite "handleNatStatus": var sw: Switch var key: PrivateKey From 0112c9502bf4733d7ff6f9481072ac48a3decb11 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 6 May 2026 10:13:36 +0400 Subject: [PATCH 28/32] Add comment --- storage/discovery.nim | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/discovery.nim b/storage/discovery.nim index a45fd16d..36c50b10 100644 --- a/storage/discovery.nim +++ b/storage/discovery.nim @@ -265,6 +265,7 @@ proc new*( key: key, peerId: PeerId.init(key).expect("Should construct PeerId"), store: store 
) + # Update with empty values to get a valid SPR self.updateRecords(@[], Port(0)) # -------------------------------------------------------------------------- From ae261e32105339f2b6e8a7f1910b924d77736f60 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 8 May 2026 11:16:45 +0400 Subject: [PATCH 29/32] Refactor UPnP and PMP --- storage/nat.nim | 333 +++++-------------------------- storage/rest/api.nim | 12 +- storage/storage.nim | 68 ++++--- storage/utils/natutils.nim | 201 +++++++++++++++++++ tests/integration/multinodes.nim | 4 +- tests/storage/testnat.nim | 77 +++++-- tests/storage/testnatutils.nim | 94 ++++++++- 7 files changed, 462 insertions(+), 327 deletions(-) diff --git a/storage/nat.nim b/storage/nat.nim index 49977830..616c64e7 100644 --- a/storage/nat.nim +++ b/storage/nat.nim @@ -8,10 +8,8 @@ {.push raises: [].} -import - std/[options, os, times, atomics, exitprocs], - nat_traversal/[miniupnpc, natpmp], - results +import std/[options, net] +import results import pkg/chronos import pkg/chronicles @@ -23,304 +21,52 @@ import ./utils/natutils import ./utils/addrutils import ./discovery -const - UPNP_TIMEOUT = 200 # ms - PORT_MAPPING_INTERVAL = 20 * 60 # seconds - NATPMP_LIFETIME = 60 * 60 # in seconds, must be longer than PORT_MAPPING_INTERVAL - -type PortMappings* = object - internalTcpPort: Port - externalTcpPort: Port - internalUdpPort: Port - externalUdpPort: Port - description: string - -type PortMappingArgs = - tuple[strategy: NatStrategy, tcpPort, udpPort: Port, description: string] +logScope: + topics = "nat" type NatConfig* = object case hasExtIp*: bool of true: extIp*: IpAddress of false: nat*: NatStrategy -var - upnp {.threadvar.}: Miniupnp - npmp {.threadvar.}: NatPmp - strategy = NatStrategy.NatAuto - natClosed: Atomic[bool] - activeMappings: seq[PortMappings] - natThreads: seq[Thread[PortMappingArgs]] = @[] - -logScope: - topics = "nat" - type NatMapper* = ref object of RootObj - -method mapNatPorts*(m: NatMapper): Option[(Port, Port)] 
{.base, gcsafe, raises: [].} = - none((Port, Port)) - -type DefaultNatMapper* = ref object of NatMapper natConfig*: NatConfig tcpPort*: Port discoveryPort*: Port + hasUpnpMapping: bool -## Initialises the UPnP or NAT-PMP threadvar and sets the `strategy` threadvar. -## Must be called before redirectPorts() in each thread. -proc initNatDevice(natStrategy: NatStrategy, quiet = false): bool = - if natStrategy == NatStrategy.NatAuto or natStrategy == NatStrategy.NatUpnp: - if upnp == nil: - upnp = newMiniupnp() - - upnp.discoverDelay = UPNP_TIMEOUT - let dres = upnp.discover() - if dres.isErr: - debug "UPnP", msg = dres.error - else: - var - msg: cstring - canContinue = true - case upnp.selectIGD() - of IGDNotFound: - msg = "Internet Gateway Device not found. Giving up." - canContinue = false - of IGDFound: - msg = "Internet Gateway Device found." - of IGDNotConnected: - msg = "Internet Gateway Device found but it's not connected. Trying anyway." - of NotAnIGD: - msg = - "Some device found, but it's not recognised as an Internet Gateway Device. Trying anyway." - of IGDIpNotRoutable: - msg = - "Internet Gateway Device found and is connected, but with a reserved or non-routable IP. Trying anyway." 
- if not quiet: - debug "UPnP", msg - if canContinue: - strategy = NatStrategy.NatUpnp - return true - - if natStrategy == NatStrategy.NatAuto or natStrategy == NatStrategy.NatPmp: - if npmp == nil: - npmp = newNatPmp() - let nres = npmp.init() - if nres.isErr: - debug "NAT-PMP", msg = nres.error - else: - strategy = NatStrategy.NatPmp - return true - - return false - -proc doPortMapping( - strategy: NatStrategy, tcpPort, udpPort: Port, description: string -): Option[(Port, Port)] {.gcsafe.} = - var - extTcpPort: Port - extUdpPort: Port - - if strategy == NatStrategy.NatUpnp: - for t in [(tcpPort, UPNPProtocol.TCP), (udpPort, UPNPProtocol.UDP)]: - let - (port, protocol) = t - pmres = upnp.addPortMapping( - externalPort = $port, - protocol = protocol, - internalHost = upnp.lanAddr, - internalPort = $port, - desc = description, - leaseDuration = 0, - ) - if pmres.isErr: - error "UPnP port mapping", msg = pmres.error, port - return - else: - # let's check it - let cres = - upnp.getSpecificPortMapping(externalPort = $port, protocol = protocol) - if cres.isErr: - warn "UPnP port mapping check failed. 
Assuming the check itself is broken and the port mapping was done.", - msg = cres.error - - info "UPnP: added port mapping", - externalPort = port, internalPort = port, protocol = protocol - case protocol - of UPNPProtocol.TCP: - extTcpPort = port - of UPNPProtocol.UDP: - extUdpPort = port - elif strategy == NatStrategy.NatPmp: - for t in [(tcpPort, NatPmpProtocol.TCP), (udpPort, NatPmpProtocol.UDP)]: - let - (port, protocol) = t - pmres = npmp.addPortMapping( - eport = port.cushort, - iport = port.cushort, - protocol = protocol, - lifetime = NATPMP_LIFETIME, - ) - if pmres.isErr: - error "NAT-PMP port mapping", msg = pmres.error, port - return - else: - let extPort = Port(pmres.value) - info "NAT-PMP: added port mapping", - externalPort = extPort, internalPort = port, protocol = protocol - case protocol - of NatPmpProtocol.TCP: - extTcpPort = extPort - of NatPmpProtocol.UDP: - extUdpPort = extPort - return some((extTcpPort, extUdpPort)) - -proc repeatPortMapping(args: PortMappingArgs) {.thread, raises: [ValueError].} = - ignoreSignalsInThread() - let - (strategy, tcpPort, udpPort, description) = args - interval = initDuration(seconds = PORT_MAPPING_INTERVAL) - sleepDuration = 1_000 # in ms, also the maximum delay after pressing Ctrl-C - - var lastUpdate = now() - - # We can't use copies of Miniupnp and NatPmp objects in this thread, because they share - # C pointers with other instances that have already been garbage collected, so - # we use threadvars instead and initialise them again here. 
- if initNatDevice(strategy, quiet = true): - while natClosed.load() == false: - let currTime = now() - if currTime >= (lastUpdate + interval): - discard doPortMapping(strategy, tcpPort, udpPort, description) - lastUpdate = currTime - - sleep(sleepDuration) - -proc stopNatThreads() {.noconv.} = - # stop the thread - debug "Stopping NAT port mapping renewal threads" - try: - natClosed.store(true) - joinThreads(natThreads) - except Exception as exc: - warn "Failed to stop NAT port mapping renewal thread", exc = exc.msg - - # delete our port mappings - - # FIXME: if the initial port mapping failed because it already existed for the - # required external port, we should not delete it. It might have been set up - # by another program. - - # In Windows, a new thread is created for the signal handler, so we need to - # initialise our threadvars again. - - if initNatDevice(strategy, quiet = true): - if strategy == NatStrategy.NatUpnp: - for entry in activeMappings: - for t in [ - (entry.externalTcpPort, entry.internalTcpPort, UPNPProtocol.TCP), - (entry.externalUdpPort, entry.internalUdpPort, UPNPProtocol.UDP), - ]: - let - (eport, iport, protocol) = t - pmres = upnp.deletePortMapping(externalPort = $eport, protocol = protocol) - if pmres.isErr: - error "UPnP port mapping deletion", msg = pmres.error - else: - debug "UPnP: deleted port mapping", - externalPort = eport, internalPort = iport, protocol = protocol - elif strategy == NatStrategy.NatPmp: - for entry in activeMappings: - for t in [ - (entry.externalTcpPort, entry.internalTcpPort, NatPmpProtocol.TCP), - (entry.externalUdpPort, entry.internalUdpPort, NatPmpProtocol.UDP), - ]: - let - (eport, iport, protocol) = t - pmres = npmp.deletePortMapping( - eport = eport.cushort, iport = iport.cushort, protocol = protocol - ) - if pmres.isErr: - error "NAT-PMP port mapping deletion", msg = pmres.error - else: - debug "NAT-PMP: deleted port mapping", - externalPort = eport, internalPort = iport, protocol = protocol - -proc 
redirectPorts*( - strategy: NatStrategy, tcpPort, udpPort: Port, description: string -): Option[(Port, Port)] = - result = doPortMapping(strategy, tcpPort, udpPort, description) - if result.isSome: - let (externalTcpPort, externalUdpPort) = result.get() - # needed by NAT-PMP on port mapping deletion - # Port mapping works. Let's launch a thread that repeats it, in case the - # NAT-PMP lease expires or the router is rebooted and forgets all about - # these mappings. - activeMappings.add( - PortMappings( - internalTcpPort: tcpPort, - externalTcpPort: externalTcpPort, - internalUdpPort: udpPort, - externalUdpPort: externalUdpPort, - description: description, - ) - ) - try: - natThreads.add(Thread[PortMappingArgs]()) - natThreads[^1].createThread( - repeatPortMapping, (strategy, externalTcpPort, externalUdpPort, description) - ) - # atexit() in disguise - if natThreads.len == 1: - # we should register the thread termination function only once - addExitProc(stopNatThreads) - except Exception as exc: - warn "Failed to create NAT port mapping renewal thread", exc = exc.msg - -proc setupNat*( - natStrategy: NatStrategy, tcpPort, udpPort: Port, clientId: string -): Option[(Port, Port)] = - ## Setup NAT port mapping. - ## Returns the external (tcpPort, udpPort) if port mapping succeeded, none otherwise. - ## TODO: Allow for tcp or udp port mapping to be optional. - if not initNatDevice(natStrategy): - warn "UPnP/NAT-PMP not available" +method mapNatPorts*(m: NatMapper): Option[(Port, Port)] {.base, gcsafe, raises: [].} = + if m.natConfig.hasExtIp: return none((Port, Port)) - let extPorts = ( - {.gcsafe.}: - redirectPorts( - strategy, tcpPort = tcpPort, udpPort = udpPort, description = clientId - ) - ) - if extPorts.isNone: - warn "UPnP/NAT-PMP available but port forwarding failed" - extPorts + # Devices are recreated on each call: discover() costs ~200ms but only fires + # when AutoNAT reports NotReachable, which is exactly when we want a fresh scan. 
+ let upnpRes = UpnpDevice.init() + if upnpRes.isOk: + let ports = upnpRes.value.mapPorts(m.tcpPort, m.discoveryPort) + if ports.isSome: + m.hasUpnpMapping = true + return ports -proc findReachableNodes*(bootstrapNodes: seq[SignedPeerRecord]): seq[SignedPeerRecord] = - ## Returns the list of nodes known to be directly reachable. - ## Currently returns bootstrap nodes. In the future, any network participant - ## confirmed reachable by AutoNAT could be included. - bootstrapNodes + let pmpRes = PmpDevice.init() + if pmpRes.isOk: + let ports = pmpRes.value.mapPorts(m.tcpPort, m.discoveryPort) + if ports.isSome: + return ports -proc nattedPorts*(natConfig: NatConfig, tcpPort, udpPort: Port): Option[(Port, Port)] = - if natConfig.hasExtIp: - return none((Port, Port)) - let clientId = "storage" - return setupNat(natConfig.nat, tcpPort, udpPort, clientId) + none((Port, Port)) -method mapNatPorts*(m: DefaultNatMapper): Option[(Port, Port)] {.gcsafe, raises: [].} = - nattedPorts(m.natConfig, m.tcpPort, m.discoveryPort) - -proc handleNatStatus*( +method handleNatStatus*( + m: NatMapper, networkReachability: NetworkReachability, dialBackAddr: Opt[MultiAddress], discoveryPort: Port, - mapper: NatMapper, discovery: Discovery, switch: Switch, autoRelayService: AutoRelayService, -) {.async: (raises: [CancelledError]).} = +) {.async: (raises: [CancelledError]), base, gcsafe.} = case networkReachability of Unknown: - # Nothing to do here, not enough confidence score result discard of Reachable: if dialBackAddr.isNone: @@ -337,9 +83,9 @@ proc handleNatStatus*( var hasPortMapping = false if dialBackAddr.isNone: - warn "Got empty dialback address in AutoNat when node is Reachable" + warn "Got empty dialback address in AutoNat when node is NotReachable" else: - let maybePorts = mapper.mapNatPorts() + let maybePorts = m.mapNatPorts() if maybePorts.isSome: let (tcpPort, udpPort) = maybePorts.get() @@ -352,9 +98,34 @@ proc handleNatStatus*( debug "AutoRelayService stop method returned 
false" discovery.updateRecords(@[announceAddress], udpPort) - hasPortMapping = true if not hasPortMapping and not autoRelayService.isRunning: if not await autoRelayService.setup(switch): debug "AutoRelayService setup method returned false" + +proc close*(m: NatMapper, device = UpnpDevice()) = + # UPnP mappings are permanent (leaseDuration=0) and must be deleted explicitly. + # NAT-PMP mappings expire automatically after NATPMP_LIFETIME seconds. + if not m.hasUpnpMapping: + return + + # deletePortMapping requires the IGD control URL set during init + let deviceRes = device.init() + if deviceRes.isErr: + warn "UPnP reinit failed during cleanup, port mappings may remain", + msg = deviceRes.error + return + + for (port, proto) in [ + (m.tcpPort, NatIpProtocol.Tcp), (m.discoveryPort, NatIpProtocol.Udp) + ]: + let res = deviceRes.value.deletePortMapping(port, proto) + if res.isErr: + error "UPnP port mapping deletion failed", port, proto, msg = res.error + +proc findReachableNodes*(bootstrapNodes: seq[SignedPeerRecord]): seq[SignedPeerRecord] = + ## Returns the list of nodes known to be directly reachable. + ## Currently returns bootstrap nodes. In the future, any network participant + ## confirmed reachable by AutoNAT could be included. 
+ bootstrapNodes diff --git a/storage/rest/api.nim b/storage/rest/api.nim index 6c54cc8b..5047686d 100644 --- a/storage/rest/api.nim +++ b/storage/rest/api.nim @@ -561,7 +561,7 @@ proc initNodeApi(node: StorageNodeRef, conf: StorageConf, router: var RestRouter proc initDebugApi( node: StorageNodeRef, conf: StorageConf, - autonat: AutonatV2Service, + autonat: Option[AutonatV2Service], router: var RestRouter, ) = let allowedOrigin = router.allowedOrigin @@ -583,7 +583,13 @@ proc initDebugApi( "announceAddresses": node.discovery.announceAddrs, "table": table, "storage": {"version": $storageVersion, "revision": $storageRevision}, - "nat": {"reachability": $autonat.networkReachability}, + "nat": { + "reachability": + if autonat.isSome: + $autonat.get.networkReachability + else: + "unknown" + }, } # return pretty json for human readability @@ -644,7 +650,7 @@ proc initRestApi*( node: StorageNodeRef, conf: StorageConf, repoStore: RepoStore, - autonat: AutonatV2Service, + autonat: Option[AutonatV2Service], corsAllowedOrigin: ?string, ): RestRouter = var router = RestRouter.init(validate, corsAllowedOrigin) diff --git a/storage/storage.nim b/storage/storage.nim index 2b0150fa..2176210f 100644 --- a/storage/storage.nim +++ b/storage/storage.nim @@ -53,8 +53,9 @@ type repoStore: RepoStore maintenance: BlockMaintainer taskpool: Taskpool - autonatService*: AutonatV2Service + autonatService*: Option[AutonatV2Service] autoRelayService: AutoRelayService + natMapper: NatMapper isStarted: bool StoragePrivateKey* = libp2p.PrivateKey # alias @@ -123,6 +124,8 @@ proc stop*(s: StorageServer) {.async.} = notice "Stopping Storage node" + s.natMapper.close() + var futures = @[ s.storageNode.switch.stop(), s.storageNode.stop(), @@ -190,17 +193,23 @@ proc new*( let relayClient = relayClientModule.RelayClient.new(canHop = config.relay) let autonatClient = AutonatV2Client.new(random.Rng.instance()) - let autonatService = AutonatV2Service.new( - rng = random.Rng.instance(), - client = 
autonatClient, - config = AutonatV2ServiceConfig.new( - scheduleInterval = Opt.some(config.natScheduleInterval), - askNewConnectedPeers = true, - numPeersToAsk = config.natNumPeersToAsk, - maxQueueSize = config.natMaxQueueSize, - minConfidence = config.natMinConfidence, - ), - ) + let autonatService = + if config.nat.hasExtIp: + none(AutonatV2Service) + else: + some( + AutonatV2Service.new( + rng = random.Rng.instance(), + client = autonatClient, + config = AutonatV2ServiceConfig.new( + scheduleInterval = Opt.some(config.natScheduleInterval), + askNewConnectedPeers = true, + numPeersToAsk = config.natNumPeersToAsk, + maxQueueSize = config.natMaxQueueSize, + minConfidence = config.natMinConfidence, + ), + ) + ) let switch = SwitchBuilder .new() @@ -215,7 +224,12 @@ proc new*( .withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay}) .withAutonatV2Server() .withCircuitRelay(relayClient) - .withServices(@[Service(autonatService)]) + .withServices( + if autonatService.isSome: + @[Service(autonatService.get)] + else: + @[] + ) .build() autonatClient.setup(switch) @@ -349,23 +363,24 @@ proc new*( switch.mount(network) switch.mount(manifestProto) - let natMapper = DefaultNatMapper( + let natMapper = NatMapper( natConfig: config.nat, tcpPort: config.listenPort, discoveryPort: config.discoveryPort, ) - autonatService.setStatusAndConfidenceHandler( - proc( - networkReachability: NetworkReachability, - confidence: Opt[float], - addrs: Opt[MultiAddress], - ) {.async: (raises: [CancelledError]).} = - debug "AutoNAT status", reachability = networkReachability, confidence - await handleNatStatus( - networkReachability, addrs, config.discoveryPort, natMapper, discovery, switch, - autoRelayService, - ) - ) + if autonatService.isSome: + autonatService.get.setStatusAndConfidenceHandler( + proc( + networkReachability: NetworkReachability, + confidence: Opt[float], + addrs: Opt[MultiAddress], + ) {.async: (raises: [CancelledError]).} = + debug "AutoNAT status", reachability = 
networkReachability, confidence + await natMapper.handleNatStatus( + networkReachability, addrs, config.discoveryPort, discovery, switch, + autoRelayService, + ) + ) StorageServer( config: config, @@ -377,4 +392,5 @@ proc new*( logFile: logFile, autonatService: autonatService, autoRelayService: autoRelayService, + natMapper: natMapper, ) diff --git a/storage/utils/natutils.nim b/storage/utils/natutils.nim index 9d05dd58..7364efe6 100644 --- a/storage/utils/natutils.nim +++ b/storage/utils/natutils.nim @@ -1,6 +1,207 @@ {.push raises: [].} +import std/[options, net] +import nat_traversal/[miniupnpc, natpmp] +import pkg/chronicles +import results + +export miniupnpc, natpmp, results, options, net + +logScope: + topics = "nat" + +const UPNP_TIMEOUT* = 200 # ms +const NATPMP_LIFETIME* = 60 * 60 # seconds + type NatStrategy* = enum NatAuto NatUpnp NatPmp + +type NatIpProtocol* = enum + Tcp + Udp + +# Generic Nat device can be UPnP or PmP +type NatDevice* = ref object of RootObj + +type UpnpDevice* = ref object of NatDevice + upnp: Miniupnp + +type PmpDevice* = ref object of NatDevice + npmp: NatPmp + +# appPortMapping is specific to the type of Nat device +method addPortMapping*( + d: NatDevice, port: Port, proto: NatIpProtocol +): Result[Port, string] {.base, gcsafe.} = + return err("not implemented") + +# Creates the mapping the the router and +# returns the opened ports. 
+method mapPorts*( + d: NatDevice, tcpPort, udpPort: Port +): Option[(Port, Port)] {.base, gcsafe.} = + var extTcpPort, extUdpPort: Port + + for t in [(tcpPort, NatIpProtocol.Tcp), (udpPort, NatIpProtocol.Udp)]: + let (port, proto) = t + let pmres = d.addPortMapping(port, proto) + + if pmres.isErr: + error "port mapping failed", msg = pmres.error + return none((Port, Port)) + + case proto + of Tcp: + extTcpPort = pmres.value + of Udp: + extUdpPort = pmres.value + + return some((extTcpPort, extUdpPort)) + +method getSpecificPortMapping*( + d: UpnpDevice, externalPort: string, protocol: UPNPProtocol +): Result[PortMappingRes, cstring] {.base, gcsafe.} = + if d.upnp == nil: + return err(cstring("upnp not initialized")) + + d.upnp.getSpecificPortMapping(externalPort = externalPort, protocol = protocol) + +method discover*(d: UpnpDevice): Result[int, cstring] {.base, gcsafe.} = + if d.upnp == nil: + return err(cstring("upnp not initialized")) + + return d.upnp.discover() + +method selectIGD*(d: UpnpDevice): SelectIGDResult {.base, gcsafe.} = + if d.upnp == nil: + return IGDNotFound + + return d.upnp.selectIGD() + +proc init*(T: type UpnpDevice): Result[UpnpDevice, string] {.gcsafe.} = + UpnpDevice().init() + +# Init UPnP device and create miniupnp instance. +# It call "discover" to retrieve the UPnP devices on the network, +# and then "selectIGD" to select a suitable device. +proc init*(d: UpnpDevice): Result[UpnpDevice, string] {.gcsafe.} = + if d.upnp == nil: + d.upnp = newMiniupnp() + + d.upnp.discoverDelay = UPNP_TIMEOUT + + let dres = d.discover() + if dres.isErr: + debug "UPnP", msg = dres.error + return err($dres.error) + + case d.selectIGD() + of IGDNotFound: + debug "UPnP", msg = "Internet Gateway Device not found. Giving up." + return err("IGD not found") + of IGDFound: + debug "UPnP", msg = "Internet Gateway Device found." + of IGDNotConnected: + debug "UPnP", + msg = "Internet Gateway Device found but it's not connected. Trying anyway." 
+ of NotAnIGD: + debug "UPnP", + msg = + "Some device found, but it's not recognised as an Internet Gateway Device. Trying anyway." + of IGDIpNotRoutable: + debug "UPnP", + msg = + "Internet Gateway Device found and is connected, but with a reserved or non-routable IP. Trying anyway." + + return ok(d) + +# For UPnP, the external port is the same as the application port. +# This should work for most of the case. +# We could change this by using addAnyPortMapping for IGD2 compatible routers +# if needed. +method addPortMapping*( + d: UpnpDevice, port: Port, proto: NatIpProtocol +): Result[Port, string] {.gcsafe.} = + if d.upnp == nil: + return err("upnp not initialized") + + let protocol = if proto == NatIpProtocol.Tcp: UPNPProtocol.TCP else: UPNPProtocol.UDP + let pmres = d.upnp.addPortMapping( + externalPort = $port, + protocol = protocol, + internalHost = d.upnp.lanAddr, + internalPort = $port, + desc = "logos-storage", + leaseDuration = 0, + ) + if pmres.isErr: + return err($pmres.error) + + let cres = d.getSpecificPortMapping(externalPort = $port, protocol = protocol) + if cres.isErr: + # Eventually, the check could fail on some router even if the router is successful. + # So we log a warning but we still want to continue because it is not sure it is a failure. + warn "UPnP port mapping check failed. 
Assuming the check itself is broken and the port mapping was done.", + msg = cres.error + + info "UPnP: added port mapping", externalPort = port, internalPort = port + + return ok(port) + +method deletePortMapping*( + d: UpnpDevice, port: Port, proto: NatIpProtocol +): Result[void, string] {.base, gcsafe.} = + if d.upnp == nil: + return err("upnp not initialized") + + let protocol = if proto == NatIpProtocol.Tcp: UPNPProtocol.TCP else: UPNPProtocol.UDP + let res = d.upnp.deletePortMapping(externalPort = $port, protocol = protocol) + if res.isErr: + return err($res.error) + + debug "UPnP: deleted port mapping", port, proto + + ok() + +proc init*(T: type PmpDevice): Result[PmpDevice, string] {.gcsafe.} = + PmpDevice().init() + +# Create a NatPmP instance. +proc init*(d: PmpDevice): Result[PmpDevice, string] {.gcsafe.} = + if d.npmp == nil: + d.npmp = newNatPmp() + + let res = d.npmp.init() + if res.isErr: + debug "NAT-PMP", msg = res.error + return err($res.error) + + return ok(d) + +# Add a port mapping on NAT-PMP device. +# The application port might not be the external port. +# The latter is returned. 
+method addPortMapping*( + d: PmpDevice, port: Port, proto: NatIpProtocol +): Result[Port, string] {.gcsafe.} = + if d.npmp == nil: + return err("npmp not initialized") + + let protocol = + if proto == NatIpProtocol.Tcp: NatPmpProtocol.TCP else: NatPmpProtocol.UDP + let pmres = d.npmp.addPortMapping( + eport = port.cushort, + iport = port.cushort, + protocol = protocol, + lifetime = NATPMP_LIFETIME, + ) + if pmres.isErr: + return err(pmres.error) + + let extPort = Port(pmres.value) + + info "NAT-PMP: added port mapping", externalPort = extPort, internalPort = port + + return ok(extPort) diff --git a/tests/integration/multinodes.nim b/tests/integration/multinodes.nim index 94b3ede3..4d6d6f62 100644 --- a/tests/integration/multinodes.nim +++ b/tests/integration/multinodes.nim @@ -213,7 +213,9 @@ template multinodesuite*(suiteName: string, body: untyped) = trace "Setting up test", suite = suiteName, test = currentTestName, nodeConfigs if var clients =? nodeConfigs.clients: failAndTeardownOnError "failed to start client nodes": - clients = clients.withExtIp() + # Only the first node (bootstrap) gets a known extip. Other nodes use + # nat=auto so AutoNAT can run and determine their reachability. 
+ clients = clients.withExtIp(0) for config in clients.configs: let node = await startClientNode(config) running.add RunningNode(role: Role.Client, node: node) diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index f74c876f..27ff6e8e 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -1,5 +1,6 @@ -import std/net +import std/[net, importutils] import pkg/chronos +import ../../storage/utils/natutils import pkg/libp2p/[multiaddress, multihash, multicodec] import pkg/libp2p/protocols/connectivity/autonatv2/service except setup import pkg/libp2p/protocols/connectivity/autonatv2/types @@ -16,12 +17,52 @@ import ../../storage/rng import ../../storage/utils import ../../storage/utils/addrutils +privateAccess(NatMapper) + +type MockUpnpDevice = ref object of UpnpDevice + deletedPorts: seq[(Port, NatIpProtocol)] + +method discover*(d: MockUpnpDevice): Result[int, cstring] {.gcsafe.} = + ok(1) + +method selectIGD*(d: MockUpnpDevice): SelectIGDResult {.gcsafe.} = + IGDFound + +method deletePortMapping*( + d: MockUpnpDevice, port: Port, proto: NatIpProtocol +): Result[void, string] {.gcsafe.} = + d.deletedPorts.add((port, proto)) + ok() + type MockNatMapper = ref object of NatMapper mappedPorts: Option[(Port, Port)] -method mapNatPorts*(m: MockNatMapper): Option[(Port, Port)] {.raises: [].} = +method mapNatPorts*(m: MockNatMapper): Option[(Port, Port)] {.gcsafe, raises: [].} = m.mappedPorts +suite "NatMapper.close": + test "does nothing when no upnp mapping": + let mapper = MockNatMapper( + natConfig: NatConfig(hasExtIp: false, nat: NatAuto), + tcpPort: Port(8080), + discoveryPort: Port(8090), + ) + let device = MockUpnpDevice() + mapper.close(device) + check device.deletedPorts.len == 0 + + test "deletes tcp and udp ports when upnp mapping exists": + let mapper = MockNatMapper( + natConfig: NatConfig(hasExtIp: false, nat: NatAuto), + tcpPort: Port(8080), + discoveryPort: Port(8090), + ) + mapper.hasUpnpMapping = true + let device = 
MockUpnpDevice() + mapper.close(device) + check device.deletedPorts == + @[(Port(8080), NatIpProtocol.Tcp), (Port(8090), NatIpProtocol.Udp)] + suite "remapAddr": test "replaces protocol tcp with udp": let ma = MultiAddress.init("/ip4/1.2.3.4/tcp/5000").expect("valid") @@ -38,11 +79,6 @@ suite "remapAddr": let remapped = ma.remapAddr(ip = some(parseIpAddress("8.8.8.8"))) check remapped == MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid") -suite "nattedPorts": - test "returns none when extIp is configured (manual setup)": - let natConfig = NatConfig(hasExtIp: true, extIp: parseIpAddress("8.8.8.8")) - check nattedPorts(natConfig, Port(5000), Port(1234)).isNone - asyncchecksuite "handleNatStatus": var sw: Switch var key: PrivateKey @@ -68,8 +104,8 @@ asyncchecksuite "handleNatStatus": let dialBack = MultiAddress.init("/ip4/1.2.3.4/tcp/8080").expect("valid") let mapper = MockNatMapper(mappedPorts: some((Port(9000), Port(9001)))) - await handleNatStatus( - NotReachable, Opt.some(dialBack), discoveryPort, mapper, disc, sw, autoRelay + await mapper.handleNatStatus( + NotReachable, Opt.some(dialBack), discoveryPort, disc, sw, autoRelay ) check disc.announceAddrs == @@ -79,17 +115,28 @@ asyncchecksuite "handleNatStatus": test "handleNatStatus starts autoRelay when NotReachable and UPnP failed": let mapper = MockNatMapper(mappedPorts: none((Port, Port))) - await handleNatStatus( - NotReachable, Opt.none(MultiAddress), discoveryPort, mapper, disc, sw, autoRelay + await mapper.handleNatStatus( + NotReachable, Opt.none(MultiAddress), discoveryPort, disc, sw, autoRelay ) check autoRelay.isRunning + test "handleNatStatus starts autoRelay when NotReachable and mapping fails": + let dialBack = MultiAddress.init("/ip4/1.2.3.4/tcp/8080").expect("valid") + let mapper = MockNatMapper(mappedPorts: none((Port, Port))) + + await mapper.handleNatStatus( + NotReachable, Opt.some(dialBack), discoveryPort, disc, sw, autoRelay + ) + + check autoRelay.isRunning + check 
disc.announceAddrs == newSeq[MultiAddress]() + test "handleNatStatus does not announce address when Reachable and no dialBackAddr": let mapper = MockNatMapper(mappedPorts: none((Port, Port))) - await handleNatStatus( - Reachable, Opt.none(MultiAddress), discoveryPort, mapper, disc, sw, autoRelay + await mapper.handleNatStatus( + Reachable, Opt.none(MultiAddress), discoveryPort, disc, sw, autoRelay ) check disc.announceAddrs == newSeq[MultiAddress]() @@ -100,8 +147,8 @@ asyncchecksuite "handleNatStatus": let mapper = MockNatMapper(mappedPorts: none((Port, Port))) discard await autorelayservice.setup(autoRelay, sw) - await handleNatStatus( - Reachable, Opt.some(dialBack), discoveryPort, mapper, disc, sw, autoRelay + await mapper.handleNatStatus( + Reachable, Opt.some(dialBack), discoveryPort, disc, sw, autoRelay ) check not autoRelay.isRunning diff --git a/tests/storage/testnatutils.nim b/tests/storage/testnatutils.nim index 08a6621e..9eea951a 100644 --- a/tests/storage/testnatutils.nim +++ b/tests/storage/testnatutils.nim @@ -1 +1,93 @@ -discard +import std/[options, net] +import nat_traversal/[miniupnpc, natpmp] +import pkg/results +import ../asynctest +import ../../storage/utils/natutils + +type MockUpnpDev = ref object of UpnpDevice + discoverOk: bool + igdResult: SelectIGDResult + addPortMappingOk: bool + failOnProto: Option[NatIpProtocol] + +type MockPmpDev = ref object of PmpDevice + addPortMappingOk: bool + mappedPort: Port + +method discover*(d: MockUpnpDev): Result[int, cstring] {.gcsafe.} = + if d.discoverOk: + ok(1) + else: + err(cstring("discover failed")) + +method selectIGD*(d: MockUpnpDev): SelectIGDResult {.gcsafe.} = + d.igdResult + +method addPortMapping*( + d: MockUpnpDev, port: Port, proto: NatIpProtocol +): Result[Port, string] {.gcsafe.} = + if d.failOnProto == some(proto): + err("mapping failed") + elif d.addPortMappingOk: + ok(port) + else: + err("mapping failed") + +method getSpecificPortMapping*( + d: MockUpnpDev, externalPort: string, 
protocol: UPNPProtocol +): Result[PortMappingRes, cstring] {.gcsafe.} = + ok(PortMappingRes()) + +method addPortMapping*( + d: MockPmpDev, port: Port, proto: NatIpProtocol +): Result[Port, string] {.gcsafe.} = + if d.addPortMappingOk: + ok(d.mappedPort) + else: + err("mapping failed") + +suite "UpnpDevice.init": + test "returns err when discover fails": + check MockUpnpDev(discoverOk: false).init().isErr + + test "returns err when IGD not found": + check MockUpnpDev(discoverOk: true, igdResult: IGDNotFound).init().isErr + + test "returns ok when IGD found": + check MockUpnpDev(discoverOk: true, igdResult: IGDFound).init().isOk + + test "returns ok when IGD not connected": + check MockUpnpDev(discoverOk: true, igdResult: IGDNotConnected).init().isOk + + test "returns ok when not an IGD": + check MockUpnpDev(discoverOk: true, igdResult: NotAnIGD).init().isOk + + test "returns ok when IP not routable": + check MockUpnpDev(discoverOk: true, igdResult: IGDIpNotRoutable).init().isOk + +suite "UpnpDevice.mapPorts": + test "returns none when addPortMapping fails": + check MockUpnpDev(addPortMappingOk: false).mapPorts(Port(8080), Port(8090)).isNone + + test "returns mapped ports": + let res = MockUpnpDev(addPortMappingOk: true).mapPorts(Port(8080), Port(8090)) + check res.isSome + check res.get() == (Port(8080), Port(8090)) + + test "returns none when tcp mapping fails": + let d = MockUpnpDev(addPortMappingOk: true, failOnProto: some(NatIpProtocol.Tcp)) + check d.mapPorts(Port(8080), Port(8090)).isNone + + test "returns none when udp mapping fails": + let d = MockUpnpDev(addPortMappingOk: true, failOnProto: some(NatIpProtocol.Udp)) + check d.mapPorts(Port(8080), Port(8090)).isNone + +suite "PmpDevice.mapPorts": + test "returns none when mapping fails": + check MockPmpDev(addPortMappingOk: false).mapPorts(Port(8080), Port(8090)).isNone + + test "returns assigned external ports": + let d = MockPmpDev(addPortMappingOk: true, mappedPort: Port(9000)) + let res = 
d.mapPorts(Port(8080), Port(8090)) + check res.isSome + check res.get() == (Port(9000), Port(9000)) From 082411e050536706bf0a227278022d032b3fc3b7 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 8 May 2026 14:56:42 +0400 Subject: [PATCH 30/32] Use thread to set the nat ports --- storage/nat.nim | 83 ++++++++++++++++++++++++++++++++++----- tests/storage/testnat.nim | 4 +- 2 files changed, 76 insertions(+), 11 deletions(-) diff --git a/storage/nat.nim b/storage/nat.nim index 616c64e7..acbcbcbe 100644 --- a/storage/nat.nim +++ b/storage/nat.nim @@ -12,6 +12,7 @@ import std/[options, net] import results import pkg/chronos +import pkg/chronos/threadsync import pkg/chronicles import pkg/libp2p import pkg/libp2p/services/autorelayservice @@ -24,6 +25,8 @@ import ./discovery logScope: topics = "nat" +const NatPortMappingTimeout = 5.seconds + type NatConfig* = object case hasExtIp*: bool of true: extIp*: IpAddress @@ -35,26 +38,86 @@ type NatMapper* = ref object of RootObj discoveryPort*: Port hasUpnpMapping: bool -method mapNatPorts*(m: NatMapper): Option[(Port, Port)] {.base, gcsafe, raises: [].} = - if m.natConfig.hasExtIp: - return none((Port, Port)) +type MapNatPortsCtx = object + natConfig: NatConfig + tcpPort: Port + discoveryPort: Port + signal: ThreadSignalPtr + result: Option[(Port, Port)] + hasUpnpMapping: bool + +proc mapNatPortsThread(ctx: ptr MapNatPortsCtx) {.thread.} = + if ctx.natConfig.hasExtIp: + discard ctx.signal.fireSync() + return # Devices are recreated on each call: discover() costs ~200ms but only fires # when AutoNAT reports NotReachable, which is exactly when we want a fresh scan. 
let upnpRes = UpnpDevice.init() if upnpRes.isOk: - let ports = upnpRes.value.mapPorts(m.tcpPort, m.discoveryPort) + let ports = upnpRes.value.mapPorts(ctx.tcpPort, ctx.discoveryPort) if ports.isSome: - m.hasUpnpMapping = true - return ports + ctx.hasUpnpMapping = true + ctx.result = ports + discard ctx.signal.fireSync() + return let pmpRes = PmpDevice.init() if pmpRes.isOk: - let ports = pmpRes.value.mapPorts(m.tcpPort, m.discoveryPort) + let ports = pmpRes.value.mapPorts(ctx.tcpPort, ctx.discoveryPort) if ports.isSome: - return ports + ctx.result = ports - none((Port, Port)) + discard ctx.signal.fireSync() + +method mapNatPorts*( + m: NatMapper +): Future[Option[(Port, Port)]] {.async: (raises: [CancelledError]), base, gcsafe.} = + let signal = ThreadSignalPtr.new().valueOr: + warn "Failed to create ThreadSignalPtr for NAT port mapping" + return none((Port, Port)) + + var ctx = cast[ptr MapNatPortsCtx](createShared(MapNatPortsCtx)) + ctx[] = MapNatPortsCtx( + natConfig: m.natConfig, + tcpPort: m.tcpPort, + discoveryPort: m.discoveryPort, + signal: signal, + ) + + var thread: Thread[ptr MapNatPortsCtx] + var threadStarted = false + defer: + if threadStarted: + # Blocking the event loop here is acceptable: UPnP discover() is bounded + # by UPNP_TIMEOUT (200ms), so the worst-case stall is ~200ms. + joinThread(thread) + # Always sync hasUpnpMapping back, even on timeout or cancellation. + # If the thread mapped ports just after the timeout, close() will + # still clean them up on the router. 
+ if ctx.hasUpnpMapping: + m.hasUpnpMapping = true + freeShared(ctx) + discard signal.close() + + try: + createThread(thread, mapNatPortsThread, ctx) + threadStarted = true + except ValueError, ResourceExhaustedError: + warn "Failed to create thread for NAT port mapping" + return none((Port, Port)) + + try: + if not await signal.wait().withTimeout(NatPortMappingTimeout): + warn "NAT port mapping thread timed out" + return none((Port, Port)) + except CancelledError as exc: + raise exc + except AsyncError as exc: + warn "Error waiting for NAT port mapping thread", error = exc.msg + return none((Port, Port)) + + return ctx.result method handleNatStatus*( m: NatMapper, @@ -85,7 +148,7 @@ method handleNatStatus*( if dialBackAddr.isNone: warn "Got empty dialback address in AutoNat when node is NotReachable" else: - let maybePorts = m.mapNatPorts() + let maybePorts = await m.mapNatPorts() if maybePorts.isSome: let (tcpPort, udpPort) = maybePorts.get() diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index 27ff6e8e..78cf07f1 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -37,7 +37,9 @@ method deletePortMapping*( type MockNatMapper = ref object of NatMapper mappedPorts: Option[(Port, Port)] -method mapNatPorts*(m: MockNatMapper): Option[(Port, Port)] {.gcsafe, raises: [].} = +method mapNatPorts*( + m: MockNatMapper +): Future[Option[(Port, Port)]] {.async: (raises: [CancelledError]), gcsafe.} = m.mappedPorts suite "NatMapper.close": From 7e9c21160b2940e6c6e0f4493ccd141f4fef8442 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 8 May 2026 16:51:18 +0400 Subject: [PATCH 31/32] Provide different small fixes --- .../requests/node_debug_request.nim | 9 +++- storage/discovery.nim | 8 +-- storage/storage.nim | 11 ++-- tests/storage/testnat.nim | 50 ++++++++++--------- tests/storage/testnatutils.nim | 6 +-- 5 files changed, 49 insertions(+), 35 deletions(-) diff --git a/library/storage_thread_requests/requests/node_debug_request.nim 
b/library/storage_thread_requests/requests/node_debug_request.nim index fe9bd389..8c7dec7d 100644 --- a/library/storage_thread_requests/requests/node_debug_request.nim +++ b/library/storage_thread_requests/requests/node_debug_request.nim @@ -9,7 +9,6 @@ import std/[options] import chronos import chronicles import codexdht/discv5/spr -import pkg/libp2p/protocols/connectivity/autonat/service import ../../alloc import ../../../storage/conf import ../../../storage/rest/json @@ -60,7 +59,13 @@ proc getDebug( if node.discovery.dhtRecord.isSome: node.discovery.dhtRecord.get.toURI else: "", "announceAddresses": node.discovery.announceAddrs, "table": table, - "nat": {"reachability": $storage[].autonatService.networkReachability}, + "nat": { + "reachability": + if storage[].autonatService.isSome: + $storage[].autonatService.get.networkReachability + else: + "unknown" + }, } return ok($json) diff --git a/storage/discovery.nim b/storage/discovery.nim index 36c50b10..d9faf162 100644 --- a/storage/discovery.nim +++ b/storage/discovery.nim @@ -200,6 +200,8 @@ proc updateRecords*( d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR") proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) = + # Updates announce addresses only, not the DHT routing record. + # Relay addresses should not pollute DHT routing. 
d.announceAddrs = @addrs info "Updating announce record", addrs = d.announceAddrs d.providerRecord = SignedPeerRecord @@ -254,7 +256,8 @@ proc new*( key: PrivateKey, bindIp = IPv4_any(), bindPort = 0.Port, - announceAddrs: openArray[MultiAddress], + announceAddrs: openArray[MultiAddress] = [], + discoveryPort = 0.Port, bootstrapNodes: openArray[SignedPeerRecord] = [], store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!"), ): Discovery = @@ -265,8 +268,7 @@ proc new*( key: key, peerId: PeerId.init(key).expect("Should construct PeerId"), store: store ) - # Update with empty values to get a valid SPR - self.updateRecords(@[], Port(0)) + self.updateRecords(announceAddrs, discoveryPort) # -------------------------------------------------------------------------- # FIXME disable IP limits temporarily so we can run our workshop. Re-enable diff --git a/storage/storage.nim b/storage/storage.nim index 2176210f..26aa205d 100644 --- a/storage/storage.nim +++ b/storage/storage.nim @@ -85,6 +85,9 @@ proc start*(s: StorageServer) {.async.} = if s.config.nat.hasExtIp: # extip means that we assume the IP is reachable # So we just take the first peer addr and remap it with extip to keep the port only + if s.storageNode.switch.peerInfo.addrs.len == 0: + raise + newException(StorageError, "extip is set but switch has no listen addresses") @[ s.storageNode.switch.peerInfo.addrs[0].remapAddr( ip = some(s.config.nat.extIp), port = none(Port) @@ -94,7 +97,7 @@ proc start*(s: StorageServer) {.async.} = # If extip is not set, we have 2 choices: # 1- Announce the peer addrs contains detected addresses on the machine. # 2- Wait for AutoNat - # The probleme with 1 is that you will certainly announce private addresses + # The problem with 1 is that you will certainly announce private addresses # and if you advertise a CID, you will advertise these private addresses. 
# TODO: DHT client mode #s.storageNode.switch.peerInfo.addrs @@ -124,7 +127,8 @@ proc stop*(s: StorageServer) {.async.} = notice "Stopping Storage node" - s.natMapper.close() + if s.natMapper != nil: + s.natMapper.close() var futures = @[ s.storageNode.switch.stop(), @@ -273,8 +277,9 @@ proc new*( discovery = Discovery.new( switch.peerInfo.privateKey, - announceAddrs = @[listenMultiAddr], + announceAddrs = @[], bindPort = config.discoveryPort, + discoveryPort = config.discoveryPort, bootstrapNodes = config.bootstrapNodes, store = discoveryStore, ) diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index 78cf07f1..f843acf2 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -1,9 +1,8 @@ -import std/[net, importutils] +import std/[net, importutils, envvars] import pkg/chronos import ../../storage/utils/natutils import pkg/libp2p/[multiaddress, multihash, multicodec] -import pkg/libp2p/protocols/connectivity/autonatv2/service except setup -import pkg/libp2p/protocols/connectivity/autonatv2/types +import pkg/libp2p/protocols/connectivity/autonat/types import pkg/libp2p/protocols/connectivity/relay/client as relayClientModule import pkg/libp2p/services/autorelayservice except setup @@ -15,7 +14,6 @@ import ../../storage/nat import ../../storage/discovery import ../../storage/rng import ../../storage/utils -import ../../storage/utils/addrutils privateAccess(NatMapper) @@ -42,7 +40,7 @@ method mapNatPorts*( ): Future[Option[(Port, Port)]] {.async: (raises: [CancelledError]), gcsafe.} = m.mappedPorts -suite "NatMapper.close": +suite "NAT - NatMapper.close": test "does nothing when no upnp mapping": let mapper = MockNatMapper( natConfig: NatConfig(hasExtIp: false, nat: NatAuto), @@ -65,30 +63,15 @@ suite "NatMapper.close": check device.deletedPorts == @[(Port(8080), NatIpProtocol.Tcp), (Port(8090), NatIpProtocol.Udp)] -suite "remapAddr": - test "replaces protocol tcp with udp": - let ma = 
MultiAddress.init("/ip4/1.2.3.4/tcp/5000").expect("valid") - let remapped = ma.remapAddr(protocol = some("udp"), port = some(Port(9000))) - check remapped == MultiAddress.init("/ip4/1.2.3.4/udp/9000").expect("valid") - - test "replaces only port, keeping protocol": - let ma = MultiAddress.init("/ip4/1.2.3.4/tcp/5000").expect("valid") - let remapped = ma.remapAddr(port = some(Port(9000))) - check remapped == MultiAddress.init("/ip4/1.2.3.4/tcp/9000").expect("valid") - - test "replaces only ip, keeping protocol and port": - let ma = MultiAddress.init("/ip4/1.2.3.4/tcp/5000").expect("valid") - let remapped = ma.remapAddr(ip = some(parseIpAddress("8.8.8.8"))) - check remapped == MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid") - -asyncchecksuite "handleNatStatus": +asyncchecksuite "NAT - handleNatStatus": var sw: Switch var key: PrivateKey var disc: Discovery - let autoRelay = - AutoRelayService.new(1, relayClientModule.RelayClient.new(), nil, Rng.instance()) + var autoRelay: AutoRelayService setup: + autoRelay = + AutoRelayService.new(1, relayClientModule.RelayClient.new(), nil, Rng.instance()) key = PrivateKey.random(Rng.instance[]).get() disc = Discovery.new(key, announceAddrs = @[]) sw = newStandardSwitch() @@ -155,3 +138,22 @@ asyncchecksuite "handleNatStatus": check not autoRelay.isRunning check disc.announceAddrs == @[dialBack] + +suite "NAT - UPnP port mapping (requires NAT_TEST_UPNP=1)": + test "mapPorts and cleanup": + if getEnv("NAT_TEST_UPNP") != "1": + skip() + + let res = UpnpDevice.init() + check res.isOk + + let device = res.value + let ports = device.mapPorts(Port(8101), Port(8090)) + check ports.isSome + + let (tcp, udp) = ports.get() + check tcp == Port(8101) + check udp == Port(8090) + + check device.deletePortMapping(Port(8101), NatIpProtocol.Tcp).isOk + check device.deletePortMapping(Port(8090), NatIpProtocol.Udp).isOk diff --git a/tests/storage/testnatutils.nim b/tests/storage/testnatutils.nim index 9eea951a..10de4d18 100644 --- 
a/tests/storage/testnatutils.nim +++ b/tests/storage/testnatutils.nim @@ -46,7 +46,7 @@ method addPortMapping*( else: err("mapping failed") -suite "UpnpDevice.init": +suite "NAT - UpnpDevice.init": test "returns err when discover fails": check MockUpnpDev(discoverOk: false).init().isErr @@ -65,7 +65,7 @@ suite "UpnpDevice.init": test "returns ok when IP not routable": check MockUpnpDev(discoverOk: true, igdResult: IGDIpNotRoutable).init().isOk -suite "UpnpDevice.mapPorts": +suite "NAT - UpnpDevice.mapPorts": test "returns none when addPortMapping fails": check MockUpnpDev(addPortMappingOk: false).mapPorts(Port(8080), Port(8090)).isNone @@ -82,7 +82,7 @@ suite "UpnpDevice.mapPorts": let d = MockUpnpDev(addPortMappingOk: true, failOnProto: some(NatIpProtocol.Udp)) check d.mapPorts(Port(8080), Port(8090)).isNone -suite "PmpDevice.mapPorts": +suite "NAT - PmpDevice.mapPorts": test "returns none when mapping fails": check MockPmpDev(addPortMappingOk: false).mapPorts(Port(8080), Port(8090)).isNone From 14c934676e1922626c93c70eaad7a215081d192a Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 8 May 2026 17:20:04 +0400 Subject: [PATCH 32/32] Remove NatStrategy upnp and pmp --- storage/conf.nim | 6 +----- storage/utils/natutils.nim | 2 -- tests/storage/testnat.nim | 1 + 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/storage/conf.nim b/storage/conf.nim index 487dac94..0b6a1c26 100644 --- a/storage/conf.nim +++ b/storage/conf.nim @@ -157,7 +157,7 @@ type nat* {. desc: "Specify method to use for determining public address. 
" & - "Must be one of: auto, upnp, pmp, extip:.", + "Must be one of: auto, extip:.", defaultValue: defaultNatConfig(), defaultValueDesc: "auto", name: "nat" @@ -406,10 +406,6 @@ func parse*(T: type NatConfig, p: string): Result[NatConfig, string] = case p.toLowerAscii of "auto": return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAuto)) - of "upnp": - return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)) - of "pmp": - return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp)) else: if p.startsWith("extip:"): try: diff --git a/storage/utils/natutils.nim b/storage/utils/natutils.nim index 7364efe6..45abd178 100644 --- a/storage/utils/natutils.nim +++ b/storage/utils/natutils.nim @@ -15,8 +15,6 @@ const NATPMP_LIFETIME* = 60 * 60 # seconds type NatStrategy* = enum NatAuto - NatUpnp - NatPmp type NatIpProtocol* = enum Tcp diff --git a/tests/storage/testnat.nim b/tests/storage/testnat.nim index f843acf2..ff3da3cb 100644 --- a/tests/storage/testnat.nim +++ b/tests/storage/testnat.nim @@ -143,6 +143,7 @@ suite "NAT - UPnP port mapping (requires NAT_TEST_UPNP=1)": test "mapPorts and cleanup": if getEnv("NAT_TEST_UPNP") != "1": skip() + return let res = UpnpDevice.init() check res.isOk