From 4b3de54fc55db152f8de5c63e7f358b65c260940 Mon Sep 17 00:00:00 2001
From: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Date: Wed, 16 Jul 2025 13:25:06 +0200
Subject: [PATCH] chore: improve connection proc (#3509)

---
 .../requests/peer_manager_request.nim   | 23 +++----------------
 tests/test_peer_manager.nim             |  6 +++++
 waku/node/peer_manager/peer_manager.nim |  8 -------
 3 files changed, 9 insertions(+), 28 deletions(-)

diff --git a/library/waku_thread_requests/requests/peer_manager_request.nim b/library/waku_thread_requests/requests/peer_manager_request.nim
index a7e643a21..28617ce09 100644
--- a/library/waku_thread_requests/requests/peer_manager_request.nim
+++ b/library/waku_thread_requests/requests/peer_manager_request.nim
@@ -56,22 +56,6 @@ proc destroyShared(self: ptr PeerManagementRequest) =
 
   deallocShared(self)
 
-proc connectTo(
-    node: WakuNode, peerMultiAddr: string, dialTimeout: Duration
-): Result[void, string] =
-  let peers = (peerMultiAddr).split(",").mapIt(strip(it))
-
-  # TODO: the dialTimeout is not being used at all!
-  let connectFut = node.connectToNodes(peers, source = "static")
-  while not connectFut.finished():
-    poll()
-
-  if not connectFut.completed():
-    let msg = "Timeout expired."
-    return err(msg)
-
-  return ok()
-
 proc process*(
     self: ptr PeerManagementRequest, waku: Waku
 ): Future[Result[string, string]] {.async.} =
@@ -80,10 +64,9 @@ proc process*(
 
   case self.operation
   of CONNECT_TO:
-    let ret = waku.node.connectTo($self[].peerMultiAddr, self[].dialTimeout)
-    if ret.isErr():
-      error "CONNECT_TO failed", error = ret.error
-      return err(ret.error)
+    let peers = ($self[].peerMultiAddr).split(",").mapIt(strip(it))
+    await waku.node.connectToNodes(peers, source = "static")
+    return ok("")
   of GET_ALL_PEER_IDS:
     ## returns a comma-separated string of peerIDs
     let peerIDs =
diff --git a/tests/test_peer_manager.nim b/tests/test_peer_manager.nim
index 889e397cc..e36f2a819 100644
--- a/tests/test_peer_manager.nim
+++ b/tests/test_peer_manager.nim
@@ -567,6 +567,9 @@ procSuite "Peer Manager":
     # Connect to relay peers
     await nodes[0].peerManager.connectToRelayPeers()
 
+    # wait for the connections to settle
+    await sleepAsync(chronos.milliseconds(500))
+
     check:
       # Peerstore track all three peers
       nodes[0].peerManager.switch.peerStore.peers().len == 3
@@ -637,6 +640,9 @@ procSuite "Peer Manager":
     # Connect to relay peers
     await nodes[0].peerManager.manageRelayPeers()
 
+    # wait for the connections to settle
+    await sleepAsync(chronos.milliseconds(500))
+
     check:
       # Peerstore track all three peers
      nodes[0].peerManager.switch.peerStore.peers().len == 3
diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim
index 7deff0593..0df7c672d 100644
--- a/waku/node/peer_manager/peer_manager.nim
+++ b/waku/node/peer_manager/peer_manager.nim
@@ -372,14 +372,6 @@ proc connectToNodes*(
   info "Finished dialing multiple peers",
     successfulConns = connectedPeers.len, attempted = nodes.len
 
-  # The issue seems to be around peers not being fully connected when
-  # trying to subscribe. So what we do is sleep to guarantee nodes are
-  # fully connected.
-  #
-  # This issue was known to Dmitiry on nim-libp2p and may be resolvable
-  # later.
-  await sleepAsync(chronos.seconds(5))
-
 proc disconnectNode*(pm: PeerManager, peerId: PeerId) {.async.} =
   await pm.switch.disconnect(peerId)
 
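
Note, not part of the patch above: the removed connectTo helper was the only code that even nominally consumed dialTimeout (its own TODO admits the value was ignored), and the new CONNECT_TO branch awaits connectToNodes with no upper bound. If a per-request dial timeout is ever reintroduced, the sketch below shows one way to bound the await with chronos' withTimeout instead of the old poll() busy-loop. This is a minimal illustration only; the proc name connectWithTimeout and the import paths are assumptions, not code from this change.

import std/[sequtils, strutils]
import chronos, results
import waku/node/waku_node # assumed module path exposing WakuNode and connectToNodes

# Sketch: wrap the connection attempt in withTimeout, which completes with
# `false` if the wrapped future is still pending when the timeout fires.
proc connectWithTimeout(
    node: WakuNode, peerMultiAddr: string, dialTimeout: Duration
): Future[Result[void, string]] {.async.} =
  let peers = peerMultiAddr.split(",").mapIt(strip(it))
  let connectFut = node.connectToNodes(peers, source = "static")
  if not await connectFut.withTimeout(dialTimeout):
    return err("Timeout expired.")
  return ok()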