deploy: 192d4dd66bbc3f874f5ac1a418642f3b3fc9b584

This commit is contained in:
jm-clius 2021-04-16 10:19:59 +00:00
parent f452bd4085
commit 83635c12dc
4 changed files with 42 additions and 7 deletions

View File

@@ -2,7 +2,7 @@
# libtool - Provide generalized library-building support services.
# Generated automatically by config.status (libbacktrace) version-unused
-# Libtool was configured on host fv-az235-259:
+# Libtool was configured on host fv-az199-41:
# NOTE: Changes made to this file will be lost: look at ltmain.sh.
#
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,

View File

@@ -76,6 +76,10 @@ proc dialPeer(pm: PeerManager, peerId: PeerID,
proc loadFromStorage(pm: PeerManager) =
# Load peers from storage, if available
proc onData(peerId: PeerID, storedInfo: StoredInfo, connectedness: Connectedness) =
if peerId == pm.switch.peerInfo.peerId:
# Do not manage self
return
pm.peerStore.addressBook.set(peerId, storedInfo.addrs)
pm.peerStore.protoBook.set(peerId, storedInfo.protos)
pm.peerStore.keyBook.set(peerId, storedInfo.publicKey)
@@ -149,9 +153,17 @@ proc hasPeer*(pm: PeerManager, peerInfo: PeerInfo, proto: string): bool =
pm.peerStore.get(peerInfo.peerId).protos.contains(proto)
proc hasPeers*(pm: PeerManager, proto: string): bool =
# Returns `true` if manager has any peers for the specified protocol
pm.peers.anyIt(it.protos.contains(proto))
proc addPeer*(pm: PeerManager, peerInfo: PeerInfo, proto: string) =
# Adds peer to manager for the specified protocol
if peerInfo.peerId == pm.switch.peerInfo.peerId:
# Do not attempt to manage our unmanageable self
return
debug "Adding peer to manager", peerId = peerInfo.peerId, addr = peerInfo.addrs[0], proto = proto
# ...known addresses

View File

@@ -419,10 +419,31 @@ proc mountRelay*(node: WakuNode, topics: seq[string] = newSeq[string](), rlnRela
info "mounting relay"
-node.subscribe(defaultTopic, none(TopicHandler))
+if node.peerManager.hasPeers(WakuRelayCodec):
trace "Found previous WakuRelay peers. Reconnecting."
# Reconnect to previous relay peers
waitFor node.peerManager.reconnectPeers(WakuRelayCodec)
## GossipSub specifies a backoff period after disconnecting and unsubscribing before attempting
## to re-graft peer on previous topics. We have to respect this period before starting WakuRelay.
trace "Backing off before grafting after reconnecting to WakuRelay peers", backoff=wakuRelay.parameters.pruneBackoff
-for topic in topics:
-node.subscribe(topic, none(TopicHandler))
+proc subscribeFuture() {.async.} =
+# Subscribe after the backoff period
await sleepAsync(wakuRelay.parameters.pruneBackoff)
node.subscribe(defaultTopic, none(TopicHandler))
for topic in topics:
node.subscribe(topic, none(TopicHandler))
discard subscribeFuture() # Dispatch future, but do not await.
else:
# Subscribe immediately
node.subscribe(defaultTopic, none(TopicHandler))
for topic in topics:
node.subscribe(topic, none(TopicHandler))
if rlnRelayEnabled:
# TODO pass rln relay inputs to this proc, right now it uses default values that are set in the mountRlnRelay proc
@@ -438,9 +459,6 @@ proc mountRelay*(node: WakuNode, topics: seq[string] = newSeq[string](), rlnRela
waitFor node.wakuRelay.start()
info "relay mounted and started successfully"
# Reconnect to previous relay peers
waitFor node.peerManager.reconnectPeers(WakuRelayCodec)
## Helpers
proc dialPeer*(n: WakuNode, address: string) {.async.} =

View File

@@ -41,6 +41,11 @@ method initPubSub*(w: WakuRelay) =
w.verifySignature = false
w.sign = false
# Here we can fine-tune GossipSub params for our purposes
w.parameters = GossipSubParams.init()
# Setting pruneBackoff allows us to restart nodes and trigger a re-subscribe within reasonable time.
w.parameters.pruneBackoff = 30.seconds
procCall GossipSub(w).initPubSub()
w.init()