Mirror of https://github.com/waku-org/nwaku.git (synced 2025-02-24 21:08:38 +00:00)
chore: update submodules and bump Nim to 1.6.6 (#1307)
* chore: update submodules
* fix: libp2p now provides a callback to update peer addrs
* fix: breaking change in the EthereumNode constructor
* fix: contentType type has changed (again)
* fix: explicit future type
* fix: nim 1.6.6 error handling requirements
* fix: missed a spot - peer info addrs in sim2
* fix: help compiler a bit here
parent ea8d72188e
commit 16c85db43c
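Several of the "nim 1.6.6 error handling requirements" fixes below swap a module-level raises pragma for the version-gated form that newer Nim's stricter exception tracking expects. The pattern, exactly as it appears in the hunks:

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}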
.gitmodules (vendored, 1 line added)
@@ -151,4 +151,5 @@
 [submodule "vendor/zerokit"]
   path = vendor/zerokit
   url = https://github.com/vacp2p/zerokit.git
   ignore = dirty
+  branch = master
@@ -236,7 +236,7 @@ proc new*(T: type WakuBridge,
   # Setup Waku v1 node
   var
     nodev1 = newEthereumNode(keys = nodev1Key, address = nodev1Address,
-      networkId = NetworkId(1), chain = nil, clientId = ClientIdV1,
+      networkId = NetworkId(1), clientId = ClientIdV1,
       addAllCapabilities = false, bindUdpPort = nodev1Address.udpPort, bindTcpPort = nodev1Address.tcpPort, rng = rng)

   nodev1.addCapability Waku # Always enable Waku protocol
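This EthereumNode constructor change recurs throughout the commit: nim-eth dropped the database (`chain`) argument, and the UDP/TCP bind ports are now passed explicitly. A minimal calling sketch, with placeholder `nodeKey`, `nodeAddress`, client-id, and `rng` values (keyword names as in the hunks):

var node = newEthereumNode(keys = nodeKey, address = nodeAddress,
  networkId = NetworkId(1), clientId = "example-client",  # placeholder id
  addAllCapabilities = false,          # opt in to capabilities explicitly
  bindUdpPort = nodeAddress.udpPort,   # assume same as external
  bindTcpPort = nodeAddress.tcpPort,
  rng = rng)
node.addCapability Waku  # enable just the protocol(s) needed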
@@ -30,7 +30,6 @@ proc run(config: WakuNodeConf, rng: ref HmacDrbgContext) =
   var node = newEthereumNode(config.nodekey, # Node identifier
     address, # Address reachable for incoming requests
     NetworkId(1), # Network Id, only applicable for ETH protocol
-    nil, # Database, not required for Waku
     clientId, # Client id string
     addAllCapabilities = false, # Disable default all RLPx capabilities
     bindUdpPort = address.udpPort, # Assume same as external
@@ -17,7 +17,7 @@ proc setupTestNode*(
   let
     keys1 = keys.KeyPair.random(rng[])
     address = localAddress(nextPort)
-  result = newEthereumNode(keys1, address, NetworkId(1), nil,
+  result = newEthereumNode(keys1, address, NetworkId(1),
     addAllCapabilities = false,
     bindUdpPort = address.udpPort, # Assume same as external
     bindTcpPort = address.tcpPort, # Assume same as external
@@ -20,7 +20,7 @@ proc setupNode(capabilities: varargs[ProtocolInfo, `protocolInfo`],
   srvAddress = Address(ip: parseIpAddress("0.0.0.0"), tcpPort: Port(30303),
                        udpPort: Port(30303))

-  result = newEthereumNode(keypair, srvAddress, NetworkId(1), nil, "waku test rpc",
+  result = newEthereumNode(keypair, srvAddress, NetworkId(1), "waku test rpc",
     addAllCapabilities = false, bindUdpPort = srvAddress.udpPort, bindTcpPort = srvAddress.tcpPort, rng = rng)
   for capability in capabilities:
     result.addCapability capability
@@ -46,7 +46,7 @@ procSuite "Waku Mail Client":

     # Simple mailserver part
     let peer = simpleServer.peerPool.connectedNodes[clientNode]
-    var f = peer.nextMsg(Waku.p2pRequest)
+    var f: Future[Waku.p2pRequest] = peer.nextMsg(Waku.p2pRequest)
     require await f.withTimeout(transmissionTimeout)
     let response = f.read()
     let decoded = decode(response.envelope.data, symKey = some(symKey))
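This is the "explicit future type" fix, applied again to p2pRequestComplete in requestMail further down: under Nim 1.6.6 the compiler apparently no longer infers the future type of `nextMsg` for these generated message types, so it is spelled out:

# Before: var f = peer.nextMsg(Waku.p2pRequest)   # type left to inference
# After: the Future type is stated so the 1.6.6 compiler can resolve it
var f: Future[Waku.p2pRequest] = peer.nextMsg(Waku.p2pRequest)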
@@ -4,8 +4,8 @@ import
   stew/shims/net,
   chronicles,
   testutils/unittests,
-  presto,
-  libp2p/peerid,
+  presto, presto/client as presto_client,
+  libp2p/peerinfo,
   libp2p/multiaddress,
   libp2p/crypto/crypto
 import
@@ -46,7 +46,7 @@ suite "REST API - Debug":
     # Then
     check:
       response.status == 200
-      response.contentType == $MIMETYPE_JSON
+      $response.contentType == $MIMETYPE_JSON
       response.data.listenAddresses == @[$node.switch.peerInfo.addrs[^1] & "/p2p/" & $node.switch.peerInfo.peerId]

     await restServer.stop()
@@ -6,7 +6,7 @@ import
   stew/shims/net,
   chronicles,
   testutils/unittests,
-  presto,
+  presto, presto/client as presto_client,
   libp2p/crypto/crypto,
   libp2p/protocols/pubsub/pubsub
 import
@@ -59,7 +59,7 @@ suite "REST API - Relay":
     # Then
     check:
       response.status == 200
-      response.contentType == $MIMETYPE_TEXT
+      $response.contentType == $MIMETYPE_TEXT
       response.data == "OK"

     check:
@@ -109,7 +109,7 @@ suite "REST API - Relay":
     # Then
     check:
       response.status == 200
-      response.contentType == $MIMETYPE_TEXT
+      $response.contentType == $MIMETYPE_TEXT
       response.data == "OK"

     check:
@@ -156,7 +156,7 @@ suite "REST API - Relay":
     # Then
     check:
       response.status == 200
-      response.contentType == $MIMETYPE_JSON
+      $response.contentType == $MIMETYPE_JSON
       response.data.len == 3
       response.data.all do (msg: RelayWakuMessage) -> bool:
         msg.payload == Base64String.encode("TEST-1") and
@@ -214,7 +214,7 @@ suite "REST API - Relay":
     # Then
     check:
       response.status == 200
-      response.contentType == $MIMETYPE_TEXT
+      $response.contentType == $MIMETYPE_TEXT
       response.data == "OK"

     # TODO: Check for the message to be published to the topic
@@ -170,8 +170,10 @@ procSuite "WakuNode":

     check:
       # Check that underlying peer info contains only bindIp before starting
-      node.switch.peerInfo.addrs.len == 1
-      node.switch.peerInfo.addrs.contains(bindEndpoint)
+      node.switch.peerInfo.listenAddrs.len == 1
+      node.switch.peerInfo.listenAddrs.contains(bindEndpoint)
+      # Underlying peer info has not updated addrs before starting
+      node.switch.peerInfo.addrs.len == 0

       node.announcedAddresses.len == 1
       node.announcedAddresses.contains(announcedEndpoint)
@@ -179,8 +181,11 @@ procSuite "WakuNode":
     await node.start()

     check:
-      # Check that underlying peer info is updated with announced address
       node.started
+      # Underlying peer info listenAddrs has not changed
+      node.switch.peerInfo.listenAddrs.len == 1
+      node.switch.peerInfo.listenAddrs.contains(bindEndpoint)
+      # Check that underlying peer info is updated with announced address
       node.switch.peerInfo.addrs.len == 1
       node.switch.peerInfo.addrs.contains(announcedEndpoint)
@@ -318,7 +318,7 @@ procSuite "WakuNode - Relay":

     #delete websocket peer address
     # TODO: a better way to find the index - this is too brittle
-    node2.switch.peerInfo.addrs.delete(0)
+    node2.switch.peerInfo.listenAddrs.delete(0)

     await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -78,11 +78,11 @@ proc initNodeCmd(shift: int, staticNodes: seq[string] = @[], master = false, lab

   info "Address", address
   # TODO: Need to port shift
-  peerInfo.addrs.add(hostAddress)
+  peerInfo.listenAddrs.add(hostAddress)
   let id = $peerInfo.peerId

-  info "PeerInfo", id = id, addrs = peerInfo.addrs
-  let listenStr = $peerInfo.addrs[0] & "/p2p/" & id
+  info "PeerInfo", id = id, addrs = peerInfo.listenAddrs
+  let listenStr = $peerInfo.listenAddrs[0] & "/p2p/" & id

   result.cmd = wakuNodeBin & " " & defaults & " "
   result.cmd &= "--nodekey:" & hkey & " "
vendor/dnsclient.nim (vendored)
@@ -1 +1 @@
-Subproject commit 4960de2b345f567b12f09a08e9967af104ab39a3
+Subproject commit fcd7443634b950eaea574e5eaa00a628ae029823

vendor/nim-chronos (vendored)
@@ -1 +1 @@
-Subproject commit 1334cdfebdc6182ff752e7d20796d9936cc8faa3
+Subproject commit 266e2c0ed26b455872bccb3ddbd316815a283659

vendor/nim-confutils (vendored)
@@ -1 +1 @@
-Subproject commit 40c6f0b378a34d4812e410e9d6762e21f059de4c
+Subproject commit a26bfab7e5fb2f9fc018e5d778c169bc05772ee6

vendor/nim-eth (vendored)
@@ -1 +1 @@
-Subproject commit b057057e64cc041d797e894a728963b10fbfac69
+Subproject commit 43b29fdb1a3c5e15167ce7a477ae447b1df3a8bb

vendor/nim-faststreams (vendored)
@@ -1 +1 @@
-Subproject commit 682b9c6541bbb82b09e415fbe9ef944570b62f76
+Subproject commit 6112432b3a81d9db116cd5d64c39648881cfff29

vendor/nim-json-rpc (vendored)
@@ -1 +1 @@
-Subproject commit 335f292a5816910aebf215e3a88db8a665133e0e
+Subproject commit 64dbf122d7457ceabdacfb26131e929f26f15581

vendor/nim-libp2p (vendored)
@@ -1 +1 @@
-Subproject commit abbeaab684c500f4c6ff5881797bb8f184b41ccc
+Subproject commit a69301f39292c6bd012427e3a172a7687222fa89

vendor/nim-nat-traversal (vendored)
@@ -1 +1 @@
-Subproject commit 48f94ebd194874d98da14a2494e89fc3a619a7ae
+Subproject commit 27d314d65c9078924b3239fe4e2f5af0c512b28c

vendor/nim-presto (vendored)
@@ -1 +1 @@
-Subproject commit d298b0ba307023f2664757cee92ec94dde6acd18
+Subproject commit 8bc34dd6f60b6bfe22049323100355671e4137a2

vendor/nim-stew (vendored)
@@ -1 +1 @@
-Subproject commit 018760954a1530b7336aed7133393908875d860f
+Subproject commit f2e58ba4c8da65548c824e4fa8732db9739f6505

vendor/nim-toml-serialization (vendored)
@@ -1 +1 @@
-Subproject commit 1043942d3479ba931437b371834de0d19218e621
+Subproject commit 4879c8893c1d89495f088b9e2488417935d235d4

vendor/nim-web3 (vendored)
@@ -1 +1 @@
-Subproject commit 06ef3497d4017f0f85edcadb6f743452566a2448
+Subproject commit 78a9e5d097c755e16ddbccbfc488e7117b04ec3b

vendor/nim-websock (vendored)
@@ -1 +1 @@
-Subproject commit af8779d9d95e488ec9fd2d584b6328bd506c702b
+Subproject commit acbe30e9ca1e51dcbbfe4c552ee6f16c7eede538

vendor/nimbus-build-system (vendored)
@@ -1 +1 @@
-Subproject commit 07e37730c627c19848e8662352976afe7e15bb1f
+Subproject commit dc535cd4627e6c1ec023ee6d6d0c3e5d66d414e5

vendor/rln (vendored)
@@ -1 +1 @@
-Subproject commit 7ac74183f8b69b399e3bc96c1ae8ab61c026dc43
+Subproject commit cb615731d7b04fb7882845db95474cf11700cc02
@@ -39,7 +39,7 @@ proc run(config: WakuNodeConf, rng: ref HmacDrbgContext)
     else: @[]

   # Set-up node
-  var node = newEthereumNode(config.nodekey, address, NetworkId(1), nil, clientId,
+  var node = newEthereumNode(config.nodekey, address, NetworkId(1), clientId,
     addAllCapabilities = false, bootstrapNodes = bootnodes, bindUdpPort = address.udpPort, bindTcpPort = address.tcpPort, rng = rng)
   if not config.bootnodeOnly:
     node.addCapability Waku # Always enable Waku protocol
@@ -62,7 +62,7 @@ proc requestMail*(node: EthereumNode, peerId: NodeId, request: MailRequest,
   traceAsyncErrors peer.get().p2pRequest(env)

   # Wait for the Request Complete packet
-  var f = peer.get().nextMsg(Waku.p2pRequestComplete)
+  var f: Future[Waku.p2pRequestComplete] = peer.get().nextMsg(Waku.p2pRequestComplete)
   if await f.withTimeout(requestCompleteTimeout):
     let response = f.read()
     # TODO: I guess the idea is to check requestId (Hash) also?
@@ -27,7 +27,7 @@ proc constructMultiaddrStr*(wireaddr: MultiAddress, peerId: PeerId): string =

 proc constructMultiaddrStr*(peerInfo: PeerInfo): string =
   # Constructs a multiaddress with both location (wire) address and p2p identity
-  constructMultiaddrStr(peerInfo.addrs[0], peerInfo.peerId)
+  constructMultiaddrStr(peerInfo.listenAddrs[0], peerInfo.peerId)

 proc constructMultiaddrStr*(remotePeerInfo: RemotePeerInfo): string =
   # Constructs a multiaddress with both location (wire) address and p2p identity
@@ -33,8 +33,8 @@ proc installDebugApiHandlers*(router: var RestRouter, node: WakuNode) =

 #### Client

-proc decodeBytes*(t: typedesc[DebugWakuInfo], data: openArray[byte], contentType: string): RestResult[DebugWakuInfo] =
-  if MediaType.init(contentType) != MIMETYPE_JSON:
+proc decodeBytes*(t: typedesc[DebugWakuInfo], data: openArray[byte], contentType: Opt[ContentTypeData]): RestResult[DebugWakuInfo] =
+  if MediaType.init($contentType) != MIMETYPE_JSON:
     error "Unsupported respose contentType value", contentType = contentType
     return err("Unsupported response contentType")
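This is the "contentType type has changed (again)" fix: presto's client-side decodeBytes hook now receives an Opt[ContentTypeData] rather than a plain string, so the value is stringified with `$` before being parsed as a MediaType. The shape of the pattern, sketched for a hypothetical MyResponse type with a stand-in jsonDecode helper:

proc decodeBytes*(t: typedesc[MyResponse], data: openArray[byte],
                  contentType: Opt[ContentTypeData]): RestResult[MyResponse] =
  # Stringify the optional content type before comparing media types
  if MediaType.init($contentType) != MIMETYPE_JSON:
    return err("Unsupported response contentType")
  # jsonDecode stands in for whatever JSON deserializer the module uses
  ok(jsonDecode(MyResponse, data))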
@@ -163,8 +163,8 @@ proc encodeBytes*(value: seq[PubSubTopicString],
   return ok(encoded)

 proc decodeBytes*(t: typedesc[string], value: openarray[byte],
-                  contentType: string): RestResult[string] =
-  if MediaType.init(contentType) != MIMETYPE_TEXT:
+                  contentType: Opt[ContentTypeData]): RestResult[string] =
+  if MediaType.init($contentType) != MIMETYPE_TEXT:
     error "Unsupported contentType value", contentType = contentType
     return err("Unsupported contentType")
@@ -181,8 +181,8 @@ proc relayPostSubscriptionsV1*(body: RelayPostSubscriptionsRequest): RestRespons
 proc relayDeleteSubscriptionsV1*(body: RelayDeleteSubscriptionsRequest): RestResponse[string] {.rest, endpoint: "/relay/v1/subscriptions", meth: HttpMethod.MethodDelete.}


-proc decodeBytes*(t: typedesc[RelayGetMessagesResponse], data: openArray[byte], contentType: string): RestResult[RelayGetMessagesResponse] =
-  if MediaType.init(contentType) != MIMETYPE_JSON:
+proc decodeBytes*(t: typedesc[RelayGetMessagesResponse], data: openArray[byte], contentType: Opt[ContentTypeData]): RestResult[RelayGetMessagesResponse] =
+  if MediaType.init($contentType) != MIMETYPE_JSON:
     error "Unsupported respose contentType value", contentType = contentType
     return err("Unsupported response contentType")
@@ -96,22 +96,6 @@ proc protocolMatcher(codec: string): Matcher =

   return match

-proc updateSwitchPeerInfo(node: WakuNode) =
-  ## TODO: remove this when supported upstream
-  ##
-  ## nim-libp2p does not yet support announcing addrs
-  ## different from bound addrs.
-  ##
-  ## This is a temporary workaround to replace
-  ## peer info addrs in switch to announced
-  ## addresses.
-  ##
-  ## WARNING: this should only be called once the switch
-  ## has already been started.
-
-  if node.announcedAddresses.len > 0:
-    node.switch.peerInfo.addrs = node.announcedAddresses
-
 template ip4TcpEndPoint(address, port): MultiAddress =
   MultiAddress.init(address, tcpProtocol, port)
@@ -807,10 +791,6 @@ proc start*(node: WakuNode) {.async.} =

   waku_version.set(1, labelValues=[git_version])

-  ## NB: careful when moving this. We need to start the switch with the bind address
-  ## BEFORE updating with announced addresses for the sake of identify.
-  await node.switch.start()
-
   let peerInfo = node.switch.peerInfo
   info "PeerInfo", peerId = peerInfo.peerId, addrs = peerInfo.addrs
   var listenStr = ""
@@ -826,8 +806,15 @@ proc start*(node: WakuNode) {.async.} =
   if not node.wakuRelay.isNil():
     await node.startRelay()

-  ## Update switch peer info with announced addrs
-  node.updateSwitchPeerInfo()
+  ## The switch uses this mapper to update peer info addrs
+  ## with announced addrs after start
+  let addressMapper =
+    proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
+      return node.announcedAddresses
+  node.switch.peerInfo.addressMappers.add(addressMapper)
+
+  ## The switch will update addresses after start using the addressMapper
+  await node.switch.start()

   node.started = true
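Taken together with the test changes above, the intended behaviour after this refactor: the manual updateSwitchPeerInfo workaround is gone, and libp2p's addressMappers now rewrite the announced addrs when the switch starts. A sketch of the resulting state transitions, using the endpoint names from the tests:

# Before start(): only the bind address is known.
assert node.switch.peerInfo.listenAddrs.contains(bindEndpoint)
assert node.switch.peerInfo.addrs.len == 0

await node.start()  # the switch runs the registered addressMappers

# After start(): listenAddrs still holds the bind address, while addrs
# now carries the announced (external) addresses.
assert node.switch.peerInfo.listenAddrs.contains(bindEndpoint)
assert node.switch.peerInfo.addrs.contains(announcedEndpoint)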
@@ -1,5 +1,9 @@
 # Waku Switch utils.
-{.push raises: [TLSStreamProtocolError, IOError, Defect].}
+when (NimMajor, NimMinor) < (1, 4):
+  {.push raises: [Defect].}
+else:
+  {.push raises: [].}

 import
   std/options,
   chronos, chronicles,
@@ -14,9 +18,10 @@ proc withWsTransport*(b: SwitchBuilder): SwitchBuilder =
   b.withTransport(proc(upgr: Upgrade): Transport = WsTransport.new(upgr))

 proc getSecureKey(path : string): TLSPrivateKey
-  {.raises: [Defect,TLSStreamProtocolError, IOError].} =
+  {.raises: [Defect, IOError].} =

   trace "Key path is.", path=path
-  var stringkey: string = readFile(path)
+  let stringkey: string = readFile(path)
   try:
     let key = TLSPrivateKey.init(stringkey)
     return key
@@ -24,9 +29,10 @@ proc getSecureKey(path : string): TLSPrivateKey
     debug "exception raised from getSecureKey", msg=exc.msg

 proc getSecureCert(path : string): TLSCertificate
-  {.raises: [Defect,TLSStreamProtocolError, IOError].} =
+  {.raises: [Defect, IOError].} =

   trace "Certificate path is.", path=path
-  var stringCert: string = readFile(path)
+  let stringCert: string = readFile(path)
   try:
     let cert = TLSCertificate.init(stringCert)
     return cert
@@ -35,7 +41,9 @@ proc getSecureCert(path : string): TLSCertificate

 proc withWssTransport*(b: SwitchBuilder,
                        secureKeyPath: string,
-                       secureCertPath: string): SwitchBuilder =
+                       secureCertPath: string): SwitchBuilder
+                       {.raises: [Defect, IOError].} =

   let key : TLSPrivateKey = getSecureKey(secureKeyPath)
   let cert : TLSCertificate = getSecureCert(secureCertPath)
   b.withTransport(proc(upgr: Upgrade): Transport = WsTransport.new(upgr,
@@ -63,7 +71,7 @@ proc newWakuSwitch*(
     wssEnabled: bool = false,
     secureKeyPath: string = "",
     secureCertPath: string = ""): Switch
-    {.raises: [Defect,TLSStreamProtocolError,IOError, LPError].} =
+    {.raises: [Defect, IOError, LPError].} =

   var b = SwitchBuilder
     .new()
@@ -2,7 +2,10 @@
 ##
 ## See https://github.com/vacp2p/specs/blob/master/specs/waku/v2/waku-relay.md
 ## for spec.
-{.push raises: [Defect].}
+when (NimMajor, NimMinor) < (1, 4):
+  {.push raises: [Defect].}
+else:
+  {.push raises: [].}

 import
   std/[tables, sets],
@@ -98,11 +98,15 @@ proc queryLoop*(w: WakuStoreClient, req: HistoryQuery, peers: seq[RemotePeerInfo

   let messagesList = queryFuturesList
     .map(proc (fut: Future[WakuStoreResult[seq[WakuMessage]]]): seq[WakuMessage] =
-      # These futures have been awaited before using allFutures(). Call completed() just as a sanity check.
-      if not fut.completed() or fut.read().isErr():
-        return @[]
+      try:
+        # fut.read() can raise a CatchableError
+        # These futures have been awaited before using allFutures(). Call completed() just as a sanity check.
+        if not fut.completed() or fut.read().isErr():
+          return @[]

-      fut.read().value
+        fut.read().value
+      except CatchableError:
+        return @[]
     )
     .concat()
     .deduplicate()
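The same guard is applied to the legacy store's queryLoop below. Isolated from the mapping pipeline, the shape of the fix is this (a sketch; readOrEmpty is a hypothetical helper name): even though the futures were already awaited via allFutures(), read() is declared as possibly raising, so Nim 1.6.6's stricter effect tracking wants the call inside try/except:

proc readOrEmpty(fut: Future[WakuStoreResult[seq[WakuMessage]]]): seq[WakuMessage] =
  try:
    # completed() is only a sanity check; the future was awaited earlier
    if not fut.completed() or fut.read().isErr():
      return @[]
    fut.read().value
  except CatchableError:
    return @[]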
@@ -366,11 +366,15 @@ proc queryLoop(w: WakuStore, req: HistoryQuery, candidateList: seq[RemotePeerInf

   let messagesList = queriesList
     .map(proc (fut: Future[WakuStoreResult[seq[WakuMessage]]]): seq[WakuMessage] =
-      # These futures have been awaited before using allFutures(). Call completed() just as a sanity check.
-      if not fut.completed() or fut.read().isErr():
-        return @[]
+      try:
+        # fut.read() can raise a CatchableError
+        # These futures have been awaited before using allFutures(). Call completed() just as a sanity check.
+        if not fut.completed() or fut.read().isErr():
+          return @[]

-      fut.read().value
+        fut.read().value
+      except CatchableError:
+        return @[]
     )
     .concat()
     .deduplicate()
@@ -163,6 +163,6 @@ proc toRemotePeerInfo*(peerRecord: PeerRecord): RemotePeerInfo =
 ## Useful for testing or internal connections
 proc toRemotePeerInfo*(peerInfo: PeerInfo): RemotePeerInfo =
   RemotePeerInfo.init(peerInfo.peerId,
-                      peerInfo.addrs,
+                      peerInfo.listenAddrs,
                       none(enr.Record), # we could generate an ENR from PeerInfo
                       peerInfo.protocols)
@@ -34,6 +34,6 @@ template nanosecondTime*(collector: Gauge, body: untyped) =
   when defined(metrics):
     let start = nowInUnixFloat()
     body
-    collector.set(nowInUnixFloat() - start)
+    metrics.set(collector, nowInUnixFloat() - start)
   else:
     body