diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim index 6b52bc919..e2a46ca1b 100644 --- a/apps/chat2/chat2.nim +++ b/apps/chat2/chat2.nim @@ -317,27 +317,19 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = if conf.logLevel != LogLevel.NONE: setLogLevel(conf.logLevel) - let natRes = setupNat( + let (extIp, extTcpPort, extUdpPort) = setupNat( conf.nat, clientId, Port(uint16(conf.tcpPort) + conf.portsShift), Port(uint16(conf.udpPort) + conf.portsShift), - ) - - if natRes.isErr(): - raise newException(ValueError, "setupNat error " & natRes.error) - - let (extIp, extTcpPort, extUdpPort) = natRes.get() + ).valueOr: + raise newException(ValueError, "setupNat error " & error) var enrBuilder = EnrBuilder.init(nodeKey) - let recordRes = enrBuilder.build() - let record = - if recordRes.isErr(): - error "failed to create enr record", error = recordRes.error - quit(QuitFailure) - else: - recordRes.get() + let record = enrBuilder.build().valueOr: + error "failed to create enr record", error = error + quit(QuitFailure) let node = block: var builder = WakuNodeBuilder.init() diff --git a/apps/chat2bridge/chat2bridge.nim b/apps/chat2bridge/chat2bridge.nim index 2d7b48cf8..9a22572cd 100644 --- a/apps/chat2bridge/chat2bridge.nim +++ b/apps/chat2bridge/chat2bridge.nim @@ -126,23 +126,20 @@ proc toMatterbridge( assert chat2Msg.isOk - let postRes = cmb.mbClient.postMessage( - text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick - ) - - if postRes.isErr() or (postRes[] == false): + if not cmb.mbClient + .postMessage(text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick) + .containsValue(true): chat2_mb_dropped.inc(labelValues = ["duplicate"]) error "Matterbridge host unreachable. Dropping message." proc pollMatterbridge(cmb: Chat2MatterBridge, handler: MbMessageHandler) {.async.} = while cmb.running: - if (let getRes = cmb.mbClient.getMessages(); getRes.isOk()): - for jsonNode in getRes[]: - await handler(jsonNode) - else: + let msg = cmb.mbClient.getMessages().valueOr: error "Matterbridge host unreachable. Sleeping before retrying." await sleepAsync(chronos.seconds(10)) - + continue + for jsonNode in msg: + await handler(jsonNode) await sleepAsync(cmb.pollPeriod) ############## @@ -252,25 +249,21 @@ when isMainModule: if conf.logLevel != LogLevel.NONE: setLogLevel(conf.logLevel) - let natRes = setupNat( + let (nodev2ExtIp, nodev2ExtPort, _) = setupNat( conf.nat, clientId, Port(uint16(conf.libp2pTcpPort) + conf.portsShift), Port(uint16(conf.udpPort) + conf.portsShift), - ) - if natRes.isErr(): - error "Error in setupNat", error = natRes.error + ).valueOr: + raise newException(ValueError, "setupNat error " & error) - # Load address configuration - let - (nodev2ExtIp, nodev2ExtPort, _) = natRes.get() - ## The following heuristic assumes that, in absence of manual - ## config, the external port is the same as the bind port. - extPort = - if nodev2ExtIp.isSome() and nodev2ExtPort.isNone(): - some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift)) - else: - nodev2ExtPort + ## The following heuristic assumes that, in absence of manual + ## config, the external port is the same as the bind port. 
+ let extPort = + if nodev2ExtIp.isSome() and nodev2ExtPort.isNone(): + some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift)) + else: + nodev2ExtPort let bridge = Chat2Matterbridge.new( mbHostUri = "http://" & $initTAddress(conf.mbHostAddress, Port(conf.mbHostPort)), diff --git a/apps/chat2mix/chat2mix.nim b/apps/chat2mix/chat2mix.nim index 2b4e0a924..5979e2936 100644 --- a/apps/chat2mix/chat2mix.nim +++ b/apps/chat2mix/chat2mix.nim @@ -175,18 +175,16 @@ proc startMetricsServer( ): Result[MetricsHttpServerRef, string] = info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort - let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort) - if metricsServerRes.isErr(): - return err("metrics HTTP server start failed: " & $metricsServerRes.error) + let server = MetricsHttpServerRef.new($serverIp, serverPort).valueOr: + return err("metrics HTTP server start failed: " & $error) - let server = metricsServerRes.value try: waitFor server.start() except CatchableError: return err("metrics HTTP server start failed: " & getCurrentExceptionMsg()) info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort - ok(metricsServerRes.value) + ok(server) proc publish(c: Chat, line: string) {.async.} = # First create a Chat2Message protobuf with this line of text @@ -333,57 +331,56 @@ proc maintainSubscription( const maxFailedServiceNodeSwitches = 10 var noFailedSubscribes = 0 var noFailedServiceNodeSwitches = 0 + const RetryWaitMs = 2.seconds # Quick retry interval + const SubscriptionMaintenanceMs = 30.seconds # Subscription maintenance interval while true: info "maintaining subscription at", peer = constructMultiaddrStr(actualFilterPeer) # First use filter-ping to check if we have an active subscription - let pingRes = await wakuNode.wakuFilterClient.ping(actualFilterPeer) - if pingRes.isErr(): - # No subscription found. Let's subscribe. - error "ping failed.", err = pingRes.error - trace "no subscription found. Sending subscribe request" + let pingErr = (await wakuNode.wakuFilterClient.ping(actualFilterPeer)).errorOr: + await sleepAsync(SubscriptionMaintenanceMs) + info "subscription is live." + continue - let subscribeRes = await wakuNode.filterSubscribe( + # No subscription found. Let's subscribe. + error "ping failed.", error = pingErr + trace "no subscription found. Sending subscribe request" + + let subscribeErr = ( + await wakuNode.filterSubscribe( some(filterPubsubTopic), filterContentTopic, actualFilterPeer ) + ).errorOr: + await sleepAsync(SubscriptionMaintenanceMs) + if noFailedSubscribes > 0: + noFailedSubscribes -= 1 + notice "subscribe request successful." 
+      continue

-    if subscribeRes.isErr():
-      noFailedSubscribes += 1
-      error "Subscribe request failed.",
-        err = subscribeRes.error,
-        peer = actualFilterPeer,
-        failCount = noFailedSubscribes
+    noFailedSubscribes += 1
+    error "Subscribe request failed.",
+      error = subscribeErr, peer = actualFilterPeer, failCount = noFailedSubscribes

-      # TODO: disconnet from failed actualFilterPeer
-      # asyncSpawn(wakuNode.peerManager.switch.disconnect(p))
-      # wakunode.peerManager.peerStore.delete(actualFilterPeer)
+    # TODO: disconnect from failed actualFilterPeer
+    # asyncSpawn(wakuNode.peerManager.switch.disconnect(p))
+    # wakunode.peerManager.peerStore.delete(actualFilterPeer)

-      if noFailedSubscribes < maxFailedSubscribes:
-        await sleepAsync(2000) # Wait a bit before retrying
-        continue
-      elif not preventPeerSwitch:
-        let peerOpt = selectRandomServicePeer(
-          wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec
-        )
-        peerOpt.isOkOr:
-          error "Failed to find new service peer. Exiting."
-          noFailedServiceNodeSwitches += 1
-          break
+    if noFailedSubscribes < maxFailedSubscribes:
+      await sleepAsync(RetryWaitMs) # Wait a bit before retrying
+    elif not preventPeerSwitch:
+      # try again with new peer without delay
+      actualFilterPeer = selectRandomServicePeer(
+        wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec
+      ).valueOr:
+        error "Failed to find new service peer. Exiting."
+        noFailedServiceNodeSwitches += 1
+        break

-        actualFilterPeer = peerOpt.get()
-        info "Found new peer for codec",
-          codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer)
+      info "Found new peer for codec",
+        codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer)

-        noFailedSubscribes = 0
-        continue # try again with new peer without delay
-      else:
-        if noFailedSubscribes > 0:
-          noFailedSubscribes -= 1
-
-        notice "subscribe request successful."
+      noFailedSubscribes = 0
     else:
-      info "subscription is live."
- - await sleepAsync(30000) # Subscription maintenance interval + await sleepAsync(SubscriptionMaintenanceMs) {.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError @@ -401,17 +398,13 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = if conf.logLevel != LogLevel.NONE: setLogLevel(conf.logLevel) - let natRes = setupNat( + let (extIp, extTcpPort, extUdpPort) = setupNat( conf.nat, clientId, Port(uint16(conf.tcpPort) + conf.portsShift), Port(uint16(conf.udpPort) + conf.portsShift), - ) - - if natRes.isErr(): - raise newException(ValueError, "setupNat error " & natRes.error) - - let (extIp, extTcpPort, extUdpPort) = natRes.get() + ).valueOr: + raise newException(ValueError, "setupNat error " & error) var enrBuilder = EnrBuilder.init(nodeKey) @@ -421,13 +414,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = error "failed to add sharded topics to ENR", error = error quit(QuitFailure) - let recordRes = enrBuilder.build() - let record = - if recordRes.isErr(): - error "failed to create enr record", error = recordRes.error - quit(QuitFailure) - else: - recordRes.get() + let record = enrBuilder.build().valueOr: + error "failed to create enr record", error = error + quit(QuitFailure) let node = block: var builder = WakuNodeBuilder.init() diff --git a/apps/liteprotocoltester/diagnose_connections.nim b/apps/liteprotocoltester/diagnose_connections.nim index f595b4e03..15c0768f4 100644 --- a/apps/liteprotocoltester/diagnose_connections.nim +++ b/apps/liteprotocoltester/diagnose_connections.nim @@ -59,7 +59,4 @@ proc logSelfPeers*(pm: PeerManager) = {allPeers(pm)} *------------------------------------------------------------------------------------------*""".fmt() - if printable.isErr(): - echo "Error while printing statistics: " & printable.error().msg - else: - echo printable.get() + echo printable.valueOr("Error while printing statistics: " & error.msg) diff --git a/apps/liteprotocoltester/liteprotocoltester.nim b/apps/liteprotocoltester/liteprotocoltester.nim index 90c355a25..7778183d1 100644 --- a/apps/liteprotocoltester/liteprotocoltester.nim +++ b/apps/liteprotocoltester/liteprotocoltester.nim @@ -49,13 +49,10 @@ when isMainModule: const versionString = "version / git commit hash: " & waku_factory.git_version - let confRes = LiteProtocolTesterConf.load(version = versionString) - if confRes.isErr(): - error "failure while loading the configuration", error = confRes.error + let conf = LiteProtocolTesterConf.load(version = versionString).valueOr: + error "failure while loading the configuration", error = error quit(QuitFailure) - var conf = confRes.get() - ## Logging setup logging.setupLog(conf.logLevel, conf.logFormat) @@ -187,7 +184,7 @@ when isMainModule: error "Service node not found in time via PX" quit(QuitFailure) - if futForServiceNode.read().isErr(): + futForServiceNode.read().isOkOr: error "Service node for test not found via PX" quit(QuitFailure) diff --git a/apps/liteprotocoltester/publisher.nim b/apps/liteprotocoltester/publisher.nim index 1debfdf56..0df3f3e3e 100644 --- a/apps/liteprotocoltester/publisher.nim +++ b/apps/liteprotocoltester/publisher.nim @@ -89,10 +89,7 @@ proc reportSentMessages() = |{numMessagesToSend+failedToSendCount:>11} |{messagesSent:>11} |{failedToSendCount:>11} | *----------------------------------------*""".fmt() - if report.isErr: - echo "Error while printing statistics" - else: - echo report.get() + echo report.valueOr("Error while printing statistics") echo 
"*--------------------------------------------------------------------------------------------------*" echo "| Failure cause | count |" diff --git a/apps/liteprotocoltester/receiver.nim b/apps/liteprotocoltester/receiver.nim index 0e6638c61..9792549ca 100644 --- a/apps/liteprotocoltester/receiver.nim +++ b/apps/liteprotocoltester/receiver.nim @@ -54,64 +54,65 @@ proc maintainSubscription( var noFailedSubscribes = 0 var noFailedServiceNodeSwitches = 0 var isFirstPingOnNewPeer = true + const RetryWaitMs = 2.seconds # Quick retry interval + const SubscriptionMaintenanceMs = 30.seconds # Subscription maintenance interval while true: info "maintaining subscription at", peer = constructMultiaddrStr(actualFilterPeer) # First use filter-ping to check if we have an active subscription - let pingRes = await wakuNode.wakuFilterClient.ping(actualFilterPeer) - if pingRes.isErr(): - if isFirstPingOnNewPeer == false: - # Very first ping expected to fail as we have not yet subscribed at all - lpt_receiver_lost_subscription_count.inc() - isFirstPingOnNewPeer = false - # No subscription found. Let's subscribe. - error "ping failed.", err = pingRes.error - trace "no subscription found. Sending subscribe request" + let pingErr = (await wakuNode.wakuFilterClient.ping(actualFilterPeer)).errorOr: + await sleepAsync(SubscriptionMaintenanceMs) + info "subscription is live." + continue - let subscribeRes = await wakuNode.filterSubscribe( + if isFirstPingOnNewPeer == false: + # Very first ping expected to fail as we have not yet subscribed at all + lpt_receiver_lost_subscription_count.inc() + isFirstPingOnNewPeer = false + # No subscription found. Let's subscribe. + error "ping failed.", error = pingErr + trace "no subscription found. Sending subscribe request" + + let subscribeErr = ( + await wakuNode.filterSubscribe( some(filterPubsubTopic), filterContentTopic, actualFilterPeer ) + ).errorOr: + await sleepAsync(subscriptionMaintenanceMs) + if noFailedSubscribes > 0: + noFailedSubscribes -= 1 + notice "subscribe request successful." + continue - if subscribeRes.isErr(): - noFailedSubscribes += 1 - lpt_service_peer_failure_count.inc( - labelValues = ["receiver", actualFilterPeer.getAgent()] - ) - error "Subscribe request failed.", - err = subscribeRes.error, - peer = actualFilterPeer, - failCount = noFailedSubscribes + noFailedSubscribes += 1 + lpt_service_peer_failure_count.inc( + labelValues = ["receiver", actualFilterPeer.getAgent()] + ) + error "Subscribe request failed.", + err = subscribeErr, peer = actualFilterPeer, failCount = noFailedSubscribes - # TODO: disconnet from failed actualFilterPeer - # asyncSpawn(wakuNode.peerManager.switch.disconnect(p)) - # wakunode.peerManager.peerStore.delete(actualFilterPeer) + # TODO: disconnet from failed actualFilterPeer + # asyncSpawn(wakuNode.peerManager.switch.disconnect(p)) + # wakunode.peerManager.peerStore.delete(actualFilterPeer) - if noFailedSubscribes < maxFailedSubscribes: - await sleepAsync(2.seconds) # Wait a bit before retrying - continue - elif not preventPeerSwitch: - actualFilterPeer = selectRandomServicePeer( - wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec - ).valueOr: - error "Failed to find new service peer. Exiting." 
- noFailedServiceNodeSwitches += 1 - break + if noFailedSubscribes < maxFailedSubscribes: + await sleepAsync(RetryWaitMs) # Wait a bit before retrying + elif not preventPeerSwitch: + # try again with new peer without delay + actualFilterPeer = selectRandomServicePeer( + wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec + ).valueOr: + error "Failed to find new service peer. Exiting." + noFailedServiceNodeSwitches += 1 + break - info "Found new peer for codec", - codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer) + info "Found new peer for codec", + codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer) - noFailedSubscribes = 0 - lpt_change_service_peer_count.inc(labelValues = ["receiver"]) - isFirstPingOnNewPeer = true - continue # try again with new peer without delay - else: - if noFailedSubscribes > 0: - noFailedSubscribes -= 1 - - notice "subscribe request successful." + noFailedSubscribes = 0 + lpt_change_service_peer_count.inc(labelValues = ["receiver"]) + isFirstPingOnNewPeer = true else: - info "subscription is live." - - await sleepAsync(30.seconds) # Subscription maintenance interval + await sleepAsync(SubscriptionMaintenanceMs) proc setupAndListen*( wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo diff --git a/apps/liteprotocoltester/service_peer_management.nim b/apps/liteprotocoltester/service_peer_management.nim index 747ace86b..053445740 100644 --- a/apps/liteprotocoltester/service_peer_management.nim +++ b/apps/liteprotocoltester/service_peer_management.nim @@ -181,7 +181,7 @@ proc pxLookupServiceNode*( if not await futPeers.withTimeout(30.seconds): notice "Cannot get peers from PX", round = 5 - trialCount else: - if futPeers.value().isErr(): + futPeers.value().isOkOr: info "PeerExchange reported error", error = futPeers.read().error return err() diff --git a/apps/liteprotocoltester/statistics.nim b/apps/liteprotocoltester/statistics.nim index 8322edd8f..5ca215b2c 100644 --- a/apps/liteprotocoltester/statistics.nim +++ b/apps/liteprotocoltester/statistics.nim @@ -114,12 +114,7 @@ proc addMessage*( if not self.contains(peerId): self[peerId] = Statistics.init() - let shortSenderId = block: - let senderPeer = PeerId.init(msg.sender) - if senderPeer.isErr(): - msg.sender - else: - senderPeer.get().shortLog() + let shortSenderId = PeerId.init(msg.sender).map(p => p.shortLog()).valueOr(msg.sender) discard catch: self[peerId].addMessage(shortSenderId, msg, msgHash) @@ -220,10 +215,7 @@ proc echoStat*(self: Statistics, peerId: string) = | {self.missingIndices()} | *------------------------------------------------------------------------------------------*""".fmt() - if printable.isErr(): - echo "Error while printing statistics: " & printable.error().msg - else: - echo printable.get() + echo printable.valueOr("Error while printing statistics: " & error.msg) proc jsonStat*(self: Statistics): string = let minL, maxL, avgL = self.calcLatency() @@ -243,20 +235,18 @@ proc jsonStat*(self: Statistics): string = }}, "lostIndices": {self.missingIndices()} }}""".fmt() - if json.isErr: - return "{\"result:\": \"" & json.error.msg & "\"}" - return json.get() + return json.valueOr("{\"result:\": \"" & error.msg & "\"}") proc echoStats*(self: var PerPeerStatistics) = for peerId, stats in self.pairs: let peerLine = catch: "Receiver statistics from peer {peerId}".fmt() - if peerLine.isErr: + peerLine.isOkOr: echo "Error while printing statistics" - else: - echo peerLine.get() - stats.echoStat(peerId) + 
continue + echo peerLine.get() + stats.echoStat(peerId) proc jsonStats*(self: PerPeerStatistics): string = try: diff --git a/apps/networkmonitor/networkmonitor.nim b/apps/networkmonitor/networkmonitor.nim index ad7732db2..23607b118 100644 --- a/apps/networkmonitor/networkmonitor.nim +++ b/apps/networkmonitor/networkmonitor.nim @@ -443,12 +443,8 @@ proc initAndStartApp( error "failed to add sharded topics to ENR", error = error return err("failed to add sharded topics to ENR: " & $error) - let recordRes = builder.build() - let record = - if recordRes.isErr(): - return err("cannot build record: " & $recordRes.error) - else: - recordRes.get() + let record = builder.build().valueOr: + return err("cannot build record: " & $error) var nodeBuilder = WakuNodeBuilder.init() @@ -461,21 +457,15 @@ proc initAndStartApp( relayServiceRatio = "13.33:86.67", shardAware = true, ) - let res = nodeBuilder.withNetworkConfigurationDetails(bindIp, nodeTcpPort) - if res.isErr(): - return err("node building error" & $res.error) + nodeBuilder.withNetworkConfigurationDetails(bindIp, nodeTcpPort).isOkOr: + return err("node building error" & $error) - let nodeRes = nodeBuilder.build() - let node = - if nodeRes.isErr(): - return err("node building error" & $res.error) - else: - nodeRes.get() + let node = nodeBuilder.build().valueOr: + return err("node building error" & $error) - var discv5BootstrapEnrsRes = await getBootstrapFromDiscDns(conf) - if discv5BootstrapEnrsRes.isErr(): + var discv5BootstrapEnrs = (await getBootstrapFromDiscDns(conf)).valueOr: error("failed discovering peers from DNS") - var discv5BootstrapEnrs = discv5BootstrapEnrsRes.get() + quit(QuitFailure) # parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq for enrUri in conf.bootstrapNodes: @@ -553,12 +543,10 @@ proc subscribeAndHandleMessages( when isMainModule: # known issue: confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError {.pop.} - let confRes = NetworkMonitorConf.loadConfig() - if confRes.isErr(): - error "could not load cli variables", err = confRes.error - quit(1) + var conf = NetworkMonitorConf.loadConfig().valueOr: + error "could not load cli variables", error = error + quit(QuitFailure) - var conf = confRes.get() info "cli flags", conf = conf if conf.clusterId == 1: @@ -586,37 +574,30 @@ when isMainModule: # start metrics server if conf.metricsServer: - let res = - startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort)) - if res.isErr(): - error "could not start metrics server", err = res.error - quit(1) + startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort)).isOkOr: + error "could not start metrics server", error = error + quit(QuitFailure) # start rest server for custom metrics - let res = startRestApiServer(conf, allPeersInfo, msgPerContentTopic) - if res.isErr(): - error "could not start rest api server", err = res.error - quit(1) + startRestApiServer(conf, allPeersInfo, msgPerContentTopic).isOkOr: + error "could not start rest api server", error = error + quit(QuitFailure) # create a rest client - let clientRest = - RestClientRef.new(url = "http://ip-api.com", connectTimeout = ctime.seconds(2)) - if clientRest.isErr(): - error "could not start rest api client", err = res.error - quit(1) - let restClient = clientRest.get() + let restClient = RestClientRef.new( + url = "http://ip-api.com", connectTimeout = ctime.seconds(2) + ).valueOr: + error "could not start rest api client", error = error + quit(QuitFailure) # start waku node 
- let nodeRes = waitFor initAndStartApp(conf) - if nodeRes.isErr(): - error "could not start node" - quit 1 - - let (node, discv5) = nodeRes.get() + let (node, discv5) = (waitFor initAndStartApp(conf)).valueOr: + error "could not start node", error = error + quit(QuitFailure) (waitFor node.mountRelay()).isOkOr: - error "failed to mount waku relay protocol: ", err = error - quit 1 + error "failed to mount waku relay protocol: ", error = error + quit(QuitFailure) waitFor node.mountLibp2pPing() @@ -640,12 +621,12 @@ when isMainModule: try: waitFor node.mountRlnRelay(rlnConf) except CatchableError: - error "failed to setup RLN", err = getCurrentExceptionMsg() - quit 1 + error "failed to setup RLN", error = getCurrentExceptionMsg() + quit(QuitFailure) node.mountMetadata(conf.clusterId, conf.shards).isOkOr: - error "failed to mount waku metadata protocol: ", err = error - quit 1 + error "failed to mount waku metadata protocol: ", error = error + quit(QuitFailure) for shard in conf.shards: # Subscribe the node to the shards, to count messages diff --git a/apps/wakucanary/wakucanary.nim b/apps/wakucanary/wakucanary.nim index 337896d39..bcff9653e 100644 --- a/apps/wakucanary/wakucanary.nim +++ b/apps/wakucanary/wakucanary.nim @@ -181,13 +181,10 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = protocols = conf.protocols, logLevel = conf.logLevel - let peerRes = parsePeerInfo(conf.address) - if peerRes.isErr(): - error "Couldn't parse 'conf.address'", error = peerRes.error + let peer = parsePeerInfo(conf.address).valueOr: + error "Couldn't parse 'conf.address'", error = error quit(QuitFailure) - let peer = peerRes.value - let nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[] bindIp = parseIpAddress("0.0.0.0") @@ -225,13 +222,9 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = error "could not initialize ENR with shards", error quit(QuitFailure) - let recordRes = enrBuilder.build() - let record = - if recordRes.isErr(): - error "failed to create enr record", error = recordRes.error - quit(QuitFailure) - else: - recordRes.get() + let record = enrBuilder.build().valueOr: + error "failed to create enr record", error = error + quit(QuitFailure) if isWss and (conf.websocketSecureKeyPath.len == 0 or conf.websocketSecureCertPath.len == 0): diff --git a/examples/filter_subscriber.nim b/examples/filter_subscriber.nim index e4e26bdb7..03a5de4eb 100644 --- a/examples/filter_subscriber.nim +++ b/examples/filter_subscriber.nim @@ -62,13 +62,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} = "Building ENR with relay sharding failed" ) - let recordRes = enrBuilder.build() - let record = - if recordRes.isErr(): - error "failed to create enr record", error = recordRes.error - quit(QuitFailure) - else: - recordRes.get() + let record = enrBuilder.build().valueOr: + error "failed to create enr record", error = error + quit(QuitFailure) var builder = WakuNodeBuilder.init() builder.withNodeKey(nodeKey) @@ -92,20 +88,18 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} = while true: notice "maintaining subscription" # First use filter-ping to check if we have an active subscription - let pingRes = await node.wakuFilterClient.ping(filterPeer) - if pingRes.isErr(): + if (await node.wakuFilterClient.ping(filterPeer)).isErr(): # No subscription found. Let's subscribe. notice "no subscription found. 
Sending subscribe request"

-      let subscribeRes = await node.wakuFilterClient.subscribe(
-        filterPeer, FilterPubsubTopic, @[FilterContentTopic]
-      )
-
-      if subscribeRes.isErr():
-        notice "subscribe request failed. Quitting.", err = subscribeRes.error
+      (
+        await node.wakuFilterClient.subscribe(
+          filterPeer, FilterPubsubTopic, @[FilterContentTopic]
+        )
+      ).isOkOr:
+        notice "subscribe request failed. Quitting.", error = error
         break
-      else:
-        notice "subscribe request successful."
+      notice "subscribe request successful."
     else:
       notice "subscription found."

diff --git a/examples/lightpush_publisher.nim b/examples/lightpush_publisher.nim
index 70ebd9c53..c7eacdd30 100644
--- a/examples/lightpush_publisher.nim
+++ b/examples/lightpush_publisher.nim
@@ -54,13 +54,9 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
       "Building ENR with relay sharding failed"
     )

-  let recordRes = enrBuilder.build()
-  let record =
-    if recordRes.isErr():
-      error "failed to create enr record", error = recordRes.error
-      quit(QuitFailure)
-    else:
-      recordRes.get()
+  let record = enrBuilder.build().valueOr:
+    error "failed to create enr record", error = error
+    quit(QuitFailure)

   var builder = WakuNodeBuilder.init()
   builder.withNodeKey(nodeKey)
diff --git a/examples/publisher.nim b/examples/publisher.nim
index 8c2d03679..6f5d34bc4 100644
--- a/examples/publisher.nim
+++ b/examples/publisher.nim
@@ -49,13 +49,9 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =

   var enrBuilder = EnrBuilder.init(nodeKey)

-  let recordRes = enrBuilder.build()
-  let record =
-    if recordRes.isErr():
-      error "failed to create enr record", error = recordRes.error
-      quit(QuitFailure)
-    else:
-      recordRes.get()
+  let record = enrBuilder.build().valueOr:
+    error "failed to create enr record", error = error
+    quit(QuitFailure)

   var builder = WakuNodeBuilder.init()
   builder.withNodeKey(nodeKey)
diff --git a/examples/subscriber.nim b/examples/subscriber.nim
index fb040b05a..ce64bb803 100644
--- a/examples/subscriber.nim
+++ b/examples/subscriber.nim
@@ -47,13 +47,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =

   var enrBuilder = EnrBuilder.init(nodeKey)

-  let recordRes = enrBuilder.build()
-  let record =
-    if recordRes.isErr():
-      error "failed to create enr record", error = recordRes.error
-      quit(QuitFailure)
-    else:
-      recordRes.get()
+  let record = enrBuilder.build().valueOr:
+    error "failed to create enr record", error = error
+    quit(QuitFailure)

   var builder = WakuNodeBuilder.init()
   builder.withNodeKey(nodeKey)
diff --git a/examples/wakustealthcommitments/node_spec.nim b/examples/wakustealthcommitments/node_spec.nim
index b935f9ab1..21286340e 100644
--- a/examples/wakustealthcommitments/node_spec.nim
+++ b/examples/wakustealthcommitments/node_spec.nim
@@ -18,13 +18,10 @@ proc setup*(): Waku =
   const versionString = "version / git commit hash: " & waku.git_version
   let rng = crypto.newRng()

-  let confRes = WakuNodeConf.load(version = versionString)
-  if confRes.isErr():
-    error "failure while loading the configuration", error = $confRes.error
+  var conf = WakuNodeConf.load(version = versionString).valueOr:
+    error "failure while loading the configuration", error = $error
     quit(QuitFailure)

-  var conf = confRes.get()
-
   let twnNetworkConf = NetworkConf.TheWakuNetworkConf()
   if len(conf.shards) != 0:
     conf.pubsubTopics = conf.shards.mapIt(twnNetworkConf.pubsubTopics[it.uint16])
diff --git a/examples/wakustealthcommitments/stealth_commitment_protocol.nim b/examples/wakustealthcommitments/stealth_commitment_protocol.nim
index 
9a2045a67..63311bf7b 100644 --- a/examples/wakustealthcommitments/stealth_commitment_protocol.nim +++ b/examples/wakustealthcommitments/stealth_commitment_protocol.nim @@ -95,61 +95,54 @@ proc sendResponse*( type SCPHandler* = proc(msg: WakuMessage): Future[void] {.async.} proc getSCPHandler(self: StealthCommitmentProtocol): SCPHandler = let handler = proc(msg: WakuMessage): Future[void] {.async.} = - let decodedRes = WakuStealthCommitmentMsg.decode(msg.payload) - if decodedRes.isErr(): - error "could not decode scp message" - let decoded = decodedRes.get() + let decoded = WakuStealthCommitmentMsg.decode(msg.payload).valueOr: + error "could not decode scp message", error = error + quit(QuitFailure) if decoded.request == false: # check if the generated stealth commitment belongs to the receiver # if not, continue - let ephemeralPubKeyRes = - deserialize(StealthCommitmentFFI.PublicKey, decoded.ephemeralPubKey.get()) - if ephemeralPubKeyRes.isErr(): - error "could not deserialize ephemeral public key: ", - err = ephemeralPubKeyRes.error() - let ephemeralPubKey = ephemeralPubKeyRes.get() - let stealthCommitmentPrivateKeyRes = StealthCommitmentFFI.generateStealthPrivateKey( + let ephemeralPubKey = deserialize( + StealthCommitmentFFI.PublicKey, decoded.ephemeralPubKey.get() + ).valueOr: + error "could not deserialize ephemeral public key: ", error = error + quit(QuitFailure) + let stealthCommitmentPrivateKey = StealthCommitmentFFI.generateStealthPrivateKey( ephemeralPubKey, self.spendingKeyPair.privateKey, self.viewingKeyPair.privateKey, decoded.viewTag.get(), - ) - if stealthCommitmentPrivateKeyRes.isErr(): - info "received stealth commitment does not belong to the receiver: ", - err = stealthCommitmentPrivateKeyRes.error() - - let stealthCommitmentPrivateKey = stealthCommitmentPrivateKeyRes.get() + ).valueOr: + error "received stealth commitment does not belong to the receiver: ", + error = error + quit(QuitFailure) info "received stealth commitment belongs to the receiver: ", stealthCommitmentPrivateKey, stealthCommitmentPubKey = decoded.stealthCommitment.get() return # send response # deseralize the keys - let spendingKeyRes = - deserialize(StealthCommitmentFFI.PublicKey, decoded.spendingPubKey.get()) - if spendingKeyRes.isErr(): - error "could not deserialize spending key: ", err = spendingKeyRes.error() - let spendingKey = spendingKeyRes.get() - let viewingKeyRes = - (deserialize(StealthCommitmentFFI.PublicKey, decoded.viewingPubKey.get())) - if viewingKeyRes.isErr(): - error "could not deserialize viewing key: ", err = viewingKeyRes.error() - let viewingKey = viewingKeyRes.get() + let spendingKey = deserialize( + StealthCommitmentFFI.PublicKey, decoded.spendingPubKey.get() + ).valueOr: + error "could not deserialize spending key: ", error = error + quit(QuitFailure) + let viewingKey = ( + deserialize(StealthCommitmentFFI.PublicKey, decoded.viewingPubKey.get()) + ).valueOr: + error "could not deserialize viewing key: ", error = error + quit(QuitFailure) info "received spending key", spendingKey info "received viewing key", viewingKey - let ephemeralKeyPairRes = StealthCommitmentFFI.generateKeyPair() - if ephemeralKeyPairRes.isErr(): - error "could not generate ephemeral key pair: ", err = ephemeralKeyPairRes.error() - let ephemeralKeyPair = ephemeralKeyPairRes.get() + let ephemeralKeyPair = StealthCommitmentFFI.generateKeyPair().valueOr: + error "could not generate ephemeral key pair: ", error = error + quit(QuitFailure) - let stealthCommitmentRes = 
StealthCommitmentFFI.generateStealthCommitment( + let stealthCommitment = StealthCommitmentFFI.generateStealthCommitment( spendingKey, viewingKey, ephemeralKeyPair.privateKey - ) - if stealthCommitmentRes.isErr(): - error "could not generate stealth commitment: ", - err = stealthCommitmentRes.error() - let stealthCommitment = stealthCommitmentRes.get() + ).valueOr: + error "could not generate stealth commitment: ", error = error + quit(QuitFailure) ( await self.sendResponse( @@ -157,7 +150,7 @@ proc getSCPHandler(self: StealthCommitmentProtocol): SCPHandler = stealthCommitment.viewTag, ) ).isOkOr: - error "could not send response: ", err = $error + error "could not send response: ", error = $error return handler diff --git a/library/waku_context.nim b/library/waku_context.nim index 64a9e3466..ab4b996af 100644 --- a/library/waku_context.nim +++ b/library/waku_context.nim @@ -96,18 +96,16 @@ proc sendRequestToWakuThread*( deallocShared(req) return err("Couldn't send a request to the waku thread: " & $req[]) - let fireSyncRes = ctx.reqSignal.fireSync() - if fireSyncRes.isErr(): + let fireSync = ctx.reqSignal.fireSync().valueOr: deallocShared(req) - return err("failed fireSync: " & $fireSyncRes.error) + return err("failed fireSync: " & $error) - if fireSyncRes.get() == false: + if not fireSync: deallocShared(req) return err("Couldn't fireSync in time") ## wait until the Waku Thread properly received the request - let res = ctx.reqReceivedSignal.waitSync(timeout) - if res.isErr(): + ctx.reqReceivedSignal.waitSync(timeout).isOkOr: deallocShared(req) return err("Couldn't receive reqReceivedSignal signal") @@ -176,9 +174,8 @@ proc wakuThreadBody(ctx: ptr WakuContext) {.thread.} = ## Handle the request asyncSpawn WakuThreadRequest.process(request, addr waku) - let fireRes = ctx.reqReceivedSignal.fireSync() - if fireRes.isErr(): - error "could not fireSync back to requester thread", error = fireRes.error + ctx.reqReceivedSignal.fireSync().isOkOr: + error "could not fireSync back to requester thread", error = error waitFor wakuRun(ctx) diff --git a/library/waku_thread_requests/requests/ping_request.nim b/library/waku_thread_requests/requests/ping_request.nim index 53d33968e..716b9ed68 100644 --- a/library/waku_thread_requests/requests/ping_request.nim +++ b/library/waku_thread_requests/requests/ping_request.nim @@ -44,13 +44,11 @@ proc process*( let pingFuture = ping() let pingRTT: Duration = if self[].timeout == chronos.milliseconds(0): # No timeout expected - (await pingFuture).valueOr: - return err(error) + ?(await pingFuture) else: let timedOut = not (await pingFuture.withTimeout(self[].timeout)) if timedOut: return err("ping timed out") - pingFuture.read().valueOr: - return err(error) + ?(pingFuture.read()) ok($(pingRTT.nanos)) diff --git a/tests/wakunode_rest/test_rest_store.nim b/tests/wakunode_rest/test_rest_store.nim index b8882328b..b86513f0d 100644 --- a/tests/wakunode_rest/test_rest_store.nim +++ b/tests/wakunode_rest/test_rest_store.nim @@ -485,7 +485,7 @@ procSuite "Waku Rest API - Store v3": $response.contentType == $MIMETYPE_TEXT response.data.messages.len == 0 response.data.statusDesc == - "Failed parsing remote peer info [MultiAddress.init [multiaddress: Invalid MultiAddress, must start with `/`]]" + "Failed parsing remote peer info: MultiAddress.init [multiaddress: Invalid MultiAddress, must start with `/`]" await restServer.stop() await restServer.closeWait() diff --git a/tools/confutils/cli_args.nim b/tools/confutils/cli_args.nim index fd1e3a576..fb8437299 100644 --- 
a/tools/confutils/cli_args.nim +++ b/tools/confutils/cli_args.nim @@ -725,12 +725,11 @@ proc parseCmdArg*(T: type ProtectedShard, p: string): T = raise newException( ValueError, "Invalid format for protected shard expected shard:publickey" ) - let publicKey = secp256k1.SkPublicKey.fromHex(elements[1]) - if publicKey.isErr: + let publicKey = secp256k1.SkPublicKey.fromHex(elements[1]).valueOr: raise newException(ValueError, "Invalid public key") if isNumber(elements[0]): - return ProtectedShard(shard: uint16.parseCmdArg(elements[0]), key: publicKey.get()) + return ProtectedShard(shard: uint16.parseCmdArg(elements[0]), key: publicKey) # TODO: Remove when removing protected-topic configuration let shard = RelayShard.parse(elements[0]).valueOr: @@ -738,7 +737,7 @@ proc parseCmdArg*(T: type ProtectedShard, p: string): T = ValueError, "Invalid pubsub topic. Pubsub topics must be in the format /waku/2/rs//", ) - return ProtectedShard(shard: shard.shardId, key: publicKey.get()) + return ProtectedShard(shard: shard.shardId, key: publicKey) proc completeCmdArg*(T: type ProtectedShard, val: string): seq[string] = return @[] diff --git a/tools/rln_keystore_generator/rln_keystore_generator.nim b/tools/rln_keystore_generator/rln_keystore_generator.nim index 36a3759c9..85df37982 100644 --- a/tools/rln_keystore_generator/rln_keystore_generator.nim +++ b/tools/rln_keystore_generator/rln_keystore_generator.nim @@ -31,12 +31,10 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) = trace "configuration", conf = $conf # 2. generate credentials - let credentialRes = membershipKeyGen() - if credentialRes.isErr(): - error "failure while generating credentials", error = credentialRes.error - quit(1) + let credential = membershipKeyGen().valueOr: + error "failure while generating credentials", error = error + quit(QuitFailure) - let credential = credentialRes.get() info "credentials", idTrapdoor = credential.idTrapdoor.inHex(), idNullifier = credential.idNullifier.inHex(), @@ -45,7 +43,7 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) = if not conf.execute: info "not executing, exiting" - quit(0) + quit(QuitSuccess) var onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} = ## Action to be taken when an internal error occurs during the node run. @@ -66,12 +64,12 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) = try: (waitFor groupManager.init()).isOkOr: error "failure while initializing OnchainGroupManager", error = $error - quit(1) + quit(QuitFailure) # handling the exception is required since waitFor raises an exception except Exception, CatchableError: error "failure while initializing OnchainGroupManager", error = getCurrentExceptionMsg() - quit(1) + quit(QuitFailure) # 4. 
register on-chain try: @@ -79,7 +77,7 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) = except Exception, CatchableError: error "failure while registering credentials on-chain", error = getCurrentExceptionMsg() - quit(1) + quit(QuitFailure) info "Transaction hash", txHash = groupManager.registrationTxHash.get() @@ -99,11 +97,9 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) = userMessageLimit: conf.userMessageLimit, ) - let persistRes = - addMembershipCredentials(conf.credPath, keystoreCred, conf.credPassword, RLNAppInfo) - if persistRes.isErr(): - error "failed to persist credentials", error = persistRes.error - quit(1) + addMembershipCredentials(conf.credPath, keystoreCred, conf.credPassword, RLNAppInfo).isOkOr: + error "failed to persist credentials", error = error + quit(QuitFailure) info "credentials persisted", path = conf.credPath @@ -111,5 +107,5 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) = waitFor groupManager.stop() except CatchableError: error "failure while stopping OnchainGroupManager", error = getCurrentExceptionMsg() - quit(0) # 0 because we already registered on-chain - quit(0) + quit(QuitSuccess) # 0 because we already registered on-chain + quit(QuitSuccess) diff --git a/waku/common/databases/db_postgres/dbconn.nim b/waku/common/databases/db_postgres/dbconn.nim index ee758730a..a6c237ae5 100644 --- a/waku/common/databases/db_postgres/dbconn.nim +++ b/waku/common/databases/db_postgres/dbconn.nim @@ -63,9 +63,8 @@ proc openDbConn(connString: string): Result[DbConn, string] = return err("exception opening new connection: " & getCurrentExceptionMsg()) if conn.status != CONNECTION_OK: - let checkRes = conn.check() - if checkRes.isErr(): - return err("failed to connect to database: " & checkRes.error) + conn.check().isOkOr: + return err("failed to connect to database: " & error) return err("unknown reason") diff --git a/waku/common/databases/db_postgres/pgasyncpool.nim b/waku/common/databases/db_postgres/pgasyncpool.nim index 5f8bf40be..0b298084e 100644 --- a/waku/common/databases/db_postgres/pgasyncpool.nim +++ b/waku/common/databases/db_postgres/pgasyncpool.nim @@ -174,8 +174,8 @@ proc runStmt*( let len = paramValues.len discard dbConnWrapper.getDbConn().prepare(stmtName, sql(stmtDefinition), len) - if res.isErr(): - return err("failed prepare in runStmt: " & res.error.msg) + res.isOkOr: + return err("failed prepare in runStmt: " & error.msg) pool.conns[connIndex].inclPreparedStmt(stmtName) diff --git a/waku/common/databases/db_sqlite.nim b/waku/common/databases/db_sqlite.nim index a28668cde..e398ea5ac 100644 --- a/waku/common/databases/db_sqlite.nim +++ b/waku/common/databases/db_sqlite.nim @@ -265,8 +265,7 @@ proc getPageSize*(db: SqliteDatabase): DatabaseResult[int64] = proc handler(s: RawStmtPtr) = size = sqlite3_column_int64(s, 0) - let res = db.query("PRAGMA page_size;", handler) - if res.isErr(): + db.query("PRAGMA page_size;", handler).isOkOr: return err("failed to get page_size") return ok(size) @@ -277,8 +276,7 @@ proc getFreelistCount*(db: SqliteDatabase): DatabaseResult[int64] = proc handler(s: RawStmtPtr) = count = sqlite3_column_int64(s, 0) - let res = db.query("PRAGMA freelist_count;", handler) - if res.isErr(): + db.query("PRAGMA freelist_count;", handler).isOkOr: return err("failed to get freelist_count") return ok(count) @@ -289,8 +287,7 @@ proc getPageCount*(db: SqliteDatabase): DatabaseResult[int64] = proc handler(s: RawStmtPtr) = count = sqlite3_column_int64(s, 0) - let res = db.query("PRAGMA 
page_count;", handler) - if res.isErr(): + db.query("PRAGMA page_count;", handler).isOkOr: return err("failed to get page_count") return ok(count) @@ -319,8 +316,7 @@ proc gatherSqlitePageStats*(db: SqliteDatabase): DatabaseResult[(int64, int64, i proc vacuum*(db: SqliteDatabase): DatabaseResult[void] = ## The VACUUM command rebuilds the database file, repacking it into a minimal amount of disk space. - let res = db.query("VACUUM;", NoopRowHandler) - if res.isErr(): + db.query("VACUUM;", NoopRowHandler).isOkOr: return err("vacuum failed") return ok() @@ -339,8 +335,7 @@ proc getUserVersion*(database: SqliteDatabase): DatabaseResult[int64] = proc handler(s: ptr sqlite3_stmt) = version = sqlite3_column_int64(s, 0) - let res = database.query("PRAGMA user_version;", handler) - if res.isErr(): + database.query("PRAGMA user_version;", handler).isOkOr: return err("failed to get user_version") ok(version) @@ -354,8 +349,7 @@ proc setUserVersion*(database: SqliteDatabase, version: int64): DatabaseResult[v ## ## For more info check: https://www.sqlite.org/pragma.html#pragma_user_version let query = "PRAGMA user_version=" & $version & ";" - let res = database.query(query, NoopRowHandler) - if res.isErr(): + database.query(query, NoopRowHandler).isOkOr: return err("failed to set user_version") ok() @@ -400,11 +394,9 @@ proc filterMigrationScripts( if direction != "" and not script.toLower().endsWith("." & direction & ".sql"): return false - let scriptVersionRes = getMigrationScriptVersion(script) - if scriptVersionRes.isErr(): + let scriptVersion = getMigrationScriptVersion(script).valueOr: return false - let scriptVersion = scriptVersionRes.value return lowVersion < scriptVersion and scriptVersion <= highVersion paths.filter(filterPredicate) @@ -476,10 +468,9 @@ proc migrate*( for statement in script.breakIntoStatements(): info "executing migration statement", statement = statement - let execRes = db.query(statement, NoopRowHandler) - if execRes.isErr(): + db.query(statement, NoopRowHandler).isOkOr: error "failed to execute migration statement", - statement = statement, error = execRes.error + statement = statement, error = error return err("failed to execute migration statement") info "migration statement executed succesfully", statement = statement @@ -497,9 +488,8 @@ proc performSqliteVacuum*(db: SqliteDatabase): DatabaseResult[void] = info "starting sqlite database vacuuming" - let resVacuum = db.vacuum() - if resVacuum.isErr(): - return err("failed to execute vacuum: " & resVacuum.error) + db.vacuum().isOkOr: + return err("failed to execute vacuum: " & error) info "finished sqlite database vacuuming" ok() diff --git a/waku/common/enr/typed_record.nim b/waku/common/enr/typed_record.nim index d0b055ac4..1db357621 100644 --- a/waku/common/enr/typed_record.nim +++ b/waku/common/enr/typed_record.nim @@ -65,11 +65,10 @@ func id*(record: TypedRecord): Option[RecordId] = if fieldOpt.isNone(): return none(RecordId) - let fieldRes = toRecordId(fieldOpt.get()) - if fieldRes.isErr(): + let field = toRecordId(fieldOpt.get()).valueOr: return none(RecordId) - some(fieldRes.value) + return some(field) func secp256k1*(record: TypedRecord): Option[array[33, byte]] = record.tryGet("secp256k1", array[33, byte]) diff --git a/waku/discovery/waku_discv5.nim b/waku/discovery/waku_discv5.nim index 94fc467fa..0eb329fa4 100644 --- a/waku/discovery/waku_discv5.nim +++ b/waku/discovery/waku_discv5.nim @@ -393,12 +393,11 @@ proc addBootstrapNode*(bootstrapAddr: string, bootstrapEnrs: var seq[enr.Record] if bootstrapAddr.len 
== 0 or bootstrapAddr[0] == '#': return - let enrRes = parseBootstrapAddress(bootstrapAddr) - if enrRes.isErr(): - info "ignoring invalid bootstrap address", reason = enrRes.error + let enr = parseBootstrapAddress(bootstrapAddr).valueOr: + info "ignoring invalid bootstrap address", reason = error return - bootstrapEnrs.add(enrRes.value) + bootstrapEnrs.add(enr) proc setupDiscoveryV5*( myENR: enr.Record, diff --git a/waku/factory/internal_config.nim b/waku/factory/internal_config.nim index 22b101021..7aad6e615 100644 --- a/waku/factory/internal_config.nim +++ b/waku/factory/internal_config.nim @@ -29,13 +29,9 @@ proc enrConfiguration*( ).isOkOr: return err("could not initialize ENR with shards") - let recordRes = enrBuilder.build() - let record = - if recordRes.isErr(): - error "failed to create record", error = recordRes.error - return err($recordRes.error) - else: - recordRes.get() + let record = enrBuilder.build().valueOr: + error "failed to create enr record", error = error + return err($error) return ok(record) @@ -70,16 +66,13 @@ proc networkConfiguration*( ): Future[NetConfigResult] {.async.} = ## `udpPort` is only supplied to satisfy underlying APIs but is not ## actually a supported transport for libp2p traffic. - let natRes = setupNat( + var (extIp, extTcpPort, _) = setupNat( conf.natStrategy.string, clientId, Port(uint16(conf.p2pTcpPort) + portsShift), Port(uint16(conf.p2pTcpPort) + portsShift), - ) - if natRes.isErr(): - return err("failed to setup NAT: " & $natRes.error) - - var (extIp, extTcpPort, _) = natRes.get() + ).valueOr: + return err("failed to setup NAT: " & $error) let discv5UdpPort = @@ -101,12 +94,10 @@ proc networkConfiguration*( # Resolve and use DNS domain IP if conf.dns4DomainName.isSome() and extIp.isNone(): try: - let dnsRes = await dnsResolve(conf.dns4DomainName.get(), dnsAddrsNameServers) + let dns = (await dnsResolve(conf.dns4DomainName.get(), dnsAddrsNameServers)).valueOr: + return err($error) # Pass error down the stack - if dnsRes.isErr(): - return err($dnsRes.error) # Pass error down the stack - - extIp = some(parseIpAddress(dnsRes.get())) + extIp = some(parseIpAddress(dns)) except CatchableError: return err("Could not update extIp to resolved DNS IP: " & getCurrentExceptionMsg()) diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim index a5eb4f2ca..488d07c06 100644 --- a/waku/factory/node_factory.nim +++ b/waku/factory/node_factory.nim @@ -47,11 +47,10 @@ proc setupPeerStorage(): Result[Option[WakuPeerStorage], string] = ?peer_store_sqlite_migrations.migrate(db) - let res = WakuPeerStorage.new(db) - if res.isErr(): - return err("failed to init peer store" & res.error) + let res = WakuPeerStorage.new(db).valueOr: + return err("failed to init peer store" & error) - ok(some(res.value)) + return ok(some(res)) ## Init waku node instance @@ -167,16 +166,17 @@ proc setupProtocols( if conf.storeServiceConf.isSome(): let storeServiceConf = conf.storeServiceConf.get() if storeServiceConf.supportV2: - let archiveDriverRes = await legacy_driver.ArchiveDriver.new( - storeServiceConf.dbUrl, storeServiceConf.dbVacuum, storeServiceConf.dbMigration, - storeServiceConf.maxNumDbConnections, onFatalErrorAction, - ) - if archiveDriverRes.isErr(): - return err("failed to setup legacy archive driver: " & archiveDriverRes.error) + let archiveDriver = ( + await legacy_driver.ArchiveDriver.new( + storeServiceConf.dbUrl, storeServiceConf.dbVacuum, + storeServiceConf.dbMigration, storeServiceConf.maxNumDbConnections, + onFatalErrorAction, + ) + ).valueOr: + 
return err("failed to setup legacy archive driver: " & error) - let mountArcRes = node.mountLegacyArchive(archiveDriverRes.get()) - if mountArcRes.isErr(): - return err("failed to mount waku legacy archive protocol: " & mountArcRes.error) + node.mountLegacyArchive(archiveDriver).isOkOr: + return err("failed to mount waku legacy archive protocol: " & error) ## For now we always mount the future archive driver but if the legacy one is mounted, ## then the legacy will be in charge of performing the archiving. @@ -189,11 +189,8 @@ proc setupProtocols( ## So for now, we need to make sure that when legacy store is enabled and we use sqlite ## that we migrate our db according to legacy store's schema to have the extra field - let engineRes = dburl.getDbEngine(storeServiceConf.dbUrl) - if engineRes.isErr(): - return err("error getting db engine in setupProtocols: " & engineRes.error) - - let engine = engineRes.get() + let engine = dburl.getDbEngine(storeServiceConf.dbUrl).valueOr: + return err("error getting db engine in setupProtocols: " & error) let migrate = if engine == "sqlite" and storeServiceConf.supportV2: @@ -201,20 +198,19 @@ proc setupProtocols( else: storeServiceConf.dbMigration - let archiveDriverRes = await driver.ArchiveDriver.new( - storeServiceConf.dbUrl, storeServiceConf.dbVacuum, migrate, - storeServiceConf.maxNumDbConnections, onFatalErrorAction, - ) - if archiveDriverRes.isErr(): - return err("failed to setup archive driver: " & archiveDriverRes.error) + let archiveDriver = ( + await driver.ArchiveDriver.new( + storeServiceConf.dbUrl, storeServiceConf.dbVacuum, migrate, + storeServiceConf.maxNumDbConnections, onFatalErrorAction, + ) + ).valueOr: + return err("failed to setup archive driver: " & error) - let retPolicyRes = policy.RetentionPolicy.new(storeServiceConf.retentionPolicy) - if retPolicyRes.isErr(): - return err("failed to create retention policy: " & retPolicyRes.error) + let retPolicy = policy.RetentionPolicy.new(storeServiceConf.retentionPolicy).valueOr: + return err("failed to create retention policy: " & error) - let mountArcRes = node.mountArchive(archiveDriverRes.get(), retPolicyRes.get()) - if mountArcRes.isErr(): - return err("failed to mount waku archive protocol: " & mountArcRes.error) + node.mountArchive(archiveDriver, retPolicy).isOkOr: + return err("failed to mount waku archive protocol: " & error) if storeServiceConf.supportV2: # Store legacy setup diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim index 15d73d64d..0adebd44e 100644 --- a/waku/factory/waku.nim +++ b/waku/factory/waku.nim @@ -205,13 +205,11 @@ proc new*( if wakuConf.remoteStoreNode.isNone(): return err("A storenode should be set when reliability mode is on") - let deliveryMonitorRes = DeliveryMonitor.new( + let deliveryMonitor = DeliveryMonitor.new( node.wakuStoreClient, node.wakuRelay, node.wakuLightpushClient, node.wakuFilterClient, - ) - if deliveryMonitorRes.isErr(): - return err("could not create delivery monitor: " & $deliveryMonitorRes.error) - deliveryMonitor = deliveryMonitorRes.get() + ).valueOr: + return err("could not create delivery monitor: " & $error) var waku = Waku( version: git_version, @@ -328,16 +326,14 @@ proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} = await sleepAsync(30.seconds) if waku.conf.dnsDiscoveryConf.isSome(): let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get() - let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes( - dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers - ) - if 
dynamicBootstrapNodesRes.isErr(): - error "Retrieving dynamic bootstrap nodes failed", - error = dynamicBootstrapNodesRes.error + waku[].dynamicBootstrapNodes = ( + await waku_dnsdisc.retrieveDynamicBootstrapNodes( + dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers + ) + ).valueOr: + error "Retrieving dynamic bootstrap nodes failed", error = error continue - waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get() - if not waku[].wakuDiscv5.isNil(): let dynamicBootstrapEnrs = waku[].dynamicBootstrapNodes .filterIt(it.hasUdpPort()) diff --git a/waku/incentivization/eligibility_manager.nim b/waku/incentivization/eligibility_manager.nim index b10b293e1..29443536a 100644 --- a/waku/incentivization/eligibility_manager.nim +++ b/waku/incentivization/eligibility_manager.nim @@ -42,10 +42,9 @@ proc getTxAndTxReceipt( let receiptFuture = eligibilityManager.getMinedTransactionReceipt(txHash) await allFutures(txFuture, receiptFuture) let tx = txFuture.read() - let txReceipt = receiptFuture.read() - if txReceipt.isErr(): - return err("Cannot get tx receipt: " & txReceipt.error) - return ok((tx, txReceipt.get())) + let txReceipt = receiptFuture.read().valueOr: + return err("Cannot get tx receipt: " & error) + return ok((tx, txReceipt)) proc isEligibleTxId*( eligibilityManager: EligibilityManager, diff --git a/waku/node/api/filter.nim b/waku/node/api/filter.nim index 242640a44..948035f14 100644 --- a/waku/node/api/filter.nim +++ b/waku/node/api/filter.nim @@ -108,13 +108,10 @@ proc filterSubscribe*( error = "waku filter client is not set up" return err(FilterSubscribeError.serviceUnavailable()) - let remotePeerRes = parsePeerInfo(peer) - if remotePeerRes.isErr(): - error "Couldn't parse the peer info properly", error = remotePeerRes.error + let remotePeer = parsePeerInfo(peer).valueOr: + error "Couldn't parse the peer info properly", error = error return err(FilterSubscribeError.serviceUnavailable("No peers available")) - let remotePeer = remotePeerRes.value - if pubsubTopic.isSome(): info "registering filter subscription to content", pubsubTopic = pubsubTopic.get(), @@ -143,15 +140,11 @@ proc filterSubscribe*( else: # No pubsub topic, autosharding is used to deduce it # but content topics must be well-formed for this - let topicMapRes = - node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics) - - let topicMap = - if topicMapRes.isErr(): - error "can't get shard", error = topicMapRes.error + let topicMap = node.wakuAutoSharding + .get() + .getShardsFromContentTopics(contentTopics).valueOr: + error "can't get shard", error = error return err(FilterSubscribeError.badResponse("can't get shard")) - else: - topicMapRes.get() var futures = collect(newSeq): for shard, topics in topicMap.pairs: @@ -195,13 +188,10 @@ proc filterUnsubscribe*( ): Future[FilterSubscribeResult] {.async: (raises: []).} = ## Unsubscribe from a content filter V2". 
- let remotePeerRes = parsePeerInfo(peer) - if remotePeerRes.isErr(): - error "couldn't parse remotePeerInfo", error = remotePeerRes.error + let remotePeer = parsePeerInfo(peer).valueOr: + error "couldn't parse remotePeerInfo", error = error return err(FilterSubscribeError.serviceUnavailable("No peers available")) - let remotePeer = remotePeerRes.value - if pubsubTopic.isSome(): info "deregistering filter subscription to content", pubsubTopic = pubsubTopic.get(), @@ -226,15 +216,11 @@ proc filterUnsubscribe*( error "Failed filter un-subscription, pubsub topic must be specified with static sharding" waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"]) else: # pubsubTopic.isNone - let topicMapRes = - node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics) - - let topicMap = - if topicMapRes.isErr(): - error "can't get shard", error = topicMapRes.error + let topicMap = node.wakuAutoSharding + .get() + .getShardsFromContentTopics(contentTopics).valueOr: + error "can't get shard", error = error return err(FilterSubscribeError.badResponse("can't get shard")) - else: - topicMapRes.get() var futures = collect(newSeq): for shard, topics in topicMap.pairs: @@ -275,13 +261,10 @@ proc filterUnsubscribeAll*( ): Future[FilterSubscribeResult] {.async: (raises: []).} = ## Unsubscribe from a content filter V2". - let remotePeerRes = parsePeerInfo(peer) - if remotePeerRes.isErr(): - error "couldn't parse remotePeerInfo", error = remotePeerRes.error + let remotePeer = parsePeerInfo(peer).valueOr: + error "couldn't parse remotePeerInfo", error = error return err(FilterSubscribeError.serviceUnavailable("No peers available")) - let remotePeer = remotePeerRes.value - info "deregistering all filter subscription to content", peer = remotePeer.peerId let unsubRes = await node.wakuFilterClient.unsubscribeAll(remotePeer) diff --git a/waku/node/api/lightpush.nim b/waku/node/api/lightpush.nim index 550c5bd9f..f42cb146e 100644 --- a/waku/node/api/lightpush.nim +++ b/waku/node/api/lightpush.nim @@ -114,14 +114,8 @@ proc legacyLightpushPublish*( if node.wakuAutoSharding.isNone(): return err("Pubsub topic must be specified when static sharding is enabled") - let topicMapRes = - node.wakuAutoSharding.get().getShardsFromContentTopics(message.contentTopic) - let topicMap = - if topicMapRes.isErr(): - return err(topicMapRes.error) - else: - topicMapRes.get() + ?node.wakuAutoSharding.get().getShardsFromContentTopics(message.contentTopic) for pubsub, _ in topicMap.pairs: # There's only one pair anyway return await internalPublish(node, $pubsub, message, peer) diff --git a/waku/node/api/peer_exchange.nim b/waku/node/api/peer_exchange.nim index d2e0f5575..a4bec727b 100644 --- a/waku/node/api/peer_exchange.nim +++ b/waku/node/api/peer_exchange.nim @@ -111,10 +111,9 @@ proc setPeerExchangePeer*( info "Set peer-exchange peer", peer = peer - let remotePeerRes = parsePeerInfo(peer) - if remotePeerRes.isErr(): - error "could not parse peer info", error = remotePeerRes.error + let remotePeer = parsePeerInfo(peer).valueOr: + error "could not parse peer info", error = error return - node.peerManager.addPeer(remotePeerRes.value, PeerExchange) + node.peerManager.addPeer(remotePeer, PeerExchange) waku_px_peers.inc() diff --git a/waku/node/api/relay.nim b/waku/node/api/relay.nim index 1e38c5535..827cc1e5f 100644 --- a/waku/node/api/relay.nim +++ b/waku/node/api/relay.nim @@ -240,11 +240,8 @@ proc mountRlnRelay*( CatchableError, "WakuRelay protocol is not mounted, cannot mount WakuRlnRelay" ) - let rlnRelayRes = 
await WakuRlnRelay.new(rlnConf, registrationHandler) - if rlnRelayRes.isErr(): - raise - newException(CatchableError, "failed to mount WakuRlnRelay: " & rlnRelayRes.error) - let rlnRelay = rlnRelayRes.get() + let rlnRelay = (await WakuRlnRelay.new(rlnConf, registrationHandler)).valueOr: + raise newException(CatchableError, "failed to mount WakuRlnRelay: " & error) if (rlnConf.userMessageLimit > rlnRelay.groupManager.rlnRelayMaxMessageLimit): error "rln-relay-user-message-limit can't exceed the MAX_MESSAGE_LIMIT in the rln contract" let validator = generateRlnValidator(rlnRelay, spamHandler) diff --git a/waku/node/api/store.nim b/waku/node/api/store.nim index ddac5fbfd..7edae7966 100644 --- a/waku/node/api/store.nim +++ b/waku/node/api/store.nim @@ -87,30 +87,27 @@ proc toArchiveQuery( proc toHistoryResult*( res: waku_archive_legacy.ArchiveResult ): legacy_store_common.HistoryResult = - if res.isErr(): - let error = res.error - case res.error.kind + let response = res.valueOr: + case error.kind of waku_archive_legacy.ArchiveErrorKind.DRIVER_ERROR, waku_archive_legacy.ArchiveErrorKind.INVALID_QUERY: - err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST, cause: res.error.cause)) + return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST, cause: error.cause)) else: - err(HistoryError(kind: HistoryErrorKind.UNKNOWN)) - else: - let response = res.get() - ok( - HistoryResponse( - messages: response.messages, - cursor: response.cursor.map( - proc(cursor: waku_archive_legacy.ArchiveCursor): HistoryCursor = - HistoryCursor( - pubsubTopic: cursor.pubsubTopic, - senderTime: cursor.senderTime, - storeTime: cursor.storeTime, - digest: cursor.digest, - ) - ), - ) + return err(HistoryError(kind: HistoryErrorKind.UNKNOWN)) + return ok( + HistoryResponse( + messages: response.messages, + cursor: response.cursor.map( + proc(cursor: waku_archive_legacy.ArchiveCursor): HistoryCursor = + HistoryCursor( + pubsubTopic: cursor.pubsubTopic, + senderTime: cursor.senderTime, + storeTime: cursor.storeTime, + digest: cursor.digest, + ) + ), ) + ) proc mountLegacyStore*( node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit @@ -126,8 +123,7 @@ proc mountLegacyStore*( request: HistoryQuery ): Future[legacy_store_common.HistoryResult] {.async.} = if request.cursor.isSome(): - request.cursor.get().checkHistCursor().isOkOr: - return err(error) + ?request.cursor.get().checkHistCursor() let request = request.toArchiveQuery() let response = await node.wakuLegacyArchive.findMessagesV2(request) @@ -160,11 +156,8 @@ proc query*( if node.wakuLegacyStoreClient.isNil(): return err("waku legacy store client is nil") - let queryRes = await node.wakuLegacyStoreClient.query(query, peer) - if queryRes.isErr(): - return err("legacy store client query error: " & $queryRes.error) - - let response = queryRes.get() + let response = (await node.wakuLegacyStoreClient.query(query, peer)).valueOr: + return err("legacy store client query error: " & $error) return ok(response) @@ -201,9 +194,8 @@ when defined(waku_exp_store_resume): if node.wakuLegacyStoreClient.isNil(): return - let retrievedMessages = await node.wakuLegacyStoreClient.resume(peerList) - if retrievedMessages.isErr(): - error "failed to resume store", error = retrievedMessages.error + let retrievedMessages = (await node.wakuLegacyStoreClient.resume(peerList)).valueOr: + error "failed to resume store", error = error return info "the number of retrieved messages since the last online time: ", diff --git 
a/waku/node/delivery_monitor/not_delivered_storage/migrations.nim b/waku/node/delivery_monitor/not_delivered_storage/migrations.nim index 6f0b3265d..8175aea62 100644 --- a/waku/node/delivery_monitor/not_delivered_storage/migrations.nim +++ b/waku/node/delivery_monitor/not_delivered_storage/migrations.nim @@ -17,10 +17,8 @@ const PeerStoreMigrationPath: string = projectRoot / "migrations" / "sent_msgs" proc migrate*(db: SqliteDatabase): DatabaseResult[void] = info "starting peer store's sqlite database migration for sent messages" - let migrationRes = - migrate(db, TargetSchemaVersion, migrationsScriptsDir = PeerStoreMigrationPath) - if migrationRes.isErr(): - return err("failed to execute migration scripts: " & migrationRes.error) + migrate(db, TargetSchemaVersion, migrationsScriptsDir = PeerStoreMigrationPath).isOkOr: + return err("failed to execute migration scripts: " & error) info "finished peer store's sqlite database migration for sent messages" ok() diff --git a/waku/node/peer_manager/peer_store/migrations.nim b/waku/node/peer_manager/peer_store/migrations.nim index 61b416ed8..97961d25a 100644 --- a/waku/node/peer_manager/peer_store/migrations.nim +++ b/waku/node/peer_manager/peer_store/migrations.nim @@ -18,16 +18,14 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult ## it runs migration scripts if the `user_version` is outdated. The `migrationScriptsDir` path ## points to the directory holding the migrations scripts once the db is updated, it sets the ## `user_version` to the `tragetVersion`. - ## + ## ## If not `targetVersion` is provided, it defaults to `SchemaVersion`. ## ## NOTE: Down migration it is not currently supported info "starting peer store's sqlite database migration" - let migrationRes = - migrate(db, targetVersion, migrationsScriptsDir = PeerStoreMigrationPath) - if migrationRes.isErr(): - return err("failed to execute migration scripts: " & migrationRes.error) + migrate(db, targetVersion, migrationsScriptsDir = PeerStoreMigrationPath).isOkOr: + return err("failed to execute migration scripts: " & error) info "finished peer store's sqlite database migration" ok() diff --git a/waku/node/peer_manager/peer_store/waku_peer_storage.nim b/waku/node/peer_manager/peer_store/waku_peer_storage.nim index 876e8e258..dc1452618 100644 --- a/waku/node/peer_manager/peer_store/waku_peer_storage.nim +++ b/waku/node/peer_manager/peer_store/waku_peer_storage.nim @@ -67,7 +67,7 @@ proc encode*(remotePeerInfo: RemotePeerInfo): PeerStorageResult[ProtoBuffer] = let catchRes = catch: pb.write(4, remotePeerInfo.publicKey) - if catchRes.isErr(): + catchRes.isOkOr: return err("Enncoding public key failed: " & catchRes.error.msg) pb.write(5, uint32(ord(remotePeerInfo.connectedness))) @@ -154,14 +154,11 @@ method getAll*( let catchRes = catch: db.database.query("SELECT peerId, storedInfo FROM Peer", peer) - let queryRes = - if catchRes.isErr(): - return err("failed to extract peer from query result: " & catchRes.error.msg) - else: - catchRes.get() + let queryRes = catchRes.valueOr: + return err("failed to extract peer from query result: " & catchRes.error.msg) - if queryRes.isErr(): - return err("peer storage query failed: " & queryRes.error) + queryRes.isOkOr: + return err("peer storage query failed: " & error) return ok() diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim index f34a47a01..114775951 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -225,8 +225,8 @@ proc mountMetadata*( let catchRes = catch: 
node.switch.mount(node.wakuMetadata, protocolMatcher(WakuMetadataCodec)) - if catchRes.isErr(): - return err(catchRes.error.msg) + catchRes.isOkOr: + return err(error.msg) return ok() @@ -266,8 +266,8 @@ proc mountMix*( node.wakuMix.registerDestReadBehavior(WakuLightPushCodec, readLp(int(-1))) let catchRes = catch: node.switch.mount(node.wakuMix) - if catchRes.isErr(): - return err(catchRes.error.msg) + catchRes.isOkOr: + return err(error.msg) return ok() ## Waku Sync @@ -300,8 +300,8 @@ proc mountStoreSync*( node.switch.mount( node.wakuStoreReconciliation, protocolMatcher(WakuReconciliationCodec) ) - if reconMountRes.isErr(): - return err(reconMountRes.error.msg) + reconMountRes.isOkOr: + return err(error.msg) let transfer = SyncTransfer.new( node.peerManager, node.wakuArchive, idsChannel, wantsChannel, needsChannel ) @@ -311,8 +311,8 @@ proc mountStoreSync*( let transMountRes = catch: node.switch.mount(node.wakuStoreTransfer, protocolMatcher(WakuTransferCodec)) - if transMountRes.isErr(): - return err(transMountRes.error.msg) + transMountRes.isOkOr: + return err(error.msg) return ok() diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim index 172172376..1efbf7d04 100644 --- a/waku/waku_api/rest/admin/handlers.nim +++ b/waku/waku_api/rest/admin/handlers.nim @@ -426,14 +426,13 @@ proc installAdminV1GetFilterSubsHandler(router: var RestRouter, node: WakuNode) FilterSubscription(peerId: $peerId, filterCriteria: filterCriteria) ) - let resp = RestApiResponse.jsonResponse(subscriptions, status = Http200) - if resp.isErr(): - error "An error ocurred while building the json respose: ", error = resp.error + let resp = RestApiResponse.jsonResponse(subscriptions, status = Http200).valueOr: + error "An error occurred while building the json response", error = error return RestApiResponse.internalServerError( - fmt("An error ocurred while building the json respose: {resp.error}") + fmt("An error occurred while building the json response: {error}") ) - return resp.get() + return resp proc installAdminV1PostLogLevelHandler(router: var RestRouter, node: WakuNode) = router.api(MethodPost, ROUTE_ADMIN_V1_POST_LOG_LEVEL) do( diff --git a/waku/waku_api/rest/debug/handlers.nim b/waku/waku_api/rest/debug/handlers.nim index eb1529759..43b6fbbf1 100644 --- a/waku/waku_api/rest/debug/handlers.nim +++ b/waku/waku_api/rest/debug/handlers.nim @@ -15,12 +15,11 @@ const ROUTE_DEBUG_INFOV1 = "/debug/v1/info" proc installDebugInfoV1Handler(router: var RestRouter, node: WakuNode) = let getInfo = proc(): RestApiResponse = let info = node.info().toDebugWakuInfo() - let resp = RestApiResponse.jsonResponse(info, status = Http200) - if resp.isErr(): - info "An error occurred while building the json respose", error = resp.error + let resp = RestApiResponse.jsonResponse(info, status = Http200).valueOr: + info "An error occurred while building the json response", error = error return RestApiResponse.internalServerError() - return resp.get() + return resp # /debug route is deprecated, will be removed router.api(MethodGet, ROUTE_DEBUG_INFOV1) do() -> RestApiResponse: diff --git a/waku/waku_api/rest/filter/handlers.nim b/waku/waku_api/rest/filter/handlers.nim index f3f6e4837..61d7eb96f 100644 --- a/waku/waku_api/rest/filter/handlers.nim +++ b/waku/waku_api/rest/filter/handlers.nim @@ -49,15 +49,12 @@ func decodeRequestBody[T]( let reqBodyData = contentBody.get().data - let requestResult = decodeFromJsonBytes(T, reqBodyData) - if requestResult.isErr(): + let requestResult = decodeFromJsonBytes(T, 
reqBodyData).valueOr: return err( - RestApiResponse.badRequest( - "Invalid content body, could not decode. " & $requestResult.error - ) + RestApiResponse.badRequest("Invalid content body, could not decode. " & $error) ) - return ok(requestResult.get()) + return ok(requestResult) proc getStatusDesc( protocolClientRes: filter_protocol_type.FilterSubscribeResult @@ -129,16 +126,15 @@ proc makeRestResponse( httpStatus = convertErrorKindToHttpStatus(protocolClientRes.error().kind) # TODO: convert status codes! - let resp = - RestApiResponse.jsonResponse(filterSubscriptionResponse, status = httpStatus) - - if resp.isErr(): - error "An error ocurred while building the json respose: ", error = resp.error + let resp = RestApiResponse.jsonResponse( filterSubscriptionResponse, status = httpStatus ).valueOr: + error "An error occurred while building the json response", error = error return RestApiResponse.internalServerError( - fmt("An error ocurred while building the json respose: {resp.error}") + fmt("An error occurred while building the json response: {error}") ) - return resp.get() + return resp proc makeRestResponse( requestId: string, protocolClientRes: filter_protocol_type.FilterSubscribeError @@ -149,16 +145,15 @@ proc makeRestResponse( let httpStatus = convertErrorKindToHttpStatus(protocolClientRes.kind) # TODO: convert status codes! - let resp = - RestApiResponse.jsonResponse(filterSubscriptionResponse, status = httpStatus) - - if resp.isErr(): - error "An error ocurred while building the json respose: ", error = resp.error + let resp = RestApiResponse.jsonResponse( filterSubscriptionResponse, status = httpStatus ).valueOr: + error "An error occurred while building the json response", error = error return RestApiResponse.internalServerError( - fmt("An error ocurred while building the json respose: {resp.error}") + fmt("An error occurred while building the json response: {error}") ) - return resp.get() + return resp const NoPeerNoDiscoError = FilterSubscribeError.serviceUnavailable( "No suitable service peer & no discovery method" ) @@ -175,18 +170,14 @@ proc filterPostPutSubscriptionRequestHandler( ): Future[RestApiResponse] {.async.} = ## handles any filter subscription requests, adds or modifies.
- let decodedBody = decodeRequestBody[FilterSubscribeRequest](contentBody) - - if decodedBody.isErr(): + let req: FilterSubscribeRequest = decodeRequestBody[FilterSubscribeRequest]( + contentBody + ).valueOr: return makeRestResponse( "unknown", - FilterSubscribeError.badRequest( - fmt("Failed to decode request: {decodedBody.error}") - ), + FilterSubscribeError.badRequest(fmt("Failed to decode request: {error}")), ) - let req: FilterSubscribeRequest = decodedBody.value() - let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: let handler = discHandler.valueOr: return makeRestResponse(req.requestId, NoPeerNoDiscoError) @@ -256,18 +247,14 @@ proc installFilterDeleteSubscriptionsHandler( ## Subscribes a node to a list of contentTopics of a PubSub topic info "delete", ROUTE_FILTER_SUBSCRIPTIONS, contentBody - let decodedBody = decodeRequestBody[FilterUnsubscribeRequest](contentBody) - - if decodedBody.isErr(): + let req: FilterUnsubscribeRequest = decodeRequestBody[FilterUnsubscribeRequest]( + contentBody + ).valueOr: return makeRestResponse( "unknown", - FilterSubscribeError.badRequest( - fmt("Failed to decode request: {decodedBody.error}") - ), + FilterSubscribeError.badRequest(fmt("Failed to decode request: {error}")), ) - let req: FilterUnsubscribeRequest = decodedBody.value() - let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: let handler = discHandler.valueOr: return makeRestResponse(req.requestId, NoPeerNoDiscoError) @@ -308,18 +295,14 @@ proc installFilterDeleteAllSubscriptionsHandler( ## Subscribes a node to a list of contentTopics of a PubSub topic info "delete", ROUTE_FILTER_ALL_SUBSCRIPTIONS, contentBody - let decodedBody = decodeRequestBody[FilterUnsubscribeAllRequest](contentBody) - - if decodedBody.isErr(): + let req: FilterUnsubscribeAllRequest = decodeRequestBody[ + FilterUnsubscribeAllRequest + ](contentBody).valueOr: return makeRestResponse( "unknown", - FilterSubscribeError.badRequest( - fmt("Failed to decode request: {decodedBody.error}") - ), + FilterSubscribeError.badRequest(fmt("Failed to decode request: {error}")), ) - let req: FilterUnsubscribeAllRequest = decodedBody.value() - let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: let handler = discHandler.valueOr: return makeRestResponse(req.requestId, NoPeerNoDiscoError) @@ -399,24 +382,20 @@ proc installFilterGetMessagesHandler( ## TODO: ability to specify a return message limit, maybe use cursor to control paging response. 
info "get", ROUTE_FILTER_MESSAGES, contentTopic = contentTopic - if contentTopic.isErr(): + let contentTopic = contentTopic.valueOr: return RestApiResponse.badRequest("Missing contentTopic") - let contentTopic = contentTopic.get() - - let msgRes = cache.getAutoMessages(contentTopic, clear = true) - if msgRes.isErr(): + let msg = cache.getAutoMessages(contentTopic, clear = true).valueOr: return RestApiResponse.badRequest("Not subscribed to topic: " & contentTopic) - let data = FilterGetMessagesResponse(msgRes.get().map(toFilterWakuMessage)) - let resp = RestApiResponse.jsonResponse(data, status = Http200) - if resp.isErr(): - error "An error ocurred while building the json respose: ", error = resp.error + let data = FilterGetMessagesResponse(msg.map(toFilterWakuMessage)) + let resp = RestApiResponse.jsonResponse(data, status = Http200).valueOr: + error "An error ocurred while building the json respose: ", error = error return RestApiResponse.internalServerError( "An error ocurred while building the json respose" ) - return resp.get() + return resp proc installFilterRestApiHandlers*( router: var RestRouter, diff --git a/waku/waku_api/rest/legacy_lightpush/handlers.nim b/waku/waku_api/rest/legacy_lightpush/handlers.nim index b129f3ffc..7a3c5b1ed 100644 --- a/waku/waku_api/rest/legacy_lightpush/handlers.nim +++ b/waku/waku_api/rest/legacy_lightpush/handlers.nim @@ -50,12 +50,8 @@ proc installLightPushRequestHandler*( ## Send a request to push a waku message info "post", ROUTE_LIGHTPUSH, contentBody - let decodedBody = decodeRequestBody[PushRequest](contentBody) - - if decodedBody.isErr(): - return decodedBody.error() - - let req: PushRequest = decodedBody.value() + let req: PushRequest = decodeRequestBody[PushRequest](contentBody).valueOr: + return error let msg = req.message.toWakuMessage().valueOr: return RestApiResponse.badRequest("Invalid message: " & $error) @@ -80,12 +76,12 @@ proc installLightPushRequestHandler*( error "Failed to request a message push due to timeout!" 
return RestApiResponse.serviceUnavailable("Push request timed out") - if subFut.value().isErr(): - if subFut.value().error == TooManyRequestsMessage: + subFut.value().isOkOr: + if error == TooManyRequestsMessage: return RestApiResponse.tooManyRequests("Request rate limmit reached") return RestApiResponse.serviceUnavailable( - fmt("Failed to request a message push: {subFut.value().error}") + fmt("Failed to request a message push: {error}") ) return RestApiResponse.ok() diff --git a/waku/waku_api/rest/legacy_store/handlers.nim b/waku/waku_api/rest/legacy_store/handlers.nim index 96e1da780..4ed58f799 100644 --- a/waku/waku_api/rest/legacy_store/handlers.nim +++ b/waku/waku_api/rest/legacy_store/handlers.nim @@ -1,6 +1,7 @@ {.push raises: [].} -import std/strformat, results, chronicles, uri, json_serialization, presto/route +import + std/[strformat, sugar], results, chronicles, uri, json_serialization, presto/route import ../../../waku_core, ../../../waku_store_legacy/common, @@ -34,20 +35,17 @@ proc performHistoryQuery( error msg return RestApiResponse.internalServerError(msg) - let res = queryFut.read() - if res.isErr(): - const msg = "Error occurred in queryFut.read()" - error msg, error = res.error - return RestApiResponse.internalServerError(fmt("{msg} [{res.error}]")) + let storeResp = queryFut.read().map(res => res.toStoreResponseRest()).valueOr: + const msg = "Error occurred in queryFut.read()" + error msg, error = error + return RestApiResponse.internalServerError(fmt("{msg} [{error}]")) - let storeResp = res.value.toStoreResponseRest() - let resp = RestApiResponse.jsonResponse(storeResp, status = Http200) - if resp.isErr(): + let resp = RestApiResponse.jsonResponse(storeResp, status = Http200).valueOr: const msg = "Error building the json respose" - error msg, error = resp.error - return RestApiResponse.internalServerError(fmt("{msg} [{resp.error}]")) + error msg, error = error + return RestApiResponse.internalServerError(fmt("{msg} [{error}]")) - return resp.get() + return resp # Converts a string time representation into an Option[Timestamp]. 
# Only positive time is considered a valid Timestamp in the request @@ -70,16 +68,13 @@ proc parseCursor( digest: Option[string], ): Result[Option[HistoryCursor], string] = # Parse sender time - let parsedSenderTime = parseTime(senderTime).valueOr: - return err(error) + let parsedSenderTime = ?parseTime(senderTime) # Parse store time - let parsedStoreTime = parseTime(storeTime).valueOr: - return err(error) + let parsedStoreTime = ?parseTime(storeTime) # Parse message digest - let parsedMsgDigest = parseMsgDigest(digest).valueOr: - return err(error) + let parsedMsgDigest = ?parseMsgDigest(digest) # Parse cursor information if parsedPubsubTopic.isSome() and parsedSenderTime.isSome() and diff --git a/waku/waku_api/rest/legacy_store/types.nim b/waku/waku_api/rest/legacy_store/types.nim index 53a96bd69..0c547c7cc 100644 --- a/waku/waku_api/rest/legacy_store/types.nim +++ b/waku/waku_api/rest/legacy_store/types.nim @@ -60,8 +60,7 @@ proc parseMsgDigest*( return ok(none(waku_store_common.MessageDigest)) let decodedUrl = decodeUrl(input.get()) - let base64DecodedArr = base64.decode(Base64String(decodedUrl)).valueOr: - return err(error) + let base64DecodedArr = ?base64.decode(Base64String(decodedUrl)) var messageDigest = waku_store_common.MessageDigest() diff --git a/waku/waku_api/rest/origin_handler.nim b/waku/waku_api/rest/origin_handler.nim index 2317c945f..9752bfb56 100644 --- a/waku/waku_api/rest/origin_handler.nim +++ b/waku/waku_api/rest/origin_handler.nim @@ -74,13 +74,12 @@ proc originMiddlewareProc( reqfence: RequestFence, nextHandler: HttpProcessCallback2, ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = - if reqfence.isErr(): + let request = reqfence.valueOr: # Ignore request errors that detected before our middleware. # Let final handler deal with it. 
return await nextHandler(reqfence) let self = OriginHandlerMiddlewareRef(middleware) - let request = reqfence.get() var reqHeaders = request.headers var response = request.getResponse() diff --git a/waku/waku_api/rest/relay/handlers.nim b/waku/waku_api/rest/relay/handlers.nim index f59c445a8..4a1415361 100644 --- a/waku/waku_api/rest/relay/handlers.nim +++ b/waku/waku_api/rest/relay/handlers.nim @@ -126,29 +126,25 @@ proc installRelayApiHandlers*( # ## TODO: ability to specify a return message limit # info "get_waku_v2_relay_v1_messages", topic=topic - if pubsubTopic.isErr(): + let pubSubTopic = pubsubTopic.valueOr: return RestApiResponse.badRequest() - let pubSubTopic = pubsubTopic.get() - let messages = cache.getMessages(pubSubTopic, clear = true) - if messages.isErr(): + let messages = cache.getMessages(pubSubTopic, clear = true).valueOr: info "Not subscribed to topic", topic = pubSubTopic return RestApiResponse.notFound() - let data = RelayGetMessagesResponse(messages.get().map(toRelayWakuMessage)) - let resp = RestApiResponse.jsonResponse(data, status = Http200) - if resp.isErr(): - info "An error ocurred while building the json respose", error = resp.error + let data = RelayGetMessagesResponse(messages.map(toRelayWakuMessage)) + let resp = RestApiResponse.jsonResponse(data, status = Http200).valueOr: + info "An error ocurred while building the json respose", error = error return RestApiResponse.internalServerError() - return resp.get() + return resp router.api(MethodPost, ROUTE_RELAY_MESSAGESV1) do( pubsubTopic: string, contentBody: Option[ContentBody] ) -> RestApiResponse: - if pubsubTopic.isErr(): + let pubSubTopic = pubsubTopic.valueOr: return RestApiResponse.badRequest() - let pubSubTopic = pubsubTopic.get() # ensure the node is subscribed to the topic. otherwise it risks publishing # to a topic with no connected peers @@ -318,9 +314,7 @@ proc installRelayApiHandlers*( if not await publishFut.withTimeout(futTimeout): return RestApiResponse.internalServerError("Failed to publish: timedout") - var res = publishFut.read() - - if res.isErr(): - return RestApiResponse.badRequest("Failed to publish. " & res.error) + publishFut.read().isOkOr: + return RestApiResponse.badRequest("Failed to publish: " & error) return RestApiResponse.ok() diff --git a/waku/waku_api/rest/rest_serdes.nim b/waku/waku_api/rest/rest_serdes.nim index 1b6d5a98d..8dcb7c8f1 100644 --- a/waku/waku_api/rest/rest_serdes.nim +++ b/waku/waku_api/rest/rest_serdes.nim @@ -45,15 +45,12 @@ func decodeRequestBody*[T]( let reqBodyData = contentBody.get().data - let requestResult = decodeFromJsonBytes(T, reqBodyData) - if requestResult.isErr(): + let requestResult = decodeFromJsonBytes(T, reqBodyData).valueOr: return err( - RestApiResponse.badRequest( - "Invalid content body, could not decode. 
" & $requestResult.error - ) + RestApiResponse.badRequest("Invalid content body, could not decode: " & $error) ) - return ok(requestResult.get()) + return ok(requestResult) proc decodeBytes*( t: typedesc[string], value: openarray[byte], contentType: Opt[ContentTypeData] diff --git a/waku/waku_api/rest/serdes.nim b/waku/waku_api/rest/serdes.nim index 147184602..ab7ed8d25 100644 --- a/waku/waku_api/rest/serdes.nim +++ b/waku/waku_api/rest/serdes.nim @@ -117,8 +117,4 @@ proc encodeString*(value: SomeUnsignedInt): SerdesResult[string] = ok(Base10.toString(value)) proc decodeString*(T: typedesc[SomeUnsignedInt], value: string): SerdesResult[T] = - let v = Base10.decode(T, value) - if v.isErr(): - return err(v.error()) - else: - return ok(v.get()) + return Base10.decode(T, value) diff --git a/waku/waku_api/rest/server.nim b/waku/waku_api/rest/server.nim index e5db5ee5e..1b61425c8 100644 --- a/waku/waku_api/rest/server.nim +++ b/waku/waku_api/rest/server.nim @@ -91,23 +91,23 @@ proc new*( ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = discard - server.httpServer = HttpServerRef.new( - address, - defaultProcessCallback, - serverFlags, - socketFlags, - serverUri, - serverIdent, - maxConnections, - bufferSize, - backlogSize, - httpHeadersTimeout, - maxHeadersSize, - maxRequestBodySize, - dualstack = dualstack, - middlewares = middlewares, - ).valueOr: - return err(error) + server.httpServer = + ?HttpServerRef.new( + address, + defaultProcessCallback, + serverFlags, + socketFlags, + serverUri, + serverIdent, + maxConnections, + bufferSize, + backlogSize, + httpHeadersTimeout, + maxHeadersSize, + maxRequestBodySize, + dualstack = dualstack, + middlewares = middlewares, + ) return ok(server) proc getRouter(): RestRouter = diff --git a/waku/waku_api/rest/store/handlers.nim b/waku/waku_api/rest/store/handlers.nim index cf0e96710..79724b9d7 100644 --- a/waku/waku_api/rest/store/handlers.nim +++ b/waku/waku_api/rest/store/handlers.nim @@ -1,6 +1,7 @@ {.push raises: [].} -import std/strformat, results, chronicles, uri, json_serialization, presto/route +import + std/[strformat, sugar], results, chronicles, uri, json_serialization, presto/route import ../../../waku_core, ../../../waku_store/common, @@ -35,14 +36,10 @@ proc performStoreQuery( error msg return RestApiResponse.internalServerError(msg) - let futRes = queryFut.read() - - if futRes.isErr(): - const msg = "Error occurred in queryFut.read()" - error msg, error = futRes.error - return RestApiResponse.internalServerError(fmt("{msg} [{futRes.error}]")) - - let res = futRes.get().toHex() + let res = queryFut.read().map(val => val.toHex()).valueOr: + const msg = "Error occurred in queryFut.read()" + error msg, error = error + return RestApiResponse.internalServerError(fmt("{msg} [{error}]")) if res.statusCode == uint32(ErrorCode.TOO_MANY_REQUESTS): info "Request rate limit reached on peer ", storePeer diff --git a/waku/waku_archive/driver/builder.nim b/waku/waku_archive/driver/builder.nim index cc46afb4c..811b16999 100644 --- a/waku/waku_archive/driver/builder.nim +++ b/waku/waku_archive/driver/builder.nim @@ -32,71 +32,54 @@ proc new*( ## maxNumConn - defines the maximum number of connections to handle simultaneously (Postgres) ## onFatalErrorAction - called if, e.g., the connection with db got lost - let dbUrlValidationRes = dburl.validateDbUrl(url) - if dbUrlValidationRes.isErr(): - return err("DbUrl failure in ArchiveDriver.new: " & dbUrlValidationRes.error) + dburl.validateDbUrl(url).isOkOr: + return err("DbUrl failure in 
ArchiveDriver.new: " & error) - let engineRes = dburl.getDbEngine(url) - if engineRes.isErr(): - return err("error getting db engine in setupWakuArchiveDriver: " & engineRes.error) - - let engine = engineRes.get() + let engine = dburl.getDbEngine(url).valueOr: + return err("error getting db engine in setupWakuArchiveDriver: " & error) case engine of "sqlite": - let pathRes = dburl.getDbPath(url) - if pathRes.isErr(): - return err("error get path in setupWakuArchiveDriver: " & pathRes.error) + let path = dburl.getDbPath(url).valueOr: + return err("error get path in setupWakuArchiveDriver: " & error) - let dbRes = SqliteDatabase.new(pathRes.get()) - if dbRes.isErr(): - return err("error in setupWakuArchiveDriver: " & dbRes.error) - - let db = dbRes.get() + let db = SqliteDatabase.new(path).valueOr: + return err("error in setupWakuArchiveDriver: " & error) # SQLite vacuum - let sqliteStatsRes = db.gatherSqlitePageStats() - if sqliteStatsRes.isErr(): - return err("error while gathering sqlite stats: " & $sqliteStatsRes.error) + let (pageSize, pageCount, freelistCount) = db.gatherSqlitePageStats().valueOr: + return err("error while gathering sqlite stats: " & $error) - let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get() info "sqlite database page stats", pageSize = pageSize, pages = pageCount, freePages = freelistCount if vacuum and (pageCount > 0 and freelistCount > 0): - let vacuumRes = db.performSqliteVacuum() - if vacuumRes.isErr(): - return err("error in vacuum sqlite: " & $vacuumRes.error) + db.performSqliteVacuum().isOkOr: + return err("error in vacuum sqlite: " & $error) # Database migration if migrate: - let migrateRes = archive_driver_sqlite_migrations.migrate(db) - if migrateRes.isErr(): - return err("error in migrate sqlite: " & $migrateRes.error) + archive_driver_sqlite_migrations.migrate(db).isOkOr: + return err("error in migrate sqlite: " & $error) info "setting up sqlite waku archive driver" - let res = SqliteDriver.new(db) - if res.isErr(): - return err("failed to init sqlite archive driver: " & res.error) + let res = SqliteDriver.new(db).valueOr: + return err("failed to init sqlite archive driver: " & error) - return ok(res.get()) + return ok(res) of "postgres": when defined(postgres): - let res = PostgresDriver.new( + let driver = PostgresDriver.new( dbUrl = url, maxConnections = maxNumConn, onFatalErrorAction = onFatalErrorAction, - ) - if res.isErr(): - return err("failed to init postgres archive driver: " & res.error) - - let driver = res.get() + ).valueOr: + return err("failed to init postgres archive driver: " & error) # Database migration if migrate: - let migrateRes = await archive_postgres_driver_migrations.migrate(driver) - if migrateRes.isErr(): - return err("ArchiveDriver build failed in migration: " & $migrateRes.error) + (await archive_postgres_driver_migrations.migrate(driver)).isOkOr: + return err("ArchiveDriver build failed in migration: " & $error) ## This should be started once we make sure the 'messages' table exists ## Hence, this should be run after the migration is completed. 
diff --git a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim index d5eba9a5c..842d7cbc2 100644 --- a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim +++ b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim @@ -186,11 +186,11 @@ proc timeCursorCallbackImpl(pqResult: ptr PGresult, timeCursor: var Option[Times let catchable = catch: parseBiggestInt(rawTimestamp) - if catchable.isErr(): - error "could not parse correctly", error = catchable.error.msg + let time = catchable.valueOr: + error "could not parse correctly", error = error.msg return - timeCursor = some(catchable.get()) + timeCursor = some(time) proc hashCallbackImpl( pqResult: ptr PGresult, rows: var seq[(WakuMessageHash, PubsubTopic, WakuMessage)] @@ -214,11 +214,10 @@ proc hashCallbackImpl( let catchable = catch: parseHexStr(rawHash) - if catchable.isErr(): - error "could not parse correctly", error = catchable.error.msg + let hashHex = catchable.valueOr: + error "could not parse correctly", error = error.msg return - let hashHex = catchable.get() let msgHash = fromBytes(hashHex.toOpenArrayByte(0, 31)) rows.add((msgHash, "", WakuMessage())) @@ -953,11 +952,10 @@ method getDatabaseSize*( method getMessagesCount*( s: PostgresDriver ): Future[ArchiveDriverResult[int64]] {.async.} = - let intRes = await s.getInt("SELECT COUNT(1) FROM messages") - if intRes.isErr(): - return err("error in getMessagesCount: " & intRes.error) + let intRes = (await s.getInt("SELECT COUNT(1) FROM messages")).valueOr: + return err("error in getMessagesCount: " & error) - return ok(intRes.get()) + return ok(intRes) method getOldestMessageTimestamp*( s: PostgresDriver @@ -970,47 +968,44 @@ method getOldestMessageTimestamp*( let oldestPartitionTimeNanoSec = oldestPartition.getPartitionStartTimeInNanosec() - let intRes = await s.getInt("SELECT MIN(timestamp) FROM messages") - if intRes.isErr(): + let intRes = (await s.getInt("SELECT MIN(timestamp) FROM messages")).valueOr: ## Just return the oldest partition time considering the partitions set return ok(Timestamp(oldestPartitionTimeNanoSec)) - return ok(Timestamp(min(intRes.get(), oldestPartitionTimeNanoSec))) + return ok(Timestamp(min(intRes, oldestPartitionTimeNanoSec))) method getNewestMessageTimestamp*( s: PostgresDriver ): Future[ArchiveDriverResult[Timestamp]] {.async.} = - let intRes = await s.getInt("SELECT MAX(timestamp) FROM messages") + let intRes = (await s.getInt("SELECT MAX(timestamp) FROM messages")).valueOr: + return err("error in getNewestMessageTimestamp: " & error) - if intRes.isErr(): - return err("error in getNewestMessageTimestamp: " & intRes.error) - - return ok(Timestamp(intRes.get())) + return ok(Timestamp(intRes)) method deleteOldestMessagesNotWithinLimit*( s: PostgresDriver, limit: int ): Future[ArchiveDriverResult[void]] {.async.} = - var execRes = await s.writeConnPool.pgQuery( - """DELETE FROM messages WHERE messageHash NOT IN + ( + await s.writeConnPool.pgQuery( + """DELETE FROM messages WHERE messageHash NOT IN ( SELECT messageHash FROM messages ORDER BY timestamp DESC LIMIT ? );""", - @[$limit], - ) - if execRes.isErr(): - return err("error in deleteOldestMessagesNotWithinLimit: " & execRes.error) - - execRes = await s.writeConnPool.pgQuery( - """DELETE FROM messages_lookup WHERE messageHash NOT IN - ( - SELECT messageHash FROM messages ORDER BY timestamp DESC LIMIT ? 
- );""", - @[$limit], - ) - if execRes.isErr(): - return err( - "error in deleteOldestMessagesNotWithinLimit messages_lookup: " & execRes.error + @[$limit], ) + ).isOkOr: + return err("error in deleteOldestMessagesNotWithinLimit: " & error) + + ( + await s.writeConnPool.pgQuery( + """DELETE FROM messages_lookup WHERE messageHash NOT IN + ( + SELECT messageHash FROM messages ORDER BY timestamp DESC LIMIT ? + );""", + @[$limit], + ) + ).isOkOr: + return err("error in deleteOldestMessagesNotWithinLimit messages_lookup: " & error) return ok() diff --git a/waku/waku_archive/driver/queue_driver/queue_driver.nim b/waku/waku_archive/driver/queue_driver/queue_driver.nim index 9dbf3c112..2ffc9ab00 100644 --- a/waku/waku_archive/driver/queue_driver/queue_driver.nim +++ b/waku/waku_archive/driver/queue_driver/queue_driver.nim @@ -97,8 +97,7 @@ proc getPage( # Find starting entry if cursor.isSome(): - let cursorEntry = w.walkToCursor(cursor.get(), forward) - if cursorEntry.isErr(): + w.walkToCursor(cursor.get(), forward).isOkOr: return err(QueueDriverErrorKind.INVALID_CURSOR) # Advance walker once more @@ -177,7 +176,7 @@ proc first*(driver: QueueDriver): ArchiveDriverResult[Index] = res = w.first() w.destroy() - if res.isErr(): + res.isOkOr: return err("Not found") return ok(res.value.key) @@ -188,7 +187,7 @@ proc last*(driver: QueueDriver): ArchiveDriverResult[Index] = res = w.last() w.destroy() - if res.isErr(): + res.isOkOr: return err("Not found") return ok(res.value.key) @@ -285,14 +284,11 @@ method getMessages*( let catchable = catch: driver.getPage(maxPageSize, ascendingOrder, index, matchesQuery) - let pageRes: QueueDriverGetPageResult = - if catchable.isErr(): - return err(catchable.error.msg) - else: - catchable.get() + let pageRes: QueueDriverGetPageResult = catchable.valueOr: + return err(catchable.error.msg) - if pageRes.isErr(): - return err($pageRes.error) + pageRes.isOkOr: + return err($error) return ok(pageRes.value) diff --git a/waku/waku_archive/driver/sqlite_driver/migrations.nim b/waku/waku_archive/driver/sqlite_driver/migrations.nim index 33de5fec3..b077de19a 100644 --- a/waku/waku_archive/driver/sqlite_driver/migrations.nim +++ b/waku/waku_archive/driver/sqlite_driver/migrations.nim @@ -36,9 +36,8 @@ proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] = let query = """SELECT l.name FROM pragma_table_info("Message") as l WHERE l.pk != 0;""" - let res = db.query(query, queryRowCallback) - if res.isErr(): - return err("failed to determine the current SchemaVersion: " & $res.error) + db.query(query, queryRowCallback).isOkOr: + return err("failed to determine the current SchemaVersion: " & $error) if pkColumns == @["pubsubTopic", "id", "storedAt"]: return ok(true) @@ -65,10 +64,8 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult ## Force the correct schema version ?db.setUserVersion(7) - let migrationRes = - migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath) - if migrationRes.isErr(): - return err("failed to execute migration scripts: " & migrationRes.error) + migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath).isOkOr: + return err("failed to execute migration scripts: " & error) info "finished message store's sqlite database migration" return ok() diff --git a/waku/waku_archive/driver/sqlite_driver/queries.nim b/waku/waku_archive/driver/sqlite_driver/queries.nim index 6fafc06eb..e7e31dbe0 100644 --- a/waku/waku_archive/driver/sqlite_driver/queries.nim +++ 
b/waku/waku_archive/driver/sqlite_driver/queries.nim @@ -129,8 +129,7 @@ proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] = count = sqlite3_column_int64(s, 0) let query = countMessagesQuery(DbTable) - let res = db.query(query, queryRowCallback) - if res.isErr(): + db.query(query, queryRowCallback).isOkOr: return err("failed to count number of messages in the database") return ok(count) @@ -146,8 +145,7 @@ proc selectOldestTimestamp*(db: SqliteDatabase): DatabaseResult[Timestamp] {.inl timestamp = queryRowTimestampCallback(s, 0) let query = selectOldestMessageTimestampQuery(DbTable) - let res = db.query(query, queryRowCallback) - if res.isErr(): + db.query(query, queryRowCallback).isOkOr: return err("failed to get the oldest receiver timestamp from the database") return ok(timestamp) @@ -163,8 +161,7 @@ proc selectNewestTimestamp*(db: SqliteDatabase): DatabaseResult[Timestamp] {.inl timestamp = queryRowTimestampCallback(s, 0) let query = selectNewestMessageTimestampQuery(DbTable) - let res = db.query(query, queryRowCallback) - if res.isErr(): + db.query(query, queryRowCallback).isOkOr: return err("failed to get the newest receiver timestamp from the database") return ok(timestamp) diff --git a/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim b/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim index 173dd3e81..ff7b0e7d3 100644 --- a/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim +++ b/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim @@ -20,14 +20,12 @@ proc init(db: SqliteDatabase): ArchiveDriverResult[void] = return err("db not initialized") # Create table, if doesn't exist - let resCreate = createTable(db) - if resCreate.isErr(): - return err("failed to create table: " & resCreate.error()) + createTable(db).isOkOr: + return err("failed to create table: " & error) # Create indices, if don't exist - let resRtIndex = createOldestMessageTimestampIndex(db) - if resRtIndex.isErr(): - return err("failed to create i_ts index: " & resRtIndex.error()) + createOldestMessageTimestampIndex(db).isOkOr: + return err("failed to create i_ts index: " & error) return ok() @@ -37,9 +35,7 @@ type SqliteDriver* = ref object of ArchiveDriver proc new*(T: type SqliteDriver, db: SqliteDatabase): ArchiveDriverResult[T] = # Database initialization - let resInit = init(db) - if resInit.isErr(): - return err(resInit.error()) + ?init(db) # General initialization let insertStmt = db.prepareInsertMessageStmt() diff --git a/waku/waku_archive/retention_policy/retention_policy_time.nim b/waku/waku_archive/retention_policy/retention_policy_time.nim index b0a548d2e..6d4c0815a 100644 --- a/waku/waku_archive/retention_policy/retention_policy_time.nim +++ b/waku/waku_archive/retention_policy/retention_policy_time.nim @@ -20,20 +20,18 @@ method execute*( ## Delete messages that exceed the retention time by 10% and more (batch delete for efficiency) info "beginning of executing message retention policy - time" - let omtRes = await driver.getOldestMessageTimestamp() - if omtRes.isErr(): - return err("failed to get oldest message timestamp: " & omtRes.error) + let omt = (await driver.getOldestMessageTimestamp()).valueOr: + return err("failed to get oldest message timestamp: " & error) let now = getNanosecondTime(getTime().toUnixFloat()) let retentionTimestamp = now - p.retentionTime.nanoseconds let thresholdTimestamp = retentionTimestamp - p.retentionTime.nanoseconds div 10 - if thresholdTimestamp <= omtRes.value: + if thresholdTimestamp <= omt: return ok() - let res = await 
driver.deleteMessagesOlderThanTimestamp(ts = retentionTimestamp) - if res.isErr(): - return err("failed to delete oldest messages: " & res.error) + (await driver.deleteMessagesOlderThanTimestamp(ts = retentionTimestamp)).isOkOr: + return err("failed to delete oldest messages: " & error) info "end of executing message retention policy - time" return ok() diff --git a/waku/waku_archive_legacy/driver/builder.nim b/waku/waku_archive_legacy/driver/builder.nim index d73803b81..0f19b3669 100644 --- a/waku/waku_archive_legacy/driver/builder.nim +++ b/waku/waku_archive_legacy/driver/builder.nim @@ -34,65 +34,50 @@ proc new*( ## maxNumConn - defines the maximum number of connections to handle simultaneously (Postgres) ## onFatalErrorAction - called if, e.g., the connection with db got lost - let dbUrlValidationRes = dburl.validateDbUrl(url) - if dbUrlValidationRes.isErr(): - return err("DbUrl failure in ArchiveDriver.new: " & dbUrlValidationRes.error) + dburl.validateDbUrl(url).isOkOr: + return err("DbUrl failure in ArchiveDriver.new: " & error) - let engineRes = dburl.getDbEngine(url) - if engineRes.isErr(): - return err("error getting db engine in setupWakuArchiveDriver: " & engineRes.error) - - let engine = engineRes.get() + let engine = dburl.getDbEngine(url).valueOr: + return err("error getting db engine in setupWakuArchiveDriver: " & error) case engine of "sqlite": - let pathRes = dburl.getDbPath(url) - if pathRes.isErr(): - return err("error get path in setupWakuArchiveDriver: " & pathRes.error) + let path = dburl.getDbPath(url).valueOr: + return err("error getting path in setupWakuArchiveDriver: " & error) - let dbRes = SqliteDatabase.new(pathRes.get()) - if dbRes.isErr(): - return err("error in setupWakuArchiveDriver: " & dbRes.error) - - let db = dbRes.get() + let db = SqliteDatabase.new(path).valueOr: + return err("error in setupWakuArchiveDriver: " & error) # SQLite vacuum - let sqliteStatsRes = db.gatherSqlitePageStats() - if sqliteStatsRes.isErr(): - return err("error while gathering sqlite stats: " & $sqliteStatsRes.error) + let (pageSize, pageCount, freelistCount) = db.gatherSqlitePageStats().valueOr: + return err("error while gathering sqlite stats: " & $error) - let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get() info "sqlite database page stats", pageSize = pageSize, pages = pageCount, freePages = freelistCount if vacuum and (pageCount > 0 and freelistCount > 0): - let vacuumRes = db.performSqliteVacuum() - if vacuumRes.isErr(): - return err("error in vacuum sqlite: " & $vacuumRes.error) + db.performSqliteVacuum().isOkOr: + return err("error in vacuum sqlite: " & $error) # Database migration if migrate: - let migrateRes = archive_driver_sqlite_migrations.migrate(db) - if migrateRes.isErr(): - return err("error in migrate sqlite: " & $migrateRes.error) + archive_driver_sqlite_migrations.migrate(db).isOkOr: + return err("error in migrate sqlite: " & $error) info "setting up sqlite waku archive driver" - let res = SqliteDriver.new(db) - if res.isErr(): - return err("failed to init sqlite archive driver: " & res.error) + let res = SqliteDriver.new(db).valueOr: + return err("failed to init sqlite archive driver: " & error) - return ok(res.get()) + return ok(res) of "postgres": when defined(postgres): - let res = PostgresDriver.new( + let driver = PostgresDriver.new( dbUrl = url, maxConnections = maxNumConn, onFatalErrorAction = onFatalErrorAction, - ) - if res.isErr(): - return err("failed to init postgres archive driver: " & res.error) + ).valueOr: + return err("failed to init 
postgres archive driver: " & error) - let driver = res.get() return ok(driver) else: return err( diff --git a/waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim b/waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim index 56d388b6d..1a39c1267 100644 --- a/waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim +++ b/waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim @@ -798,11 +798,10 @@ method getDatabaseSize*( method getMessagesCount*( s: PostgresDriver ): Future[ArchiveDriverResult[int64]] {.async.} = - let intRes = await s.getInt("SELECT COUNT(1) FROM messages") - if intRes.isErr(): - return err("error in getMessagesCount: " & intRes.error) + let intRes = (await s.getInt("SELECT COUNT(1) FROM messages")).valueOr: + return err("error in getMessagesCount: " & error) - return ok(intRes.get()) + return ok(intRes) method getOldestMessageTimestamp*( s: PostgresDriver @@ -812,11 +811,10 @@ method getOldestMessageTimestamp*( method getNewestMessageTimestamp*( s: PostgresDriver ): Future[ArchiveDriverResult[Timestamp]] {.async.} = - let intRes = await s.getInt("SELECT MAX(timestamp) FROM messages") - if intRes.isErr(): - return err("error in getNewestMessageTimestamp: " & intRes.error) + let intRes = (await s.getInt("SELECT MAX(timestamp) FROM messages")).valueOr: + return err("error in getNewestMessageTimestamp: " & error) - return ok(Timestamp(intRes.get())) + return ok(Timestamp(intRes)) method deleteOldestMessagesNotWithinLimit*( s: PostgresDriver, limit: int diff --git a/waku/waku_archive_legacy/driver/queue_driver/queue_driver.nim b/waku/waku_archive_legacy/driver/queue_driver/queue_driver.nim index 942a720df..530a84034 100644 --- a/waku/waku_archive_legacy/driver/queue_driver/queue_driver.nim +++ b/waku/waku_archive_legacy/driver/queue_driver/queue_driver.nim @@ -100,8 +100,7 @@ proc getPage( # Find starting entry if cursor.isSome(): - let cursorEntry = w.walkToCursor(cursor.get(), forward) - if cursorEntry.isErr(): + w.walkToCursor(cursor.get(), forward).isOkOr: return err(QueueDriverErrorKind.INVALID_CURSOR) # Advance walker once more @@ -182,7 +181,7 @@ proc first*(driver: QueueDriver): ArchiveDriverResult[Index] = res = w.first() w.destroy() - if res.isErr(): + res.isOkOr: return err("Not found") return ok(res.value.key) @@ -193,7 +192,7 @@ proc last*(driver: QueueDriver): ArchiveDriverResult[Index] = res = w.last() w.destroy() - if res.isErr(): + res.isOkOr: return err("Not found") return ok(res.value.key) @@ -297,8 +296,8 @@ method getMessages*( except CatchableError, Exception: return err(getCurrentExceptionMsg()) - if pageRes.isErr(): - return err($pageRes.error) + pageRes.isOkOr: + return err($error) return ok(pageRes.value) diff --git a/waku/waku_archive_legacy/driver/sqlite_driver/migrations.nim b/waku/waku_archive_legacy/driver/sqlite_driver/migrations.nim index 5fccf8f3d..3d8905e7e 100644 --- a/waku/waku_archive_legacy/driver/sqlite_driver/migrations.nim +++ b/waku/waku_archive_legacy/driver/sqlite_driver/migrations.nim @@ -36,9 +36,8 @@ proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] = let query = """SELECT l.name FROM pragma_table_info("Message") as l WHERE l.pk != 0;""" - let res = db.query(query, queryRowCallback) - if res.isErr(): - return err("failed to determine the current SchemaVersion: " & $res.error) + db.query(query, queryRowCallback).isOkOr: + return err("failed to determine the current SchemaVersion: " & $error) if pkColumns == @["pubsubTopic", "id", "storedAt"]: return ok(true) @@ 
-65,10 +64,8 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult ## Force the correct schema version ?db.setUserVersion(7) - let migrationRes = - migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath) - if migrationRes.isErr(): - return err("failed to execute migration scripts: " & migrationRes.error) + migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath).isOkOr: + return err("failed to execute migration scripts: " & error) info "finished message store's sqlite database migration" return ok() diff --git a/waku/waku_archive_legacy/driver/sqlite_driver/queries.nim b/waku/waku_archive_legacy/driver/sqlite_driver/queries.nim index 47f1d86ae..0cb2bf64d 100644 --- a/waku/waku_archive_legacy/driver/sqlite_driver/queries.nim +++ b/waku/waku_archive_legacy/driver/sqlite_driver/queries.nim @@ -166,8 +166,7 @@ proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] = count = sqlite3_column_int64(s, 0) let query = countMessagesQuery(DbTable) - let res = db.query(query, queryRowCallback) - if res.isErr(): + db.query(query, queryRowCallback).isOkOr: return err("failed to count number of messages in the database") return ok(count) @@ -185,8 +184,7 @@ proc selectOldestReceiverTimestamp*( timestamp = queryRowReceiverTimestampCallback(s, 0) let query = selectOldestMessageTimestampQuery(DbTable) - let res = db.query(query, queryRowCallback) - if res.isErr(): + db.query(query, queryRowCallback).isOkOr: return err("failed to get the oldest receiver timestamp from the database") return ok(timestamp) @@ -204,8 +202,7 @@ proc selectNewestReceiverTimestamp*( timestamp = queryRowReceiverTimestampCallback(s, 0) let query = selectNewestMessageTimestampQuery(DbTable) - let res = db.query(query, queryRowCallback) - if res.isErr(): + db.query(query, queryRowCallback).isOkOr: return err("failed to get the newest receiver timestamp from the database") return ok(timestamp) @@ -280,9 +277,7 @@ proc selectAllMessages*( rows.add((pubsubTopic, wakuMessage, digest, storedAt, hash)) let query = selectAllMessagesQuery(DbTable) - let res = db.query(query, queryRowCallback) - if res.isErr(): - return err(res.error()) + discard ?db.query(query, queryRowCallback) return ok(rows) @@ -498,7 +493,7 @@ proc execSelectMessageByHash( except Exception, CatchableError: # release implicit transaction discard sqlite3_reset(s) # same return information as step - discard sqlite3_clear_bindings(s) # no errors possible + discard sqlite3_clear_bindings(s) # no errors possible proc selectMessageByHashQuery(): SqlQueryStr = var query: string diff --git a/waku/waku_archive_legacy/driver/sqlite_driver/sqlite_driver.nim b/waku/waku_archive_legacy/driver/sqlite_driver/sqlite_driver.nim index 5a6c12b05..63e7c7eac 100644 --- a/waku/waku_archive_legacy/driver/sqlite_driver/sqlite_driver.nim +++ b/waku/waku_archive_legacy/driver/sqlite_driver/sqlite_driver.nim @@ -24,18 +24,15 @@ proc init(db: SqliteDatabase): ArchiveDriverResult[void] = return err("db not initialized") # Create table, if doesn't exist - let resCreate = createTable(db) - if resCreate.isErr(): - return err("failed to create table: " & resCreate.error()) + createTable(db).isOkOr: + return err("failed to create table: " & error) # Create indices, if don't exist - let resRtIndex = createOldestMessageTimestampIndex(db) - if resRtIndex.isErr(): - return err("failed to create i_rt index: " & resRtIndex.error()) + createOldestMessageTimestampIndex(db).isOkOr: + return err("failed to create i_rt index: " & error) - 
let resMsgIndex = createHistoryQueryIndex(db) - if resMsgIndex.isErr(): - return err("failed to create i_query index: " & resMsgIndex.error()) + createHistoryQueryIndex(db).isOkOr: + return err("failed to create i_query index: " & error) return ok() @@ -45,9 +42,7 @@ type SqliteDriver* = ref object of ArchiveDriver proc new*(T: type SqliteDriver, db: SqliteDatabase): ArchiveDriverResult[T] = # Database initialization - let resInit = init(db) - if resInit.isErr(): - return err(resInit.error()) + ?init(db) # General initialization let insertStmt = db.prepareInsertMessageStmt() diff --git a/waku/waku_core/peers.nim b/waku/waku_core/peers.nim index 883f266bd..5591699c6 100644 --- a/waku/waku_core/peers.nim +++ b/waku/waku_core/peers.nim @@ -249,11 +249,10 @@ proc parseUrlPeerAddr*( return ok(none(RemotePeerInfo)) let parsedAddr = decodeUrl(peerAddr.get()) - let parsedPeerInfo = parsePeerInfo(parsedAddr) - if parsedPeerInfo.isErr(): - return err("Failed parsing remote peer info [" & parsedPeerInfo.error & "]") + let parsedPeerInfo = parsePeerInfo(parsedAddr).valueOr: + return err("Failed parsing remote peer info: " & error) - return ok(some(parsedPeerInfo.value)) + return ok(some(parsedPeerInfo)) proc toRemotePeerInfo*(enrRec: enr.Record): Result[RemotePeerInfo, cstring] = ## Converts an ENR to dialable RemotePeerInfo @@ -339,11 +338,10 @@ proc hasProtocol*(ma: MultiAddress, proto: string): bool = ## Returns ``true`` if ``ma`` contains protocol ``proto``. let proto = MultiCodec.codec(proto) - let protos = ma.protocols() - if protos.isErr(): + let protos = ma.protocols().valueOr: return false - return protos.get().anyIt(it == proto) + return protos.anyIt(it == proto) func hasUdpPort*(peer: RemotePeerInfo): bool = if peer.enr.isNone(): diff --git a/waku/waku_core/topics/content_topic.nim b/waku/waku_core/topics/content_topic.nim index 5984a760b..3eeb35771 100644 --- a/waku/waku_core/topics/content_topic.nim +++ b/waku/waku_core/topics/content_topic.nim @@ -127,11 +127,10 @@ proc parse*( ): ParsingResult[seq[NsContentTopic]] = var res: seq[NsContentTopic] = @[] for contentTopic in topics: - let parseRes = NsContentTopic.parse(contentTopic) - if parseRes.isErr(): - let error: ParsingError = parseRes.error - return ParsingResult[seq[NsContentTopic]].err(error) - res.add(parseRes.value) + let parseRes = NsContentTopic.parse(contentTopic).valueOr: + let pError: ParsingError = error + return ParsingResult[seq[NsContentTopic]].err(pError) + res.add(parseRes) return ParsingResult[seq[NsContentTopic]].ok(res) # Content topic compatibility diff --git a/waku/waku_core/topics/sharding.nim b/waku/waku_core/topics/sharding.nim index 006850acf..1cb5b37b3 100644 --- a/waku/waku_core/topics/sharding.nim +++ b/waku/waku_core/topics/sharding.nim @@ -59,12 +59,8 @@ proc getShardsFromContentTopics*( else: @[contentTopics] - let parseRes = NsContentTopic.parse(topics) - let nsContentTopics = - if parseRes.isErr(): - return err("Cannot parse content topic: " & $parseRes.error) - else: - parseRes.get() + let nsContentTopics = NsContentTopic.parse(topics).valueOr: + return err("Cannot parse content topic: " & $error) var topicMap = initTable[RelayShard, seq[NsContentTopic]]() for content in nsContentTopics: diff --git a/waku/waku_enr/capabilities.nim b/waku/waku_enr/capabilities.nim index b4e2bf37a..26899fbb4 100644 --- a/waku/waku_enr/capabilities.nim +++ b/waku/waku_enr/capabilities.nim @@ -94,11 +94,10 @@ func waku2*(record: TypedRecord): Option[CapabilitiesBitfield] = some(CapabilitiesBitfield(field.get()[0])) proc 
supportsCapability*(r: Record, cap: Capabilities): bool = - let recordRes = r.toTyped() - if recordRes.isErr(): + let recordRes = r.toTyped().valueOr: return false - let bitfieldOpt = recordRes.value.waku2 + let bitfieldOpt = recordRes.waku2 if bitfieldOpt.isNone(): return false @@ -106,11 +105,10 @@ proc supportsCapability*(r: Record, cap: Capabilities): bool = bitfield.supportsCapability(cap) proc getCapabilities*(r: Record): seq[Capabilities] = - let recordRes = r.toTyped() - if recordRes.isErr(): + let recordRes = r.toTyped().valueOr: return @[] - let bitfieldOpt = recordRes.value.waku2 + let bitfieldOpt = recordRes.waku2 if bitfieldOpt.isNone(): return @[] diff --git a/waku/waku_enr/multiaddr.nim b/waku/waku_enr/multiaddr.nim index 83e3d1992..c343fff51 100644 --- a/waku/waku_enr/multiaddr.nim +++ b/waku/waku_enr/multiaddr.nim @@ -88,8 +88,7 @@ func multiaddrs*(record: TypedRecord): Option[seq[MultiAddress]] = if field.isNone(): return none(seq[MultiAddress]) - let decodeRes = decodeMultiaddrs(field.get()) - if decodeRes.isErr(): + let decodeRes = decodeMultiaddrs(field.get()).valueOr: return none(seq[MultiAddress]) - some(decodeRes.value) + some(decodeRes) diff --git a/waku/waku_enr/sharding.nim b/waku/waku_enr/sharding.nim index d54464f94..392900cdb 100644 --- a/waku/waku_enr/sharding.nim +++ b/waku/waku_enr/sharding.nim @@ -64,8 +64,8 @@ func topicsToRelayShards*(topics: seq[string]): Result[Option[RelayShards], stri let parsedTopicsRes = topics.mapIt(RelayShard.parse(it)) for res in parsedTopicsRes: - if res.isErr(): - return err("failed to parse topic: " & $res.error) + res.isOkOr: + return err("failed to parse topic: " & $error) if parsedTopicsRes.anyIt(it.get().clusterId != parsedTopicsRes[0].get().clusterId): return err("use shards with the same cluster Id.") @@ -84,11 +84,10 @@ func contains*(rs: RelayShards, shard: RelayShard): bool = return rs.contains(shard.clusterId, shard.shardId) func contains*(rs: RelayShards, topic: PubsubTopic): bool = - let parseRes = RelayShard.parse(topic) - if parseRes.isErr(): + let parseRes = RelayShard.parse(topic).valueOr: return false - rs.contains(parseRes.value) + rs.contains(parseRes) # ENR builder extension @@ -239,12 +238,11 @@ proc containsShard*(r: Record, shard: RelayShard): bool = return containsShard(r, shard.clusterId, shard.shardId) proc containsShard*(r: Record, topic: PubsubTopic): bool = - let parseRes = RelayShard.parse(topic) - if parseRes.isErr(): - info "invalid static sharding topic", topic = topic, error = parseRes.error + let parseRes = RelayShard.parse(topic).valueOr: + info "invalid static sharding topic", topic = topic, error = error return false - containsShard(r, parseRes.value) + containsShard(r, parseRes) proc isClusterMismatched*(record: Record, clusterId: uint16): bool = ## Check the ENR sharding info for matching cluster id diff --git a/waku/waku_filter_v2/client.nim b/waku/waku_filter_v2/client.nim index 1dc018150..c42bca3db 100644 --- a/waku/waku_filter_v2/client.nim +++ b/waku/waku_filter_v2/client.nim @@ -80,14 +80,11 @@ proc sendSubscribeRequest( waku_filter_errors.inc(labelValues = [errMsg]) return err(FilterSubscribeError.badResponse(errMsg)) - let respDecodeRes = FilterSubscribeResponse.decode(respBuf) - if respDecodeRes.isErr(): + let response = FilterSubscribeResponse.decode(respBuf).valueOr: trace "Failed to decode filter subscribe response", servicePeer waku_filter_errors.inc(labelValues = [decodeRpcFailure]) return err(FilterSubscribeError.badResponse(decodeRpcFailure)) - let response = 
diff --git a/waku/waku_filter_v2/client.nim b/waku/waku_filter_v2/client.nim
index 1dc018150..c42bca3db 100644
--- a/waku/waku_filter_v2/client.nim
+++ b/waku/waku_filter_v2/client.nim
@@ -80,14 +80,11 @@ proc sendSubscribeRequest(
     waku_filter_errors.inc(labelValues = [errMsg])
     return err(FilterSubscribeError.badResponse(errMsg))

-  let respDecodeRes = FilterSubscribeResponse.decode(respBuf)
-  if respDecodeRes.isErr():
+  let response = FilterSubscribeResponse.decode(respBuf).valueOr:
     trace "Failed to decode filter subscribe response", servicePeer
     waku_filter_errors.inc(labelValues = [decodeRpcFailure])
     return err(FilterSubscribeError.badResponse(decodeRpcFailure))

-  let response = respDecodeRes.get()
-
   # DOS protection rate limit check does not know about the request id
   if response.statusCode != FilterSubscribeErrorKind.TOO_MANY_REQUESTS.uint32 and
       response.requestId != filterSubscribeRequest.requestId:
diff --git a/waku/waku_filter_v2/protocol.nim b/waku/waku_filter_v2/protocol.nim
index 5e9b48496..451bf5cb2 100644
--- a/waku/waku_filter_v2/protocol.nim
+++ b/waku/waku_filter_v2/protocol.nim
@@ -157,15 +157,14 @@ proc handleSubscribeRequest*(
     requestDurationSec, labelValues = [$request.filterSubscribeType]
   )

-  if subscribeResult.isErr():
+  subscribeResult.isOkOr:
     error "subscription request error", peerId = shortLog(peerId), request = request
     return FilterSubscribeResponse(
       requestId: request.requestId,
-      statusCode: subscribeResult.error.kind.uint32,
-      statusDesc: some($subscribeResult.error),
+      statusCode: error.kind.uint32,
+      statusDesc: some($error),
     )
-  else:
-    return FilterSubscribeResponse.ok(request.requestId)
+  return FilterSubscribeResponse.ok(request.requestId)

 proc pushToPeer(
     wf: WakuFilter, peerId: PeerId, buffer: seq[byte]
@@ -309,15 +308,12 @@ proc initProtocolHandler(wf: WakuFilter) =
       amount = buf.len().int64, labelValues = [WakuFilterSubscribeCodec, "in"]
     )

-    let decodeRes = FilterSubscribeRequest.decode(buf)
-    if decodeRes.isErr():
+    let request = FilterSubscribeRequest.decode(buf).valueOr:
       error "failed to decode filter subscribe request",
-        peer_id = conn.peerId, err = decodeRes.error
+        peer_id = conn.peerId, err = error
       waku_filter_errors.inc(labelValues = [decodeRpcFailure])
       return

-    let request = decodeRes.value
     #TODO: toAPI() split here
-
     try:
       response = await wf.handleSubscribeRequest(conn.peerId, request)
     except CatchableError:
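On the response check in `sendSubscribeRequest` above: a response whose `requestId` does not match the request is normally rejected as a bad response, but the DOS-protection rate limiter answers before it parses the request, so a TOO_MANY_REQUESTS reply cannot echo the id and is exempt. The acceptance rule stated on its own, as a sketch (`isAcceptableResponse` is hypothetical; the types are the ones used in this patch):

  proc isAcceptableResponse(
      response: FilterSubscribeResponse, requestId: string
  ): bool =
    # A rate-limited reply is produced before the request is parsed,
    # so TOO_MANY_REQUESTS cannot carry a matching request id.
    response.statusCode == FilterSubscribeErrorKind.TOO_MANY_REQUESTS.uint32 or
      response.requestId == requestId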
diff --git a/waku/waku_keystore/keyfile.nim b/waku/waku_keystore/keyfile.nim
index 488e241ab..c84a45dba 100644
--- a/waku/waku_keystore/keyfile.nim
+++ b/waku/waku_keystore/keyfile.nim
@@ -1,4 +1,4 @@
-# This implementation is originally taken from nim-eth keyfile module https://github.com/status-im/nim-eth/blob/master/eth/keyfile and adapted to 
+# This implementation is originally taken from nim-eth keyfile module https://github.com/status-im/nim-eth/blob/master/eth/keyfile and adapted to
 # - create keyfiles for arbitrary-long input byte data (rather than fixed-size private keys)
 # - allow storage of multiple keyfiles (encrypted with different passwords) in same file and iteration among successful decryptions
 # - enable/disable at compilation time the keyfile id and version fields
@@ -517,26 +517,15 @@ func decryptSecret(crypto: Crypto, dkey: DKey): KfResult[seq[byte]] =
 proc decodeKeyFileJson*(j: JsonNode, password: string): KfResult[seq[byte]] =
   ## Decode secret from keyfile json object ``j`` using
   ## password string ``password``.
-  let res = decodeCrypto(j)
-  if res.isErr:
-    return err(res.error)
-  let crypto = res.get()
+  let crypto = ?decodeCrypto(j)

   case crypto.kind
   of PBKDF2:
-    let res = decodePbkdf2Params(crypto.kdfParams)
-    if res.isErr:
-      return err(res.error)
-
-    let params = res.get()
+    let params = ?decodePbkdf2Params(crypto.kdfParams)
     let dkey = ?deriveKey(password, params.salt, PBKDF2, params.prf, params.c)
     return decryptSecret(crypto, dkey)
   of SCRYPT:
-    let res = decodeScryptParams(crypto.kdfParams)
-    if res.isErr:
-      return err(res.error)
-
-    let params = res.get()
+    let params = ?decodeScryptParams(crypto.kdfParams)
    let dkey = ?deriveKey(password, params.salt, params.n, params.r, params.p)
    return decryptSecret(crypto, dkey)
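The `?` operator used throughout `decodeKeyFileJson` is the compact form of exactly the pattern being deleted; `let crypto = ?decodeCrypto(j)` expands to roughly:

  let res = decodeCrypto(j)
  if res.isErr:
    return err(res.error)
  let crypto = res.get()

which is also why it only applies when the caller's error type matches the callee's (here both sides are `KfResult`).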
diff --git a/waku/waku_keystore/keystore.nim b/waku/waku_keystore/keystore.nim
index 6cc4ef701..158f1a98e 100644
--- a/waku/waku_keystore/keystore.nim
+++ b/waku/waku_keystore/keystore.nim
@@ -50,9 +50,7 @@ proc loadAppKeystore*(
   # If no keystore exists at path we create a new empty one with passed keystore parameters
   if fileExists(path) == false:
-    let newKeystoreRes = createAppKeystore(path, appInfo, separator)
-    if newKeystoreRes.isErr():
-      return err(newKeystoreRes.error)
+    ?createAppKeystore(path, appInfo, separator)

   try:
     # We read all the file contents
@@ -175,13 +173,9 @@ proc addMembershipCredentials*(
 ): KeystoreResult[void] =
   # We load the keystore corresponding to the desired parameters
   # This call ensures that JSON has all required fields
-  let jsonKeystoreRes = loadAppKeystore(path, appInfo, separator)
-
-  if jsonKeystoreRes.isErr():
-    return err(jsonKeystoreRes.error)

   # We load the JSON node corresponding to the app keystore
-  var jsonKeystore = jsonKeystoreRes.get()
+  let jsonKeystore = ?loadAppKeystore(path, appInfo, separator)

   try:
     if jsonKeystore.hasKey("credentials"):
@@ -193,21 +187,16 @@ proc addMembershipCredentials*(
         return ok()

       let encodedMembershipCredential = membership.encode()
-      let keyfileRes = createKeyFileJson(encodedMembershipCredential, password)
-      if keyfileRes.isErr():
-        return err(
-          AppKeystoreError(kind: KeystoreCreateKeyfileError, msg: $keyfileRes.error)
-        )
-
       # We add it to the credentials field of the keystore
-      jsonKeystore["credentials"][key] = keyfileRes.get()
+      jsonKeystore["credentials"][key] = createKeyFileJson(
+        encodedMembershipCredential, password
+      ).valueOr:
+        return err(AppKeystoreError(kind: KeystoreCreateKeyfileError, msg: $error))
   except CatchableError:
     return err(AppKeystoreError(kind: KeystoreJsonError, msg: getCurrentExceptionMsg()))

   # We save to disk the (updated) keystore.
-  let saveRes = save(jsonKeystore, path, separator)
-  if saveRes.isErr():
-    return err(saveRes.error)
+  ?save(jsonKeystore, path, separator)

   return ok()

@@ -218,13 +207,9 @@ proc getMembershipCredentials*(
 ): KeystoreResult[KeystoreMembership] =
   # We load the keystore corresponding to the desired parameters
   # This call ensures that JSON has all required fields
-  let jsonKeystoreRes = loadAppKeystore(path, appInfo)
-
-  if jsonKeystoreRes.isErr():
-    return err(jsonKeystoreRes.error)

   # We load the JSON node corresponding to the app keystore
-  var jsonKeystore = jsonKeystoreRes.get()
+  let jsonKeystore = ?loadAppKeystore(path, appInfo)

   try:
     if jsonKeystore.hasKey("credentials"):
@@ -254,15 +239,10 @@ proc getMembershipCredentials*(
       )
       keystoreCredential = keystoreCredentials[key]

-    let decodedKeyfileRes = decodeKeyFileJson(keystoreCredential, password)
-    if decodedKeyfileRes.isErr():
-      return err(
-        AppKeystoreError(
-          kind: KeystoreReadKeyfileError, msg: $decodedKeyfileRes.error
-        )
-      )
+    let decodedKeyfile = decodeKeyFileJson(keystoreCredential, password).valueOr:
+      return err(AppKeystoreError(kind: KeystoreReadKeyfileError, msg: $error))

     # we parse the json decrypted keystoreCredential
-    let decodedCredentialRes = decode(decodedKeyfileRes.get())
+    let decodedCredentialRes = decode(decodedKeyfile)
     let keyfileMembershipCredential = decodedCredentialRes.get()
     return ok(keyfileMembershipCredential)
   except CatchableError:
diff --git a/waku/waku_lightpush/callbacks.nim b/waku/waku_lightpush/callbacks.nim
index 4b362e6bb..bde4e3e26 100644
--- a/waku/waku_lightpush/callbacks.nim
+++ b/waku/waku_lightpush/callbacks.nim
@@ -26,8 +26,7 @@ proc checkAndGenerateRLNProof*(
       time = getTime().toUnix()
       senderEpochTime = float64(time)
     var msgWithProof = message
-    rlnPeer.get().appendRLNProof(msgWithProof, senderEpochTime).isOkOr:
-      return err(error)
+    ?(rlnPeer.get().appendRLNProof(msgWithProof, senderEpochTime))
     return ok(msgWithProof)

 proc getNilPushHandler*(): PushMessageHandler =
@@ -49,12 +48,10 @@ proc getRelayPushHandler*(
     (await wakuRelay.validateMessage(pubSubTopic, msgWithProof)).isOkOr:
       return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, $error)

-    let publishedResult = await wakuRelay.publish(pubsubTopic, msgWithProof)
-
-    if publishedResult.isErr():
+    let publishedResult = (await wakuRelay.publish(pubsubTopic, msgWithProof)).valueOr:
       let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()
       notice "Lightpush request has not been published to any peers",
-        msg_hash = msgHash, reason = $publishedResult.error
-      return mapPubishingErrorToPushResult(publishedResult.error)
+        msg_hash = msgHash, reason = $error
+      return mapPubishingErrorToPushResult(error)

-    return lightpushSuccessResult(publishedResult.get().uint32)
+    return lightpushSuccessResult(publishedResult.uint32)
diff --git a/waku/waku_lightpush_legacy/callbacks.nim b/waku/waku_lightpush_legacy/callbacks.nim
index f5a79eadc..1fe4cf302 100644
--- a/waku/waku_lightpush_legacy/callbacks.nim
+++ b/waku/waku_lightpush_legacy/callbacks.nim
@@ -25,8 +25,7 @@ proc checkAndGenerateRLNProof*(
       time = getTime().toUnix()
       senderEpochTime = float64(time)
     var msgWithProof = message
-    rlnPeer.get().appendRLNProof(msgWithProof, senderEpochTime).isOkOr:
-      return err(error)
+    ?(rlnPeer.get().appendRLNProof(msgWithProof, senderEpochTime))
     return ok(msgWithProof)

 proc getNilPushHandler*(): PushMessageHandler =
@@ -42,19 +41,15 @@ proc getRelayPushHandler*(
       peer: PeerId, pubsubTopic: string, message: WakuMessage
   ): Future[WakuLightPushResult[void]] {.async.} =
     # append RLN proof
-    let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message)
-    if msgWithProof.isErr():
-      return err(msgWithProof.error)
+    let msgWithProof = ?checkAndGenerateRLNProof(rlnPeer, message)

-    (await wakuRelay.validateMessage(pubSubTopic, msgWithProof.value)).isOkOr:
-      return err(error)
+    ?(await wakuRelay.validateMessage(pubSubTopic, msgWithProof))

-    let publishResult = await wakuRelay.publish(pubsubTopic, msgWithProof.value)
-    if publishResult.isErr():
+    (await wakuRelay.publish(pubsubTopic, msgWithProof)).isOkOr:
       ## Agreed change expected to the lightpush protocol to better handle such cases. https://github.com/waku-org/pm/issues/93
       let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()
       notice "Lightpush request has not been published to any peers",
-        msg_hash = msgHash, reason = $publishResult.error
+        msg_hash = msgHash, reason = $error
       # for legacy lightpush we do not detail the reason towards clients. All errors during publish result in not-published-to-any-peer
       # this lets clients of the legacy protocol react as they did so far
       return err(protocol_metrics.notPublishedAnyPeer)
diff --git a/waku/waku_lightpush_legacy/client.nim b/waku/waku_lightpush_legacy/client.nim
index ee234c996..0e3c9bd6f 100644
--- a/waku/waku_lightpush_legacy/client.nim
+++ b/waku/waku_lightpush_legacy/client.nim
@@ -52,13 +52,11 @@ proc sendPushRequest(
   except LPStreamRemoteClosedError:
     return err("Exception reading: " & getCurrentExceptionMsg())

-  let decodeRespRes = PushRPC.decode(buffer)
-  if decodeRespRes.isErr():
+  let pushResponseRes = PushRPC.decode(buffer).valueOr:
     error "failed to decode response"
     waku_lightpush_errors.inc(labelValues = [decodeRpcFailure])
     return err(decodeRpcFailure)

-  let pushResponseRes = decodeRespRes.get()
   if pushResponseRes.response.isNone():
     waku_lightpush_errors.inc(labelValues = [emptyResponseBodyFailure])
     return err(emptyResponseBodyFailure)
diff --git a/waku/waku_metadata/protocol.nim b/waku/waku_metadata/protocol.nim
index c112cc5d5..623cbb6c3 100644
--- a/waku/waku_metadata/protocol.nim
+++ b/waku/waku_metadata/protocol.nim
@@ -33,8 +33,8 @@ proc respond(
   let res = catch:
     await conn.writeLP(response.encode().buffer)
-  if res.isErr():
-    return err(res.error.msg)
+  res.isOkOr:
+    return err(error.msg)

   return ok()

@@ -53,17 +53,14 @@ proc request*(
   # close no matter what
   let closeRes = catch:
     await conn.closeWithEof()
-  if closeRes.isErr():
-    return err("close failed: " & closeRes.error.msg)
+  closeRes.isOkOr:
+    return err("close failed: " & error.msg)

-  if writeRes.isErr():
-    return err("write failed: " & writeRes.error.msg)
+  writeRes.isOkOr:
+    return err("write failed: " & error.msg)

-  let buffer =
-    if readRes.isErr():
-      return err("read failed: " & readRes.error.msg)
-    else:
-      readRes.get()
+  let buffer = readRes.valueOr:
+    return err("read failed: " & error.msg)

   let response = WakuMetadataResponse.decode(buffer).valueOr:
     return err("decode failed: " & $error)
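`waku_metadata` (and the store and rendezvous modules further down) pair these combinators with nim-results' `catch`, which evaluates a raising expression and packs the outcome into a `Result[T, ref CatchableError]`, so exception-based APIs compose with `valueOr`/`isOkOr`. A self-contained sketch (`parsePortSafe` is hypothetical, not from this patch):

  import std/strutils
  import results

  proc parsePortSafe(s: string): Result[int, string] =
    # `catch` packs the raising expression into Result[int, ref CatchableError]
    let parsed = catch:
      parseInt(s) # may raise ValueError
    let value = parsed.valueOr:
      return err("parse failed: " & error.msg) # `error` is ref CatchableError here
    ok(value)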
diff --git a/waku/waku_peer_exchange/protocol.nim b/waku/waku_peer_exchange/protocol.nim
index de81d366e..cf7ebc2a7 100644
--- a/waku/waku_peer_exchange/protocol.nim
+++ b/waku/waku_peer_exchange/protocol.nim
@@ -157,15 +157,14 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
         error "Failed to respond with BAD_REQUEST:", error = $error
       return

-    let decBuf = PeerExchangeRpc.decode(buffer)
-    if decBuf.isErr():
+    let decBuf = PeerExchangeRpc.decode(buffer).valueOr:
       waku_px_errors.inc(labelValues = [decodeRpcFailure])
-      error "Failed to decode PeerExchange request", error = $decBuf.error
+      error "Failed to decode PeerExchange request", error = $error
       (
         try:
           await wpx.respondError(
-            PeerExchangeResponseStatusCode.BAD_REQUEST, some($decBuf.error), conn
+            PeerExchangeResponseStatusCode.BAD_REQUEST, some($error), conn
           )
         except CatchableError:
           error "could not send error response decode",
@@ -175,7 +174,7 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
         error "Failed to respond with BAD_REQUEST:", error = $error
       return

-    let enrs = wpx.getEnrsFromCache(decBuf.get().request.numPeers)
+    let enrs = wpx.getEnrsFromCache(decBuf.request.numPeers)
     info "peer exchange request received"
     trace "px enrs to respond", enrs = $enrs
     try:
diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim
index 9ebbc480f..cbf9123dd 100644
--- a/waku/waku_relay/protocol.nim
+++ b/waku/waku_relay/protocol.nim
@@ -399,8 +399,7 @@ proc getPeersInMesh*(
 ): Result[seq[PeerId], string] =
   ## Returns the list of peerIds in a mesh defined by the passed pubsub topic.
   ## The 'mesh' attribute is defined in the GossipSub ref object.
-  let pubSubPeers = w.getPubSubPeersInMesh(pubsubTopic).valueOr:
-    return err(error)
+  let pubSubPeers = ?w.getPubSubPeersInMesh(pubsubTopic)

   let peerIds = toSeq(pubSubPeers).mapIt(it.peerId)
   return ok(peerIds)

@@ -544,22 +543,20 @@ proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandle
   let topicHandler = proc(
       pubsubTopic: string, data: seq[byte]
   ): Future[void] {.gcsafe, raises: [].} =
-    let decMsg = WakuMessage.decode(data)
-    if decMsg.isErr():
+    let decMsg = WakuMessage.decode(data).valueOr:
       # fine if triggerSelf enabled, since validators are bypassed
       error "failed to decode WakuMessage, validator passed a wrong message",
-        pubsubTopic = pubsubTopic, error = decMsg.error
+        pubsubTopic = pubsubTopic, error = error
       let fut = newFuture[void]()
      fut.complete()
      return fut
-    else:
-      # this subscription handler is called once for every validated message
-      # that will be relayed, hence this is the place we can count net incoming traffic
-      waku_relay_network_bytes.inc(
-        data.len.int64 + pubsubTopic.len.int64, labelValues = [pubsubTopic, "net", "in"]
-      )
+    # this subscription handler is called once for every validated message
+    # that will be relayed, hence this is the place we can count net incoming traffic
+    waku_relay_network_bytes.inc(
+      data.len.int64 + pubsubTopic.len.int64, labelValues = [pubsubTopic, "net", "in"]
+    )

-    return handler(pubsubTopic, decMsg.get())
+    return handler(pubsubTopic, decMsg)

   # Add the ordered validator to the topic
   # This assumes that if `w.validatorInserted.hasKey(pubSubTopic) is true`, it contains the ordered validator.
@@ -670,8 +667,7 @@ proc getConnectedPeers*(
   ## Returns the list of peerIds of connected peers subscribed to the passed pubsub topic.
   ## The 'gossipsub' attribute is defined in the GossipSub ref object.
-  let peers = w.getConnectedPubSubPeers(pubsubTopic).valueOr:
-    return err(error)
+  let peers = ?w.getConnectedPubSubPeers(pubsubTopic)

   let peerIds = toSeq(peers).mapIt(it.peerId)
   return ok(peerIds)
diff --git a/waku/waku_rendezvous/protocol.nim b/waku/waku_rendezvous/protocol.nim
index 876082210..0eb55d350 100644
--- a/waku/waku_rendezvous/protocol.nim
+++ b/waku/waku_rendezvous/protocol.nim
@@ -55,18 +55,16 @@ proc batchAdvertise*(
   let dialCatch = catch:
     await allFinished(futs)

-  if dialCatch.isErr():
-    return err("batchAdvertise: " & dialCatch.error.msg)
-
-  futs = dialCatch.get()
+  futs = dialCatch.valueOr:
+    return err("batchAdvertise: " & error.msg)

   let conns = collect(newSeq):
     for fut in futs:
       let catchable = catch:
         fut.read()

-      if catchable.isErr():
-        warn "a rendezvous dial failed", cause = catchable.error.msg
+      catchable.isOkOr:
+        warn "a rendezvous dial failed", cause = error.msg
         continue

       let connOpt = catchable.get()
@@ -82,8 +80,8 @@ proc batchAdvertise*(
   for conn in conns:
     await conn.close()

-  if advertCatch.isErr():
-    return err("batchAdvertise: " & advertCatch.error.msg)
+  advertCatch.isOkOr:
+    return err("batchAdvertise: " & error.msg)

   return ok()

@@ -104,18 +102,16 @@ proc batchRequest*(
   let dialCatch = catch:
     await allFinished(futs)

-  if dialCatch.isErr():
-    return err("batchRequest: " & dialCatch.error.msg)
-
-  futs = dialCatch.get()
+  futs = dialCatch.valueOr:
+    return err("batchRequest: " & error.msg)

   let conns = collect(newSeq):
     for fut in futs:
       let catchable = catch:
         fut.read()

-      if catchable.isErr():
-        warn "a rendezvous dial failed", cause = catchable.error.msg
+      catchable.isOkOr:
+        warn "a rendezvous dial failed", cause = error.msg
         continue

       let connOpt = catchable.get()
@@ -131,8 +127,8 @@ proc batchRequest*(
   for conn in conns:
     await conn.close()

-  if reqCatch.isErr():
-    return err("batchRequest: " & reqCatch.error.msg)
+  reqCatch.isOkOr:
+    return err("batchRequest: " & error.msg)

   return ok(reqCatch.get())

@@ -164,8 +160,8 @@ proc advertiseAll(
   let catchable = catch:
     await allFinished(futs)

-  if catchable.isErr():
-    return err(catchable.error.msg)
+  catchable.isOkOr:
+    return err(error.msg)

   for fut in catchable.get():
     if fut.failed():
@@ -201,8 +197,8 @@ proc initialRequestAll*(
   let catchable = catch:
     await allFinished(futs)

-  if catchable.isErr():
-    return err(catchable.error.msg)
+  catchable.isOkOr:
+    return err(error.msg)

   for fut in catchable.get():
     if fut.failed():
@@ -211,7 +207,7 @@ proc initialRequestAll*(
     let res = fut.value()

     let records = res.valueOr:
-      warn "a rendezvous request failed", cause = $res.error
+      warn "a rendezvous request failed", cause = $error
       continue

     for record in records:
@@ -268,16 +264,14 @@ proc new*(
   let rvCatchable = catch:
     RendezVous.new(switch = switch, minDuration = DefaultRegistrationTTL)

-  if rvCatchable.isErr():
-    return err(rvCatchable.error.msg)
-
-  let rv = rvCatchable.get()
+  let rv = rvCatchable.valueOr:
+    return err(error.msg)

   let mountCatchable = catch:
     switch.mount(rv)

-  if mountCatchable.isErr():
-    return err(mountCatchable.error.msg)
+  mountCatchable.isOkOr:
+    return err(error.msg)

   var wrv = WakuRendezVous()
   wrv.rendezvous = rv
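The rendezvous batch procs above all follow one shape: fire every dial, `allFinished` so no single failure aborts the batch, then harvest with `fut.read()` wrapped in `catch` so failed dials are logged and skipped while the rest proceed. Reduced to its skeleton (`dialPeer` is hypothetical; chronos assumed):

  import std/[sequtils, sugar]
  import chronos, results

  proc dialPeer(address: string): Future[int] {.async.} =
    # hypothetical dial; raises on failure
    if address.len == 0:
      raise newException(ValueError, "empty address")
    return address.len

  proc dialAll(peers: seq[string]): Future[seq[int]] {.async.} =
    var futs = peers.mapIt(dialPeer(it))
    futs = await allFinished(futs) # settles every future, never raises

    let conns = collect(newSeq):
      for fut in futs:
        let catchable = catch:
          fut.read() # re-raises for a failed dial
        let conn = catchable.valueOr:
          continue # skip the failed dial, keep the rest
        conn
    return conns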
diff --git a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
index 76c00408e..db68b2289 100644
--- a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
+++ b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
@@ -111,17 +111,17 @@ proc fetchMembershipStatus*(
 ): Future[Result[bool, string]] {.async.} =
   try:
     let params = idCommitment.reversed()
-    let resultBytes = await sendEthCallWithParams(
-      ethRpc = g.ethRpc.get(),
-      functionSignature = "isInMembershipSet(uint256)",
-      params = params,
-      fromAddress = g.ethRpc.get().defaultAccount,
-      toAddress = fromHex(Address, g.ethContractAddress),
-      chainId = g.chainId,
-    )
-    if resultBytes.isErr():
-      return err("Failed to check membership: " & resultBytes.error)
-    let responseBytes = resultBytes.get()
+    let responseBytes = (
+      await sendEthCallWithParams(
+        ethRpc = g.ethRpc.get(),
+        functionSignature = "isInMembershipSet(uint256)",
+        params = params,
+        fromAddress = g.ethRpc.get().defaultAccount,
+        toAddress = fromHex(Address, g.ethContractAddress),
+        chainId = g.chainId,
+      )
+    ).valueOr:
+      return err("Failed to check membership: " & error)

     return ok(responseBytes.len == 32 and responseBytes[^1] == 1'u8)
   except CatchableError:
@@ -155,11 +155,10 @@ template retryWrapper(
   body

 proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} =
-  let rootRes = await g.fetchMerkleRoot()
-  if rootRes.isErr():
+  let rootRes = (await g.fetchMerkleRoot()).valueOr:
     return false

-  let merkleRoot = UInt256ToField(rootRes.get())
+  let merkleRoot = UInt256ToField(rootRes)

   if g.validRoots.len == 0:
     g.validRoots.addLast(merkleRoot)
@@ -193,14 +192,12 @@ proc trackRootChanges*(g: OnchainGroupManager) {.async: (raises: [CatchableError
     else:
       g.merkleProofCache = proofResult.get()

-    let nextFreeIndex = await g.fetchNextFreeIndex()
-    if nextFreeIndex.isErr():
-      error "Failed to fetch next free index", error = nextFreeIndex.error
-      raise newException(
-        CatchableError, "Failed to fetch next free index: " & nextFreeIndex.error
-      )
+    let nextFreeIndex = (await g.fetchNextFreeIndex()).valueOr:
+      error "Failed to fetch next free index", error = error
+      raise
+        newException(CatchableError, "Failed to fetch next free index: " & error)

-    let memberCount = cast[int64](nextFreeIndex.get())
+    let memberCount = cast[int64](nextFreeIndex)
     waku_rln_number_registered_memberships.set(float64(memberCount))
   except CatchableError:
     error "Fatal error in trackRootChanges", error = getCurrentExceptionMsg()
@@ -315,11 +312,9 @@ proc getRootFromProofAndIndex(
   # it's currently not used anywhere, but can be used to verify the root from the proof and index
   # Compute leaf hash from idCommitment and messageLimit
   let messageLimitField = uint64ToField(g.userMessageLimit.get())
-  let leafHashRes = poseidon(@[g.idCredentials.get().idCommitment, @messageLimitField])
-  if leafHashRes.isErr():
-    return err("Failed to compute leaf hash: " & leafHashRes.error)
+  var hash = poseidon(@[g.idCredentials.get().idCommitment, @messageLimitField]).valueOr:
+    return err("Failed to compute leaf hash: " & error)

-  var hash = leafHashRes.get()
   for i in 0 ..< bits.len:
     let sibling = elements[i * 32 .. (i + 1) * 32 - 1]
@@ -331,7 +326,6 @@ proc getRootFromProofAndIndex(
     hash = hashRes.valueOr:
       return err("Failed to compute poseidon hash: " & error)
-    hash = hashRes.get()

   return ok(hash)
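For reference, the fold that `getRootFromProofAndIndex` performs: each path-index bit decides whether the running hash is the left or right child when combined with the next sibling. A generic sketch with a placeholder `hashFn` standing in for the poseidon hash used above:

  type Node = array[32, byte]

  proc rootFromProof(
      leaf: Node, index: uint64, siblings: openArray[Node],
      hashFn: proc(a, b: Node): Node
  ): Node =
    var hash = leaf
    for depth, sibling in siblings:
      if ((index shr depth) and 1) == 0:
        hash = hashFn(hash, sibling) # running node is a left child
      else:
        hash = hashFn(sibling, hash) # running node is a right child
    hash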
diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim
index 817bb8720..6a8fea2b5 100644
--- a/waku/waku_rln_relay/rln_relay.nim
+++ b/waku/waku_rln_relay/rln_relay.nim
@@ -178,12 +178,9 @@ proc validateMessage*(
   ## `timeOption` indicates Unix epoch time (fractional part holds sub-seconds)
   ## if `timeOption` is supplied, then the current epoch is calculated based on that
-  let decodeRes = RateLimitProof.init(msg.proof)
-  if decodeRes.isErr():
+  let proof = RateLimitProof.init(msg.proof).valueOr:
     return MessageValidationResult.Invalid

-  let proof = decodeRes.get()
-
   # track message count for metrics
   waku_rln_messages_total.inc()
@@ -228,7 +225,7 @@ proc validateMessage*(
   let proofVerificationRes = rlnPeer.groupManager.verifyProof(msg.toRLNSignal(), proof)

-  if proofVerificationRes.isErr():
+  proofVerificationRes.isOkOr:
     waku_rln_errors_total.inc(labelValues = ["proof_verification"])
     warn "invalid message: proof verification failed", payloadLen = msg.payload.len
     return MessageValidationResult.Invalid
@@ -240,13 +237,12 @@ proc validateMessage*(
     return MessageValidationResult.Invalid

   # check if double messaging has happened
-  let proofMetadataRes = proof.extractMetadata()
-  if proofMetadataRes.isErr():
+  let proofMetadata = proof.extractMetadata().valueOr:
     waku_rln_errors_total.inc(labelValues = ["proof_metadata_extraction"])
     return MessageValidationResult.Invalid

   let msgEpoch = proof.epoch
-  let hasDup = rlnPeer.hasDuplicate(msgEpoch, proofMetadataRes.get())
+  let hasDup = rlnPeer.hasDuplicate(msgEpoch, proofMetadata)
   if hasDup.isErr():
     waku_rln_errors_total.inc(labelValues = ["duplicate_check"])
   elif hasDup.value == true:
@@ -266,20 +262,16 @@ proc validateMessageAndUpdateLog*(
   let isValidMessage = rlnPeer.validateMessage(msg)

-  let decodeRes = RateLimitProof.init(msg.proof)
-  if decodeRes.isErr():
+  let msgProof = RateLimitProof.init(msg.proof).valueOr:
     return MessageValidationResult.Invalid

-  let msgProof = decodeRes.get()
-  let proofMetadataRes = msgProof.extractMetadata()
-
-  if proofMetadataRes.isErr():
+  let proofMetadata = msgProof.extractMetadata().valueOr:
     return MessageValidationResult.Invalid

   # insert the message to the log (never errors) only if the
   # message is valid.
   if isValidMessage == MessageValidationResult.Valid:
-    discard rlnPeer.updateLog(msgProof.epoch, proofMetadataRes.get())
+    discard rlnPeer.updateLog(msgProof.epoch, proofMetadata)

   return isValidMessage

@@ -333,14 +325,10 @@ proc generateRlnValidator*(
       trace "rln-relay topic validator is called"
       wakuRlnRelay.clearNullifierLog()

-      let decodeRes = RateLimitProof.init(message.proof)
-
-      if decodeRes.isErr():
-        trace "generateRlnValidator reject", error = decodeRes.error
+      let msgProof = RateLimitProof.init(message.proof).valueOr:
+        trace "generateRlnValidator reject", error = error
        return pubsub.ValidationResult.Reject

-      let msgProof = decodeRes.get()
-
       # validate the message and update log
       let validationRes = wakuRlnRelay.validateMessageAndUpdateLog(message)
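`validateMessageAndUpdateLog` keeps validation and bookkeeping separate: decode the proof, validate, and only record the (epoch, nullifier) metadata when the message was valid, so invalid traffic cannot poison the double-signaling log. The shape in isolation, as a hypothetical wrapper around the procs touched by this patch:

  proc acceptAndRecord(
      rlnPeer: WakuRLNRelay, msg: WakuMessage
  ): MessageValidationResult =
    let verdict = rlnPeer.validateMessage(msg)
    if verdict == MessageValidationResult.Valid:
      let proof = RateLimitProof.init(msg.proof).valueOr:
        return MessageValidationResult.Invalid
      let meta = proof.extractMetadata().valueOr:
        return MessageValidationResult.Invalid
      discard rlnPeer.updateLog(proof.epoch, meta) # never errors
    verdict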
diff --git a/waku/waku_store/protocol.nim b/waku/waku_store/protocol.nim
index 395936625..891c6a93c 100644
--- a/waku/waku_store/protocol.nim
+++ b/waku/waku_store/protocol.nim
@@ -132,8 +132,8 @@ proc initProtocolHandler(self: WakuStore) =
     let writeRes = catch:
       await conn.writeLp(resBuf.resp)

-    if writeRes.isErr():
-      error "Connection write error", error = writeRes.error.msg
+    writeRes.isOkOr:
+      error "Connection write error", error = error.msg
       return

     if successfulQuery:
diff --git a/waku/waku_store/resume.nim b/waku/waku_store/resume.nim
index 208ba0aa6..b7864da94 100644
--- a/waku/waku_store/resume.nim
+++ b/waku/waku_store/resume.nim
@@ -92,8 +92,8 @@ proc initTransferHandler(
       let catchable = catch:
         await wakuStoreClient.query(req, peer)

-      if catchable.isErr():
-        return err("store client error: " & catchable.error.msg)
+      catchable.isOkOr:
+        return err("store client error: " & error.msg)

       let res = catchable.get()
       let response = res.valueOr:
@@ -105,8 +105,8 @@ proc initTransferHandler(
         let handleRes = catch:
           await wakuArchive.handleMessage(kv.pubsubTopic.get(), kv.message.get())

-        if handleRes.isErr():
-          error "message transfer failed", error = handleRes.error.msg
+        handleRes.isOkOr:
+          error "message transfer failed", error = error.msg
           continue

       if req.paginationCursor.isNone():
diff --git a/waku/waku_store/self_req_handler.nim b/waku/waku_store/self_req_handler.nim
index 116946da5..315961307 100644
--- a/waku/waku_store/self_req_handler.nim
+++ b/waku/waku_store/self_req_handler.nim
@@ -25,11 +25,8 @@ proc handleSelfStoreRequest*(
   let handlerResult = catch:
     await self.requestHandler(req)

-  let resResult =
-    if handlerResult.isErr():
-      return err("exception in handleSelfStoreRequest: " & handlerResult.error.msg)
-    else:
-      handlerResult.get()
+  let resResult = handlerResult.valueOr:
+    return err("exception in handleSelfStoreRequest: " & error.msg)

   let res = resResult.valueOr:
     return err("error in handleSelfStoreRequest: " & $error)
diff --git a/waku/waku_store_legacy/client.nim b/waku/waku_store_legacy/client.nim
index d3301cfa4..3965e06cf 100644
--- a/waku/waku_store_legacy/client.nim
+++ b/waku/waku_store_legacy/client.nim
@@ -58,14 +58,11 @@ proc sendHistoryQueryRPC(
   #TODO: I see a challenge here, if storeNode uses a different MaxRPCSize this read will fail.
   # Need to find a workaround for this.
   let buf = await connection.readLp(DefaultMaxRpcSize.int)
-  let respDecodeRes = HistoryRPC.decode(buf)
-  if respDecodeRes.isErr():
+  let respRpc = HistoryRPC.decode(buf).valueOr:
     waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure])
     return err(HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: decodeRpcFailure))

-  let respRpc = respDecodeRes.get()
-
   # Disabled, for now, since the default response is a possible case (no messages, pagesize = 0, error = NONE(0))
   # TODO: Rework the RPC protocol to differentiate the default value from an empty value (e.g., status = 200 (OK))
   # and rework the protobuf parsing to return Option[T] when empty values are received
@@ -112,11 +109,8 @@ when defined(waku_exp_store_resume):
     var messageList: seq[WakuMessage] = @[]

     while true:
-      let queryRes = await w.query(req, peer)
-      if queryRes.isErr():
-        return err($queryRes.error)
-
-      let response = queryRes.get()
+      let response = (await w.query(req, peer)).valueOr:
+        return err($error)

       messageList.add(response.messages)
@@ -232,15 +226,14 @@ when defined(waku_exp_store_resume):
       info "a peer is selected from peer manager"

     res = await w.queryAll(req, peerOpt.get())
-    if res.isErr():
+    res.isOkOr:
       info "failed to resume the history"
       return err("failed to resume the history")

     # Save the retrieved messages in the store
     var added: uint = 0
     for msg in res.get():
-      let putStoreRes = w.store.put(pubsubTopic, msg)
-      if putStoreRes.isErr():
+      w.store.put(pubsubTopic, msg).isOkOr:
        continue

       added.inc()
diff --git a/waku/waku_store_legacy/protocol.nim b/waku/waku_store_legacy/protocol.nim
index 058bcbe78..8916e8ac0 100644
--- a/waku/waku_store_legacy/protocol.nim
+++ b/waku/waku_store_legacy/protocol.nim
@@ -42,14 +42,11 @@ type StoreResp = tuple[resp: seq[byte], requestId: string]
 proc handleLegacyQueryRequest(
     self: WakuStore, requestor: PeerId, raw_request: seq[byte]
 ): Future[StoreResp] {.async.} =
-  let decodeRes = HistoryRPC.decode(raw_request)
-  if decodeRes.isErr():
-    error "failed to decode rpc", peerId = requestor, error = $decodeRes.error
+  let reqRpc = HistoryRPC.decode(raw_request).valueOr:
+    error "failed to decode rpc", peerId = requestor, error = $error
     waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure])
     return (newSeq[byte](), "failed to decode rpc")

-  let reqRpc = decodeRes.value
-
   if reqRpc.query.isNone():
     error "empty query rpc", peerId = requestor, requestId = reqRpc.requestId
     waku_legacy_store_errors.inc(labelValues = [emptyRpcQueryFailure])
@@ -77,9 +74,9 @@ proc handleLegacyQueryRequest(
     requestId,
   )

-  if responseRes.isErr():
+  responseRes.isOkOr:
     error "history query failed",
-      peerId = requestor, requestId = requestId, error = responseRes.error
+      peerId = requestor, requestId = requestId, error = error

   let response = responseRes.toRPC()
   return (
@@ -150,8 +147,8 @@ proc initProtocolHandler(ws: WakuStore) =
     let writeRes = catch:
       await conn.writeLp(resBuf.resp)

-    if writeRes.isErr():
-      error "Connection write error", error = writeRes.error.msg
+    writeRes.isOkOr:
+      error "Connection write error", error = error.msg
       return

     if successfulQuery:
diff --git a/waku/waku_store_legacy/rpc.nim b/waku/waku_store_legacy/rpc.nim
index bce3e60cd..44aad8d07 100644
--- a/waku/waku_store_legacy/rpc.nim
+++ b/waku/waku_store_legacy/rpc.nim
@@ -187,25 +187,20 @@ proc toAPI*(err: HistoryResponseErrorRPC): HistoryError =
     HistoryError(kind: HistoryErrorKind.UNKNOWN)

 proc toRPC*(res: HistoryResult): HistoryResponseRPC =
-  if res.isErr():
-    let error = res.error.toRPC()
+  let resp = res.valueOr:
+    return HistoryResponseRPC(error: error.toRPC())
+
+  let
+    messages = resp.messages

-    HistoryResponseRPC(error: error)
-  else:
-    let resp = res.get()
+    pagingInfo = block:
+      if resp.cursor.isNone():
+        none(PagingInfoRPC)
+      else:
+        some(PagingInfoRPC(cursor: resp.cursor.map(toRPC)))

-    let
-      messages = resp.messages
+    error = HistoryResponseErrorRPC.NONE

-      pagingInfo = block:
-        if resp.cursor.isNone():
-          none(PagingInfoRPC)
-        else:
-          some(PagingInfoRPC(cursor: resp.cursor.map(toRPC)))
-
-      error = HistoryResponseErrorRPC.NONE
-
-    HistoryResponseRPC(messages: messages, pagingInfo: pagingInfo, error: error)
+  HistoryResponseRPC(messages: messages, pagingInfo: pagingInfo, error: error)

 proc toAPI*(rpc: HistoryResponseRPC): HistoryResult =
   if rpc.error != HistoryResponseErrorRPC.NONE:
diff --git a/waku/waku_store_sync/reconciliation.nim b/waku/waku_store_sync/reconciliation.nim
index 8b196a3e9..0cc15d0df 100644
--- a/waku/waku_store_sync/reconciliation.nim
+++ b/waku/waku_store_sync/reconciliation.nim
@@ -230,10 +230,9 @@ proc processRequest(
     let writeRes = catch:
       await conn.writeLP(rawPayload)

-    if writeRes.isErr():
+    writeRes.isOkOr:
       await conn.close()
-      return
-        err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg)
+      return err("remote " & $conn.peerId & " connection write error: " & error.msg)

     trace "sync payload sent",
       local = self.peerManager.switch.peerInfo.peerId,
@@ -286,11 +285,9 @@ proc initiate(
   let writeRes = catch:
     await connection.writeLP(sendPayload)

-  if writeRes.isErr():
+  writeRes.isOkOr:
     await connection.close()
-    return err(
-      "remote " & $connection.peerId & " connection write error: " & writeRes.error.msg
-    )
+    return err("remote " & $connection.peerId & " connection write error: " & error.msg)

   trace "sync payload sent",
     local = self.peerManager.switch.peerInfo.peerId,
diff --git a/waku/waku_store_sync/transfer.nim b/waku/waku_store_sync/transfer.nim
index 5e3e376d1..6a600b4e3 100644
--- a/waku/waku_store_sync/transfer.nim
+++ b/waku/waku_store_sync/transfer.nim
@@ -58,9 +58,8 @@ proc sendMessage(
   let writeRes = catch:
     await conn.writeLP(rawPayload)

-  if writeRes.isErr():
-    return
-      err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg)
+  writeRes.isOkOr:
+    return err("remote [" & $conn.peerId & "] connection write error: " & error.msg)

   total_transfer_messages_exchanged.inc(labelValues = [Sending])