diff --git a/apps/wakubridge/wakubridge.nim b/apps/wakubridge/wakubridge.nim
index 700994a51..bf66d3fd0 100644
--- a/apps/wakubridge/wakubridge.nim
+++ b/apps/wakubridge/wakubridge.nim
@@ -339,7 +339,7 @@ when isMainModule:
 
   # Adhere to NO_COLOR initiative: https://no-color.org/
   let color = try: not parseBool(os.getEnv("NO_COLOR", "false"))
-              except: true
+              except CatchableError: true
 
   logging.setupLogLevel(conf.logLevel)
   logging.setupLogFormat(conf.logFormat, color)
diff --git a/apps/wakunode2/config.nim b/apps/wakunode2/config.nim
index fbef2a92e..69d03a3ce 100644
--- a/apps/wakunode2/config.nim
+++ b/apps/wakunode2/config.nim
@@ -451,7 +451,7 @@ proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
   try:
     let key = SkPrivateKey.init(utils.fromHex(p)).tryGet()
     crypto.PrivateKey(scheme: Secp256k1, skkey: key)
-  except:
+  except CatchableError:
     raise newException(ConfigurationError, "Invalid private key")
 
 proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] =
@@ -461,7 +461,7 @@ proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] =
 proc parseCmdArg*(T: type ValidIpAddress, p: string): T =
   try:
     ValidIpAddress.init(p)
-  except:
+  except CatchableError:
     raise newException(ConfigurationError, "Invalid IP address")
 
 proc completeCmdArg*(T: type ValidIpAddress, val: string): seq[string] =
@@ -476,7 +476,7 @@ proc defaultListenAddress*(): ValidIpAddress =
 proc parseCmdArg*(T: type Port, p: string): T =
   try:
     Port(parseInt(p))
-  except:
+  except CatchableError:
     raise newException(ConfigurationError, "Invalid Port number")
 
 proc completeCmdArg*(T: type Port, val: string): seq[string] =
@@ -485,7 +485,7 @@ proc completeCmdArg*(T: type Port, val: string): seq[string] =
 proc parseCmdArg*(T: type Option[int], p: string): T =
   try:
     some(parseInt(p))
-  except:
+  except CatchableError:
     raise newException(ConfigurationError, "Invalid number")
 
 ## Configuration validation
diff --git a/apps/wakunode2/wakunode2.nim b/apps/wakunode2/wakunode2.nim
index 2f4d0af29..f1192a70d 100644
--- a/apps/wakunode2/wakunode2.nim
+++ b/apps/wakunode2/wakunode2.nim
@@ -608,7 +608,7 @@ when isMainModule:
 
   # Adhere to NO_COLOR initiative: https://no-color.org/
   let color = try: not parseBool(os.getEnv("NO_COLOR", "false"))
-              except: true
+              except CatchableError: true
 
   logging.setupLogLevel(conf.logLevel)
   logging.setupLogFormat(conf.logFormat, color)
diff --git a/tests/v2/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/v2/waku_rln_relay/test_rln_group_manager_onchain.nim
index 3c817bde7..9c1d11ff9 100644
--- a/tests/v2/waku_rln_relay/test_rln_group_manager_onchain.nim
+++ b/tests/v2/waku_rln_relay/test_rln_group_manager_onchain.nim
@@ -133,11 +133,11 @@ proc runGanache(): Process =
         ganacheStartLog.add(cmdline)
         if cmdline.contains("Listening on 127.0.0.1:8540"):
           break
-      except:
+      except CatchableError:
        break
    debug "Ganache daemon is running and ready", pid=ganachePID, startLog=ganacheStartLog
    return runGanache
-  except:
+  except: # TODO: Fix "BareExcept" warning
    error "Ganache daemon run failed"
 
 
@@ -153,7 +153,7 @@ proc stopGanache(runGanache: Process) {.used.} =
     # ref: https://nim-lang.org/docs/osproc.html#waitForExit%2CProcess%2Cint
     # debug "ganache logs", logs=runGanache.outputstream.readAll()
     debug "Sent SIGTERM to Ganache", ganachePID=ganachePID
-  except:
+  except CatchableError:
     error "Ganache daemon termination failed: ", err = getCurrentExceptionMsg()
 
 proc setup(signer = true): Future[OnchainGroupManager] {.async.} =
diff --git a/tools/networkmonitor/networkmonitor.nim b/tools/networkmonitor/networkmonitor.nim
index f00235a13..679021fb8 100644
--- a/tools/networkmonitor/networkmonitor.nim
+++ b/tools/networkmonitor/networkmonitor.nim
@@ -151,7 +151,7 @@ proc populateInfoFromIp(allPeersRef: CustomPeersTableRef,
       await sleepAsync(1400)
       let response = await restClient.ipToLocation(allPeersRef[peer].ip)
       location = response.data
-    except:
+    except CatchableError:
       warn "could not get location", ip=allPeersRef[peer].ip
       continue
     allPeersRef[peer].country = location.country
@@ -214,7 +214,7 @@ proc getBootstrapFromDiscDns(conf: NetworkMonitorConf): Result[seq[enr.Record],
         if tenrRes.isOk() and (tenrRes.get().udp.isSome() or tenrRes.get().udp6.isSome()):
           discv5BootstrapEnrs.add(enr)
     return ok(discv5BootstrapEnrs)
-  except:
+  except CatchableError:
     error("failed discovering peers from DNS")
 
 proc initAndStartNode(conf: NetworkMonitorConf): Result[WakuNode, string] =
@@ -249,7 +249,7 @@ proc initAndStartNode(conf: NetworkMonitorConf): Result[WakuNode, string] =
       node.wakuDiscv5.protocol.open()
 
     return ok(node)
-  except:
+  except CatchableError:
     error("could not start node")
 
 proc startRestApiServer(conf: NetworkMonitorConf,
@@ -266,7 +266,7 @@ proc startRestApiServer(conf: NetworkMonitorConf,
     var sres = RestServerRef.new(router, serverAddress)
     let restServer = sres.get()
     restServer.start()
-  except:
+  except CatchableError:
     error("could not start rest api server")
 
   ok()
diff --git a/tools/networkmonitor/networkmonitor_utils.nim b/tools/networkmonitor/networkmonitor_utils.nim
index 8e8ac4c88..30e51ac09 100644
--- a/tools/networkmonitor/networkmonitor_utils.nim
+++ b/tools/networkmonitor/networkmonitor_utils.nim
@@ -2,7 +2,7 @@ when (NimMajor, NimMinor) < (1, 4):
   {.push raises: [Defect].}
 else:
   {.push raises: [].}
- 
+
 import
   std/json,
   stew/results,
@@ -44,7 +44,7 @@ proc decodeBytes*(t: typedesc[NodeLocation], value: openArray[byte],
       long: $jsonContent["lon"].getFloat(),
       isp: jsonContent["isp"].getStr()
     ))
-  except:
+  except CatchableError:
     return err("failed to get the location: " & getCurrentExceptionMsg())
 
 proc encodeString*(value: string): RestResult[string] =
diff --git a/waku/common/logging.nim b/waku/common/logging.nim
index c38a216c6..669a2ee94 100644
--- a/waku/common/logging.nim
+++ b/waku/common/logging.nim
@@ -24,7 +24,7 @@ converter toChroniclesLogLevel(level: LogLevel): chronicles.LogLevel =
   ## Map logging log levels to the corresponding nim-chronicles' log level
   try:
     parseEnum[chronicles.LogLevel]($level)
-  except:
+  except CatchableError:
     chronicles.LogLevel.NONE
 
 
@@ -71,7 +71,7 @@ proc writeAndFlush(f: File, s: LogOutputStr) =
   try:
     f.write(s)
     f.flushFile()
-  except:
+  except CatchableError:
     logLoggingFailure(cstring(s), getCurrentException())
 
 
diff --git a/waku/v2/node/rest/relay/types.nim b/waku/v2/node/rest/relay/types.nim
index 1e976f2e7..cc7e723bf 100644
--- a/waku/v2/node/rest/relay/types.nim
+++ b/waku/v2/node/rest/relay/types.nim
@@ -88,7 +88,7 @@ proc readValue*(reader: var JsonReader[RestJson], value: var RelayWakuMessage)
     # Check for reapeated keys
     if keys.containsOrIncl(fieldName):
       let err = try: fmt"Multiple `{fieldName}` fields found"
-                except: "Multiple fields with the same name found"
+                except CatchableError: "Multiple fields with the same name found"
       reader.raiseUnexpectedField(err, "RelayWakuMessage")
 
     case fieldName
diff --git a/waku/v2/protocol/waku_archive/driver/queue_driver/queue_driver.nim b/waku/v2/protocol/waku_archive/driver/queue_driver/queue_driver.nim
index 4d2247841..a756a59a2 100644
--- a/waku/v2/protocol/waku_archive/driver/queue_driver/queue_driver.nim
+++ b/waku/v2/protocol/waku_archive/driver/queue_driver/queue_driver.nim
@@ -255,7 +255,7 @@ method getMessages*(
   ): ArchiveDriverResult[seq[ArchiveRow]] =
   let cursor = cursor.map(toIndex)
 
-  let matchesQuery: QueryFilterMatcher = proc(row: IndexedWakuMessage): bool =
+  let matchesQuery: QueryFilterMatcher = func(row: IndexedWakuMessage): bool =
     if pubsubTopic.isSome() and row.pubsubTopic != pubsubTopic.get():
       return false
 
@@ -273,7 +273,7 @@ method getMessages*(
   var pageRes: QueueDriverGetPageResult
   try:
     pageRes = driver.getPage(maxPageSize, ascendingOrder, cursor, matchesQuery)
-  except:
+  except: # TODO: Fix "BareExcept" warning
     return err(getCurrentExceptionMsg())
 
   if pageRes.isErr():
diff --git a/waku/v2/protocol/waku_filter/client.nim b/waku/v2/protocol/waku_filter/client.nim
index a1fbd676b..6acda3b94 100644
--- a/waku/v2/protocol/waku_filter/client.nim
+++ b/waku/v2/protocol/waku_filter/client.nim
@@ -47,7 +47,7 @@ proc clear(m: var SubscriptionManager) =
 proc registerSubscription(m: SubscriptionManager, pubsubTopic: PubsubTopic, contentTopic: ContentTopic, handler: FilterPushHandler) =
   try:
     m.subscriptions[(pubsubTopic, contentTopic)]= handler
-  except:
+  except: # TODO: Fix "BareExcept" warning
     error "failed to register filter subscription", error=getCurrentExceptionMsg()
 
 proc removeSubscription(m: SubscriptionManager, pubsubTopic: PubsubTopic, contentTopic: ContentTopic) =
@@ -60,7 +60,7 @@ proc notifySubscriptionHandler(m: SubscriptionManager, pubsubTopic: PubsubTopic,
   try:
     let handler = m.subscriptions[(pubsubTopic, contentTopic)]
     handler(pubsubTopic, message)
-  except:
+  except: # TODO: Fix "BareExcept" warning
     discard
 
 proc getSubscriptionsCount(m: SubscriptionManager): int =
diff --git a/waku/v2/protocol/waku_keystore/keystore.nim b/waku/v2/protocol/waku_keystore/keystore.nim
index fd21e4571..5bcaa95dd 100644
--- a/waku/v2/protocol/waku_keystore/keystore.nim
+++ b/waku/v2/protocol/waku_keystore/keystore.nim
@@ -3,7 +3,7 @@ when (NimMajor, NimMinor) < (1, 4):
 else:
   {.push raises: [].}
 
-import 
+import
   options, json, strutils,
   std/[algorithm, os, sequtils, sets]
 
@@ -42,7 +42,7 @@ proc createAppKeystore*(path: string,
   finally:
     f.close()
 
-# This proc load a keystore based on the application, appIdentifier and version filters. 
+# This proc load a keystore based on the application, appIdentifier and version filters.
 # If none is found, it automatically creates an empty keystore for the passed parameters
 proc loadAppKeystore*(path: string,
                       appInfo: AppInfo,
@@ -80,11 +80,11 @@ proc loadAppKeystore*(path: string,
       # We parse the json
       data = json.parseJson(keystore)
 
-      # We check if parsed json contains the relevant keystore credentials fields and if these are set to the passed parameters 
+      # We check if parsed json contains the relevant keystore credentials fields and if these are set to the passed parameters
       # (note that "if" is lazy, so if one of the .contains() fails, the json fields contents will not be checked and no ResultDefect will be raised due to accessing unavailable fields)
       if data.hasKeys(["application", "appIdentifier", "credentials", "version"]) and
-         data["application"].getStr() == appInfo.application and 
-         data["appIdentifier"].getStr() == appInfo.appIdentifier and 
+         data["application"].getStr() == appInfo.application and
+         data["appIdentifier"].getStr() == appInfo.appIdentifier and
          data["version"].getStr() == appInfo.version:
         # We return the first json keystore that matches the passed app parameters
         # We assume a unique kesytore with such parameters is present in the file
@@ -106,7 +106,7 @@ proc loadAppKeystore*(path: string,
 
   return ok(matchingAppKeystore)
 
-# Adds a sequence of membership credential to the keystore matching the application, appIdentifier and version filters. 
+# Adds a sequence of membership credential to the keystore matching the application, appIdentifier and version filters.
 proc addMembershipCredentials*(path: string,
                                credentials: seq[MembershipCredentials],
                                password: string,
@@ -143,7 +143,7 @@ proc addMembershipCredentials*(path: string,
 
           # we parse the json decrypted keystoreCredential
           let decodedCredentialRes = decode(decodedKeyfileRes.get())
-          
+
           if decodedCredentialRes.isOk():
 
             let keyfileMembershipCredential = decodedCredentialRes.get()
@@ -166,13 +166,13 @@ proc addMembershipCredentials*(path: string,
 
             # we update the original credential field in keystoreCredentials
             keystoreCredential = updatedCredentialKeyfileRes.get()
-            
+
             found = true
 
             # We stop decrypting other credentials in the keystore
             break
 
-        # If no credential in keystore with same input identityCredential value is found, we add it 
+        # If no credential in keystore with same input identityCredential value is found, we add it
         if found == false:
 
           let encodedMembershipCredential = membershipCredential.encode()
@@ -183,13 +183,13 @@ proc addMembershipCredentials*(path: string,
 
           # We add it to the credentials field of the keystore
           jsonKeystore["credentials"].add(keyfileRes.get())
-  except:
+  except CatchableError:
     return err(KeystoreJsonError)
 
   # We save to disk the (updated) keystore.
   if save(jsonKeystore, path, separator).isErr():
     return err(KeystoreOsError)
-  
+
   return ok()
 
 # Returns the membership credentials in the keystore matching the application, appIdentifier and version filters, further filtered by the input
@@ -226,16 +226,16 @@ proc getMembershipCredentials*(path: string,
         if decodedKeyfileRes.isOk():
           # we parse the json decrypted keystoreCredential
           let decodedCredentialRes = decode(decodedKeyfileRes.get())
-          
+
           if decodedCredentialRes.isOk():
 
             let keyfileMembershipCredential = decodedCredentialRes.get()
-            
+
             let filteredCredentialOpt = filterCredential(keyfileMembershipCredential, filterIdentityCredentials, filterMembershipContracts)
-            
+
             if filteredCredentialOpt.isSome():
               outputMembershipCredentials.add(filteredCredentialOpt.get())
-  except:
+  except CatchableError:
     return err(KeystoreJsonError)
 
-  return ok(outputMembershipCredentials)
\ No newline at end of file
+  return ok(outputMembershipCredentials)
diff --git a/waku/v2/protocol/waku_keystore/utils.nim b/waku/v2/protocol/waku_keystore/utils.nim
index d06049f9e..da85298fd 100644
--- a/waku/v2/protocol/waku_keystore/utils.nim
+++ b/waku/v2/protocol/waku_keystore/utils.nim
@@ -3,7 +3,7 @@ when (NimMajor, NimMinor) < (1, 4):
 else:
   {.push raises: [].}
 
-import 
+import
   json,
   std/[options, os, sequtils],
   ./keyfile,
@@ -17,17 +17,17 @@ proc hasKeys*(data: JsonNode, keys: openArray[string]): bool =
 proc sortMembershipGroup*(a,b: MembershipGroup): int =
   return cmp(a.membershipContract.address, b.membershipContract.address)
 
-# Safely saves a Keystore's JsonNode to disk. 
-# If exists, the destination file is renamed with extension .bkp; the file is written at its destination and the .bkp file is removed if write is successful, otherwise is restored 
+# Safely saves a Keystore's JsonNode to disk.
+# If exists, the destination file is renamed with extension .bkp; the file is written at its destination and the .bkp file is removed if write is successful, otherwise is restored
 proc save*(json: JsonNode, path: string, separator: string): KeystoreResult[void] =
   # We first backup the current keystore
   if fileExists(path):
     try:
       moveFile(path, path & ".bkp")
-    except:
+    except: # TODO: Fix "BareExcept" warning
      return err(KeystoreOsError)
-  
+
   # We save the updated json
   var f: File
   if not f.open(path, fmAppend):
@@ -45,7 +45,7 @@ proc save*(json: JsonNode, path: string, separator: string): KeystoreResult[void
         f.close()
         removeFile(path)
         moveFile(path & ".bkp", path)
-      except:
+      except: # TODO: Fix "BareExcept" warning
         # Unlucky, we just fail
         return err(KeystoreOsError)
     return err(KeystoreOsError)
@@ -56,7 +56,7 @@ proc save*(json: JsonNode, path: string, separator: string): KeystoreResult[void
   if fileExists(path & ".bkp"):
     try:
       removeFile(path & ".bkp")
-    except:
+    except CatchableError:
       return err(KeystoreOsError)
 
   return ok()
@@ -65,7 +65,7 @@ proc save*(json: JsonNode, path: string, separator: string): KeystoreResult[void
 proc filterCredential*(credential: MembershipCredentials,
                        filterIdentityCredentials: seq[IdentityCredential],
                        filterMembershipContracts: seq[MembershipContract]): Option[MembershipCredentials] =
-  
+
   # We filter by identity credentials
   if filterIdentityCredentials.len() != 0:
     if (credential.identityCredential in filterIdentityCredentials) == false:
@@ -74,7 +74,7 @@ proc filterCredential*(credential: MembershipCredentials,
   # We filter by membership groups credentials
   if filterMembershipContracts.len() != 0:
     # Here we keep only groups that match a contract in the filter
-    var membershipGroupsIntersection: seq[MembershipGroup] = @[] 
+    var membershipGroupsIntersection: seq[MembershipGroup] = @[]
     # We check if we have a group in the input credential matching any contract in the filter
     for membershipGroup in credential.membershipGroups:
      if membershipGroup.membershipContract in filterMembershipContracts:
@@ -87,9 +87,9 @@ proc filterCredential*(credential: MembershipCredentials,
 
     else:
       return none(MembershipCredentials)
-  
-  # We hit this return only if 
+
+  # We hit this return only if
   # - filterIdentityCredentials.len() == 0 and filterMembershipContracts.len() == 0 (no filter)
   # - filterIdentityCredentials.len() != 0 and filterMembershipContracts.len() == 0 (filter only on identity credential)
   # Indeed, filterMembershipContracts.len() != 0 will have its exclusive return based on all values of membershipGroupsIntersection.len()
-  return some(credential)
\ No newline at end of file
+  return some(credential)
diff --git a/waku/v2/protocol/waku_noise/noise_handshake_processing.nim b/waku/v2/protocol/waku_noise/noise_handshake_processing.nim
index 1c38df279..080e96f40 100644
--- a/waku/v2/protocol/waku_noise/noise_handshake_processing.nim
+++ b/waku/v2/protocol/waku_noise/noise_handshake_processing.nim
@@ -55,7 +55,7 @@ proc getReadingWritingState(hs: HandshakeState, direction: MessageDirection): (b
 
   return (reading, writing)
 
-# Checks if a pre-message is valid according to Noise specifications 
+# Checks if a pre-message is valid according to Noise specifications
 # http://www.noiseprotocol.org/noise.html#handshake-patterns
 proc isValid(msg: seq[PreMessagePattern]): bool =
 
@@ -86,7 +86,7 @@ proc isValid(msg: seq[PreMessagePattern]): bool =
 
 proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq[NoisePublicKey] = @[]) {.raises: [Defect, NoiseMalformedHandshake, NoiseHandshakeError, NoisePublicKeyError].} =
 
-  var 
+  var
     # I make a copy of the input pre-message public keys, so that I can easily delete processed ones without using iterators/counters
     preMessagePKs = inPreMessagePKs
     # Here we store currently processed pre message public key
@@ -106,10 +106,10 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq
     let
       direction = messagePattern.direction
      tokens = messagePattern.tokens
-    
+
    # We get if the user is reading or writing the current pre-message pattern
    var (reading, writing) = getReadingWritingState(hs , direction)
-    
+
    # We process each message pattern token
    for token in tokens:
 
@@ -136,13 +136,13 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq
 
           else:
             raise newException(NoisePublicKeyError, "Noise read e, incorrect encryption flag for pre-message public key")
-        
+
         # If user is writing the "e" token
         elif writing:
-          
+
           trace "noise pre-message write e"
 
-          # When writing, the user is sending a public key, 
+          # When writing, the user is sending a public key,
           # We check that the public part corresponds to the set local key and we call MixHash(e.public_key).
           if hs.e.publicKey == intoCurve25519Key(currPK.pk):
             hs.ss.mixHash(hs.e.publicKey)
@@ -150,13 +150,13 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq
             raise newException(NoisePublicKeyError, "Noise pre-message e key doesn't correspond to locally set e key pair")
 
           # Noise specification: section 9.2
-          # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results 
-          # in a call to MixHash(e.public_key). 
+          # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results
+          # in a call to MixHash(e.public_key).
           # In a PSK handshake, all of these calls are followed by MixKey(e.public_key).
           if "psk" in hs.handshakePattern.name:
             hs.ss.mixKey(currPK.pk)
 
-          # We delete processed public key 
+          # We delete processed public key
           preMessagePKs.delete(0)
 
         of T_s:
@@ -183,10 +183,10 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq
 
         # If user is writing the "s" token
         elif writing:
-          
+
           trace "noise pre-message write s"
 
-          # If writing, it means that the user is sending a public key, 
+          # If writing, it means that the user is sending a public key,
           # We check that the public part corresponds to the set local key and we call MixHash(s.public_key).
           if hs.s.publicKey == intoCurve25519Key(currPK.pk):
             hs.ss.mixHash(hs.s.publicKey)
@@ -194,13 +194,13 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq
             raise newException(NoisePublicKeyError, "Noise pre-message s key doesn't correspond to locally set s key pair")
 
           # Noise specification: section 9.2
-          # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results 
-          # in a call to MixHash(e.public_key). 
+          # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results
+          # in a call to MixHash(e.public_key).
           # In a PSK handshake, all of these calls are followed by MixKey(e.public_key).
           if "psk" in hs.handshakePattern.name:
             hs.ss.mixKey(currPK.pk)
 
-          # We delete processed public key 
+          # We delete processed public key
           preMessagePKs.delete(0)
 
         else:
@@ -239,7 +239,7 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak
     messagePattern = hs.handshakePattern.messagePatterns[hs.msgPatternIdx]
     direction = messagePattern.direction
     tokens = messagePattern.tokens
-  
+
   # We get if the user is reading or writing the input handshake message
   var (reading, writing) = getReadingWritingState(hs , direction)
 
@@ -256,7 +256,7 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak
 
   # We process each message pattern token
   for token in tokens:
-    
+
     case token
 
       of T_e:
@@ -274,7 +274,7 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak
           # Note: by specification, ephemeral keys should always be unencrypted. But we support encrypted ones.
           if currPK.flag == 0.uint8:
 
-            # Unencrypted Public Key 
+            # Unencrypted Public Key
             # Sets re and calls MixHash(re.public_key).
             hs.re = intoCurve25519Key(currPK.pk)
             hs.ss.mixHash(hs.re)
@@ -288,10 +288,10 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak
 
           else:
             raise newException(NoisePublicKeyError, "Noise read e, incorrect encryption flag for public key")
-          
+
           # Noise specification: section 9.2
-          # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results 
-          # in a call to MixHash(e.public_key). 
+          # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results
+          # in a call to MixHash(e.public_key).
           # In a PSK handshake, all of these calls are followed by MixKey(e.public_key).
           if "psk" in hs.handshakePattern.name:
             hs.ss.mixKey(hs.re)
@@ -310,8 +310,8 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak
           hs.ss.mixHash(hs.e.publicKey)
 
           # Noise specification: section 9.2
-          # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results 
-          # in a call to MixHash(e.public_key). 
+          # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results
+          # in a call to MixHash(e.public_key).
           # In a PSK handshake, all of these calls are followed by MixKey(e.public_key).
           if "psk" in hs.handshakePattern.name:
             hs.ss.mixKey(hs.e.publicKey)
@@ -334,7 +334,7 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak
           # We check if current key is encrypted or not
          if currPK.flag == 0.uint8:
 
-            # Unencrypted Public Key 
+            # Unencrypted Public Key
            # Sets re and calls MixHash(re.public_key).
            hs.rs = intoCurve25519Key(currPK.pk)
            hs.ss.mixHash(hs.rs)
@@ -347,7 +347,7 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak
 
           else:
             raise newException(NoisePublicKeyError, "Noise read s, incorrect encryption flag for public key")
-          
+
           # We delete processed public key
           inHandshakeMessage.delete(0)
 
@@ -448,7 +448,7 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak
 #################################
 
 # Initializes a Handshake State
-proc initialize*(hsPattern: HandshakePattern, ephemeralKey: KeyPair = default(KeyPair), staticKey: KeyPair = default(KeyPair), prologue: seq[byte] = @[], psk: seq[byte] = @[], preMessagePKs: seq[NoisePublicKey] = @[], initiator: bool = false): HandshakeState 
+proc initialize*(hsPattern: HandshakePattern, ephemeralKey: KeyPair = default(KeyPair), staticKey: KeyPair = default(KeyPair), prologue: seq[byte] = @[], psk: seq[byte] = @[], preMessagePKs: seq[NoisePublicKey] = @[], initiator: bool = false): HandshakeState
   {.raises: [Defect, NoiseMalformedHandshake, NoiseHandshakeError, NoisePublicKeyError].} =
   var hs = HandshakeState.init(hsPattern)
   hs.ss.mixHash(prologue)
@@ -470,7 +470,7 @@ proc stepHandshake*(rng: var rand.HmacDrbgContext, hs: var HandshakeState, readP
 
   var hsStepResult: HandshakeStepResult
 
-  # If there are no more message patterns left for processing 
+  # If there are no more message patterns left for processing
   # we return an empty HandshakeStepResult
   if hs.msgPatternIdx > uint8(hs.handshakePattern.messagePatterns.len - 1):
     debug "stepHandshake called more times than the number of message patterns present in handshake"
@@ -482,12 +482,12 @@ proc stepHandshake*(rng: var rand.HmacDrbgContext, hs: var HandshakeState, readP
   let direction = hs.handshakePattern.messagePatterns[hs.msgPatternIdx].direction
   var (reading, writing) = getReadingWritingState(hs, direction)
 
-  # If we write an answer at this handshake step 
+  # If we write an answer at this handshake step
   if writing:
     # We initialize a payload v2 and we set proper protocol ID (if supported)
     try:
       hsStepResult.payload2.protocolId = PayloadV2ProtocolIDs[hs.handshakePattern.name]
-    except:
+    except CatchableError:
       raise newException(NoiseMalformedHandshake, "Handshake Pattern not supported")
 
     # We set the messageNametag and the handshake and transport messages
@@ -526,8 +526,8 @@ proc finalizeHandshake*(hs: var HandshakeState): HandshakeResult =
   var hsResult: HandshakeResult
 
   ## Noise specification, Section 5:
-  ## Processing the final handshake message returns two CipherState objects, 
-  ## the first for encrypting transport messages from initiator to responder, 
+  ## Processing the final handshake message returns two CipherState objects,
+  ## the first for encrypting transport messages from initiator to responder,
   ## and the second for messages in the other direction.
 
   # We call Split()
@@ -561,15 +561,15 @@ proc finalizeHandshake*(hs: var HandshakeState): HandshakeResult =
   return hsResult
 
 #################################
-# After-handshake procedures 
+# After-handshake procedures
 #################################
 
 ## Noise specification, Section 5:
-## Transport messages are then encrypted and decrypted by calling EncryptWithAd() 
-## and DecryptWithAd() on the relevant CipherState with zero-length associated data. 
-## If DecryptWithAd() signals an error due to DECRYPT() failure, then the input message is discarded. 
-## The application may choose to delete the CipherState and terminate the session on such an error, 
-## or may continue to attempt communications. If EncryptWithAd() or DecryptWithAd() signal an error 
+## Transport messages are then encrypted and decrypted by calling EncryptWithAd()
+## and DecryptWithAd() on the relevant CipherState with zero-length associated data.
+## If DecryptWithAd() signals an error due to DECRYPT() failure, then the input message is discarded.
+## The application may choose to delete the CipherState and terminate the session on such an error,
+## or may continue to attempt communications. If EncryptWithAd() or DecryptWithAd() signal an error
 ## due to nonce exhaustion, then the application must delete the CipherState and terminate the session.
 
 # Writes an encrypted message using the proper Cipher State
@@ -604,10 +604,10 @@ proc readMessage*(hsr: var HandshakeResult, readPayload2: PayloadV2, inboundMess
     let nametagIsOk = checkNametag(readPayload2.messageNametag, inboundMessageNametagBuffer).isOk
     assert(nametagIsOk)
 
-  # At this point the messageNametag matches the expected nametag. 
+  # At this point the messageNametag matches the expected nametag.
   # According to 35/WAKU2-NOISE RFC, no Handshake protocol information is sent when exchanging messages
-  if readPayload2.protocolId == 0.uint8: 
-    
+  if readPayload2.protocolId == 0.uint8:
+
     # On application level we decide to discard messages which fail decryption, without raising an error
     try:
       # Decryption is done with messageNametag as associated data
@@ -620,4 +620,4 @@ proc readMessage*(hsr: var HandshakeResult, readPayload2: PayloadV2, inboundMess
       debug "A read message failed decryption. Returning empty message as plaintext."
       message = @[]
 
-  return ok(message)
\ No newline at end of file
+  return ok(message)
diff --git a/waku/v2/protocol/waku_noise/noise_utils.nim b/waku/v2/protocol/waku_noise/noise_utils.nim
index a9b0eed8b..4b70f9aa4 100644
--- a/waku/v2/protocol/waku_noise/noise_utils.nim
+++ b/waku/v2/protocol/waku_noise/noise_utils.nim
@@ -37,11 +37,11 @@ proc randomSeqByte*(rng: var HmacDrbgContext, size: int): seq[byte] =
 
 # Pads a payload according to PKCS#7 as per RFC 5652 https://datatracker.ietf.org/doc/html/rfc5652#section-6.3
 proc pkcs7_pad*(payload: seq[byte], paddingSize: int): seq[byte] =
-  
+
   assert(paddingSize<256)
 
   let k = paddingSize - (payload.len mod paddingSize)
-  
+
   var padding: seq[byte]
 
   if k != 0:
@@ -94,13 +94,13 @@ proc fromQr*(qr: string): (string, string, string, EllipticCurveKey, MDigest[256
   let applicationVersion: string = decode(values[1])
   let shardId: string = decode(values[2])
 
-  let decodedEphemeralKey = decode(values[3]).toBytes 
-  var ephemeralKey: EllipticCurveKey 
+  let decodedEphemeralKey = decode(values[3]).toBytes
+  var ephemeralKey: EllipticCurveKey
   for i in 0.. uint8.high.int:
@@ -482,17 +482,17 @@ proc serializePayloadV2*(self: PayloadV2): Result[seq[byte], cstring] =
 
   # We get the transport message byte length
   let transportMessageLen = self.transportMessage.len
-  # The output payload as in https://rfc.vac.dev/spec/35/. We concatenate all the PayloadV2 fields as 
+  # The output payload as in https://rfc.vac.dev/spec/35/. We concatenate all the PayloadV2 fields as
   # payload = ( protocolId || serializedHandshakeMessageLen || serializedHandshakeMessage || transportMessageLen || transportMessage)
-  # We declare it as a byte sequence of length accordingly to the PayloadV2 information read 
+  # We declare it as a byte sequence of length accordingly to the PayloadV2 information read
   var payload = newSeqOfCap[byte](MessageNametagLength + #MessageNametagLength bytes for messageNametag
-                                  1 + # 1 byte for protocol ID 
+                                  1 + # 1 byte for protocol ID
                                   1 + # 1 byte for length of serializedHandshakeMessage field
                                   serializedHandshakeMessageLen + # serializedHandshakeMessageLen bytes for serializedHandshakeMessage
                                   8 + # 8 bytes for transportMessageLen
                                   transportMessageLen # transportMessageLen bytes for transportMessage
                                   )
-  
+
   # We concatenate all the data
   # The protocol ID (1 byte) and handshake message length (1 byte) can be directly casted to byte to allow direct copy to the payload byte sequence
   payload.add @(self.messageNametag)
@@ -571,7 +571,7 @@ proc deserializePayloadV2*(payload: seq[byte]): Result[PayloadV2, cstring]
   let transportMessageLen = fromBytesLE(uint64, payload[i..(i+8-1)])
   i += 8
 
-  # We read the transport message (handshakeMessage bytes) 
+  # We read the transport message (handshakeMessage bytes)
   payload2.transportMessage = payload[i..i+transportMessageLen-1]
   i += transportMessageLen
 
diff --git a/waku/v2/protocol/waku_rln_relay/group_manager/on_chain/group_manager.nim b/waku/v2/protocol/waku_rln_relay/group_manager/on_chain/group_manager.nim
index 31c372845..d291c57f3 100644
--- a/waku/v2/protocol/waku_rln_relay/group_manager/on_chain/group_manager.nim
+++ b/waku/v2/protocol/waku_rln_relay/group_manager/on_chain/group_manager.nim
@@ -175,7 +175,7 @@ proc parseEvent*(event: type MemberRegistered,
     # Parse the index
     offset += decode(data, offset, index)
     return ok(Membership(idCommitment: idComm.toIDCommitment(), index: index.toMembershipIndex()))
-  except:
+  except CatchableError:
     return err("failed to parse the data field of the MemberRegistered event")
 
 type BlockTable* = OrderedTable[BlockNumber, seq[Membership]]
@@ -298,7 +298,7 @@ proc startListeningToEvents*(g: OnchainGroupManager): Future[void] {.async.} =
   let newHeadCallback = g.getNewHeadCallback()
   try:
     discard await ethRpc.subscribeForBlockHeaders(newHeadCallback, newHeadErrCallback)
-  except:
+  except CatchableError:
     raise newException(ValueError, "failed to subscribe to block headers: " & getCurrentExceptionMsg())
 
 proc startOnchainSync*(g: OnchainGroupManager, fromBlock: BlockNumber = BlockNumber(0)): Future[void] {.async.} =
@@ -306,13 +306,13 @@ proc startOnchainSync*(g: OnchainGroupManager, fromBlock: BlockNumber = BlockNum
 
   try:
     await g.getEventsAndSeedIntoTree(fromBlock, some(fromBlock))
-  except:
+  except CatchableError:
     raise newException(ValueError, "failed to get the history/reconcile missed blocks: " & getCurrentExceptionMsg())
 
   # listen to blockheaders and contract events
   try:
     await g.startListeningToEvents()
-  except:
+  except CatchableError:
     raise newException(ValueError, "failed to start listening to events: " & getCurrentExceptionMsg())
 
 proc persistCredentials*(g: OnchainGroupManager): GroupManagerResult[void] =
@@ -358,7 +358,7 @@ method startGroupSync*(g: OnchainGroupManager): Future[void] {.async.} =
   # Get archive history
   try:
     await startOnchainSync(g)
-  except:
+  except CatchableError:
     raise newException(ValueError, "failed to start onchain sync service: " & getCurrentExceptionMsg())
   if g.ethPrivateKey.isSome() and
      g.idCredentials.isNone():
@@ -393,7 +393,7 @@ method init*(g: OnchainGroupManager): Future[void] {.async.} =
   # check if the Ethereum client is reachable
   try:
     ethRpc = await newWeb3(g.ethClientUrl)
-  except:
+  except CatchableError:
     raise newException(ValueError, "could not connect to the Ethereum client")
 
   # Set the chain id
@@ -416,7 +416,7 @@ method init*(g: OnchainGroupManager): Future[void] {.async.} =
   var membershipFee: Uint256
   try:
     membershipFee = await contract.MEMBERSHIP_DEPOSIT().call()
-  except:
+  except CatchableError:
     raise newException(ValueError, "could not get the membership deposit")
 
 
@@ -445,7 +445,7 @@ method init*(g: OnchainGroupManager): Future[void] {.async.} =
     info "reconnecting with the Ethereum client, and restarting group sync", fromBlock = fromBlock
     try:
       asyncSpawn g.startOnchainSync(fromBlock)
-    except:
+    except CatchableError:
       error "failed to restart group sync", error = getCurrentExceptionMsg()
 
   g.initialized = true
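
The single pattern this diff applies throughout the tree is replacing bare "except:" handlers with "except CatchableError:", so that only recoverable errors are caught, Defects keep propagating, and Nim's BareExcept warning is avoided. The following is a minimal, self-contained Nim sketch of that before/after shape; parsePortArg is a hypothetical helper written only for illustration and is not taken from this patch.

import std/strutils

proc parsePortArgOld(p: string): int =
  ## Hypothetical example of the shape being removed.
  try:
    parseInt(p)
  except:  # bare except: also traps Defects and triggers the BareExcept warning
    raise newException(ValueError, "Invalid Port number: " & p)

proc parsePortArg(p: string): int =
  ## Hypothetical example of the shape being introduced.
  try:
    parseInt(p)
  except CatchableError:  # only recoverable errors (e.g. ValueError from parseInt) are handled;
                          # Defects still propagate and terminate the program as intended
    raise newException(ValueError, "Invalid Port number: " & p)

when isMainModule:
  echo parsePortArg("8540")  # prints 8540

The few handlers left as bare "except: # TODO: Fix "BareExcept" warning" in this diff mark call sites where narrowing the caught type needs a closer look before it can be applied.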